| prompt (string, lengths 19–1.03M) | completion (string, lengths 4–2.12k) | api (string, lengths 8–90) |
|---|---|---|
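The table above describes a dataset of code prompts, each paired with a single pandas API call as its completion. A minimal sketch for loading and inspecting such a table, assuming the rows have been exported to a CSV file (the filename below is hypothetical):

```python
import pandas as pd

# Load the exported table and inspect one (prompt, completion, api) row.
# "pandas_api_completions.csv" is an assumed export name, not part of the dataset.
df = pd.read_csv("pandas_api_completions.csv")
print(df.shape)
print(df.loc[0, ["api", "completion"]])
print(df.loc[0, "prompt"][:200])  # first 200 characters of the prompt
```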
import datetime
import os
from concurrent.futures import ProcessPoolExecutor
from math import ceil
import pandas as pd
# In[] Load the source data
def get_source_data():
# Path to the source data
DataPath = 'data/'
# Read in the source CSV files
off_train = pd.read_csv(os.path.join(DataPath, 'ccf_offline_stage1_train.csv'),
parse_dates=['Date_received', 'Date'])
off_train.columns = ['User_id', 'Merchant_id', 'Coupon_id', 'Discount_rate', 'Distance', 'Date_received', 'Date']
on_train = pd.read_csv(os.path.join(DataPath, 'ccf_online_stage1_train.csv'), parse_dates=['Date_received', 'Date'])
on_train.columns = ['User_id', 'Merchant_id', 'Action', 'Coupon_id', 'Discount_rate', 'Date_received', 'Date']
off_test = pd.read_csv(os.path.join(DataPath, 'ccf_offline_stage1_test_revised.csv'), parse_dates=['Date_received'])
off_test.columns = ['User_id', 'Merchant_id', 'Coupon_id', 'Discount_rate', 'Distance', 'Date_received']
print(off_train.info())
print(off_train.head(5))
return off_train, on_train, off_test
# In[] Special handling of null/NaN values
def null_process_offline(dataset, predict=False):
dataset.Distance.fillna(11, inplace=True)
dataset.Distance = dataset.Distance.astype(int)
dataset.Coupon_id.fillna(0, inplace=True)
dataset.Coupon_id = dataset.Coupon_id.astype(int)
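# NOTE: `date_null` is a sentinel datetime assumed to be defined elsewhere in the original
# script (it is not shown in this excerpt); it stands in for missing Date / Date_received values.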
dataset.Date_received.fillna(date_null, inplace=True)
dataset[['discount_rate_x', 'discount_rate_y']] = dataset[dataset.Discount_rate.str.contains(':') == True][
'Discount_rate'].str.split(':', expand=True).astype(int)
dataset['discount_rate'] = 1 - dataset.discount_rate_y / dataset.discount_rate_x
dataset.discount_rate = dataset.discount_rate.fillna(dataset.Discount_rate).astype(float)
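# Example: a full-reduction coupon '30:20' (spend 30, get 20 off) becomes 1 - 20/30 ≈ 0.33,
# while plain rate strings such as '0.95' pass through the fillna above unchanged.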
if predict:
return dataset
else:
dataset.Date.fillna(date_null, inplace=True)
return dataset
def null_process_online(dataset):
dataset.Coupon_id.fillna(0, inplace=True)
# online.Coupon_id = online.Coupon_id.astype(int)
dataset.Date_received.fillna(date_null, inplace=True)
dataset.Date.fillna(date_null, inplace=True)
return dataset
# In[] Build the cross-validation training sets
def data_process(off_train, on_train, off_test):
# train feature split
# Cross-training set 1: coupons received between 2016-04-16 and 2016-05-15
time_range = ['2016-04-16', '2016-05-15']
dataset1 = off_train[(off_train.Date_received >= time_range[0]) & (off_train.Date_received <= time_range[1])].copy()
dataset1['label'] = 0
dataset1.loc[
(dataset1.Date != date_null) & (dataset1.Date - dataset1.Date_received <= datetime.timedelta(15)), 'label'] = 1
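# label = 1 if the coupon was redeemed within 15 days of being received, otherwise 0.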
# Offline feature window for set 1: offline consumption dates 2016-01-01 to 2016-04-15, plus coupon-free receipts 2016-01-01 to 2016-03-31
time_range_date_received = ['2016-01-01', '2016-03-31']
time_range_date = ['2016-01-01', '2016-04-15']
feature1_off = off_train[(off_train.Date >= time_range_date[0]) & (off_train.Date <= time_range_date[1]) | (
(off_train.Coupon_id == 0) & (off_train.Date_received >= time_range_date_received[0]) & (
off_train.Date_received <= time_range_date_received[1]))]
# Online feature window for set 1: same date windows applied to the online data [on_train.date == 'null' replaced by on_train.coupon_id == 0]
feature1_on = on_train[(on_train.Date >= time_range_date[0]) & (on_train.Date <= time_range_date[1]) | (
(on_train.Coupon_id == 0) & (on_train.Date_received >= time_range_date_received[0]) & (
on_train.Date_received <= time_range_date_received[1]))]
# Cross-training set 2: coupons received between 2016-05-16 and 2016-06-15
time_range = ['2016-05-16', '2016-06-15']
dataset2 = off_train[(off_train.Date_received >= time_range[0]) & (off_train.Date_received <= time_range[1])].copy()
dataset2['label'] = 0
dataset2.loc[
(dataset2.Date != date_null) & (dataset2.Date - dataset2.Date_received <= datetime.timedelta(15)), 'label'] = 1
# Offline feature window for set 2: offline consumption dates 2016-02-01 to 2016-05-15, plus coupon-free receipts 2016-02-01 to 2016-04-30
time_range_date_received = ['2016-02-01', '2016-04-30']
time_range_date = ['2016-02-01', '2016-05-15']
feature2_off = off_train[(off_train.Date >= time_range_date[0]) & (off_train.Date <= time_range_date[1]) | (
(off_train.Coupon_id == 0) & (off_train.Date_received >= time_range_date_received[0]) & (
off_train.Date_received <= time_range_date_received[1]))]
# Online feature window for set 2: same date windows applied to the online data
feature2_on = on_train[(on_train.Date >= time_range_date[0]) & (on_train.Date <= time_range_date[1]) | (
(on_train.Coupon_id == 0) & (on_train.Date_received >= time_range_date_received[0]) & (
on_train.Date_received <= time_range_date_received[1]))]
# Test set
dataset3 = off_test
# Offline feature window for the test set: offline dates between 2016-03-16 and 2016-06-30
time_range = ['2016-03-16', '2016-06-30']
feature3_off = off_train[((off_train.Date >= time_range[0]) & (off_train.Date <= time_range[1])) | (
(off_train.Coupon_id == 0) & (off_train.Date_received >= time_range[0]) & (
off_train.Date_received <= time_range[1]))]
# Online feature window for the test set: same window applied to the online data
feature3_on = on_train[((on_train.Date >= time_range[0]) & (on_train.Date <= time_range[1])) | (
(on_train.Coupon_id == 0) & (on_train.Date_received >= time_range[0]) & (
on_train.Date_received <= time_range[1]))]
# get train feature
ProcessDataSet1 = get_features(dataset1, feature1_off, feature1_on)
ProcessDataSet2 = get_features(dataset2, feature2_off, feature2_on)
ProcessDataSet3 = get_features(dataset3, feature3_off, feature3_on)
return ProcessDataSet1, ProcessDataSet2, ProcessDataSet3
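# Note that each feature window ends before its corresponding label window starts,
# so the engineered features cannot leak information from the labelled period.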
def get_features(dataset, feature_off, feature_on):
dataset = get_offline_features(dataset, feature_off)
return get_online_features(feature_on, dataset)
# In[] Define the feature-extraction functions
def get_offline_features(X, offline):
# X = X[:1000]
print(len(X), len(X.columns))
temp = offline[offline.Coupon_id != 0]
coupon_consume = temp[temp.Date != date_null]
coupon_no_consume = temp[temp.Date == date_null]
user_coupon_consume = coupon_consume.groupby('User_id')
X['weekday'] = X.Date_received.dt.weekday
X['day'] = X.Date_received.dt.day
# # Number of coupon redemptions at each distance
# temp = coupon_consume.groupby('Distance').size().reset_index(name='distance_0')
# X = pd.merge(X, temp, how='left', on='Distance')
#
# # Number of unredeemed coupons at each distance
# temp = coupon_no_consume.groupby('Distance').size().reset_index(name='distance_1')
# X = pd.merge(X, temp, how='left', on='Distance')
#
# # Number of coupons received at each distance
# X['distance_2'] = X.distance_0 + X.distance_1
#
# # Coupon redemption rate at each distance
# X['distance_3'] = X.distance_0 / X.distance_2
# temp = coupon_consume[coupon_consume.Distance != 11].groupby('Distance').size()
# temp['d4'] = temp.Distance.sum() / len(temp)
# X = pd.merge(X, temp, how='left', on='Distance')
'''user features'''
# Number of coupons the user has redeemed
temp = user_coupon_consume.size().reset_index(name='u2')
X = pd.merge(X, temp, how='left', on='User_id')
# X.u2.fillna(0, inplace=True)
# X.u2 = X.u2.astype(int)
# Number of coupons the user received but did not redeem
temp = coupon_no_consume.groupby('User_id').size().reset_index(name='u3')
X = pd.merge(X, temp, how='left', on='User_id')
# Ratio of redeemed to unredeemed coupons
X['u19'] = X.u2 / X.u3
# Number of coupons received
X['u1'] = X.u2.fillna(0) + X.u3.fillna(0)
# Coupon redemption rate
X['u4'] = X.u2 / X.u1
# Number of ordinary (non-coupon) purchases
temp = offline[(offline.Coupon_id == 0) & (offline.Date != date_null)]
temp1 = temp.groupby('User_id').size().reset_index(name='u5')
X = pd.merge(X, temp1, how='left', on='User_id')
# Total number of purchases
X['u25'] = X.u2 + X.u5
# Share of the user's purchases made with a coupon
X['u20'] = X.u2 / X.u25
# Average interval between ordinary purchases
temp = pd.merge(temp, temp.groupby('User_id').Date.max().reset_index(name='max'))
temp = pd.merge(temp, temp.groupby('User_id').Date.min().reset_index(name='min'))
temp = pd.merge(temp, temp.groupby('User_id').size().reset_index(name='len'))
temp['u6'] = ((temp['max'] - temp['min']).dt.days / (temp['len'] - 1))
temp = temp.drop_duplicates('User_id')
X = pd.merge(X, temp[['User_id', 'u6']], how='left', on='User_id')
# Average interval between coupon redemptions
temp = pd.merge(coupon_consume, user_coupon_consume.Date.max().reset_index(name='max'))
temp = pd.merge(temp, temp.groupby('User_id').Date.min().reset_index(name='min'))
temp = pd.merge(temp, temp.groupby('User_id').size().reset_index(name='len'))
temp['u7'] = ((temp['max'] - temp['min']).dt.days / (temp['len'] - 1))
temp = temp.drop_duplicates('User_id')
X = pd.merge(X, temp[['User_id', 'u7']], how='left', on='User_id')
# Average number of ordinary purchases within 15 days
X['u8'] = X.u6 / 15
# Average number of coupon redemptions within 15 days
X['u9'] = X.u7 / 15
# Average gap between receiving a coupon and redeeming it
temp = coupon_consume.copy()
temp['days'] = (temp.Date - temp.Date_received).dt.days
temp = (temp.groupby('User_id').days.sum() / temp.groupby('User_id').size()).reset_index(name='u10')
X = pd.merge(X, temp, how='left', on='User_id')
# Average redemption gap expressed as a fraction of the 15-day window
X['u11'] = X.u10 / 15
# Number of redemptions within 15 days of receiving the coupon
temp = coupon_consume.copy()
temp['days'] = (temp.Date - temp.Date_received).dt.days
temp = temp[temp.days <= 15]
temp = temp.groupby('User_id').size().reset_index(name='u21')
X = pd.merge(X, temp, how='left', on='User_id')
# Redemptions within 15 days divided by total coupon redemptions
X['u22'] = X.u21 / X.u2
# Redemptions within 15 days divided by coupons received but not redeemed
X['u23'] = X.u21 / X.u3
# Redemptions within 15 days divided by total coupons received
X['u24'] = X.u21 / X.u1
# Average discount rate of redeemed coupons
temp = user_coupon_consume.discount_rate.mean().reset_index(name='u45')
X = pd.merge(X, temp, how='left', on='User_id')
# Lowest discount rate among the user's redeemed coupons
temp = user_coupon_consume.discount_rate.min().reset_index(name='u27')
X = pd.merge(X, temp, how='left', on='User_id')
# Highest discount rate among the user's redeemed coupons
temp = user_coupon_consume.discount_rate.max().reset_index(name='u28')
X = pd.merge(X, temp, how='left', on='User_id')
# Number of distinct coupons the user has redeemed
temp = coupon_consume.groupby(['User_id', 'Coupon_id']).size()
temp = temp.groupby('User_id').size().reset_index(name='u32')
X = pd.merge(X, temp, how='left', on='User_id')
# Number of distinct coupons the user has received
temp = offline[offline.Date_received != date_null]
temp = temp.groupby(['User_id', 'Coupon_id']).size().reset_index(name='u47')
X = pd.merge(X, temp, how='left', on=['User_id', 'Coupon_id'])
# Distinct redeemed coupons as a share of all distinct coupons received
X['u33'] = X.u32 / X.u47
# Average number of redemptions per coupon type
X['u34'] = X.u2 / X.u47
# Average user-merchant distance for redeemed coupons
temp = offline[(offline.Coupon_id != 0) & (offline.Date != date_null) & (offline.Distance != 11)]
temp = temp.groupby('User_id').Distance
temp = pd.merge(temp.count().reset_index(name='x'), temp.sum().reset_index(name='y'), on='User_id')
temp['u35'] = temp.y / temp.x
temp = temp[['User_id', 'u35']]
X = pd.merge(X, temp, how='left', on='User_id')
# Minimum user-merchant distance among redeemed coupons
temp = coupon_consume[coupon_consume.Distance != 11]
temp = temp.groupby('User_id').Distance.min().reset_index(name='u36')
X = pd.merge(X, temp, how='left', on='User_id')
# Maximum user-merchant distance among redeemed coupons
temp = coupon_consume[coupon_consume.Distance != 11]
temp = temp.groupby('User_id').Distance.max().reset_index(name='u37')
X = pd.merge(X, temp, how='left', on='User_id')
# Coupon type
discount_types = [
'0.2', '0.5', '0.6', '0.7', '0.75', '0.8', '0.85', '0.9', '0.95', '30:20', '50:30', '10:5',
'20:10', '100:50', '200:100', '50:20', '30:10', '150:50', '100:30', '20:5', '200:50', '5:1',
'50:10', '100:20', '150:30', '30:5', '300:50', '200:30', '150:20', '10:1', '50:5', '100:10',
'200:20', '300:30', '150:10', '300:20', '500:30', '20:1', '100:5', '200:10', '30:1', '150:5',
'300:10', '200:5', '50:1', '100:1',
]
X['discount_type'] = -1
for k, v in enumerate(discount_types):
X.loc[X.Discount_rate == v, 'discount_type'] = k
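# discount_type is a categorical code: the index of Discount_rate in the list above,
# or -1 if the rate string is not one of the known types.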
# Number of times each coupon type was received
temp = offline.groupby(['User_id', 'Discount_rate']).size().reset_index(name='u41')
X = pd.merge(X, temp, how='left', on=['User_id', 'Discount_rate'])
# Number of times each coupon type was redeemed
temp = coupon_consume.groupby(['User_id', 'Discount_rate']).size().reset_index(name='u42')
X = pd.merge(X, temp, how='left', on=['User_id', 'Discount_rate'])
# Number of times each coupon type was left unredeemed
temp = coupon_no_consume.groupby(['User_id', 'Discount_rate']).size().reset_index(name='u43')
X = pd.merge(X, temp, how='left', on=['User_id', 'Discount_rate'])
# Redemption rate for each coupon type
X['u44'] = X.u42 / X.u41
# Number of full-reduction ('spend X, save Y') coupons received
temp = offline[offline.Discount_rate.str.contains(':') == True]
temp = temp.groupby('User_id').size().reset_index(name='u48')
X = pd.merge(X, temp, how='left', on='User_id')
# Number of percentage-discount coupons received
temp = offline[offline.Discount_rate.str.contains('\.') == True]
temp = temp.groupby('User_id').size().reset_index(name='u49')
X = pd.merge(X, temp, how='left', on='User_id')
'''offline merchant features'''
# Number of purchases at the merchant
temp = offline[offline.Date != date_null].groupby('Merchant_id').size().reset_index(name='m0')
X = pd.merge(X, temp, how='left', on='Merchant_id')
# Number of the merchant's coupons redeemed after being received
temp = coupon_consume.groupby('Merchant_id').size().reset_index(name='m1')
X = pd.merge(X, temp, how='left', on='Merchant_id')
# Number of ordinary (non-coupon) purchases at the merchant
X['m2'] = X.m0.fillna(0) - X.m1.fillna(0)
# Number of the merchant's coupons received
temp = offline[offline.Date_received != date_null].groupby('Merchant_id').size().reset_index(name='m3')
X = pd.merge(X, temp, how='left', on='Merchant_id')
# Redemption rate of the merchant's received coupons
X['m4'] = X.m1 / X.m3
# Number of the merchant's received coupons left unredeemed
temp = coupon_no_consume.groupby('Merchant_id').size().reset_index(name='m7')
X = pd.merge(X, temp, how='left', on='Merchant_id')
# Number of the merchant's coupons received on a given day
temp = X[X.Date_received != date_null]
temp = temp.groupby(['Merchant_id', 'Date_received']).size().reset_index(name='m5')
X = pd.merge(X, temp, how='left', on=['Merchant_id', 'Date_received'])
# Number of distinct users receiving the merchant's coupons on a given day
temp = X[X.Date_received != date_null]
temp = temp.groupby(['User_id', 'Merchant_id', 'Date_received']).size().reset_index()
temp = temp.groupby(['Merchant_id', 'Date_received']).size().reset_index(name='m6')
X = pd.merge(X, temp, how='left', on=['Merchant_id', 'Date_received'])
# Average discount rate of the merchant's redeemed coupons
temp = coupon_consume.groupby('Merchant_id').discount_rate.mean().reset_index(name='m8')
X = pd.merge(X, temp, how='left', on='Merchant_id')
# Highest discount rate among the merchant's redeemed coupons
temp = coupon_consume.groupby('Merchant_id').discount_rate.max().reset_index(name='m9')
X = pd.merge(X, temp, how='left', on='Merchant_id')
# Lowest discount rate among the merchant's redeemed coupons
temp = coupon_consume.groupby('Merchant_id').discount_rate.min().reset_index(name='m10')
X = pd.merge(X, temp, how='left', on='Merchant_id')
# Number of distinct users who redeemed the merchant's coupons
temp = coupon_consume.groupby(['Merchant_id', 'User_id']).size()
temp = temp.groupby('Merchant_id').size().reset_index(name='m11')
X = pd.merge(X, temp, how='left', on='Merchant_id')
# Number of distinct users who received the merchant's coupons
temp = offline[offline.Date_received != date_null].groupby(['Merchant_id', 'User_id']).size()
temp = temp.groupby('Merchant_id').size().reset_index(name='m12')
X = pd.merge(X, temp, how='left', on='Merchant_id')
# Distinct users who redeemed the merchant's coupons as a share of distinct users who received them
X['m13'] = X.m11 / X.m12
# Average number of the merchant's coupons redeemed per user
X['m14'] = X.m1 / X.m12
# Number of the merchant's distinct coupons that were redeemed
temp = coupon_consume.groupby(['Merchant_id', 'Coupon_id']).size()
temp = temp.groupby('Merchant_id').size().reset_index(name='m15')
X = pd.merge(X, temp, how='left', on='Merchant_id')
# Number of the merchant's distinct coupons that were received
temp = offline[offline.Date_received != date_null].groupby(['Merchant_id', 'Coupon_id']).size()
temp = temp.groupby('Merchant_id').count().reset_index(name='m18')
X = pd.merge(X, temp, how='left', on='Merchant_id')
# Distinct redeemed coupons as a share of all distinct coupons the merchant had received
X['m19'] = X.m15 / X.m18
# Average interval between redemptions of the merchant's coupons
temp = pd.merge(coupon_consume, coupon_consume.groupby('Merchant_id').Date.max().reset_index(name='max'))
temp = pd.merge(temp, temp.groupby('Merchant_id').Date.min().reset_index(name='min'))
temp = pd.merge(temp, temp.groupby('Merchant_id').size().reset_index(name='len'))
temp['m20'] = ((temp['max'] - temp['min']).dt.days / (temp['len'] - 1))
temp = temp.drop_duplicates('Merchant_id')
X = pd.merge(X, temp[['Merchant_id', 'm20']], how='left', on='Merchant_id')
# Average user-merchant distance for the merchant's redeemed coupons
temp = coupon_consume[coupon_consume.Distance != 11].groupby('Merchant_id').Distance
temp = pd.merge(temp.count().reset_index(name='x'), temp.sum().reset_index(name='y'), on='Merchant_id')
temp['m21'] = temp.y / temp.x
temp = temp[['Merchant_id', 'm21']]
X = pd.merge(X, temp, how='left', on='Merchant_id')
# Minimum user-merchant distance for the merchant's redeemed coupons
temp = coupon_consume[coupon_consume.Distance != 11]
temp = temp.groupby('Merchant_id').Distance.min().reset_index(name='m22')
X = pd.merge(X, temp, how='left', on='Merchant_id')
# Maximum user-merchant distance for the merchant's redeemed coupons
temp = coupon_consume[coupon_consume.Distance != 11]
temp = temp.groupby('Merchant_id').Distance.max().reset_index(name='m23')
X = pd.merge(X, temp, how='left', on='Merchant_id')
"""offline coupon features"""
# Total number of times this coupon was issued
temp = offline[offline.Coupon_id != 0].groupby('Coupon_id').size().reset_index(name='c1')
X = pd.merge(X, temp, how='left', on='Coupon_id')  # target api: pandas.merge
"""
special column names:
mle -- pivot at unpenalized MLE
truth -- pivot at true parameter
pvalue -- tests of H0 for each variable
count -- how many runs (including last one) until success
active -- was variable truly active
naive_pvalue --
cover --
naive_cover --
"""
from __future__ import division
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import probplot, uniform
import statsmodels.api as sm
def collect_multiple_runs(test_fn, columns, nrun, summary_fn, *args, **kwargs):
"""
Assumes a wait_for_return_value test...
"""
dfs = []
for i in range(nrun):
print(i)
count, result = test_fn(*args, **kwargs)
#print(result)
#print(len(np.atleast_1d(result[0])))
if hasattr(result, "__len__"):
df_i = pd.DataFrame(index=np.arange(len(np.atleast_1d(result[0]))),
columns=columns + ['count', 'run'])
else:
df_i = pd.DataFrame(index=np.arange(1),
columns=columns + ['count', 'run'])
df_i.loc[:,'count'] = count
df_i.loc[:,'run'] = i
for col, v in zip(columns, result):
df_i.loc[:,col] = np.atleast_1d(v)
df_i['func'] = [str(test_fn)] * len(df_i)
dfs.append(df_i)
if summary_fn is not None:
summary_fn(pd.concat(dfs))
return pd.concat(dfs)
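# Illustrative usage (a sketch; `my_test` is a hypothetical test function that returns
# (count, (pvalues, active_flags)) in the wait-for-return-value style assumed above):
#     results = collect_multiple_runs(my_test, ['pvalue', 'active_var'], nrun=50, summary_fn=None)
#     pvalue_plot(results)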
def pvalue_plot(multiple_results, screening=False, fig=None, label = '$H_0$', colors=['b','r']):
"""
Extract pvalues and group by
null and alternative.
"""
P0 = multiple_results['pvalue'][~multiple_results['active_var']]
P0 = P0[~pd.isnull(P0)]
PA = multiple_results['pvalue'][multiple_results['active_var']]
PA = PA[~pd.isnull(PA)]
if fig is None:
fig = plt.figure()
ax = fig.gca()
fig.suptitle('Null and alternative p-values')
grid = np.linspace(0, 1, 51)
if len(P0) > 0:
ecdf0 = sm.distributions.ECDF(P0)
F0 = ecdf0(grid)
ax.plot(grid, F0, '--o', c=colors[0], lw=2, label=label)
if len(PA) > 0:
ecdfA = sm.distributions.ECDF(PA)
FA = ecdfA(grid)
ax.plot(grid, FA, '--o', c=colors[1], lw=2, label=r'$H_A$')
ax.plot([0, 1], [0, 1], 'k-', lw=1)
ax.set_xlabel("observed p-value", fontsize=18)
ax.set_ylabel("empirical CDF", fontsize=18)
ax.legend(loc='lower right', fontsize=18)
if screening:
screen = 1. / np.mean(multiple_results.loc[multiple_results.index == 0,'count'])
ax.set_title('Screening: %0.2f' % screen)
return fig
def naive_pvalue_plot(multiple_results, screening=False, fig=None, colors=['r', 'g']):
"""
Extract naive pvalues and group by
null and alternative.
"""
P0 = multiple_results['naive_pvalues'][~multiple_results['active_var']]
P0 = P0[~pd.isnull(P0)]
PA = multiple_results['naive_pvalues'][multiple_results['active_var']]
PA = PA[~pd.isnull(PA)]
if fig is None:
fig = plt.figure()
ax = fig.gca()
fig.suptitle('Null and alternative p-values')
grid = np.linspace(0, 1, 51)
if len(P0) > 0:
ecdf0 = sm.distributions.ECDF(P0)
F0 = ecdf0(grid)
ax.plot(grid, F0, '--o', c=colors[0], lw=2, label=r'Naive p-values')
if len(PA) > 0:
ecdfA = sm.distributions.ECDF(PA)
FA = ecdfA(grid)
ax.plot(grid, FA, '--o', c=colors[1], lw=2, label=r'$H_A$ naive')
ax.plot([0, 1], [0, 1], 'k-', lw=2)
ax.set_xlabel("Observed p-value", fontsize=18)
ax.set_ylabel("Empirical CDF", fontsize=18)
ax.legend(loc='lower right', fontsize=18)
if screening:
screen = 1. / np.mean(multiple_results.loc[multiple_results.index == 0,'count'])
ax.set_title('Screening: %0.2f' % screen)
return fig
def split_pvalue_plot(multiple_results, screening=False, fig=None):
"""
Compare pvalues where we have a split_pvalue
"""
have_split = ~pd.isnull(multiple_results['split_pvalue'])  # target api: pandas.isnull
# Stat_Canada.py (flowsa)
# !/usr/bin/env python3
# coding=utf-8
'''
Pulls Statistics Canada data on water intake and discharge for 3 digit NAICS from 2005 - 2015
'''
import pandas as pd
import io
import zipfile
import pycountry
from flowsa.common import *
def sc_call(url, sc_response, args):
"""
Convert response for calling url to pandas dataframe, begin parsing df into FBA format
:param url: string, url
:param sc_response: df, response from url call
:param args: dictionary, arguments specified when running
flowbyactivity.py ('year' and 'source')
:return: pandas dataframe of original source data
"""
# Convert response to dataframe
# read all files in the stat canada zip
with zipfile.ZipFile(io.BytesIO(sc_response.content), "r") as f:
# read in file names
for name in f.namelist():
# if filename does not contain "MetaData", then create dataframe
if "MetaData" not in name:
data = f.open(name)
df = pd.read_csv(data, header=0)
return df
def sc_parse(dataframe_list, args):
"""
Function to begin parsing and formatting data into flowbyactivity format
:param dataframe_list: list of dataframes to concat and format
:param args: arguments as specified in flowbyactivity.py ('year' and 'source')
:return: dataframe parsed and partially formatted to flowbyactivity specifications
"""
# concat dataframes
df = pd.concat(dataframe_list, sort=False)  # target api: pandas.concat
import time
import pandas as pd
class Node:
def __init__(self, nid, label, full_id, set_time = True):
self.full_id = full_id
self.nid = nid
self.label = label
if(set_time):
self.start = time.time()
else:
self.start = pd.NA
self.end = pd.NA
self.child = {}
class Timer:
# Constructor
def __init__(self, verbose = 0, seq = False):
"""
Description :
Constructor of the object
Parameters :
verbose : has different levels of logging while starting/ending a subsection. It can take the following values :
0 : No logging while start() or end() are called (DEFAULT)
1 : Print only section names when their respective start()/end() are called
2 : Print section names with time the section started
3 : Print section name, its start date and start time
Example :
from ScriptTimer import Timer
st = Timer(verbose = 2)
"""
self.tree = Node(0, 'Entire Script', '0', False)
self.seq = seq
self.latest_section = ''
self.incomplete_end = False
self.raw_data = []
self.table = pd.DataFrame()
self.changed = False
self.started = False
self.verbose = verbose
# Interface Functions
def start(self, section = '0', label = 'Start'):
"""
Description :
This function starts the script timer/any subsection of the script
Arguments :
* section : Single string containing numbers seperated by fullstop.
Represents the section of code being tested
* label : Name given to the section of the code. Useful in case of many/complicated sections
If no label is given, then a default label of '-No Label-' is given
Examples :
from ScriptTimer import Timer
st = Timer(verbose = 2)
# 1. start overall script timer (optional)
timer.start()
# 2. Typical command to start a particular section
timer.start('1', 'Data Cleaning')
# 3. starting a new section automatically stops the previous running section
# (in this case, section 1 (Data Cleaning) is ended automatically before starting section 2 (Data Preprocessing))
timer.start('2', 'Data Preprocessing')
"""
seclist = self.__listview(section)
if(pd.isna(self.tree.start)):
self.tree.start = time.time()
if(section == '0'):
return
if(self.latest_section != '' and self.incomplete_end and section.startswith(self.latest_section) == False and section != self.latest_section):
self.end(self.latest_section)
self.__insert_node(self.tree, seclist, label)
self.latest_section = section
self.incomplete_end = True
self.changed = True
if(self.started == False):
self.started = True
def end(self, section = '0'):
"""
Description :
Ends a particular subsection, or even the entire timer
Arguments :
* section : Single string containing numbers seperated by fullstop.
Represents the section of code being tested
Examples :
from ScriptTimer import Timer
timer = Timer(verbose = 2)
# 1. Typical command to start and end a particular section of code
timer.start('1', 'Data Cleaning')
#
# your code
# ending section 1
timer.end('1')
# 2. end whole timer
timer.end()
"""
if(section == '0'):
if(pd.isna(self.tree.start)):
print('Timer has not been started yet!')
return
if(self.incomplete_end):
self.tree.end = self.__insert_node_endtime(self.tree, self.__listview(self.latest_section))
else:
self.tree.end = time.time()
else:
if(self.latest_section.startswith(section)):
self.__insert_node_endtime(self.tree, self.__listview(self.latest_section))
else:
self.__insert_node_endtime(self.tree, self.__listview(section))
self.incomplete_end = False
self.changed = True
def show(self, level = 1000, verbose = 0):
"""
Description :
This function is used to print the various subsections and their respective time taken
Arguments :
* level : The inner levels of sections up to which time-taken must be shown
* verbose : Levels of output to be shown. It can take the values of :
* 0 :
* Label : label of the section
* Time_Taken : time taken by the section
* And All above columns
* 1 :
* Chronology : order in which the sections were run in the script
* And All above columns
* 2 :
* Start_Time
* End_Time
* And All above columns
* 3 :
* Start_Date
* End_Date
* And All above columns
* 4 :
* Raw_Time : time taken by section in Epoch format
* Raw_Start: start time of section in Epoch format
* Raw_End : end time of section in Epoch format
* And All above columns
Example :
from ScriptTimer import Timer
import time
timer = Timer(verbose = 2)
timer.start('1', 'Data Cleaning')
time.sleep(2)
timer.end('1')
timer.show(verbose = 2)
"""
reveal = {
0 : ['Label', 'Time_Taken'],
1 : ['Label', 'Time_Taken', 'Chronology'],
2 : ['Label', 'Time_Taken', 'Chronology', 'Start_Time', 'End_Time'],
3 : ['Label', 'Time_Taken', 'Chronology', 'Start_Date', 'Start_Time', 'End_Date', 'End_Time'],
4 : ['Label', 'Time_Taken', 'Chronology', 'Start_Date', 'Start_Time', 'End_Date', 'End_Time', 'Raw_Time', 'Raw_Start', 'Raw_End'],
}
if(self.started == False):
print('Timer has not started yet!')
return
if(self.changed or len(self.table) == 0):
self.__create_table()
print(self.table[self.table.Level <= level][reveal[verbose]])
self.changed = False
def reset(self, seq = False):
"""
Description :
Resets the timer object
Arguments : NONE
Example :
from ScriptTimer import Timer
import time
timer = Timer(verbose = 2)
timer.start('1', 'Data Cleaning')
time.sleep(2)
timer.end('1')
timer.show(verbose = 2)
timer.reset()
"""
self.__init__(seq)
def get_table(self):
"""
Description :
Returns a table containing details of sections, time taken etc
Arguments : NONE
Example :
from ScriptTimer import Timer
import pandas as pd
import time
timer = Timer(verbose = 2)
timer.start('1', 'Data Cleaning')
time.sleep(2)
timer.end('1')
timer.show(verbose = 2)
# df will contain the pandas dataframe
df = timer.get_table()
"""
return self.table
def save(self, filename = 'Timer_Logs'):
"""
Description :
Saves the timer dataframe into a csv file in the present working directory
The dataframe contains information about sections, time taken, chronology etc
If you want the csv in another directory, enter filename relative to pwd
or just use get_table() to obtain the dataframe and handle it however you like
Arguments :
* filename : name of csv file. Default file name is 'Timer_Logs'
Example :
from ScriptTimer import Timer
import time
timer = Timer(verbose = 2)
timer.start('1', 'Data Cleaning')
time.sleep(2)
timer.end('1')
timer.show(verbose = 2)
timer.save()
# notice a csv file in your pwd
"""
self.table.to_csv( filename + '.csv')
# Background Functions
def __insert_node(self, root, seclist, label):
nid = seclist[0]
if(nid<= 0):
print('Section no.s can only start with 1! (did not start this section)')
return
if(len(seclist) == 1):
if nid not in root.child.keys():
root.child[nid] = Node(nid, label, self.__get_full_id(root.full_id, nid))
self.__print_logs(root.child[nid])
return
else:
root.child[nid].start = time.time()
root.child[nid].child = {}
self.__print_logs(root.child[nid])
else:
if(nid not in root.child.keys()):
root.child[nid] = Node(nid, '-No Label-', self.__get_full_id(root.full_id, nid))
self.__print_logs(root.child[nid])
self.__insert_node(root.child[nid], seclist[1:], label)
def __insert_node_endtime(self, root, seclist):
nid = seclist[0]
if(nid<= 0):
print('There is no such section -_-')
return 0
if(len(seclist) == 1):
if(nid not in root.child.keys()):
print('There is no such section -_-')
return 0
root.child[nid].end = time.time()
root.end = root.child[nid].end
self.__print_logs(root.child[nid], start = False)
return root.end
else:
if(nid not in root.child.keys()):
print('There is no such section!!')
return
end = self.__insert_node_endtime(root.child[nid], seclist[1:])
if(end!=0):
root.end = end
return end
def __create_table(self):
self.raw_data = []
self.__get_rows(self.tree)
temp = pd.DataFrame(columns = ['Section', 'Label', 'Raw_Start', 'Raw_End', 'Level'], data = self.raw_data)
temp['Chronology'] = temp.Raw_Start.rank()
temp['Raw_Time'] = temp.Raw_End - temp.Raw_Start
temp['Time_Taken'] = temp.Raw_Time.apply(lambda x : self.__get_time_units(x))
temp['Start_Date'] = temp.Raw_Start.apply(lambda x : self.__get_date(x))
temp['Start_Time'] = temp.Raw_Start.apply(lambda x : self.__get_time(x))
temp['End_Date'] = temp.Raw_End.apply(lambda x : self.__get_date(x))
temp['End_Time'] = temp.Raw_End.apply(lambda x : self.__get_time(x))
temp['Time_Taken'] = temp.apply(lambda x : 'Still Running' if pd.isna(x['Raw_End']) else x['Time_Taken'], axis = 1)
temp.set_index('Section', inplace = True)
self.table = temp
def __get_rows(self, root):
rtime = self.__get_time_units(root.end - root.start)
self.raw_data.append([root.full_id, root.label, root.start, root.end, len(self.__listview(root.full_id))])
if(root.child == {}):
return
else:
keys = list(root.child.keys())
keys.sort()
for i in keys:
self.__get_rows(root.child[i])
def __get_time_units(self, n):
if(n < 60):
return str(round(n, 4))+ (' Sec' if(n == 1) else ' Secs')
n1 = n/60
if(n1 < 60):
return str(round(n1, 4))+ (' Min' if n1 == 1 else ' Mins')
n2 = n1/60
if(n2 < 24):
return str(round(n2, 4))+ (' Hr' if n2 == 1 else ' Hrs')
n3 = n2/24
return str(round(n3, 4))+ (' Day' if n3 == 1 else ' Days')
def __get_full_id(self, prev_id, nid):
if(prev_id == '0'):
return str(nid)
return prev_id + '.' + str(nid)
def __listview(self, section):
return [int(i) for i in section.split('.')]
def __get_date(self, epoch):
if(pd.isna(epoch)):  # target api: pandas.isna
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of exma (https://github.com/fernandezfran/exma/).
# Copyright (c) 2021, <NAME>
# License: MIT
# Full Text: https://github.com/fernandezfran/exma/blob/master/LICENSE
# ============================================================================
# IMPORTS
# ============================================================================
import exma.electrochemistry
import numpy as np
import pandas as pd
import pytest
# ============================================================================
# TESTS
# ============================================================================
def test_fractional_volume_change():
"""Test the fractional volume change."""
reffvc = np.array([0.2, 0.4, 0.6, 0.8, 1.0])
x = np.linspace(0, 1, num=5)
natoms_a = np.full(5, 8)
volume = np.array([150, 175, 200, 225, 250])
df = pd.DataFrame({"x": x, "natoms_a": natoms_a, "volume": volume})
result = exma.electrochemistry.fractional_volume_change(df, 8, 125)
np.testing.assert_almost_equal(result.x, x)
np.testing.assert_almost_equal(result.fvc, reffvc)
def test_fractional_volume_change_err():
"""Test the fractional volume change with error propagation."""
reffvc = np.array([0.2, 0.4, 0.6, 0.8, 1.0])
referrfvc = np.full(5, 0.04)
x = np.linspace(0, 1, num=5)
natoms_a = np.full(5, 8)
volume = np.array([150, 175, 200, 225, 250])
volume_error = np.full(5, 5)
df = pd.DataFrame(
{
"x": x,
"natoms_a": natoms_a,
"volume": volume,
"err_volume": volume_error,
}
)
result = exma.electrochemistry.fractional_volume_change(df, 8, 125)
np.testing.assert_almost_equal(result.x, x)
np.testing.assert_almost_equal(result.fvc, reffvc)
np.testing.assert_almost_equal(result.errfvc, referrfvc)
def test_raise_fvc():
"""Test the raise of KeyError in fvc."""
x = np.linspace(0, 1, num=5)
natoms_a = np.full(5, 8)
df = pd.DataFrame({"x": x, "natoms_a": natoms_a})  # target api: pandas.DataFrame
import numpy as np
import pandas as pd
from analysis.transform_fast import load_raw_cohort, transform
def test_immuno_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF IMMRX_DAT <> NULL | Select | Next
if pd.notnull(row["immrx_dat"]):
assert row["immuno_group"]
continue
# IF IMMDX_COV_DAT <> NULL | Select | Reject
if pd.notnull(row["immdx_cov_dat"]):
assert row["immuno_group"]
else:
assert not row["immuno_group"]
def test_ckd_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF CKD_COV_DAT <> NULL (diagnoses) | Select | Next
if pd.notnull(row["ckd_cov_dat"]):
assert row["ckd_group"]
continue
# IF CKD15_DAT = NULL (No stages) | Reject | Next
if pd.isnull(row["ckd15_dat"]):
assert not row["ckd_group"]
continue
# IF CKD35_DAT>=CKD15_DAT | Select | Reject
if gte(row["ckd35_dat"], row["ckd15_dat"]):
assert row["ckd_group"]
else:
assert not row["ckd_group"]
def test_ast_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF ASTADM_DAT <> NULL | Select | Next
if pd.notnull(row["astadm_dat"]):
assert row["ast_group"]
continue
# IF AST_DAT <> NULL | Next | Reject
if pd.isnull(row["ast_dat"]):
assert not row["ast_group"]
continue
# IF ASTRXM1 <> NULL | Next | Reject
if pd.isnull(row["astrxm1_dat"]):
assert not row["ast_group"]
continue
# IF ASTRXM2 <> NULL | Next | Reject
if pd.isnull(row["astrxm2_dat"]):
assert not row["ast_group"]
continue
# IF ASTRXM3 <> NULL | Select | Reject
if pd.notnull(row["astrxm3_dat"]):
assert row["ast_group"]
else:
assert not row["ast_group"]
def test_cns_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF CNS_COV_DAT <> NULL | Select | Reject
if pd.notnull(row["cns_cov_dat"]):
assert row["cns_group"]
else:
assert not row["cns_group"]
def test_resp_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF AST_GROUP <> NULL | Select | Next
if row["ast_group"]:
assert row["resp_group"]
continue
# IF RESP_COV_DAT <> NULL | Select | Reject
if pd.notnull(row["resp_cov_dat"]):
assert row["resp_group"]
else:
assert not row["resp_group"]
def test_bmi_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF SEV_OBESITY_DAT > BMI_DAT | Select | Next
if gt(row["sev_obesity_dat"], row["bmi_dat"]):
assert row["bmi_group"]
continue
# IF BMI_VAL >=40 | Select | Reject
if gte(row["bmi_val"], 40):
assert row["bmi_group"]
else:
assert not row["bmi_group"]
def test_diab_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF DIAB_DAT > DMRES_DAT | Select | Reject
if gt(row["diab_dat"], row["dmres_dat"]):
assert row["diab_group"]
else:
assert not row["diab_group"]
def test_sevment_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF SEV_MENTAL_DAT > SMHRES_DAT | Select | Reject
if gt(row["sev_mental_dat"], row["smhres_dat"]):
assert row["sevment_group"]
else:
assert not row["sevment_group"]
def test_atrisk_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF IMMUNOGROUP <> NULL | Select | Next
if row["immuno_group"]:
assert row["atrisk_group"]
continue
# IF CKD_GROUP <> NULL | Select | Next
if row["ckd_group"]:
assert row["atrisk_group"]
continue
# IF RESP_GROUP <> NULL | Select | Next
if row["resp_group"]:
assert row["atrisk_group"]
continue
# IF DIAB_GROUP <> NULL | Select | Next
if row["diab_group"]:
assert row["atrisk_group"]
continue
# IF CLD_DAT <>NULL | Select | Next
if pd.notnull(row["cld_dat"]):
assert row["atrisk_group"]
continue
# IF CNS_GROUP <> NULL | Select | Next
if row["cns_group"]:
assert row["atrisk_group"]
continue
# IF CHD_COV_DAT <> NULL | Select | Next
if pd.notnull(row["chd_cov_dat"]):
assert row["atrisk_group"]
continue
# IF SPLN_COV_DAT <> NULL | Select | Next
if pd.notnull(row["spln_cov_dat"]):
assert row["atrisk_group"]
continue
# IF LEARNDIS_DAT <> NULL | Select | Next
if pd.notnull(row["learndis_dat"]):
assert row["atrisk_group"]
continue
# IF SEVMENT_GROUP <> NULL | Select | Reject
if row["sevment_group"]:
assert row["atrisk_group"]
else:
assert not row["atrisk_group"]
def test_covax1d_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF COVRX1_DAT <> NULL | Select | Next
if pd.notnull(row["covrx1_dat"]):
assert row["covax1d_group"]
continue
# IF COVADM1_DAT <> NULL | Select | Reject
if pd.notnull(row["covadm1_dat"]):
assert row["covax1d_group"]
else:
assert not row["covax1d_group"]
def test_covax2d_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF COVAX1D_GROUP <> NULL | Next | Reject
if not row["covax1d_group"]:
assert not row["covax2d_group"]
continue
# IF COVRX2_DAT <> NULL | Select | Next
if pd.notnull(row["covrx2_dat"]):
assert row["covax2d_group"]
continue
# IF COVADM2_DAT <> NULL | Select | Reject
if pd.notnull(row["covadm2_dat"]):
assert row["covax2d_group"]
else:
assert not row["covax2d_group"]
def test_unstatvacc1_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF COVAX1D_GROUP <> NULL | Next | Reject
if not row["covax1d_group"]:
assert not row["unstatvacc1_group"]
continue
# IF AZD1RX_DAT <> NULL | Reject | Next
if pd.notnull(row["azd1rx_dat"]):
assert not row["unstatvacc1_group"]
continue
# IF PFD1RX_DAT <> NULL | Reject | Next
if pd.notnull(row["pfd1rx_dat"]):
assert not row["unstatvacc1_group"]
continue
# IF MOD1RX_DAT <> NULL | Reject | Next
if pd.notnull(row["mod1rx_dat"]):
assert not row["unstatvacc1_group"]
continue
# IF NXD1RX_DAT <> NULL | Reject | Next
if pd.notnull(row["nxd1rx_dat"]):
assert not row["unstatvacc1_group"]
continue
# IF JND1RX _DAT <> NULL | Reject | Next
if pd.notnull(row["jnd1rx_dat"]):
assert not row["unstatvacc1_group"]
continue
# IF GSD1RX_DAT <> NULL | Reject | Next
if pd.notnull(row["gsd1rx_dat"]):
assert not row["unstatvacc1_group"]
continue
# IF VLD1RX_DAT <> NULL | Reject | Select
if pd.notnull(row["vld1rx_dat"]):
assert not row["unstatvacc1_group"]
else:
assert row["unstatvacc1_group"]
def test_unstatvacc2_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF COVAX2D_GROUP <> NULL | Next | Reject
if not row["covax2d_group"]:
assert not row["unstatvacc2_group"]
continue
# IF AZD2RX_DAT <> NULL | Reject | Next
if pd.notnull(row["azd2rx_dat"]):
assert not row["unstatvacc2_group"]
continue
# IF PFD2RX_DAT <> NULL | Reject | Next
if pd.notnull(row["pfd2rx_dat"]):
assert not row["unstatvacc2_group"]
continue
# IF MOD2RX_DAT <> NULL | Reject | Next
if pd.notnull(row["mod2rx_dat"]):
assert not row["unstatvacc2_group"]
continue
# IF NXD2RX_DAT <> NULL | Reject | Next
if pd.notnull(row["nxd2rx_dat"]):
assert not row["unstatvacc2_group"]
continue
# IF JND2RX _DAT <> NULL | Reject | Next
if pd.notnull(row["jnd2rx_dat"]):
assert not row["unstatvacc2_group"]
continue
# IF GSD2RX_DAT <> NULL | Reject | Next
if pd.notnull(row["gsd2rx_dat"]):
assert not row["unstatvacc2_group"]
continue
# IF VLD2RX_DAT <> NULL | Reject | Select
if pd.notnull(row["vld2rx_dat"]):
assert not row["unstatvacc2_group"]
else:
assert row["unstatvacc2_group"]
def test_shield_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF SHIELD_DAT = NULL | Reject | Next
if pd.isnull(row["shield_dat"]):
assert not row["shield_group"]
continue
# IF SHIELD_DAT <> NULL AND NONSHIELD_DAT = NULL | Select | Next
if (pd.notnull(row["shield_dat"])) & (pd.isnull(row["nonshield_dat"])):
assert row["shield_group"]
continue
# IF SHIELD_DAT > NONSHIELD_DAT | Select | Reject
if gt(row["shield_dat"], row["nonshield_dat"]):
assert row["shield_group"]
else:
assert not row["shield_group"]
def test_preg_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF PREG_DAT<> NULL | Next | Reject
if pd.isnull(row["preg_dat"]):
assert not row["preg_group"]
continue
# IF PREGDEL_DAT > PREG_DAT | Reject | Select
if gt(row["pregdel_dat"], row["preg_dat"]):
assert not row["preg_group"]
else:
assert row["preg_group"]
def gt(lhs, rhs):
if pd.isna(lhs):
return False
if pd.isna(rhs):
return True
return lhs > rhs
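# NOTE: gt/gte treat a missing lhs as "not greater" and a missing rhs as "greater",
# mirroring how the specification's comparisons handle NULL dates.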
def gte(lhs, rhs):
if pd.isna(lhs):
return False
if pd.isna(rhs):  # target api: pandas.isna
import argparse
import json
import logging
import os
import pandas as pd
from exp_data_atts import learners
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--input_dir', type=str,
default=os.path.expanduser('~/experiments/lccv'))
parser.add_argument('--output_file', type=str,
default='data/extrapolation.csv')
return parser.parse_args()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO,
format='[%(asctime)s] [%(levelname)s] %(message)s')
args = parse_args()
decode_errors = 0
all_rows = []
for task_dir in os.listdir(args.input_dir):
for learner in learners:
filename = 'result_' + str(learner) + '.json'
filepath = os.path.join(args.input_dir, task_dir, filename)
if os.path.isfile(filepath):
with open(filepath, 'r') as fp:
try:
task_results = json.load(fp)
lccv_size = max(task_results['lccv'].keys())
current = task_results['lccv'][lccv_size]
prediction = task_results['prediction'][str(task_results['sizes'][1])]
actual = task_results['cv'][str(task_results['sizes'][1])]
all_rows.append({
'task_id': int(task_dir),
'classifier': str(learner),
'performance_curve_end': current['mean'],
'performance_prediction': prediction,
'performance_next_point': actual['mean'],
'delta_current_prediction': current['mean'] - prediction,
'delta_current_actual': current['mean'] - actual['mean'],
})
except json.decoder.JSONDecodeError as e:
logging.warning('JSON decode error for file: %s' % filepath)
decode_errors += 1
df = pd.DataFrame(all_rows)  # target api: pandas.DataFrame
import pandas as pd
from django.test import TestCase
from django.urls import reverse
from django.utils.safestring import mark_safe
from django_dicom.models import DataElementDefinition
from tests.fixtures import (TEST_DATA_ELEMENT_DEFINITION,
TEST_DATA_ELEMENT_DEFINITION2,
TEST_DEFINITION2_TO_SERIES,
TEST_DEFINITION_TO_SERIES)
class DataElementTestCase(TestCase):
"""
Tests for the :class:`~django_dicom.models.data_element.DataElement` model.
"""
@classmethod
def setUpTestData(cls):
"""
Creates instances to test the :class:`~django_dicom.models.data_element_definition.DataElementDefinition`
model.
For more information see Django's :class:`~django.test.TestCase` documentation_.
.. _documentation: https://docs.djangoproject.com/en/2.2/topics/testing/tools/#testcase
"""
DataElementDefinition.objects.create(**TEST_DATA_ELEMENT_DEFINITION)
DataElementDefinition.objects.create(**TEST_DATA_ELEMENT_DEFINITION2)
def setUp(self):
self.definition = DataElementDefinition.objects.last()
self.definition2 = DataElementDefinition.objects.first()
def test_string_long(self):
expected = "\n" + pd.Series(TEST_DEFINITION_TO_SERIES).to_string()
result = str(self.definition)
self.assertIsInstance(result, str)
self.assertEqual(result, expected)
def test_string_person_name(self):
expected = "\n" + pd.Series(TEST_DEFINITION2_TO_SERIES)  # target api: pandas.Series
import pytest
from .. import Relic
from reliquery.storage import FileStorage
import pandas as pd
from pandas._testing import assert_frame_equal
d = {
"one": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"two": pd.Series([1.0, 2.0, 3.0, 4.0], index=["a", "b", "c", "d"]),
}
df = pd.DataFrame(d)
@pytest.fixture
def test_storage(tmp_path):
return FileStorage(str(tmp_path), "test_pandasdf")
def test_list_pandasdf_file_when_add_pandasdf(test_storage):
rq = Relic(name="test", relic_type="test", storage=test_storage)
rq.add_pandasdf("dataframe1", pd.DataFrame(d))
pandasdf_text_list = rq.list_pandasdf()
assert len(pandasdf_text_list) > 0
def test_pandasdf_given_name(test_storage):
rq = Relic(name="test", relic_type="test", storage=test_storage)
comparison = pd.DataFrame(d)  # target api: pandas.DataFrame
import warnings
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Series,
isna,
)
import pandas._testing as tm
class TestDataFrameCov:
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame["A"][:5] = np.nan
frame["B"][5:10] = np.nan
result = frame.cov(min_periods=len(frame) - 8)
expected = frame.cov()
expected.loc["A", "B"] = np.nan
expected.loc["B", "A"] = np.nan
tm.assert_frame_equal(result, expected)
# regular
result = frame.cov()
expected = frame["A"].cov(frame["C"])
tm.assert_almost_equal(result["A"]["C"], expected)
# exclude non-numeric types
result = float_string_frame.cov()
expected = float_string_frame.loc[:, ["A", "B", "C", "D"]].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(
np.cov(df.values.T).reshape((1, 1)), index=df.columns, columns=df.columns
)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(
np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns,
columns=df.columns,
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("test_ddof", [None, 0, 1, 2, 3])
def test_cov_ddof(self, test_ddof):
# GH#34611
np_array1 = np.random.rand(10)
np_array2 = np.random.rand(10)
df = DataFrame({0: np_array1, 1: np_array2})
result = df.cov(ddof=test_ddof)
expected_np = np.cov(np_array1, np_array2, ddof=test_ddof)
expected = DataFrame(expected_np)  # target api: pandas.DataFrame
import numpy as np
import pandas as pd
from scipy import stats
from scipy.optimize import curve_fit
import os
import re
from fuelcell import utils
from fuelcell.model import Datum
dlm_default = utils.dlm_default
col_default_labels = {'current':'i', 'potential':'v', 'time':'t', 'current_err':'i_sd', 'potential_err':'v_sd', 'overpotential':'eta', 'tafelcurrent':'log(ioi)', 'realcurr':'real', 'imagcurr':'imag'}
col_default_ids = {'current':2, 'potential':1, 'time':0, 'current_err':2, 'potential_err':3, 'overpotential':2, 'tafelcurrent':3}
ref_electrodes = {'she':0, 'sce':0.241}
thermo_potentials = {'none':0, 'oer':1.23}
expt_types_all = ['cv', 'cp', 'ca', 'lsv', 'eis']
### functions to load raw data ###
def load_data(filename=None, folder=None, pattern='', expt_type='', filetype='', delimiter=dlm_default):
"""
Loads data file(s) as a Datum Object
Function to load electrochemical data files as a Datum object. If called with no
arguments, loads all supported data files in the present folder.
Parameters
___________
filename: str, path object, or file-like (default=None)
Full filename of a file in the present directory or a complete path to an individual file. If filename is specified, all other arguments except delimiter are ignored.
folder: str, path object, or path-like (default=None)
Directory in which data files are stored. If none, defaults to the present directory.
pattern: str or regex
If specified, only files matching this pattern in the specified folder are loaded. Ignored if filename is specified.
expt_type: str (default='')
Alternative to specifying pattern; ignored if pattern is specified. All files containing expt_type anywhere in the file name will be loaded. Ex: to load all chronopotentiometry files, specify expt_type='cp'.
filetype : str
Any supported filetype. Only files of the specified file type will be loaded. Can be used in conjunction with pattern or expt_type.
delimiter : char (default = '\t')
Delimiting character if the file is a text file. Defaults to '\t' (tab-delimiting).
Returns
________
data:list of Datum
Returns a list of Datum objects, with each entry corresponding to an individual data file
"""
data = []
if filename:
if type(filename) != list:
filename = [filename]
# for f in filename:
# data.append(utils.read_file(f, delimiter))
if folder:
dirpath = os.path.realpath(folder)
else:
dirpath = os.getcwd()
if expt_type and not pattern:
pattern = r'.*' + expt_type + r'.*'
files = utils.get_files(dirpath, pattern, filetype, filename)
for f in files:
path = os.path.join(dirpath, f)
this_data = utils.read_file(path, delimiter)
if expt_type:
this_data.set_expt_type(expt_type.lower())
else:
for this_type in expt_types_all:
pattern = r'.*' + this_type + r'.*'
if re.match(pattern, f):
this_data.set_expt_type(this_type.lower())
break
if this_data is not None:
data.append(this_data)
return data
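# Illustrative usage (sketch): load every chronopotentiometry data file in ./data
#     data = load_data(folder='data', expt_type='cp')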
def ca_raw(filename=None, folder=None, pattern='', filetype='', delimiter=dlm_default):
"""
Loads chronoamperometry data
Efficient way to load multiple chronoamperometry data files at once; equivalent to calling load_data and specifying expt_type='ca'. If called with no arguments, loads all chronoamperometry files in the present folder.
Parameters
___________
filename: str, path object, or file-like (default=None)
Full filename of a file in the present directory or a complete path to an individual file. If filename is specified, all other arguments except delimiter are ignored.
folder: str, path object, or path-like (default=None)
Directory in which data files are stored. If none, defaults to the present directory.
pattern: str or regex
If specified, only files matching this pattern in the specified folder are loaded. Ignored if filename is specified.
filetype : str
Any supported filetype. Only files of the specified file type will be loaded. Can be used in conjunction with pattern or expt_type.
delimiter : char (default = '\t')
Delimiting character if the file is a text file. Defaults to '\t' (tab-delimiting).
Returns
________
data:list of Datum
Returns a list of Datum objects, with each entry corresponding to an individual data file
"""
data = load_data(filename, folder, pattern, 'ca', filetype, delimiter)
return data
def cp_raw(filename=None, folder=None, pattern='', filetype='', delimiter=dlm_default):
"""
Loads chronopotentiometry data
Efficient way to load multiple chronopotentiometry files at once; equivalent to calling load_data and specifying expt_type='cp'. If called with no arguments, loads all chronopotentiometry files in the present folder.
Parameters
___________
filename: str, path object, or file-like (default=None)
Full filename of a file in the present directory or a complete path to an individual file. If filename is specified, all other arguments except delimiter are ignored.
folder: str, path object, or path-like (default=None)
Directory in which data files are stored. If none, defaults to the present directory.
pattern: str or regex
If specified, only files matching this pattern in the specified folder are loaded. Ignored if filename is specified.
filetype : str
Any supported filetype. Only files of the specified file type will be loaded. Can be used in conjunction with pattern or expt_type.
delimiter : char (default = '\t')
Delimiting character if the file is a text file. Defaults to '\t' (tab-delimiting).
Returns
________
data:list of Datum
Returns a list of Datum objects, with each entry corresponding to an individual data file
"""
data = load_data(filename, folder, pattern, 'cp', filetype, delimiter)
return data
def cv_raw(filename=None, folder=None, pattern='', filetype='', delimiter=dlm_default):
"""
Loads cyclic voltammetry data
Efficient way to load multiple cyclic voltammetry files at once; equivalent to calling load_data and specifying expt_type='cv'. If called with no arguments, loads all cyclic voltammetry files in the present folder.
Parameters
___________
filename: str, path object, or file-like (default=None)
Full filename of a file in the present directory or a complete path to an individual file. If filename is specified, all other arguments except delimiter are ignored.
folder: str, path object, or path-like (default=None)
Directory in which data files are stored. If none, defaults to the present directory.
pattern: str or regex
If specified, only files matching this pattern in the specified folder are loaded. Ignored if filename is specified.
filetype : str
Any supported filetype. Only files of the specified file type will be loaded. Can be used in conjunction with pattern or expt_type.
delimiter : char (default = '\t')
Delimiting character if the file is a text file. Defaults to '\t' (tab-delimiting).
Returns
________
data:list of Datum
Returns a list of Datum objects, with each entry corresponding to an individual data file
"""
data = load_data(filename, folder, pattern, 'cv', filetype, delimiter)
return data
def lsv_raw(filename=None, folder=None, pattern='', filetype='', delimiter=dlm_default):
"""
Loads linear sweep voltammetry data
Efficient way to load multiple linear sweep voltammetry files at once; equivalent to calling load_data and specifying expt_type='lsv'. If called with no arguments, loads all linear sweep voltammetry files in the present folder.
Parameters
___________
filename: str, path object, or file-like (default=None)
Full filename of a file in the present directory or a complete path to an individual file. If filename is specified, all other arguments except delimiter are ignored.
folder: str, path object, or path-like (default=None)
Directory in which data files are stored. If none, defaults to the present directory.
pattern: str or regex
If specified, only files matching this pattern in the specified folder are loaded. Ignored if filename is specified.
filetype : str
Any supported filetype. Only files of the specified file type will be loaded. Can be used in conjunction with pattern or expt_type.
delimiter : char (default = '\t')
Delimiting character if the file is a text file. Defaults to '\t' (tab-delimiting).
Returns
________
data:list of Datum
Returns a list of Datum objects, with each entry corresponding to an individual data file
"""
data = load_data(filename, folder, pattern, 'lsv', filetype, delimiter)
return data
def eis_raw(filename=None, folder=None, pattern='', filetype='', delimiter=dlm_default):
"""
Loads electrochemical impedance spectroscopy data
Efficient way to load multiple electrochemical impedance spectroscopy files at once; equivalent to calling load_data and specifying expt_type='eis'. If called with no arguments, loads all electrochemical impedance spectroscopy files in the present folder.
Parameters
___________
filename: str, path object, or file-like (default=None)
Full filename of a file in the present directory or a complete path to an individual file. If filename is specified, all other arguments except delimiter are ignored.
folder: str, path object, or path-like (default=None)
Directory in which data files are stored. If none, defaults to the present directory.
pattern: str or regex
If specified, only files matching this pattern in the specified folder are loaded. Ignored if filename is specified.
filetype : str
Any supported filetype. Only files of the specified file type will be loaded. Can be used in conjunction with pattern or expt_type.
delimiter : char (default = '\t')
Delimiting character if the file is a text file. Defaults to '\t' (tab-delimiting).
Returns
________
data:list of Datum
Returns a list of Datum objects, with each entry corresponding to an individual data file
"""
data = load_data(filename, folder, pattern, 'eis', filetype, delimiter)
return data
### high-level functions for processing data ###
def ca_process(data=None, current_column=2, potential_column=1, area=5, reference='she', thermo_potential=0, export_data=False, save_dir='processed', threshold=5, min_step_length=50, pts_to_average=300, pyramid=False, **kwargs):
"""
Processes chronoamperometry data
Can either process pre-loaded data or load and process data files. If called with no arguments, loads and processes all 'ca' files in the present folder. See process_steps for details on the operations performed.
Parameters
___________
data: list of Datum
List of Datum objects containing CA data. If unspecified, data will be loaded using ca_raw before processing.
current_column : int or str (default=1)
Index or label of the column containing current data. Used only if automatic column identification fails
potential_column : int or str (default=2)
Index or label of the column containing potential data. Used only if automatic column identification fails
threshold: int (default=5)
Minimum consecutive absolute difference which constitutes a step
min_step_length: int (default=50)
Minimum length of the arrays which result from splitting the initial array. Arrays shorter than this value will be discarded
pts_to_average: int (default=300)
Steady-state average and sd are calculated using the last pts_to_average values of the array. Default is 300 points, which is the last 30 seconds of each hold at the instrument's default collection rate of 10 Hz.
pyramid: bool (default=False)
Specifies whether the potential is stepped in both directions. Set pyramid=False if only stepping up or only stepping down.
area: int or float (default=5)
Geometric active area of the MEA. Scaling factor to convert current to current density.
reference: {'she', 'sce'}, int, or float (default='she')
Either a string identifying the reference electrode (i.e. 'she' or 'sce'), or the potential of the reference electrode used. sce=0.241
**kwargs:
Remaining arguments are passed to ca_raw to load data
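Examples
________
Illustrative usage; 'ca_data' is a hypothetical folder name and the keyword arguments are forwarded to ca_raw (assumed to accept the same filename/folder/pattern arguments as the loaders above):
>>> data = ca_process(folder='ca_data', area=5, reference='she', export_data=True)
>>> names = [d.get_name() for d in data]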
"""
if data is None:
data = ca_raw(**kwargs)
for d in data:
if d.get_expt_type() == 'ca':
raw = d.get_raw_data()
processed = process_steps(raw, potential_column, current_column, threshold, min_step_length, pts_to_average, pyramid, 'ca', area, reference, thermo_potential)
d.set_processed_data(processed)
d.set_current_data(processed['i'])
d.set_potential_data(processed['v'])
d.set_overpotential_data(processed['eta'])
d.set_error_data(processed['i_sd'])
set_datum_params(d, area, reference, thermo_potential)
if export_data:
name = d.get_name()
utils.save_data(processed, name+'.csv', save_dir)
return data
def cp_process(data=None, current_column=2, potential_column=1, area=5, reference='she', thermo_potential=0, export_data=False, save_dir='processed', threshold=5, min_step_length=25, pts_to_average=300, pyramid=True, **kwargs):
"""
Processes chronopotentiometry data
Can either process pre-loaded data or load and process data files. If called with no arguments, loads and processes all 'cp' files in the present folder. See process_steps for details on the operations performed.
Parameters
___________
data: list of Datum
List of Datum objects containing CP data. If unspecified, data will be loaded using cp_raw before processing.
current_column : int or str (default=2)
Index or label of the column containing current data. Used only if automatic column identification fails
potential_column : int or str (default=1)
Index or label of the column containing potential data. Used only if automatic column identification fails
threshold: int (default=5)
Minimum consecutive absolute difference which constitutes a step
min_step_length: int (default=25)
Minimum length of the arrays which result from splitting the initial array. Arrays shorter than this value will be discarded
pts_to_average: int (default=300)
Steady-state average and sd are calculated using the last pts_to_average values of the array. Default is 300 points, which is the last 30 seconds of each hold at the instrument's default collection rate of 10 Hz.
pyramid: bool (default=True)
Specifies whether the current is ramped in both directions. Set pyramid=False if only ramping up or only ramping down.
area: int or float (default=5)
Geometric active area of the MEA. Scaling factor to convert current to current density.
reference: {'she', 'sce'}, int, or float (default='she')
Either a string identifying the reference electrode (i.e. 'she' or 'sce'), or the potential of the reference electrode used. sce=0.241
**kwargs:
Remaining arguments are passed to cp_raw to load data
"""
if data is None:
data = cp_raw(**kwargs)
for d in data:
if d.get_expt_type() == 'cp':
raw = d.get_raw_data()
processed = process_steps(raw, current_column, potential_column, threshold, min_step_length, pts_to_average, pyramid, 'cp', area, reference, thermo_potential)
d.set_processed_data(processed)
d.set_current_data(processed['i'])
d.set_potential_data(processed['v'])
d.set_overpotential_data(processed['eta'])
d.set_error_data(processed['v_sd'])
set_datum_params(d, area, reference, thermo_potential)
if export_data:
name = d.get_name()
utils.save_data(processed, name+'.csv', save_dir)
return data
def cv_process(data=None, current_column=1, potential_column=0, area=5, reference='she', thermo_potential=0, export_data=False, save_dir='processed', **kwargs):
"""
Processes cyclic voltammetry data
Can either process pre-loaded data or load and process data files. If called with no arguments, loads and processes all 'cv' files in the present folder. Performs the following operations in order:
1. Parse column labels to find columns containing current and potential data. If parsing fails, specified labels/indices are used
2. Convert current to current density using the specified area
Parameters
__________
data: list of Datum
List of Datum objects containing CV data. If unspecified, data will be loaded using cv_raw before processing.
area : int or float (default=5)
Geometric active area of the MEA. Scaling factor to convert current to current density
current_column : int or str (default=1)
Index or label of the column containing current data. Used only if automatic column identification fails
potential_column : int or str (default=0)
Index or label of the column containing potential data. Used only if automatic column identification fails
**kwargs:
Remaining arguments are passed to cv_raw to load data
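Examples
________
Illustrative usage (assumes 'cv' data files are present in the working directory; pre-loaded data can instead be passed via the data argument):
>>> data = cv_process(area=5, reference='she')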
"""
if data is None:
data = cv_raw(**kwargs)
for d in data:
if d.get_expt_type() == 'cv':
raw = d.get_raw_data()
current = find_col(raw, 'current', current_column)
current = current / area
potential = find_col(raw, 'potential', potential_column)
potential = electrode_correct(potential, reference)
overpotential = overpotential_correct(potential, thermo_potential)
processed = pd.DataFrame({'i': current, 'v': potential, 'eta': overpotential})
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 20 14:08:35 2019
@author: Team BTC - <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
"""
# Sorry, the code isn't very efficient. Because of time constraints and the number of people working on the project, we couldn't do all the automation we would have liked to do.
# Code in block comments should not be run, as it will make changes to the cloud database
# %% Importing libraries
# You may need to install dnspython in order to work with cloud server
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
import json
import pandas as pd
import numpy as np
from tqdm import tqdm
from datetime import datetime as dt
import os
import time
import re
import copy
from textblob import TextBlob
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from datetime import timedelta
from pymongo import MongoClient
import statsmodels.formula.api as smf
import statsmodels.api as sm
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.graphics.tsaplots import plot_acf
from statsmodels.graphics.tsaplots import plot_pacf
from statsmodels.tsa.stattools import adfuller
import matplotlib.pyplot as plt
from statsmodels.tsa.api import VAR
#os.chdir('H:/Documents/Alternance/Project/')
# %% Function to scrape data from StockTwits and add it to the cloud server
# The function has 2 inputs:
# - Symbol of the asset in string
# - Rate limit: number of requests per execution, in integer
def get_stwits_data(symbol,rate_limit):
client = MongoClient('mongodb+srv://Group_fintech:[email protected]/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
exist=0
for q in db['{}'.format(symbol)].aggregate([
{ "$group": {
"_id": None,
"min": { "$min": "$ID" }
}}
]):
exist=1
min_prev_id=q['min']
http = urllib3.PoolManager()
mid=[]
duplicates=0
for j in tqdm(range(rate_limit)):
if exist==0:
url = "https://api.stocktwits.com/api/2/streams/symbol/{}.json".format(symbol)
elif exist!=0 and len(mid)==0:
url = "https://api.stocktwits.com/api/2/streams/symbol/{}.json?max={}".format(symbol,min_prev_id)
else:
min_ID=min(mid)
url = "https://api.stocktwits.com/api/2/streams/symbol/{}.json?max={}".format(symbol,min_ID)
r = http.request('GET', url)
try:
data = json.loads(r.data)
except:
print('Decode error, retry again')
continue
if duplicates==1:
print('\nThere are duplicates in the result. Other people are maybe running. \nPlease try again later.')
break
if data["response"]["status"] != 200:
print("\nYour request was denied, retry in 1 hour")
time.sleep(3600)
continue
# insert_element=[]
# break
for element in data["messages"]:
mid.append(element["id"])
symbol_list=[]
for s in element['symbols']:
symbol_list.append(s['symbol'])
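# 'Bullish' maps to +1 and 'Bearish' to -1; posts without a sentiment tag fail the lookup in the
# try block below and are stored with Sentiment 0 via the except branch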
try:
insert_element = {"ID": element["id"], "TimeStamp": element["created_at"], "User": element["user"]["username"], "Content": element["body"],"Sentiment": (element["entities"]["sentiment"]["basic"]=="Bullish")*2-1,'Symbols':symbol_list}
except:
insert_element = {"ID": element["id"], "TimeStamp": element["created_at"], "User": element["user"]["username"], "Content": element["body"],"Sentiment": 0,'Symbols':symbol_list}
try:
result = db['{}'.format(symbol)].insert_one(insert_element)
except:
duplicates=1
break
return insert_element
# %% Execution of the function
symbol='BTC.X'
rate_limit=2000
last_ele=get_stwits_data(symbol,rate_limit)
# %% #Creating custom lexicon
#%% Finding the time interval of the database
client = MongoClient('mongodb+srv://Group_fintech:<EMAIL>/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
#Getting the minimum id
for q in db['BTC.X'].aggregate([
{ "$group": {
"_id": None,
"min": { "$min": "$ID" }
}}
]):
minID=q['min']
#Getting the timestamp from the min ID
for post in db['BTC.X'].find({'ID':minID}):
start_time=post['TimeStamp']
#Getting the max id
for q in db['BTC.X'].aggregate([
{ "$group": {
"_id": None,
"max": { "$max": "$ID" }
}}
]):
maxID=q['max']
#Getting the timestamp from the max ID
for post in db['BTC.X'].find({'ID':maxID}):
end_time=post['TimeStamp']
start_time=dt.strptime(start_time,'%Y-%m-%dT%H:%M:%SZ')
end_time=dt.strptime(end_time,'%Y-%m-%dT%H:%M:%SZ')
period=np.arange(dt(start_time.year,start_time.month,start_time.day),dt(end_time.year,end_time.month,end_time.day),timedelta(days=1))
#%% Creating dictionary
# Creating functions to count word frequencies in positive and negative posts
porter = nltk.PorterStemmer()
def create_positive_dictionary_by_day(day):
dictionary=pd.DataFrame(columns=['Word','Frequency'])
client = MongoClient('mongodb+srv://Group_fintech:<EMAIL>/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
sentimental=1
for documents in db['BTC.X'].find({'Sentiment':sentimental,"TimeStamp":{"$regex": u"{}-{:02d}-{:02d}".format(day.astype(object).year,day.astype(object).month,day.astype(object).day)}}):
word_list=re.findall(r"[\w']+|[.,!?;$]", documents['Content'])
word_list = [porter.stem(t) for t in word_list]
for word in word_list:
if word in dictionary['Word'].tolist():
frq=copy.copy(dictionary.iloc[dictionary.index[dictionary['Word']==word].tolist()[0]][1])+1
dictionary.at[dictionary.index[dictionary['Word']==word].tolist()[0],'Frequency']=frq
else:
dictionary=dictionary.append({'Word': word ,'Frequency':1}, ignore_index=True)
return dictionary
def create_negative_dictionary_by_day(day):
dictionary=pd.DataFrame(columns=['Word','Frequency'])
client = MongoClient('mongodb+srv://Group_fintech:<EMAIL>/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
sentimental=-1
for documents in db['BTC.X'].find({'Sentiment':sentimental,"TimeStamp":{"$regex": u"{}-{:02d}-{:02d}".format(day.astype(object).year,day.astype(object).month,day.astype(object).day)}}):
word_list=re.findall(r"[\w']+|[.,!?;$]", documents['Content'])
word_list = [porter.stem(t) for t in word_list]
for word in word_list:
if word in dictionary['Word'].tolist():
frq=copy.copy(dictionary.iloc[dictionary.index[dictionary['Word']==word].tolist()[0]][1])+1
dictionary.at[dictionary.index[dictionary['Word']==word].tolist()[0],'Frequency']=frq
else:
dictionary=dictionary.append({'Word': word ,'Frequency':1}, ignore_index=True)
return dictionary
from multiprocessing import Pool
pool = Pool()
#creating positive dictionary
df=list(tqdm(pool.imap(create_positive_dictionary_by_day, period), total=len(period)))
positive_dictionary=df[0].set_index('Word')
for i in tqdm(range(1,len(df))):
positive_dictionary=positive_dictionary.add(df[i].set_index('Word'), fill_value=0)
#creating negative dictionary
df=list(tqdm(pool.imap(create_negative_dictionary_by_day, period), total=len(period)))
negative_dictionary=df[0].set_index('Word')
for i in tqdm(range(1,len(df))):
negative_dictionary=negative_dictionary.add(df[i].set_index('Word'), fill_value=0)
negative_dictionary=negative_dictionary.sort_values('Frequency',ascending=False)
positive_dictionary=positive_dictionary.sort_values('Frequency',ascending=False)
positive_dictionary.columns=['Positive Freq']
negative_dictionary.columns=['Negative Freq']
positive_dictionary=positive_dictionary/db['BTC.X'].count_documents({'Sentiment':1})
negative_dictionary=negative_dictionary/db['BTC.X'].count_documents({'Sentiment':-1})
#Combining both dictionary
final_dict=positive_dictionary.add(negative_dictionary, fill_value=0).sort_values('Positive Freq',ascending=False)
final_dict['Pos over Neg']=final_dict['Positive Freq']/final_dict['Negative Freq']
#Removing stopwords from the dictionary
nltk.download('stopwords')
stop_words = set(stopwords.words('english'))
final_dict=final_dict.reset_index()
for i in final_dict['Word']:
if i in stop_words:
final_dict=final_dict[final_dict['Word']!=i]
#Removing words below the threshold
final_dict=final_dict.fillna(value=0)
final_dict=final_dict[(final_dict['Negative Freq']>0.0005) | (final_dict['Positive Freq']>0.0005)]
final_dict.fillna(value=0).sort_values('Pos over Neg',ascending=False).to_csv('Simple_Dictionary2.csv')
#%% Creating positive and negative word list from the lexicon
os.chdir('H:/Documents/Alternance/Project/')
lexicon=pd.read_csv('Simple_Dictionary2.csv')
lexicon=lexicon[['Word','Classification']]
neg_list=list(lexicon[lexicon['Classification']==-1]['Word'])
pos_list=list(lexicon[lexicon['Classification']==1]['Word'])
# Update lexicon result to the database
import nltk
porter = nltk.PorterStemmer()
import re
import copy
client = MongoClient('mongodb+srv://Group_fintech:<EMAIL>/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
for i in range(32):
for documents in tqdm(db['BTC.X'].find({'Custom_Lexicon_Sentiment':{ "$exists" : False }},limit=10000)):
if documents['Sentiment']==0:
score=0
word_list=re.findall(r"[\w']+|[.,!?;$]", documents['Content'])
word_list = [porter.stem(t) for t in word_list]
for word in word_list:
if word in neg_list:
score+=-1
if word in pos_list:
score+=1
if score >0:
senti=1
elif score <0:
senti=-1
else:
senti=0
db['BTC.X'].update_one({'_id':documents['_id']},{'$set':{'Custom_Lexicon_Sentiment':senti}})
else:
db['BTC.X'].update_one({'_id':documents['_id']},{'$set':{'Custom_Lexicon_Sentiment':documents['Sentiment']}})
#%% Creating positive and negative word list from the teacher lexicon
os.chdir('H:/Documents/Alternance/Project/')
lexicon=pd.read_csv('l2_lexicon.csv',sep=';')
neg_list=list(lexicon[lexicon['sentiment']=='negative']['keyword'])
pos_list=list(lexicon[lexicon['sentiment']=='positive']['keyword'])
# Update lexicon result to the database
pattern = r'''(?x) # set flag to allow verbose regexps
(?:[A-Z]\.)+ # abbreviations, e.g. U.S.A.
| \w+(?:-\w+)* # words with optional internal hyphens
| \$?\w+(?:\.\w+)?%? # tickers
| \@?\w+(?:\.\w+)?%? # users
| \.\.\. # ellipsis
| [][.,;"'?!():_`-] # these are separate tokens; includes ], [
'''
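# Quick illustration of the tokenizer on a made-up post (not from the database):
sample_tokens = nltk.regexp_tokenize("$BTC.X to the moon!!! @trader_1", pattern)
# sample_tokens -> ['$BTC.X', 'to', 'the', 'moon', '!', '!', '!', '@trader_1']
# i.e. cashtags and usernames stay single tokens while punctuation is split off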
client = MongoClient('mongodb+srv://Group_fintech:<EMAIL>/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
for i in range(32):
cursor=db['BTC.X'].find({'Prof_Lexicon_Sentiment':{ "$exists" : False }},limit=10000)
for documents in tqdm(cursor):
if documents['Sentiment']==0:
score=0
word_list=nltk.regexp_tokenize(documents['Content'], pattern)
# word_list=re.findall(r"[\w']+|[.,!?;$]", documents['Content'])
# word_list = [porter.stem(t) for t in word_list]
for word in word_list:
if word in neg_list:
score+=-1
if word in pos_list:
score+=1
if score >0:
senti=1
elif score <0:
senti=-1
else:
senti=0
db['BTC.X'].update_one({'_id':documents['_id']},{'$set':{'Prof_Lexicon_Sentiment':senti}})
else:
db['BTC.X'].update_one({'_id':documents['_id']},{'$set':{'Prof_Lexicon_Sentiment':documents['Sentiment']}})
#%% Adding Vader analysis value to the database
# Connecting to the database
client = MongoClient('mongodb+srv://Group_fintech:<EMAIL>/test?retryWrites=true')
db=client['SorbonneBigData']
collection= db['BTC.X']
# Applying Vader
analyser = SentimentIntensityAnalyzer()
for i in tqdm(range(31)):
for documents in collection.find({'Vader_sentiment2':{ "$exists" : False }},limit=10000):
doc_id = documents['_id']
Vaderdoc = analyser.polarity_scores(documents['Content'])
Vaderdoc= Vaderdoc.get('compound')
if Vaderdoc> 0.33:
Sentiment_vader=1
elif Vaderdoc< -0.33:
Sentiment_vader=-1
else:
Sentiment_vader=0
print (Sentiment_vader)
#Insert Vader value to the database
db['BTC.X'].update_one({'_id':documents['_id']},{'$set':{'Vader_sentiment2':Sentiment_vader}})
db['BTC.X'].update_one({'_id':documents['_id']},{'$set':{'Vader_sentiment':Vaderdoc}})
#%% Adding Textblob analysis value to the database
# Connecting to the database
client = MongoClient('mongodb+srv://Group_fintech:<EMAIL>/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
collection= db['BTC.X']
# Applying TextBlob
analyser = SentimentIntensityAnalyzer()
#Vader=[] 54452
for i in tqdm(range(31)):
for documents in collection.find({'Textblob_Sentiment2':{'$exists':False}},limit=10000):
doc_id = documents['_id']
pola = TextBlob(documents['Content']).sentiment.polarity
# Vader.append(Vaderdoc)
if pola> 0.33:
Sentiment_txt=1
elif pola< -0.33:
Sentiment_txt=-1
else:
Sentiment_txt=0
db['BTC.X'].update_one({'_id':documents['_id']},{'$set':{'Textblob_Sentiment2':Sentiment_txt}})
db['BTC.X'].update_one({'_id':documents['_id']},{'$set':{'Textblob_Sentiment':pola}})
#%% Econometric testing
#%% Import BTC price time series
client = MongoClient('mongodb+srv://Group_fintech:[email protected]/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
price=[]
for documents in db['BTC.Price'].find({}):
price.append([documents['Time'],documents['Price']])
price=pd.DataFrame(price,columns=['Time','Price'])
price['Time'] = pd.to_datetime(price['Time'])
"""Module containing implementations for various psychological questionnaires.
Each function at least expects a dataframe containing the required columns in a specified order
(see function documentations for specifics) to be passed to the ``data`` argument.
If ``data`` is a dataframe that contains more than the required two columns, e.g., if the complete questionnaire
dataframe is passed, the required columns can be sliced by specifying them in the ``columns`` parameter.
Also, if the columns in the dataframe are not in the correct order, the order can be specified
using the ``columns`` parameter.
Some questionnaire functions also allow the possibility to only compute certain subscales. To do this, a dictionary
with subscale names as keys and the corresponding column names (as list of str) or column indices
(as list of ints) can be passed to the ``subscales`` parameter.
.. warning::
Column indices in ``subscales`` are assumed to start at 1 (instead of 0) to avoid confusion with
questionnaire item columns, which typically also start with index 1!
"""
from typing import Dict, Optional, Sequence, Union
import numpy as np
import pandas as pd
from typing_extensions import Literal
from biopsykit.questionnaires.utils import (
_compute_questionnaire_subscales,
_invert_subscales,
bin_scale,
invert,
to_idx,
)
from biopsykit.utils._datatype_validation_helper import _assert_has_columns, _assert_num_columns, _assert_value_range
from biopsykit.utils.exceptions import ValueRangeError
from biopsykit.utils.time import time_to_datetime
def psqi(data: pd.DataFrame, columns: Optional[Union[Sequence[str], pd.Index]] = None) -> pd.DataFrame:
"""Compute the **Pittsburgh Sleep Quality Index (PSQI)**.
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
.. warning::
The PSQI has a slightly different score name format than other questionnaires since it has several
subquestions (denoted "a", "b", ..., "j") for question 5, as well as one free-text question. When using this
function to compute the PSQI, make sure your column names adhere to the following naming convention for
the function to work properly:
* Questions 1 - 10 (except Question 5): suffix "01", "02", ..., "10"
* Subquestions of Question 5: suffix "05a", "05b", ..., "05j"
* Free-text subquestion of Question 5: suffix "05j_text"
Returns
-------
:class:`~pandas.DataFrame`
PSQI score
Raises
------
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns do not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
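Examples
--------
Illustrative call; ``data`` and the column names are placeholders that follow the naming convention above:
>>> from biopsykit.questionnaires import psqi
>>> psqi_result = psqi(data, columns=data.filter(like="PSQI").columns)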
"""
score_name = "PSQI"
score_range = [0, 3]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
data = data.loc[:, columns]
# Bedtime Start: Question 1
bed_time_start = data.filter(regex="01").iloc[:, 0]
bed_time_start = time_to_datetime(bed_time_start)
# Bedtime End: Question 3
bed_time_end = data.filter(regex="03").iloc[:, 0]
bed_time_end = time_to_datetime(bed_time_end)
# Compute Hours in Bed (needed for habitual sleep efficiency)
bed_time_diff = bed_time_end - bed_time_start
hours_bed = ((bed_time_diff.view(np.int64) / 1e9) / 3600) % 24
# Sleep Duration: Question 4
sd = data.filter(regex="04").iloc[:, 0]
# Sleep Latency: Question 2
sl = data.filter(regex="02").iloc[:, 0]
# Bin scale: 0-15 = 0, 16-30 = 1, 31-60 = 2, >=61 = 3
bin_scale(sl, bins=[0, 15, 30, 60], last_max=True, inplace=True)
data = data.drop(columns=data.filter(regex="0[1234]"))
data = data.drop(columns=data.filter(regex="05j_text"))
_assert_value_range(data, score_range)
# Subjective Sleep Quality
ssq = data.filter(regex="06").iloc[:, 0]
# Sleep Disturbances: Use all questions from 5, except 05a and 05j_text
sdist = data.filter(regex="05").iloc[:, :]
# 05j_text does not need to be dropped since it was already excluded previously
sdist = sdist.drop(columns=sdist.filter(regex="05a")).sum(axis=1)
# Bin scale: 0 = 0, 1-9 = 1, 10-18 = 2, 19-27 = 3
sdist = bin_scale(sdist, bins=[-1, 0, 9, 18, 27])
# Use of Sleep Medication: Use question 7
sm = data.filter(regex="07").iloc[:, 0]
# Daytime Dysfunction: Sum questions 8 and 9
dd = data.filter(regex="0[89]").sum(axis=1)
# Bin scale: 0 = 0, 1-2 = 1, 3-4 = 2, 5-6 = 3
dd = bin_scale(dd, bins=[-1, 0, 2, 4], inplace=False, last_max=True)
# Sleep Latency: Question 2 and 5a, sum them
sl = sl + data.filter(regex="05a").iloc[:, 0]
# Bin scale: 0 = 0, 1-2 = 1, 3-4 = 2, 5-6 = 3
sl = bin_scale(sl, bins=[-1, 0, 2, 4, 6])
# Habitual Sleep Efficiency
hse = ((sd / hours_bed) * 100.0).round().astype(int)
# Bin scale: >= 85% = 0, 75%-84% = 1, 65%-74% = 2, < 65% = 3
hse = invert(bin_scale(hse, bins=[0, 64, 74, 84], last_max=True), score_range=score_range)
# Sleep Duration: Bin scale: > 7 = 0, 6-7 = 1, 5-6 = 2, < 5 = 3
sd = invert(bin_scale(sd, bins=[0, 4.9, 6, 7], last_max=True), score_range=score_range)
psqi_data = {
score_name + "_SubjectiveSleepQuality": ssq,
score_name + "_SleepLatency": sl,
score_name + "_SleepDuration": sd,
score_name + "_HabitualSleepEfficiency": hse,
score_name + "_SleepDisturbances": sdist,
score_name + "_UseSleepMedication": sm,
score_name + "_DaytimeDysfunction": dd,
}
data = pd.DataFrame(psqi_data, index=data.index)
data[score_name + "_TotalIndex"] = data.sum(axis=1)
return data
def mves(data: pd.DataFrame, columns: Optional[Union[Sequence[str], pd.Index]] = None) -> pd.DataFrame:
"""Compute the **Maastricht Vital Exhaustion Scale (MVES)**.
The MVES uses 23 items to assess the concept of Vital Exhaustion (VE), which is characterized by feelings of
excessive fatigue, lack of energy, irritability, and feelings of demoralization. Higher scores indicate greater
vital exhaustion.
.. note::
This implementation assumes a score range of [0, 2].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
Returns
-------
:class:`~pandas.DataFrame`
MVES score
Raises
------
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns do not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
References
----------
<NAME>., <NAME>., & <NAME>. (1987). A questionnaire to assess premonitory symptoms of myocardial
infarction. *International Journal of Cardiology*, 17(1), 15–24. https://doi.org/10.1016/0167-5273(87)90029-5
"""
score_name = "MVES"
score_range = [0, 2]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
_assert_num_columns(data, 23)
_assert_value_range(data, score_range)
# Reverse scores 9, 14
data = invert(data, cols=to_idx([9, 14]), score_range=score_range)
return pd.DataFrame(data.sum(axis=1), columns=[score_name])
def tics_l(
data: pd.DataFrame,
columns: Optional[Union[Sequence[str], pd.Index]] = None,
subscales: Optional[Dict[str, Sequence[Union[str, int]]]] = None,
) -> pd.DataFrame:
"""Compute the **Trier Inventory for Chronic Stress (Long Version) (TICS_L)**.
The TICS assesses frequency of various types of stressful experiences in the past 3 months.
It consists of the subscales with the item indices (count-by-one, i.e., the first question has the index 1!):
* ``Work Overload``: [50, 38, 44, 54, 17, 4, 27, 1]
* ``Social Overload``: [39, 28, 49, 19, 7, 57]
* ``Excessive Demands at Work``: [55, 24, 20, 35, 47, 3]
* ``Lack of Social Recognition``: [31, 18, 46, 2]
* ``Work Discontent``: [21, 53, 10, 48, 41, 13, 37, 5]
* ``Social Tension``: [26, 15, 45, 52, 6, 33]
* ``Performance Pressure at Work``: [23, 43, 32, 22, 12, 14, 8, 40, 30]
* ``Performance Pressure in Social Interactions``: [6, 15, 22]
* ``Social Isolation``: [42, 51, 34, 56, 11, 29]
* ``Worry Propensity``: [36, 25, 16, 9]
.. note::
This implementation assumes a score range of [0, 4].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
.. warning::
Column indices in ``subscales`` are assumed to start at 1 (instead of 0) to avoid confusion with
questionnaire item columns, which typically also start with index 1!
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
subscales : dict, optional
A dictionary with subscale names (keys) and column names or column indices (count-by-1) (values)
if only specific subscales should be computed.
Returns
-------
:class:`~pandas.DataFrame`
TICS_L score
Raises
------
ValueError
if ``subscales`` is supplied and dict values are something else than a list of strings or a list of ints
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns do not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
Examples
--------
>>> from biopsykit.questionnaires import tics_l
>>> # compute only a subset of subscales; questionnaire items additionally have custom indices
>>> subscales = {
>>> 'WorkOverload': [1, 2, 3],
>>> 'SocialOverload': [4, 5, 6],
>>> }
>>> tics_l_result = tics_l(data, subscales=subscales)
References
----------
<NAME>., <NAME>., & <NAME>. (2004). Trierer Inventar zum chronischen Stress: TICS. *Hogrefe*.
"""
score_name = "TICS_L"
score_range = [0, 4]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
if subscales is None:
_assert_num_columns(data, 57)
subscales = {
"WorkOverload": [1, 4, 17, 27, 38, 44, 50, 54], # Arbeitsüberlastung
"SocialOverload": [7, 19, 28, 39, 49, 57], # Soziale Überlastung
"PressureToPerform": [8, 12, 14, 22, 23, 30, 32, 40, 43], # Erfolgsdruck
"WorkDiscontent": [5, 10, 13, 21, 37, 41, 48, 53], # Unzufriedenheit mit der Arbeit
"DemandsWork": [3, 20, 24, 35, 47, 55], # Überforderung bei der Arbeit
"LackSocialRec": [2, 18, 31, 46], # Mangel an sozialer Anerkennung
"SocialTension": [6, 15, 26, 33, 45, 52], # Soziale Spannungen
"SocialIsolation": [11, 29, 34, 42, 51, 56], # Soziale Isolation
"ChronicWorry": [9, 16, 25, 36], # Chronische Besorgnis
}
_assert_value_range(data, score_range)
tics_data = _compute_questionnaire_subscales(data, score_name, subscales)
if len(data.columns) == 57:
# compute total score if all columns are present
tics_data[score_name] = data.sum(axis=1)
return pd.DataFrame(tics_data, index=data.index)
def tics_s(
data: pd.DataFrame,
columns: Optional[Union[Sequence[str], pd.Index]] = None,
subscales: Optional[Dict[str, Sequence[Union[str, int]]]] = None,
) -> pd.DataFrame:
"""Compute the **Trier Inventory for Chronic Stress (Short Version) (TICS_S)**.
The TICS assesses frequency of various types of stressful experiences in the past 3 months.
It consists of the subscales (the name in the brackets indicate the name in the returned dataframe),
with the item indices (count-by-one, i.e., the first question has the index 1!):
* ``Work Overload``: [1, 3, 21]
* ``Social Overload``: [11, 18, 28]
* ``Excessive Demands at Work``: [12, 16, 27]
* ``Lack of Social Recognition``: [2, 20, 23]
* ``Work Discontent``: [8, 13, 24]
* ``Social Tension``: [4, 9, 26]
* ``Performance Pressure at Work``: [5, 14, 29]
* ``Performance Pressure in Social Interactions``: [6, 15, 22]
* ``Social Isolation``: [19, 25, 30]
* ``Worry Propensity``: [7, 10, 17]
.. note::
This implementation assumes a score range of [0, 4].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
.. warning::
Column indices in ``subscales`` are assumed to start at 1 (instead of 0) to avoid confusion with
questionnaire item columns, which typically also start with index 1!
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
subscales : dict, optional
A dictionary with subscale names (keys) and column names or column indices (count-by-1) (values)
if only specific subscales should be computed.
Returns
-------
:class:`~pandas.DataFrame`
TICS_S score
Raises
------
ValueError
if ``subscales`` is supplied and dict values are something else than a list of strings or a list of ints
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns do not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
Examples
--------
>>> from biopsykit.questionnaires import tics_s
>>> # compute only a subset of subscales; questionnaire items additionally have custom indices
>>> subscales = {
>>> 'WorkOverload': [1, 2, 3],
>>> 'SocialOverload': [4, 5, 6],
>>> }
>>> tics_s_result = tics_s(data, subscales=subscales)
References
----------
<NAME>., <NAME>., & <NAME>. (2004). Trierer Inventar zum chronischen Stress: TICS. *Hogrefe*.
"""
score_name = "TICS_S"
score_range = [0, 4]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
if subscales is None:
_assert_num_columns(data, 30)
subscales = {
"WorkOverload": [1, 3, 21],
"SocialOverload": [11, 18, 28],
"PressureToPerform": [5, 14, 29],
"WorkDiscontent": [8, 13, 24],
"DemandsWork": [12, 16, 27],
"PressureSocial": [6, 15, 22],
"LackSocialRec": [2, 20, 23],
"SocialTension": [4, 9, 26],
"SocialIsolation": [19, 25, 30],
"ChronicWorry": [7, 10, 17],
}
_assert_value_range(data, score_range)
tics_data = _compute_questionnaire_subscales(data, score_name, subscales)
if len(data.columns) == 30:
# compute total score if all columns are present
tics_data[score_name] = data.sum(axis=1)
return pd.DataFrame(tics_data, index=data.index)
def pss(
data: pd.DataFrame,
columns: Optional[Union[Sequence[str], pd.Index]] = None,
subscales: Optional[Dict[str, Sequence[Union[str, int]]]] = None,
) -> pd.DataFrame:
"""Compute the **Perceived Stress Scale (PSS)**.
The PSS is a widely used self-report questionnaire with adequate reliability and validity asking
about how stressful a person has found his/her life during the previous month.
The PSS consists of the subscales with the item indices
(count-by-one, i.e., the first question has the index 1!):
* Perceived Helplessness (Hilflosigkeit - ``Helpless``): [1, 2, 3, 6, 9, 10]
* Perceived Self-Efficacy (Selbstwirksamkeit - ``SelfEff``): [4, 5, 7, 8]
.. note::
This implementation assumes a score range of [0, 4].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
subscales : dict, optional
A dictionary with subscale names (keys) and column names or column indices (count-by-1) (values)
if only specific subscales should be computed.
Returns
-------
:class:`~pandas.DataFrame`
PSS score
Raises
------
ValueError
if ``subscales`` is supplied and dict values are something else than a list of strings or a list of ints
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns do not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
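Examples
--------
Illustrative call; ``data`` is assumed to contain all 10 PSS items:
>>> from biopsykit.questionnaires import pss
>>> # compute only the Perceived Helplessness subscale (item indices are count-by-one)
>>> pss_result = pss(data, subscales={"Helpless": [1, 2, 3, 6, 9, 10]})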
References
----------
<NAME>., <NAME>., & <NAME>. (1983). A Global Measure of Perceived Stress.
*Journal of Health and Social Behavior*, 24(4), 385. https://doi.org/10.2307/2136404
"""
score_name = "PSS"
score_range = [0, 4]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
if subscales is None:
_assert_num_columns(data, 10)
subscales = {"Helpless": [1, 2, 3, 6, 9, 10], "SelfEff": [4, 5, 7, 8]}
_assert_value_range(data, score_range)
# Reverse scores 4, 5, 7, 8
data = invert(data, cols=to_idx([4, 5, 7, 8]), score_range=score_range)
pss_data = _compute_questionnaire_subscales(data, score_name, subscales)
pss_data["{}_Total".format(score_name)] = data.sum(axis=1)
return pd.DataFrame(pss_data, index=data.index)
def cesd(data: pd.DataFrame, columns: Optional[Union[Sequence[str], pd.Index]] = None) -> pd.DataFrame:
"""Compute the **Center for Epidemiological Studies Depression Scale (CES-D)**.
The CES-D asks about depressive symptoms experienced over the past week.
Higher scores indicate greater depressive symptoms.
.. note::
This implementation assumes a score range of [0, 3].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
Returns
-------
:class:`~pandas.DataFrame`
CES-D score
Raises
------
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns do not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
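Examples
--------
Illustrative call; the column names are hypothetical:
>>> from biopsykit.questionnaires import cesd
>>> # slice the 20 CES-D items out of a larger questionnaire dataframe by column name
>>> cesd_result = cesd(data, columns=["CESD_{:02d}".format(i) for i in range(1, 21)])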
References
----------
<NAME>. (1977). The CES-D Scale: A Self-Report Depression Scale for Research in the General Population.
Applied Psychological Measurement, 1(3), 385–401. https://doi.org/10.1177/014662167700100306
"""
score_name = "CESD"
score_range = [0, 3]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
_assert_num_columns(data, 20)
_assert_value_range(data, score_range)
# Reverse scores 4, 8, 12, 16
data = invert(data, cols=to_idx([4, 8, 12, 16]), score_range=score_range)
return pd.DataFrame(data.sum(axis=1), columns=[score_name])
def ads_l(data: pd.DataFrame, columns: Optional[Union[Sequence[str], pd.Index]] = None) -> pd.DataFrame:
"""Compute the **Allgemeine Depressionsskala - Langform (ADS-L)** (General Depression Scale – Long Version).
The General Depression Scale (ADS) is a self-report instrument that can be used to assess the impairment caused by
depressive symptoms within the last week. Emotional, motivational, cognitive, somatic, and
motor/interactional complaints are assessed.
.. note::
This implementation assumes a score range of [0, 3].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
Returns
-------
:class:`~pandas.DataFrame`
ADS-L score
Raises
------
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns do not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
References
----------
<NAME>., & <NAME>. (2001). Allgemeine Depressions-Skala (ADS). Normierung an Minderjährigen und
Erweiterung zur Erfassung manischer Symptome (ADMS). Diagnostica.
"""
score_name = "ADS_L"
score_range = [0, 3]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
_assert_num_columns(data, 20)
_assert_value_range(data, score_range)
# Reverse scores 4, 8, 12, 16
data = invert(data, cols=to_idx([4, 8, 12, 16]), score_range=score_range)
return pd.DataFrame(data.sum(axis=1), columns=[score_name])
def ghq(data: pd.DataFrame, columns: Optional[Union[Sequence[str], pd.Index]] = None) -> pd.DataFrame:
"""Compute the **General Health Questionnaire (GHQ)**.
The GHQ-12 is a widely used tool for detecting psychological and mental health problems and serves as a screening
tool for excluding psychological and psychiatric morbidity. Higher scores indicate *lower* health.
A summed score above 4 is considered an indicator of psychological morbidity.
.. note::
This implementation assumes a score range of [0, 3].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
Returns
-------
:class:`~pandas.DataFrame`
GHQ score
Raises
------
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
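Examples
--------
Illustrative call; ``data`` is assumed to contain the 12 GHQ items:
>>> from biopsykit.questionnaires import ghq
>>> ghq_result = ghq(data)
>>> # flag participants whose summed score exceeds 4 (see note on psychological morbidity above)
>>> flagged = ghq_result["GHQ"] > 4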
References
----------
<NAME>. (1972). The detection of psychiatric illness by questionnaire. *Maudsley monograph*, 21.
"""
score_name = "GHQ"
score_range = [0, 3]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
_assert_num_columns(data, 12)
_assert_value_range(data, score_range)
# Reverse scores 1, 3, 4, 7, 8, 12
data = invert(data, cols=to_idx([1, 3, 4, 7, 8, 12]), score_range=score_range)
return pd.DataFrame(data.sum(axis=1), columns=[score_name])
def hads(
data: pd.DataFrame,
columns: Optional[Union[Sequence[str], pd.Index]] = None,
subscales: Optional[Dict[str, Sequence[Union[str, int]]]] = None,
) -> pd.DataFrame:
"""Compute the **Hospital Anxiety and Depression Scale (HADS)**.
The HADS is a brief and widely used instrument to measure psychological distress in patients
and in the general population. It has two subscales: anxiety and depression.
Higher scores indicate greater distress.
It consists of the subscales with the item indices (count-by-one, i.e., the first question has the index 1!):
* ``Anxiety``: [1, 3, 5, 7, 9, 11, 13]
* ``Depression``: [2, 4, 6, 8, 10, 12, 14]
.. note::
This implementation assumes a score range of [0, 3].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
subscales : dict, optional
A dictionary with subscale names (keys) and column names or column indices (count-by-1) (values)
if only specific subscales should be computed.
Returns
-------
:class:`~pandas.DataFrame`
HADS score
Raises
------
ValueError
if ``subscales`` is supplied and dict values are something else than a list of strings or a list of ints
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns do not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
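Examples
--------
Illustrative call; the column names are hypothetical:
>>> from biopsykit.questionnaires import hads
>>> # compute both subscales and the total score from all 14 items
>>> hads_result = hads(data, columns=["HADS_{:02d}".format(i) for i in range(1, 15)])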
References
----------
<NAME>., & <NAME>. (1983). The hospital anxiety and depression scale.
*Acta psychiatrica scandinavica*, 67(6), 361-370.
"""
score_name = "HADS"
score_range = [0, 3]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
if subscales is None:
_assert_num_columns(data, 14)
subscales = {
"Anxiety": [1, 3, 5, 7, 9, 11, 13],
"Depression": [2, 4, 6, 8, 10, 12, 14],
}
_assert_value_range(data, score_range)
# Reverse scores 2, 4, 6, 7, 12, 14
# (numbers in the dictionary correspond to the *positions* of the items to be reversed in the item list specified
# by the subscale dict)
data = _invert_subscales(
data, subscales=subscales, idx_dict={"Anxiety": [3], "Depression": [0, 1, 2, 5, 6]}, score_range=score_range
)
hads_data = _compute_questionnaire_subscales(data, score_name, subscales)
if len(data.columns) == 14:
# compute total score if all columns are present
hads_data[score_name] = data.sum(axis=1)
return pd.DataFrame(hads_data, index=data.index)
def type_d(
data: pd.DataFrame,
columns: Optional[Union[Sequence[str], pd.Index]] = None,
subscales: Optional[Dict[str, Sequence[int]]] = None,
) -> pd.DataFrame:
"""Compute the **Type D Personality Scale**.
Type D personality is a personality trait characterized by negative affectivity (NA) and social
inhibition (SI). Individuals who are high in both NA and SI have a *distressed* or Type D personality.
It consists of the subscales, with the item indices (count-by-one, i.e., the first question has the index 1!):
* ``Negative Affect``: [2, 4, 5, 7, 9, 12, 13]
* ``Social Inhibition``: [1, 3, 6, 8, 10, 11, 14]
.. note::
This implementation assumes a score range of [0, 4].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
.. warning::
Column indices in ``subscales`` are assumed to start at 1 (instead of 0) to avoid confusion with
questionnaire item columns, which typically also start with index 1!
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
subscales : dict, optional
A dictionary with subscale names (keys) and column names or column indices (count-by-1) (values)
if only specific subscales should be computed.
Returns
-------
:class:`~pandas.DataFrame`
TypeD score
Raises
------
ValueError
if ``subscales`` is supplied and dict values are something else than a list of strings or a list of ints
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
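Examples
--------
Illustrative calls; ``data`` is assumed to contain the 14 DS14 items:
>>> from biopsykit.questionnaires import type_d
>>> # all 14 items present: returns both subscales plus the Type_D total score
>>> type_d_result = type_d(data)
>>> # compute only the Negative Affectivity subscale (item indices are count-by-one)
>>> type_d_result = type_d(data, subscales={"NegativeAffect": [2, 4, 5, 7, 9, 12, 13]})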
References
----------
<NAME>. (2005). DS14: standard assessment of negative affectivity, social inhibition, and Type D personality.
*Psychosomatic medicine*, 67(1), 89-97.
"""
score_name = "Type_D"
score_range = [0, 4]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
if subscales is None:
_assert_num_columns(data, 14)
subscales = {
"NegativeAffect": [2, 4, 5, 7, 9, 12, 13],
"SocialInhibition": [1, 3, 6, 8, 10, 11, 14],
}
_assert_value_range(data, score_range)
# Reverse scores 1, 3
# (numbers in the dictionary correspond to the *positions* of the items to be reversed in the item list specified
# by the subscale dict)
data = _invert_subscales(data, subscales=subscales, idx_dict={"SocialInhibition": [0, 1]}, score_range=score_range)
ds_data = _compute_questionnaire_subscales(data, score_name, subscales)
if len(data.columns) == 14:
# compute total score if all columns are present
ds_data[score_name] = data.sum(axis=1)
return pd.DataFrame(ds_data, index=data.index)
def rse(data: pd.DataFrame, columns: Optional[Union[Sequence[str], pd.Index]] = None) -> pd.DataFrame:
"""Compute the **Rosenberg Self-Esteem Inventory**.
The RSE is the most frequently used measure of global self-esteem. Higher scores indicate greater self-esteem.
.. note::
This implementation assumes a score range of [0, 3].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
Returns
-------
:class:`~pandas.DataFrame`
RSE score
Raises
------
ValueError
if ``subscales`` is supplied and dict values are something else than a list of strings or a list of ints
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
References
----------
<NAME>. (1965). Society and the Adolescent Self-Image. *Princeton University Press*, Princeton, NJ.
"""
score_name = "RSE"
score_range = [0, 3]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
_assert_num_columns(data, 10)
_assert_value_range(data, score_range)
# Reverse scores 2, 5, 6, 8, 9
data = invert(data, cols=to_idx([2, 5, 6, 8, 9]), score_range=score_range)
return pd.DataFrame(data.sum(axis=1), columns=[score_name])
def scs(
data: pd.DataFrame,
columns: Optional[Union[Sequence[str], pd.Index]] = None,
subscales: Optional[Dict[str, Sequence[int]]] = None,
) -> pd.DataFrame:
"""Compute the **Self-Compassion Scale (SCS)**.
The Self-Compassion Scale measures the tendency to be compassionate rather than critical
toward the self in difficult times. It is typically assessed as a composite but can be broken down
into subscales. Higher scores indicate greater self-compassion.
It consists of the subscales, with the item indices (count-by-one, i.e., the first question has the index 1!):
* ``SelfKindness``: [5, 12, 19, 23, 26]
* ``SelfJudgment``: [1, 8, 11, 16, 21]
* ``CommonHumanity``: [3, 7, 10, 15]
* ``Isolation``: [4, 13, 18, 25]
* ``Mindfulness``: [9, 14, 17, 22]
* ``OverIdentified``: [2, 6, 20, 24]
.. note::
This implementation assumes a score range of [1, 5].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
.. warning::
Column indices in ``subscales`` are assumed to start at 1 (instead of 0) to avoid confusion with
questionnaire item columns, which typically also start with index 1!
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
subscales : dict, optional
A dictionary with subscale names (keys) and column names or column indices (count-by-1) (values)
if only specific subscales should be computed.
Returns
-------
:class:`~pandas.DataFrame`
SCS score
Raises
------
ValueError
if ``subscales`` is supplied and dict values are something else than a list of strings or a list of ints
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
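Examples
--------
Illustrative call; ``data`` is assumed to contain all 26 SCS items:
>>> from biopsykit.questionnaires import scs
>>> scs_result = scs(data)
>>> # subscale and total scores are mean scores (not sums), so they stay within the item range [1, 5]
>>> scs_total = scs_result["SCS"]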
References
----------
<NAME>. (2003). The development and validation of a scale to measure self-compassion.
*Self and identity*, 2(3), 223-250.
https://www.academia.edu/2040459
"""
score_name = "SCS"
score_range = [1, 5]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
if subscales is None:
_assert_num_columns(data, 26)
subscales = {
"SelfKindness": [5, 12, 19, 23, 26],
"SelfJudgment": [1, 8, 11, 16, 21],
"CommonHumanity": [3, 7, 10, 15],
"Isolation": [4, 13, 18, 25],
"Mindfulness": [9, 14, 17, 22],
"OverIdentified": [2, 6, 20, 24],
}
_assert_value_range(data, score_range)
# Reverse scores 1, 2, 4, 6, 8, 11, 13, 16, 18, 20, 21, 24, 25
# (numbers in the dictionary correspond to the *positions* of the items to be reversed in the item list specified
# by the subscale dict)
data = _invert_subscales(
data,
subscales=subscales,
idx_dict={"SelfJudgment": [0, 1, 2, 3, 4], "Isolation": [0, 1, 2, 3], "OverIdentified": [0, 1, 2, 3]},
score_range=score_range,
)
# SCS is a mean, not a sum score!
scs_data = _compute_questionnaire_subscales(data, score_name, subscales, agg_type="mean")
if len(data.columns) == 26:
# compute total score if all columns are present
scs_data[score_name] = data.mean(axis=1)
return pd.DataFrame(scs_data, index=data.index)
def midi(data: pd.DataFrame, columns: Optional[Union[Sequence[str], pd.Index]] = None) -> pd.DataFrame:
"""Compute the **Midlife Development Inventory (MIDI) Sense of Control Scale**.
The Midlife Development Inventory (MIDI) sense of control scale assesses perceived control,
that is, how much an individual perceives to be in control of his or her environment. Higher scores indicate
greater sense of control.
.. note::
This implementation assumes a score range of [1, 7].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
Returns
-------
:class:`~pandas.DataFrame`
MIDI score
Raises
------
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
References
----------
<NAME>., & <NAME>. (1998). The sense of control as a moderator of social class differences in
health and well-being. *Journal of personality and social psychology*, 74(3), 763.
"""
score_name = "MIDI"
score_range = [1, 7]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
_assert_num_columns(data, 12)
_assert_value_range(data, score_range)
# Reverse scores 1, 2, 4, 5, 7, 9, 10, 11
data = invert(data, cols=to_idx([1, 2, 4, 5, 7, 9, 10, 11]), score_range=score_range)
# MIDI is a mean, not a sum score!
return pd.DataFrame(data.mean(axis=1), columns=[score_name])
def tsgs(
data: pd.DataFrame,
columns: Optional[Union[Sequence[str], pd.Index]] = None,
subscales: Optional[Dict[str, Sequence[Union[str, int]]]] = None,
) -> pd.DataFrame:
"""Compute the **Trait Shame and Guilt Scale**.
The TSGS assesses the experience of shame, guilt, and pride over the past few months with three separate subscales.
Shame and guilt are considered distinct emotions, with shame being a global negative feeling about the self,
and guilt being a negative feeling about a specific event rather than the self. Higher scores on each subscale
indicate higher shame, guilt, or pride.
It consists of the subscales,
with the item indices (count-by-one, i.e., the first question has the index 1!):
* ``Shame``: [2, 5, 8, 11, 14]
* ``Guilt``: [3, 6, 9, 12, 15]
* ``Pride``: [1, 4, 7, 10, 13]
.. note::
This implementation assumes a score range of [1, 5].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
.. warning::
Column indices in ``subscales`` are assumed to start at 1 (instead of 0) to avoid confusion with
questionnaire item columns, which typically also start with index 1!
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
subscales : dict, optional
A dictionary with subscale names (keys) and column names or column indices (count-by-1) (values)
if only specific subscales should be computed.
Returns
-------
:class:`~pandas.DataFrame`
TSGS score
Raises
------
ValueError
if ``subscales`` is supplied and dict values are something else than a list of strings or a list of ints
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
References
----------
<NAME>., <NAME>., <NAME>., & <NAME>. (2008). The psychobiology of trait shame in young women:
Extending the social self preservation theory. *Health Psychology*, 27(5), 523.
"""
score_name = "TSGS"
score_range = [1, 5]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
if subscales is None:
_assert_num_columns(data, 15)
subscales = {
"Pride": [1, 4, 7, 10, 13],
"Shame": [2, 5, 8, 11, 14],
"Guilt": [3, 6, 9, 12, 15],
}
_assert_value_range(data, score_range)
tsgs_data = _compute_questionnaire_subscales(data, score_name, subscales)
return pd.DataFrame(tsgs_data, index=data.index)
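# Hedged usage sketch (illustrative only, not part of the original module): computing just
# the TSGS "Shame" subscale from a dataframe that contains only the five shame items.
# Because only those columns are passed, the count-by-one indices in ``subscales`` refer to
# positions within the passed dataframe, i.e. [1, 2, 3, 4, 5]. The argument name is an assumption.
def _example_tsgs_shame_only(shame_items: pd.DataFrame) -> pd.DataFrame:  # pragma: no cover
    # `shame_items` is assumed to hold exactly the 5 shame items with values in [1, 5]
    return tsgs(shame_items, subscales={"Shame": [1, 2, 3, 4, 5]})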
def rmidi(
data: pd.DataFrame,
columns: Optional[Union[Sequence[str], pd.Index]] = None,
subscales: Optional[Dict[str, Sequence[Union[str, int]]]] = None,
) -> pd.DataFrame:
"""Compute the **Revised Midlife Development Inventory (MIDI) Personality Scale**.
The Midlife Development Inventory (MIDI) includes 6 personality trait scales: Neuroticism,
Extraversion, Openness to Experience, Conscientiousness, Agreeableness, and Agency. Higher scores
indicate higher endorsement of each personality trait.
It consists of the subscales with the item indices (count-by-one, i.e., the first question has the index 1!):
* ``Neuroticism``: [3, 8, 13, 19]
* ``Extraversion``: [1, 6, 11, 23, 27]
* ``Openness``: [14, 17, 21, 22, 25, 28, 29]
* ``Conscientiousness``: [4, 9, 16, 24, 31]
* ``Agreeableness``: [2, 7, 12, 18, 26]
* ``Agency``: [5, 10, 15, 20, 30]
.. note::
This implementation assumes a score range of [1, 4].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
.. warning::
Column indices in ``subscales`` are assumed to start at 1 (instead of 0) to avoid confusion with
questionnaire item columns, which typically also start with index 1!
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
subscales : dict, optional
A dictionary with subscale names (keys) and column names or column indices (count-by-1) (values)
if only specific subscales should be computed.
Returns
-------
:class:`~pandas.DataFrame`
RMIDI score
Raises
------
ValueError
if ``subscales`` is supplied and dict values are something else than a list of strings or a list of ints
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
References
----------
<NAME>., & <NAME>. (2001). Planning for the future: a life management strategy for increasing control
and life satisfaction in adulthood. *Psychology and aging*, 16(2), 206.
"""
score_name = "RMIDI"
score_range = [1, 4]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
if subscales is None:
_assert_num_columns(data, 31)
subscales = {
"Neuroticism": [3, 8, 13, 19],
"Extraversion": [1, 6, 11, 23, 27],
"Openness": [14, 17, 21, 22, 25, 28, 29],
"Conscientiousness": [4, 9, 16, 24, 31],
"Agreeableness": [2, 7, 12, 18, 26],
"Agency": [5, 10, 15, 20, 30],
}
_assert_value_range(data, score_range)
# "most items need to be reverse scored before subscales are computed => reverse all"
data = invert(data, score_range=score_range)
# Re-reverse scores 19, 24
# (numbers in the dictionary correspond to the *positions* of the items to be reversed in the item list specified
# by the subscale dict)
data = _invert_subscales(
data, subscales=subscales, idx_dict={"Neuroticism": [3], "Conscientiousness": [3]}, score_range=score_range
)
# RMIDI is a mean, not a sum score!
rmidi_data = _compute_questionnaire_subscales(data, score_name, subscales, agg_type="mean")
return pd.DataFrame(rmidi_data, index=data.index)
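# Hedged usage sketch (illustrative only, not part of the original module): full RMIDI
# computation on synthetic data. The random values and column names are assumptions; the
# function only requires 31 items in [1, 4] and returns one mean score per subscale
# (e.g. "RMIDI_Neuroticism", "RMIDI_Agency").
def _example_rmidi_usage() -> pd.DataFrame:  # pragma: no cover - illustrative only
    import numpy as np
    import pandas as pd
    rng = np.random.default_rng(seed=0)
    example_data = pd.DataFrame(
        rng.integers(1, 5, size=(8, 31)),
        columns=["RMIDI_{:02d}".format(i) for i in range(1, 32)],
    )
    return rmidi(example_data)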
def lsq(
data: pd.DataFrame,
columns: Optional[Union[Sequence[str], pd.Index]] = None,
subscales: Optional[Sequence[str]] = None,
) -> pd.DataFrame:
"""Compute the **Life Stress Questionnaire**.
The LSQ asks participants about stressful life events that they and their close relatives have experienced
throughout their entire life, what age they were when the event occurred, and how much it impacted them.
Higher scores indicate more stress.
It consists of the subscales:
* ``PartnerStress``: columns with suffix ``_Partner``
* ``ParentStress``: columns with suffix ``_Parent``
* ``ChildStress``: columns with suffix ``_Child``
.. note::
This implementation assumes a score range of [0, 1].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
subscales : list of str, optional
List of subscales (``Partner``, ``Parent``, ``Child``) to compute or ``None`` to compute all subscales.
Default: ``None``
Returns
-------
:class:`~pandas.DataFrame`
LSQ score
Raises
------
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
References
----------
<NAME>., & <NAME>. (2001). Planning for the future: a life management strategy for increasing control
and life satisfaction in adulthood. *Psychology and aging*, 16(2), 206.
"""
score_name = "LSQ"
score_range = [0, 1]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
if subscales is None:
_assert_num_columns(data, 30)
subscales = ["Partner", "Parent", "Child"]
if isinstance(subscales, str):
subscales = [subscales]
_assert_value_range(data, score_range)
lsq_data = {"{}_{}".format(score_name, subscale): data.filter(like=subscale).sum(axis=1) for subscale in subscales}
return pd.DataFrame(lsq_data, index=data.index)
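# Hedged usage sketch (illustrative only, not part of the original module): the LSQ
# implementation above selects items by column-name suffix ("Partner", "Parent", "Child"),
# so this example builds 30 columns following that naming convention. The exact item names
# are assumptions; only the suffixes and the [0, 1] value range matter to the function.
def _example_lsq_usage() -> pd.DataFrame:  # pragma: no cover - illustrative only
    import numpy as np
    import pandas as pd
    rng = np.random.default_rng(seed=0)
    columns = [
        "LSQ_{:02d}_{}".format(i, suffix)
        for suffix in ["Partner", "Parent", "Child"]
        for i in range(1, 11)
    ]
    example_data = pd.DataFrame(rng.integers(0, 2, size=(6, 30)), columns=columns)
    # returns the sum scores "LSQ_Partner", "LSQ_Parent", and "LSQ_Child"
    return lsq(example_data)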
def ctq(
data: pd.DataFrame,
columns: Optional[Union[Sequence[str], pd.Index]] = None,
subscales: Optional[Dict[str, Sequence[int]]] = None,
) -> pd.DataFrame:
"""Compute the **Childhood Trauma Questionnaire (CTQ)**.
It consists of the subscales with the item indices (count-by-one, i.e., the first question has the index 1!):
* ``PhysicalAbuse``: [9, 11, 12, 15, 17]
* ``SexualAbuse``: [20, 21, 23, 24, 27]
* ``EmotionalNeglect``: [5, 7, 13, 19, 28]
* ``PhysicalNeglect``: [1, 2, 4, 6, 26]
* ``EmotionalAbuse``: [3, 8, 14, 18, 25]
Additionally, three items assess the validity of the responses (high scores on these items could be grounds for
exclusion of a given participant’s responses):
* ``Validity``: [10, 16, 22]
.. note::
This implementation assumes a score range of [1, 5].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
.. warning::
Column indices in ``subscales`` are assumed to start at 1 (instead of 0) to avoid confusion with
questionnaire item columns, which typically also start with index 1!
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
subscales : dict, optional
A dictionary with subscale names (keys) and column names or column indices (count-by-1) (values)
if only specific subscales should be computed.
Returns
-------
:class:`~pandas.DataFrame`
CTQ score
Raises
------
ValueError
if ``subscales`` is supplied and dict values are something else than a list of strings or a list of ints
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
References
----------
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., ... & <NAME>. (1994).
Initial reliability and validity of a new retrospective measure of child abuse and neglect.
*The American journal of psychiatry*.
"""
score_name = "CTQ"
score_range = [1, 5]
# create copy of data
data = data.copy()
if columns is not None:
_assert_has_columns(data, [columns])
# if columns parameter is supplied: slice columns from dataframe
data = data.loc[:, columns]
if subscales is None:
_assert_num_columns(data, 28)
subscales = {
"PhysicalAbuse": [9, 11, 12, 15, 17],
"SexualAbuse": [20, 21, 23, 24, 27],
"EmotionalNeglect": [5, 7, 13, 19, 28],
"PhysicalNeglect": [1, 2, 4, 6, 26],
"EmotionalAbuse": [3, 8, 14, 18, 25],
"Validity": [10, 16, 22],
}
_assert_value_range(data, score_range)
# reverse scores 2, 5, 7, 13, 19, 26, 28
# (numbers in the dictionary correspond to the *positions* of the items to be reversed in the item list specified
# by the subscale dict)
data = _invert_subscales(
data,
subscales=subscales,
idx_dict={
"PhysicalNeglect": [1, 4],
"EmotionalNeglect": [0, 1, 2, 3, 4],
},
score_range=score_range,
)
ctq_data = _compute_questionnaire_subscales(data, score_name, subscales)
return pd.DataFrame(ctq_data, index=data.index)
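# Hedged usage sketch (illustrative only, not part of the original module): computing CTQ
# scores and flagging participants with a high "Validity" (minimization/denial) score.
# The cutoff of 12 is a purely hypothetical example value, not one prescribed by this module.
def _example_ctq_validity_flag(ctq_items: pd.DataFrame) -> pd.DataFrame:  # pragma: no cover
    # `ctq_items` is assumed to hold all 28 CTQ items with values in [1, 5]
    scores = ctq(ctq_items)
    scores["CTQ_Validity_Flag"] = scores["CTQ_Validity"] > 12  # hypothetical cutoff
    return scores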
def peat(data: pd.DataFrame, columns: Optional[Union[Sequence[str], pd.Index]] = None) -> pd.DataFrame:
"""Compute the **Pittsburgh Enjoyable Activities Test (PEAT)**.
The PEAT is a self-report measure of engagement in leisure activities. It asks participants to report how often
over the last month they have engaged in each of the activities. Higher scores indicate more time spent in
leisure activities.
.. note::
This implementation assumes a score range of [0, 4].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
Returns
-------
:class:`~pandas.DataFrame`
PEAT score
Raises
------
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
References
----------
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2009).
Association of enjoyable leisure activities with psychological and physical well-being.
*Psychosomatic medicine*, 71(7), 725.
"""
score_name = "PEAT"
score_range = [0, 4]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
_assert_num_columns(data, 10)
_assert_value_range(data, score_range)
return pd.DataFrame(data.sum(axis=1), columns=[score_name])
def purpose_life(data: pd.DataFrame, columns: Optional[Union[Sequence[str], pd.Index]] = None) -> pd.DataFrame:
"""Compute the **Purpose in Life** questionnaire.
Purpose in life refers to the psychological tendency to derive meaning from life’s experiences
and to possess a sense of intentionality and goal directedness that guides behavior.
Higher scores indicate greater purpose in life.
.. note::
This implementation assumes a score range of [1, 5].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
Returns
-------
:class:`~pandas.DataFrame`
Purpose in Life score
References
----------
<NAME>., <NAME>., <NAME>., & <NAME>. (2009). Purpose in life is associated with
mortality among community-dwelling older persons. *Psychosomatic medicine*, 71(5), 574.
"""
score_name = "PurposeLife"
score_range = [1, 5]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
_assert_num_columns(data, 10)
_assert_value_range(data, score_range)
# reverse scores 2, 3, 5, 6, 10
data = invert(data, cols=to_idx([2, 3, 5, 6, 10]), score_range=score_range)
# Purpose in Life is a mean, not a sum score!
return pd.DataFrame(data.mean(axis=1), columns=[score_name])
def trait_rumination(data: pd.DataFrame, columns: Optional[Union[Sequence[str], pd.Index]] = None) -> pd.DataFrame:
"""Compute the **Trait Rumination**.
Higher scores indicate greater rumination.
.. note::
This implementation assumes a score range of [0, 1], where 0 = no rumination, 1 = rumination.
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
Returns
-------
:class:`~pandas.DataFrame`
TraitRumination score
References
----------
<NAME>., <NAME>., & <NAME>. (1993). Response styles and the duration of episodes of
depressed mood. *Journal of abnormal psychology*, 102(1), 20.
"""
score_name = "TraitRumination"
score_range = [0, 1]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
_assert_num_columns(data, 14)
_assert_value_range(data, score_range)
return pd.DataFrame(data.sum(axis=1), columns=[score_name])
def besaa(
data: pd.DataFrame,
columns: Optional[Union[Sequence[str], pd.Index]] = None,
subscales: Optional[Dict[str, Sequence[Union[int, str]]]] = None,
) -> pd.DataFrame:
"""Compute the **Body-Esteem Scale for Adolescents and Adults (BESAA)**.
Body Esteem refers to self-evaluations of one’s body or appearance. The BESAA is based on
the idea that feelings about one’s weight can be differentiated from feelings about one’s general appearance,
and that one’s own opinions may be differentiated from the opinions attributed to others.
Higher scores indicate higher body esteem.
It consists of the subscales with the item indices (count-by-one, i.e., the first question has the index 1!):
* ``Appearance``: [1, 6, 7, 9, 11, 13, 15, 17, 21, 23]
* ``Weight``: [3, 4, 8, 10, 16, 18, 19, 22]
* ``Attribution``: [2, 5, 12, 14, 20]
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
subscales : dict, optional
A dictionary with subscale names (keys) and column names or column indices (count-by-1) (values)
if only specific subscales should be computed.
Returns
-------
:class:`~pandas.DataFrame`
BESAA score
Raises
------
ValueError
if ``subscales`` is supplied and dict values are something else than a list of strings or a list of ints
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
References
----------
<NAME>., <NAME>., & <NAME>. (2001). Body-esteem scale for adolescents and adults.
*Journal of personality assessment*, 76(1), 90-106.
"""
score_name = "BESAA"
score_range = [0, 4]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
if subscales is None:
_assert_num_columns(data, 23)
subscales = {
"Appearance": [1, 6, 7, 9, 11, 13, 15, 17, 21, 23],
"Weight": [3, 4, 8, 10, 16, 18, 19, 22],
"Attribution": [2, 5, 12, 14, 20],
}
_assert_value_range(data, score_range)
# reverse scores 4, 7, 9, 11, 13, 17, 18, 19, 21
# (numbers in the dictionary correspond to the *positions* of the items to be reversed in the item list specified
# by the subscale dict)
data = _invert_subscales(
data,
subscales=subscales,
idx_dict={"Appearance": [2, 3, 4, 5, 7, 8], "Weight": [1, 5, 6]},
score_range=score_range,
)
# BESAA is a mean, not a sum score!
besaa_data = _compute_questionnaire_subscales(data, score_name, subscales, agg_type="mean")
return pd.DataFrame(besaa_data, index=data.index)
def fscrs(
data: pd.DataFrame,
columns: Optional[Union[Sequence[str], pd.Index]] = None,
subscales: Optional[Dict[str, Sequence[int]]] = None,
) -> pd.DataFrame:
"""Compute the **Forms of Self-Criticizing/Attacking and Self-Reassuring Scale (FSCRS)**.
Self-criticism describes the internal relationship with the self in which part of the self shames
and puts down, while the other part of the self responds and submits to such attacks.
Self-reassurance refers to the opposing idea that many individuals focus on positive aspects of self and defend
against self-criticism. The FSCRS exemplifies some of the self-statements made by either those who are
self-critical or by those who self-reassure.
The scale measures these two traits on a continuum with self-criticism at one end and
self-reassurance at the other. Higher scores on each subscale indicate higher self-criticizing ("Inadequate Self"),
self-attacking ("Hated Self"), and self-reassuring "Reassuring Self", respectively.
It consists of the subscales with the item indices (count-by-one, i.e., the first question has the index 1!):
* ``InadequateSelf``: [1, 2, 4, 6, 7, 14, 17, 18, 20]
* ``HatedSelf``: [9, 10, 12, 15, 22]
* ``ReassuringSelf``: [3, 5, 8, 11, 13, 16, 19, 21]
.. note::
This implementation assumes a score range of [0, 4].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
.. warning::
Column indices in ``subscales`` are assumed to start at 1 (instead of 0) to avoid confusion with
questionnaire item columns, which typically also start with index 1!
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
subscales : dict, optional
A dictionary with subscale names (keys) and column names or column indices (count-by-1) (values)
if only specific subscales should be computed.
Returns
-------
:class:`~pandas.DataFrame`
FSCRS score
Raises
------
ValueError
if ``subscales`` is supplied and dict values are something else than a list of strings or a list of ints
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
References
----------
<NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2004). Criticizing and reassuring oneself:
An exploration of forms, styles and reasons in female students. *British Journal of Clinical Psychology*,
43(1), 31-50.
"""
score_name = "FSCRS"
score_range = [0, 4]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
if subscales is None:
_assert_num_columns(data, 22)
subscales = {
"InadequateSelf": [1, 2, 4, 6, 7, 14, 17, 18, 20],
"HatedSelf": [9, 10, 12, 15, 22],
"ReassuringSelf": [3, 5, 8, 11, 13, 16, 19, 21],
}
_assert_value_range(data, score_range)
fscrs_data = _compute_questionnaire_subscales(data, score_name, subscales)
return pd.DataFrame(fscrs_data, index=data.index)
def pasa(
data: pd.DataFrame,
columns: Optional[Union[Sequence[str], pd.Index]] = None,
subscales: Optional[Dict[str, Sequence[int]]] = None,
) -> pd.DataFrame:
"""Compute the **Primary Appraisal Secondary Appraisal Scale (PASA)**.
The PASA assesses each of the four cognitive appraisal processes relevant for acute stress protocols,
such as the TSST: primary stress appraisal (threat and challenge) and secondary stress appraisal
(self-concept of own abilities and control expectancy). Higher scores indicate greater appraisals for each sub-type.
It consists of the subscales with the item indices (count-by-one, i.e., the first question has the index 1!):
* ``Threat``: [1, 9, 5, 13]
* ``Challenge``: [6, 10, 2, 14]
* ``SelfConcept``: [7, 3, 11, 15]
* ``ControlExp``: [4, 8, 12, 16]
.. note::
This implementation assumes a score range of [1, 6].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
.. warning::
Column indices in ``subscales`` are assumed to start at 1 (instead of 0) to avoid confusion with
questionnaire item columns, which typically also start with index 1!
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
subscales : dict, optional
A dictionary with subscale names (keys) and column names or column indices (count-by-1) (values)
if only specific subscales should be computed.
Returns
-------
:class:`~pandas.DataFrame`
PASA score
Raises
------
ValueError
if ``subscales`` is supplied and dict values are something else than a list of strings or a list of ints
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
References
----------
<NAME>., <NAME>., <NAME>., & <NAME>. (2005). Psychological determinants of the cortisol stress
response: the role of anticipatory cognitive appraisal. *Psychoneuroendocrinology*, 30(6), 599-610.
"""
score_name = "PASA"
score_range = [1, 6]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
if subscales is None:
_assert_num_columns(data, 16)
subscales = {
"Threat": [1, 5, 9, 13],
"Challenge": [2, 6, 10, 14],
"SelfConcept": [3, 7, 11, 15],
"ControlExp": [4, 8, 12, 16],
}
_assert_value_range(data, score_range)
# reverse scores 1, 6, 7, 9, 10
# (numbers in the dictionary correspond to the *positions* of the items to be reversed in the item list specified
# by the subscale dict)
data = _invert_subscales(
data,
subscales=subscales,
idx_dict={"Threat": [0, 2], "Challenge": [1, 2], "SelfConcept": [1]},
score_range=score_range,
)
pasa_data = _compute_questionnaire_subscales(data, score_name, subscales)
if all(s in subscales for s in ["Threat", "Challenge"]):
pasa_data[score_name + "_Primary"] = (
pasa_data[score_name + "_Threat"] + pasa_data[score_name + "_Challenge"]
) / 2
if all(s in subscales for s in ["SelfConcept", "ControlExp"]):
pasa_data[score_name + "_Secondary"] = (
pasa_data[score_name + "_SelfConcept"] + pasa_data[score_name + "_ControlExp"]
) / 2
if all("{}_{}".format(score_name, s) in pasa_data for s in ["Primary", "Secondary"]):
pasa_data[score_name + "_StressComposite"] = (
pasa_data[score_name + "_Primary"] - pasa_data[score_name + "_Secondary"]
)
return pd.DataFrame(pasa_data, index=data.index)
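# Hedged usage sketch (illustrative only, not part of the original module): when all four
# PASA subscales are computed, the function above also derives the composite scores.
# The random data and column names are assumptions; only 16 items in [1, 6] are required.
def _example_pasa_usage() -> pd.DataFrame:  # pragma: no cover - illustrative only
    import numpy as np
    import pandas as pd
    rng = np.random.default_rng(seed=0)
    example_data = pd.DataFrame(
        rng.integers(1, 7, size=(5, 16)),
        columns=["PASA_{:02d}".format(i) for i in range(1, 17)],
    )
    # besides the four subscale sums, the result contains "PASA_Primary",
    # "PASA_Secondary", and "PASA_StressComposite"
    return pasa(example_data)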
def ssgs(
data: pd.DataFrame,
columns: Optional[Union[Sequence[str], pd.Index]] = None,
subscales: Optional[Dict[str, Sequence[int]]] = None,
) -> pd.DataFrame:
"""Compute the **State Shame and Guilt Scale (SSGS)**.
The SSGS assesses the experience of shame, guilt, and pride experienced during an acute stress protocol with three
separate subscales. Shame and guilt are considered distinct emotions, with shame being a global negative feeling
about the self, and guilt being a negative feeling about a specific event rather than the self.
This scale is a modified version from the State Shame and Guilt scale by Marschall et al. (1994).
Higher scores on each subscale indicate higher shame, guilt, or pride.
It consists of the subscales with the item indices (count-by-one, i.e., the first question has the index 1!):
* ``Pride``: [1, 4, 7, 10, 13]
* ``Shame``: [2, 5, 8, 11, 14]
* ``Guilt``: [3, 6, 9, 12, 15]
.. note::
This implementation assumes a score range of [1, 5].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
.. warning::
Column indices in ``subscales`` are assumed to start at 1 (instead of 0) to avoid confusion with
questionnaire item columns, which typically also start with index 1!
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
subscales : dict, optional
A dictionary with subscale names (keys) and column names or column indices (count-by-1) (values)
if only specific subscales should be computed.
Returns
-------
:class:`~pandas.DataFrame`
SSGS score
Raises
------
ValueError
if ``subscales`` is supplied and dict values are something else than a list of strings or a list of ints
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
References
----------
<NAME>., <NAME>., <NAME>., & <NAME>. (2008). The psychobiology of trait shame in young women:
Extending the social self preservation theory. *Health Psychology*, 27(5), 523.
<NAME>., <NAME>., & <NAME>. (1994). The state shame and guilt scale.
*Fairfax, VA: George Mason University*.
"""
score_name = "SSGS"
score_range = [1, 5]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
if subscales is None:
_assert_num_columns(data, 15)
subscales = {
"Pride": [1, 4, 7, 10, 13],
"Shame": [2, 5, 8, 11, 14],
"Guilt": [3, 6, 9, 12, 15],
}
_assert_value_range(data, score_range)
ssgs_data = _compute_questionnaire_subscales(data, score_name, subscales)
return pd.DataFrame(ssgs_data, index=data.index)
def panas(
data: pd.DataFrame,
columns: Optional[Union[Sequence[str], pd.Index]] = None,
language: Optional[Literal["english", "german"]] = None,
) -> pd.DataFrame:
"""Compute the **Positive and Negative Affect Schedule (PANAS)**.
The PANAS assesses *positive affect* (interested, excited, strong, enthusiastic, proud, alert, inspired,
determined, attentive, and active) and *negative affect* (distressed, upset, guilty, scared, hostile, irritable,
ashamed, nervous, jittery, and afraid).
Higher scores on each subscale indicate greater positive or negative affect.
.. note::
This implementation assumes a score range of [1, 5].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
.. note::
Unlike most other questionnaire scores in this module, ``panas`` has no ``subscales`` parameter;
the positive and negative affect items are selected internally based on ``language``.
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
language : "english" or "german", optional
Language of the questionnaire used since index items differ between the german and the english version.
Default: ``english``
Returns
-------
:class:`~pandas.DataFrame`
PANAS score
Raises
------
ValueError
if ``language`` is not one of the supported languages
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
References
----------
<NAME>., <NAME>., & <NAME>. (1988). Development and validation of brief measures of positive and
negative affect: the PANAS scales. *Journal of personality and social psychology*, 54(6), 1063.
"""
score_name = "PANAS"
score_range = [1, 5]
supported_versions = ["english", "german"]
# create copy of data
data = data.copy()
if language is None:
language = "english"
if language not in supported_versions:
raise ValueError("questionnaire_version must be one of {}, not {}.".format(supported_versions, language))
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
_assert_num_columns(data, 20)
_assert_value_range(data, score_range)
if language == "german":
# German Version has other item indices
subscales = {
"NegativeAffect": [2, 5, 7, 8, 9, 12, 14, 16, 19, 20],
"PositiveAffect": [1, 3, 4, 6, 10, 11, 13, 15, 17, 18],
}
else:
subscales = {
"NegativeAffect": [2, 4, 6, 7, 8, 11, 13, 15, 18, 20],
"PositiveAffect": [1, 3, 5, 9, 10, 12, 14, 16, 17, 19],
}
# PANAS is a mean, not a sum score!
panas_data = _compute_questionnaire_subscales(data, score_name, subscales, agg_type="mean")
data = _invert_subscales(
data, subscales, {"NegativeAffect": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}, score_range=score_range
)
panas_data[score_name + "_Total"] = data.mean(axis=1)
return pd.DataFrame(panas_data, index=data.index)
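# Hedged usage sketch (illustrative only, not part of the original module): scoring data that
# were collected with the German PANAS version, which uses a different item-to-subscale
# assignment. The random data and column names are assumptions; 20 items in [1, 5] are required.
def _example_panas_german() -> pd.DataFrame:  # pragma: no cover - illustrative only
    import numpy as np
    import pandas as pd
    rng = np.random.default_rng(seed=0)
    example_data = pd.DataFrame(
        rng.integers(1, 6, size=(10, 20)),
        columns=["PANAS_{:02d}".format(i) for i in range(1, 21)],
    )
    # returns "PANAS_NegativeAffect", "PANAS_PositiveAffect", and "PANAS_Total"
    return panas(example_data, language="german")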
def state_rumination(data: pd.DataFrame, columns: Optional[Union[Sequence[str], pd.Index]] = None) -> pd.DataFrame:
"""Compute the **State Rumination** scale.
Rumination is the tendency to dwell on negative thoughts and emotions.
Higher scores indicate greater rumination.
.. note::
This implementation assumes a score range of [0, 4].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
Returns
-------
:class:`~pandas.DataFrame`
State Rumination score
Raises
------
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
References
----------
<NAME>., & <NAME>. (1998). The relationship between emotional rumination and cortisol secretion
under stress. *Personality and Individual Differences*, 24(4), 531-538.
"""
score_name = "StateRumination"
score_range = [0, 4]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
_assert_num_columns(data, 27)
_assert_value_range(data, score_range)
# reverse scores 1, 6, 9, 12, 15, 17, 18, 20, 27
data = invert(data, cols=to_idx([1, 6, 9, 12, 15, 17, 18, 20, 27]), score_range=score_range)
return pd.DataFrame(data.sum(axis=1), columns=[score_name], index=data.index)
# HABIT DATASET
def abi(data: pd.DataFrame, columns: Optional[Union[Sequence[str], pd.Index]] = None) -> pd.DataFrame:
"""Compute the **Angstbewältigungsinventar (ABI)** (Anxiety Management Inventory).
The ABI measures two key personality constructs in the area of stress or anxiety management:
*Vigilance (VIG)* and *Cognitive Avoidance (KOV)*. *VIG* is defined as a class of coping strategies whose
use aims to reduce uncertainty in threatening situations.
In contrast, *KOV* refers to strategies aimed at shielding the organism from arousal-inducing stimuli.
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
Returns
-------
:class:`~pandas.DataFrame`
ABI score
Raises
------
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
References
----------
<NAME>., <NAME>., <NAME>., Angstbewältigung, <NAME>., & VIG, V. (1999).
Das Angstbewältigungs-Inventar (ABI). *Frankfurt am Main*.
"""
score_name = "ABI"
score_range = [1, 2]
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
_assert_num_columns(data, 80)
_assert_value_range(data, score_range)
# split into 8 subitems, consisting of 10 questions each
items = np.split(data, 8, axis=1)
abi_raw = pd.concat(items, keys=[str(i) for i in range(1, len(items) + 1)], axis=1)
idx_kov = {
# ABI-P
"2": [2, 3, 7, 8, 9],
"4": [1, 4, 5, 8, 10],
"6": [2, 3, 5, 6, 7],
"8": [2, 4, 6, 8, 10],
# ABI-E
"1": [2, 3, 6, 8, 10],
"3": [2, 4, 5, 7, 9],
"5": [3, 4, 5, 9, 10],
"7": [1, 5, 6, 7, 9],
}
idx_kov = {key: np.array(val) for key, val in idx_kov.items()}
idx_vig = {key: np.setdiff1d(np.arange(1, 11), np.array(val), assume_unique=True) for key, val in idx_kov.items()}
abi_kov, abi_vig = [
pd.concat(
[abi_raw.loc[:, key].iloc[:, idx[key] - 1] for key in idx],
axis=1,
keys=idx_kov.keys(),
)
for idx in [idx_kov, idx_vig]
]
abi_data = {
score_name + "_KOV_T": abi_kov.sum(axis=1),
score_name + "_VIG_T": abi_vig.sum(axis=1),
score_name + "_KOV_P": abi_kov.loc[:, ["2", "4", "6", "8"]].sum(axis=1),
score_name + "_VIG_P": abi_vig.loc[:, ["2", "4", "6", "8"]].sum(axis=1),
score_name + "_KOV_E": abi_kov.loc[:, ["1", "3", "5", "7"]].sum(axis=1),
score_name + "_VIG_E": abi_vig.loc[:, ["1", "3", "5", "7"]].sum(axis=1),
}
return pd.DataFrame(abi_data, index=data.index)
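# Hedged usage sketch (illustrative only, not part of the original module): the ABI expects
# 80 dichotomous items (8 scenario blocks x 10 items, values in [1, 2]) and splits the
# dataframe *positionally* into these blocks, so column order matters. Data are synthetic.
def _example_abi_usage() -> pd.DataFrame:  # pragma: no cover - illustrative only
    import numpy as np
    import pandas as pd
    rng = np.random.default_rng(seed=0)
    example_data = pd.DataFrame(
        rng.integers(1, 3, size=(4, 80)),
        columns=["ABI_{:02d}".format(i) for i in range(1, 81)],
    )
    # returns the total ("_T"), "_P", and "_E" block sums for both KOV and VIG
    return abi(example_data)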
def stadi(
data: pd.DataFrame,
columns: Optional[Union[Sequence[str], pd.Index]] = None,
subscales: Optional[Dict[str, Sequence[int]]] = None,
stadi_type: Optional[Literal["state", "trait", "state_trait"]] = None,
) -> pd.DataFrame:
"""Compute the **State-Trait Anxiety-Depression Inventory (STADI)**.
With the STADI, anxiety and depression can be recorded, both as state and as trait.
Two self-report questionnaires with 20 items each are available for this purpose.
The state part measures the degree of anxiety and depression currently experienced by a person, which varies
depending on internal or external influences. It can be used in a variety of situations of different types.
This includes not only the whole spectrum of highly heterogeneous stressful situations, but also situations of
neutral or positive ("euthymic") character. The trait part is used to record trait expressions, i.e. the
enduring tendency to experience anxiety and depression.
The STADI can either be computed only for state, only for trait, or for state and trait.
The state and trait scales both consist of the subscales with the item indices
(count-by-one, i.e., the first question has the index 1!):
* Emotionality (Aufgeregtheit - affektive Komponente – ``AU``): [1, 5, 9, 13, 17]
* Worry (Besorgnis - kognitive Komponente - ``BE``): [2, 6, 10, 14, 18]
* Anhedonia (Euthymie - positive Stimmung - ``EU``): [3, 7, 11, 15, 19]
* Dysthymia (Dysthymie - depressive Stimmung - ``DY``): [4, 8, 12, 16, 20]
.. note::
This implementation assumes a score range of [1, 4].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
.. note::
If both state and trait score are present it is assumed that all *state* items are first,
followed by all *trait* items. If all subscales are present this adds up to 20 state items and 20 trait items.
.. warning::
Column indices in ``subscales`` are assumed to start at 1 (instead of 0) to avoid confusion with
questionnaire item columns, which typically also start with index 1!
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
subscales : dict, optional
A dictionary with subscale names (keys) and column names or column indices (count-by-1) (values)
if only specific subscales should be computed.
stadi_type : any of ``state``, ``trait``, or ``state_trait``
which type of STADI subscale should be computed. Default: ``state_trait``
Returns
-------
:class:`~pandas.DataFrame`
STADI score
Raises
------
ValueError
if ``subscales`` is supplied and dict values are something else than a list of strings or a list of ints
if invalid parameter was passed to ``stadi_type``
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
References
----------
<NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2013).
Das State-Trait-Angst-Depressions-Inventar: STADI; Manual.
<NAME>., <NAME>., <NAME>., & <NAME>. (2018). Differentiating anxiety and depression:
the state-trait anxiety-depression inventory. *Cognition and Emotion*, 32(7), 1409-1423.
"""
score_name = "STADI"
score_range = [1, 4]
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
stadi_type = _get_stadi_type(stadi_type)
if subscales is None:
_assert_num_columns(data, 20 * len(stadi_type))
subscales = {
"AU": [1, 5, 9, 13, 17],
"BE": [2, 6, 10, 14, 18],
"EU": [3, 7, 11, 15, 19],
"DY": [4, 8, 12, 16, 20],
}
_assert_value_range(data, score_range)
# split into n subitems (either "State", "Trait" or "State and Trait")
items = np.split(data, len(stadi_type), axis=1)
data = pd.concat(items, keys=stadi_type, axis=1)
stadi_data = {}
for st in stadi_type:
stadi_data.update(_compute_questionnaire_subscales(data[st], "{}_{}".format(score_name, st), subscales))
if all("{}_{}_{}".format(score_name, st, subtype) in stadi_data for subtype in ["AU", "BE"]):
stadi_data.update(
{
"{}_{}_Anxiety".format(score_name, st): stadi_data["{}_{}_AU".format(score_name, st)]
+ stadi_data["{}_{}_BE".format(score_name, st)]
}
)
if all("{}_{}_{}".format(score_name, st, subtype) in stadi_data for subtype in ["EU", "DY"]):
stadi_data.update(
{
"{}_{}_Depression".format(score_name, st): stadi_data["{}_{}_EU".format(score_name, st)]
+ stadi_data["{}_{}_DY".format(score_name, st)]
}
)
if all("{}_{}_{}".format(score_name, st, subtype) in stadi_data for subtype in ["Anxiety", "Depression"]):
stadi_data.update(
{
"{}_{}_Total".format(score_name, st): stadi_data["{}_{}_Anxiety".format(score_name, st)]
+ stadi_data["{}_{}_Depression".format(score_name, st)]
}
)
df_stadi = pd.DataFrame(stadi_data, index=data.index)
return df_stadi
def _get_stadi_type(stadi_type: str) -> Sequence[str]:
if stadi_type is None:
stadi_type = ["State", "Trait"]
elif stadi_type == "state_trait":
stadi_type = ["State", "Trait"]
elif stadi_type == "state":
stadi_type = ["State"]
elif stadi_type == "trait":
stadi_type = ["Trait"]
else:
raise ValueError(
"Invalid 'stadi_type'! Must be one of 'state_trait', 'state', or 'trait', not {}.".format(stadi_type)
)
return stadi_type
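# Hedged usage sketch (illustrative only, not part of the original module): computing only the
# *state* part of the STADI. With ``stadi_type="state"`` the function expects 20 columns in
# [1, 4]; with the default ``state_trait`` it would expect 40 columns, state items first.
# The random data and column names are assumptions.
def _example_stadi_state_only() -> pd.DataFrame:  # pragma: no cover - illustrative only
    import numpy as np
    import pandas as pd
    rng = np.random.default_rng(seed=0)
    example_data = pd.DataFrame(
        rng.integers(1, 5, size=(6, 20)),
        columns=["STADI_State_{:02d}".format(i) for i in range(1, 21)],
    )
    # returns the four subscale sums plus "STADI_State_Anxiety",
    # "STADI_State_Depression", and "STADI_State_Total"
    return stadi(example_data, stadi_type="state")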
def svf_120(
data: pd.DataFrame,
columns: Optional[Union[Sequence[str], pd.Index]] = None,
subscales: Optional[Dict[str, Sequence[int]]] = None,
) -> pd.DataFrame:
"""Compute the **Stressverarbeitungsfragebogen - 120 item version (SVF120)**.
The stress processing questionnaire enables the assessment of coping or processing measures in stressful
situations. The SVF is not a singular test instrument, but rather an inventory of methods that relate to various
aspects of stress processing and coping and from which individual procedures can be selected depending on
the study objective/question.
It consists of the subscales with the item indices (count-by-one, i.e., the first question has the index 1!):
* Trivialization/Minimalization (Bagatellisierung – ``Bag``): [10, 31, 50, 67, 88, 106]
* De-Emphasis by Comparison with Others (Herunterspielen – ``Her``): [17, 38, 52, 77, 97, 113]
* Rejection of Guilt (Schuldabwehr – ``Schab``): [5, 30, 43, 65, 104, 119]
* Distraction/Deflection from a Situation (Ablenkung – ``Abl``): [1, 20, 45, 86, 101, 111]
* Vicarious Satisfaction (Ersatzbefriedigung – ``Ers``): [22, 36, 64, 74, 80, 103]
* Search for Self-Affirmation (Selbstbestätigung – ``Sebest``): [34, 47, 59, 78, 95, 115]
* Relaxation (Entspannung – ``Entsp``): [12, 28, 58, 81, 99, 114]
* Attempt to Control Situation (Situationskontrolle – ``Sitkon``): [11, 18, 39, 66, 91, 116]
* Response Control (Reaktionskontrolle – ``Rekon``): [2, 26, 54, 68, 85, 109]
* Positive Self-Instruction (Positive Selbstinstruktion – ``Posi``): [15, 37, 56, 71, 83, 96]
* Need for Social Support (Soziales Unterstützungsbedürfnis – ``Sozube``): [3, 21, 42, 63, 84, 102]
* Avoidance Tendencies (Vermeidung – ``Verm``): [8, 29, 48, 69, 98, 118]
* Escapist Tendencies (Flucht – ``Flu``): [14, 24, 40, 62, 73, 120]
* Social Isolation (Soziale Abkapselung – ``Soza``): [6, 27, 49, 76, 92, 107]
* Mental Perseveration (Gedankliche Weiterbeschäftigung – ``Gedw``): [16, 23, 55, 72, 100, 110]
* Resignation (Resignation – ``Res``): [4, 32, 46, 60, 89, 105]
* Self-Pity (Selbstbemitleidung – ``Selmit``): [13, 41, 51, 79, 94, 117]
* Self-Incrimination (Selbstbeschuldigung – ``Sesch``): [9, 25, 35, 57, 75, 87]
* Aggression (Aggression – ``Agg``): [33, 44, 61, 82, 93, 112]
* Medicine-Taking (Pharmakaeinnahme – ``Pha``): [7, 19, 53, 70, 90, 108]
.. note::
This implementation assumes a score range of [1, 5].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
.. warning::
Column indices in ``subscales`` are assumed to start at 1 (instead of 0) to avoid confusion with
questionnaire item columns, which typically also start with index 1!
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
subscales : dict, optional
A dictionary with subscale names (keys) and column names or column indices (count-by-1) (values)
if only specific subscales should be computed.
Returns
-------
:class:`~pandas.DataFrame`
SFV120 score
Raises
------
ValueError
if ``subscales`` is supplied and dict values are something else than a list of strings or a list of ints
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
"""
score_name = "SVF120"
score_range = [1, 5]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
if subscales is None:
_assert_num_columns(data, 120)
subscales = {
"Bag": [10, 31, 50, 67, 88, 106], # Bagatellisierung
"Her": [17, 38, 52, 77, 97, 113], # Herunterspielen
"Schab": [5, 30, 43, 65, 104, 119], # Schuldabwehr
"Abl": [1, 20, 45, 86, 101, 111], # Ablenkung
"Ers": [22, 36, 64, 74, 80, 103], # Ersatzbefriedigung
"Sebest": [34, 47, 59, 78, 95, 115], # Selbstbestätigung
"Entsp": [12, 28, 58, 81, 99, 114], # Entspannung
"Sitkon": [11, 18, 39, 66, 91, 116], # Situationskontrolle
"Rekon": [2, 26, 54, 68, 85, 109], # Reaktionskontrolle
"Posi": [15, 37, 56, 71, 83, 96], # Positive Selbstinstruktion
"Sozube": [3, 21, 42, 63, 84, 102], # Soziales Unterstützungsbedürfnis
"Verm": [8, 29, 48, 69, 98, 118], # Vermeidung
"Flu": [14, 24, 40, 62, 73, 120], # Flucht
"Soza": [6, 27, 49, 76, 92, 107], # Soziale Abkapselung
"Gedw": [16, 23, 55, 72, 100, 110], # Gedankliche Weiterbeschäftigung
"Res": [4, 32, 46, 60, 89, 105], # Resignation
"Selmit": [13, 41, 51, 79, 94, 117], # Selbstbemitleidung
"Sesch": [9, 25, 35, 57, 75, 87], # Selbstbeschuldigung
"Agg": [33, 44, 61, 82, 93, 112], # Aggression
"Pha": [7, 19, 53, 70, 90, 108], # Pharmakaeinnahme
}
_assert_value_range(data, score_range)
svf_data = _compute_questionnaire_subscales(data, score_name, subscales)
svf_data = pd.DataFrame(svf_data, index=data.index)
meta_scales = {
"Pos1": ("Bag", "Her", "Schab"),
"Pos2": ("Abl", "Ers", "Sebest", "Entsp"),
"Pos3": ("Sitkon", "Rekon", "Posi"),
"PosGesamt": (
"Bag",
"Her",
"Schab",
"Abl",
"Ers",
"Sebest",
"Entsp",
"Sitkon",
"Rekon",
"Posi",
),
"NegGesamt": ("Flu", "Soza", "Gedw", "Res", "Selmit", "Sesch"),
}
for name, scale_items in meta_scales.items():
if all(scale in subscales for scale in scale_items):
svf_data["{}_{}".format(score_name, name)] = svf_data[
["{}_{}".format(score_name, s) for s in scale_items]
].mean(axis=1)
return svf_data
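# Hedged usage sketch (illustrative only, not part of the original module): full SVF-120
# computation on synthetic data. Because all 20 subscales are present, the result also
# contains the aggregate columns derived above ("SVF120_Pos1" to "SVF120_Pos3",
# "SVF120_PosGesamt", "SVF120_NegGesamt"). The random data and column names are assumptions.
def _example_svf_120_usage() -> pd.DataFrame:  # pragma: no cover - illustrative only
    import numpy as np
    import pandas as pd
    rng = np.random.default_rng(seed=0)
    example_data = pd.DataFrame(
        rng.integers(1, 6, size=(5, 120)),
        columns=["SVF120_{:03d}".format(i) for i in range(1, 121)],
    )
    return svf_120(example_data)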
def svf_42(
data: pd.DataFrame,
columns: Optional[Union[Sequence[str], pd.Index]] = None,
subscales: Optional[Dict[str, Sequence[int]]] = None,
) -> pd.DataFrame:
"""Compute the **Stressverarbeitungsfragebogen - 42 item version (SVF42)**.
The stress processing questionnaire enables the assessment of coping or processing measures in stressful
situations. The SVF is not a singular test instrument, but rather an inventory of methods that relate to various
aspects of stress processing and coping and from which individual procedures can be selected depending on
the study objective/question.
It consists of the subscales with the item indices (count-by-one, i.e., the first question has the index 1!):
* Trivialization/Minimalization (Bagatellisierung – ``Bag``): [7, 22]
* De-Emphasis by Comparison with Others (Herunterspielen – ``Her``): [11, 35]
* Rejection of Guilt (Schuldabwehr – ``Schab``): [2, 34]
* Distraction/Deflection from a Situation (Ablenkung – ``Abl``): [1, 32]
* Vicarious Satisfaction (Ersatzbefriedigung – ``Ers``): [12, 42]
* Search for Self-Affirmation (Selbstbestätigung – ``Sebest``): [19, 37]
* Relaxation (Entspannung – ``Entsp``): [13, 26]
* Attempt to Control Situation (Situationskontrolle – ``Sitkon``): [4, 23]
* Response Control (Reaktionskontrolle – ``Rekon``): [17, 33]
* Positive Self-Instruction (Positive Selbstinstruktion – ``Posi``): [9, 24]
* Need for Social Support (Soziales Unterstützungsbedürfnis – ``Sozube``): [14, 27]
* Avoidance Tendencies (Vermeidung – ``Verm``): [6, 30]
* Escapist Tendencies (Flucht – ``Flu``): [16, 40]
* Social Isolation (Soziale Abkapselung – ``Soza``): [20, 29]
* Mental Perseveration (Gedankliche Weiterbeschäftigung – ``Gedw``): [10, 25]
* Resignation (Resignation – ``Res``): [15, 38]
* Helplessness (Hilflosigkeit – ``Hilf``): [18, 28]
* Self-Pity (Selbstbemitleidung – ``Selmit``): [8, 31]
* Self-Incrimination (Selbstbeschuldigung – ``Sesch``): [21, 36]
* Aggression (Aggression – ``Agg``): [3, 39]
* Medicine-Taking (Pharmakaeinnahme – ``Pha``): [5, 41]
.. note::
This implementation assumes a score range of [1, 5].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
.. warning::
Column indices in ``subscales`` are assumed to start at 1 (instead of 0) to avoid confusion with
questionnaire item columns, which typically also start with index 1!
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
subscales : dict, optional
A dictionary with subscale names (keys) and column names or column indices (count-by-1) (values)
if only specific subscales should be computed.
Returns
-------
:class:`~pandas.DataFrame`
SFV42 score
Raises
------
ValueError
if ``subscales`` is supplied and dict values are something else than a list of strings or a list of ints
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
"""
score_name = "SVF42"
score_range = [1, 5]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
if subscales is None:
_assert_num_columns(data, 42)
subscales = {
"Bag": [7, 22], # Bagatellisierung
"Her": [11, 35], # Herunterspielen
"Schab": [2, 34], # Schuldabwehr
"Abl": [1, 32], # Ablenkung
"Ers": [12, 42], # Ersatzbefriedigung
"Sebest": [19, 37], # Selbstbestätigung
"Entsp": [13, 26], # Entspannung
"Sitkon": [4, 23], # Situationskontrolle
"Rekon": [17, 33], # Reaktionskontrolle
"Posi": [9, 24], # Positive Selbstinstruktion
"Sozube": [14, 27], # Soziales Unterstützungsbedürfnis
"Verm": [6, 30], # Vermeidung
"Flu": [16, 40], # Flucht
"Soza": [20, 29], # Soziale Abkapselung
"Gedw": [10, 25], # Gedankliche Weiterbeschäftigung
"Res": [15, 38], # Resignation
"Hilf": [18, 28], # Hilflosigkeit
"Selmit": [8, 31], # Selbstbemitleidung
"Sesch": [21, 36], # Selbstbeschuldigung
"Agg": [3, 39], # Aggression
"Pha": [5, 41], # Pharmakaeinnahme
}
_assert_value_range(data, score_range)
svf_data = _compute_questionnaire_subscales(data, score_name, subscales)
svf_data = pd.DataFrame(svf_data, index=data.index)
meta_scales = {
"Denial": ["Verm", "Flu", "Soza"],
"Distraction": ["Ers", "Entsp", "Sozube"],
"Stressordevaluation": ["Bag", "Her", "Posi"],
}
for name, scale_items in meta_scales.items():
if all(scale in subscales.keys() for scale in scale_items):
svf_data["{}_{}".format(score_name, name)] = svf_data[
["{}_{}".format(score_name, s) for s in scale_items]
].mean(axis=1)
return svf_data
def brief_cope(
data: pd.DataFrame,
columns: Optional[Union[Sequence[str], pd.Index]] = None,
subscales: Optional[Dict[str, Sequence[int]]] = None,
) -> pd.DataFrame:
"""Compute the **Brief-COPE (28 items) Questionnaire (Brief_COPE)**.
The Brief-COPE is a 28 item self-report questionnaire designed to measure effective and ineffective ways to cope
with a stressful life event. "Coping" is defined broadly as an effort used to minimize distress associated with
negative life experiences. The scale is often used in health-care settings to ascertain how patients are
responding to a serious diagnosis. It can be used to measure how someone is coping with a wide range of
adversity, including cancer diagnosis, heart failure, injuries, assaults, natural disasters and financial stress.
The scale can determine someone’s primary coping style as either Approach Coping or Avoidant Coping.
Higher scores indicate better coping capabilities.
It consists of the subscales with the item indices (count-by-one, i.e., the first question has the index 1!):
* ``SelfDistraction``: [1, 19]
* ``ActiveCoping``: [2, 7]
* ``Denial``: [3, 8]
* ``SubstanceUse``: [4, 11]
* ``EmotionalSupport``: [5, 15]
* ``InstrumentalSupport``: [10, 23]
* ``BehavioralDisengagement``: [6, 16]
* ``Venting``: [9, 21]
* ``PosReframing``: [12, 17]
* ``Planning``: [14, 25]
* ``Humor``: [18, 28]
* ``Acceptance``: [20, 24]
* ``Religion``: [22, 27]
* ``SelfBlame``: [13, 26]
.. note::
This implementation assumes a score range of [1, 4].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
.. warning::
Column indices in ``subscales`` are assumed to start at 1 (instead of 0) to avoid confusion with
questionnaire item columns, which typically also start with index 1!
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
subscales : dict, optional
A dictionary with subscale names (keys) and column names or column indices (count-by-1) (values)
if only specific subscales should be computed.
Returns
-------
:class:`~pandas.DataFrame`
Brief_COPE score
Raises
------
ValueError
if ``subscales`` is supplied and dict values are something else than a list of strings or a list of ints
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
References
----------
<NAME>. (1997). You want to measure coping but your protocol’too long: Consider the brief cope.
*International journal of behavioral medicine*, 4(1), 92-100.
"""
score_name = "Brief_COPE"
score_range = [1, 4]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
if subscales is None:
_assert_num_columns(data, 28)
subscales = {
"SelfDistraction": [1, 19], # Ablenkung
"ActiveCoping": [2, 7], # Aktive Bewältigung
"Denial": [3, 8], # Verleugnung
"SubstanceUse": [4, 11], # Alkohol/Drogen
"EmotionalSupport": [5, 15], # Emotionale Unterstützung
"InstrumentalSupport": [10, 23], # Instrumentelle Unterstützung
"BehavioralDisengagement": [6, 16], # Verhaltensrückzug
"Venting": [9, 21], # Ausleben von Emotionen
"PosReframing": [12, 17], # Positive Umdeutung
"Planning": [14, 25], # Planung
"Humor": [18, 28], # Humor
"Acceptance": [20, 24], # Akzeptanz
"Religion": [22, 27], # Religion
"SelfBlame": [13, 26], # Selbstbeschuldigung
}
_assert_value_range(data, score_range)
cope_data = _compute_questionnaire_subscales(data, score_name, subscales)
    return pd.DataFrame(cope_data, index=data.index)
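# Hedged usage sketch (an addition, not part of the original module): build a random 28-item
# questionnaire already in the required [1, 4] score range and score it with `brief_cope`.
# The column and subject names are illustrative assumptions, and the sketch assumes that with
# the default `subscales` the 28 columns only need to be in questionnaire order (items are
# selected by their 1-based position).
def _example_brief_cope():
    import numpy as np
    rng = np.random.default_rng(seed=0)
    example_data = pd.DataFrame(
        rng.integers(1, 5, size=(10, 28)),  # random integer scores in [1, 4]
        columns=["COPE_{:02d}".format(i) for i in range(1, 29)],
        index=pd.Index(["VP_{:02d}".format(i) for i in range(1, 11)], name="subject"),
    )
    # Returns one column per subscale, e.g. "Brief_COPE_Humor".
    return brief_cope(example_data)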
import os
import time
import uuid
import yaml
import logging
import shutil
import numpy as np
import pandas as pd
import multiprocessing as mp
from functools import partial
from astropy.time import Time
from .config import Config
from .config import Configuration
from .clusters import find_clusters, filter_clusters_by_length
from .cell import Cell
from .orbit import TestOrbit
from .orbits import Orbits
from .orbits import generateEphemeris
from .orbits import initialOrbitDetermination
from .orbits import differentialCorrection
from .orbits import mergeAndExtendOrbits
from .observatories import getObserverState
from .utils import _initWorker
from .utils import _checkParallel
os.environ['OPENBLAS_NUM_THREADS'] = '1'
os.environ['MKL_NUM_THREADS'] = '1'
logger = logging.getLogger("thor")
__all__ = [
"rangeAndShift_worker",
"rangeAndShift",
"clusterVelocity",
"clusterVelocity_worker",
"clusterAndLink",
"runTHOROrbit",
"runTHOR",
]
def rangeAndShift_worker(observations, ephemeris, cell_area=10):
assert len(observations["mjd_utc"].unique()) == 1
assert len(ephemeris["mjd_utc"].unique()) == 1
assert observations["mjd_utc"].unique()[0] == ephemeris["mjd_utc"].unique()[0]
observation_time = observations["mjd_utc"].unique()[0]
# Create Cell centered on the sky-plane location of the
# test orbit
cell = Cell(
ephemeris[["RA_deg", "Dec_deg"]].values[0],
observation_time,
area=cell_area,
)
# Grab observations within cell
cell.getObservations(observations)
if len(cell.observations) != 0:
# Create test orbit with state of orbit at visit time
test_orbit = TestOrbit(
ephemeris[["obj_x", "obj_y", "obj_z", "obj_vx", "obj_vy", "obj_vz"]].values[0],
observation_time
)
# Prepare rotation matrices
test_orbit.prepare()
# Apply rotation matrices and transform observations into the orbit's
# frame of motion.
test_orbit.applyToObservations(cell.observations)
projected_observations = cell.observations
else:
projected_observations = pd.DataFrame()
return projected_observations
def clusterVelocity(
obs_ids,
x,
y,
dt,
vx,
vy,
eps=0.005,
min_obs=5,
min_arc_length=1.0,
alg="hotspot_2d",
):
"""
    Clusters the THOR projection, shifted by a single test velocity (vx, vy)
    in the projection plane, using `~sklearn.cluster.DBSCAN` or the hotspot_2d algorithm.
    Parameters
    ----------
    obs_ids : `~numpy.ndarray` (N)
        Observation IDs.
    x : `~numpy.ndarray` (N)
        Projection space x coordinate in degrees or radians.
    y : `~numpy.ndarray` (N)
        Projection space y coordinate in degrees or radians.
    dt : `~numpy.ndarray` (N)
        Change in time from the 0th exposure in days (MJD).
    vx : float
        Projection space x velocity in degrees or radians per day.
    vy : float
        Projection space y velocity in degrees or radians per day.
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
See: http://scikit-learn.org/stable/modules/generated/sklearn.cluster.dbscan.html
[Default = 0.005]
min_obs : int, optional
The number of samples (or total weight) in a neighborhood for a
point to be considered as a core point. This includes the point itself.
See: http://scikit-learn.org/stable/modules/generated/sklearn.cluster.dbscan.html
[Default = 5]
    min_arc_length : float, optional
        Minimum arc length in units of days for a cluster to be accepted.
    alg : str, optional
        Clustering algorithm to use: "dbscan" or "hotspot_2d".
        [Default = 'hotspot_2d']
Returns
-------
list
If clusters are found, will return a list of numpy arrays containing the
observation IDs for each cluster. If no clusters are found, will return np.NaN.
"""
logger.debug(f"cluster: vx={vx} vy={vy} n_obs={len(obs_ids)}")
xx = x - vx * dt
yy = y - vy * dt
X = np.stack((xx, yy), 1)
clusters = find_clusters(X, eps, min_obs, alg=alg)
clusters = filter_clusters_by_length(
clusters, dt, min_obs, min_arc_length,
)
cluster_ids = []
for cluster in clusters:
cluster_ids.append(obs_ids[cluster])
if len(cluster_ids) == 0:
cluster_ids = np.NaN
return cluster_ids
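# Hedged illustration of the shift-and-cluster idea above (an addition, not part of the original
# module): detections moving at a constant (vx, vy) collapse onto a single point once that motion
# is subtracted, which the clustering step then recovers. All values below are synthetic assumptions.
def _example_cluster_velocity():
    dt = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
    x = 10.0 + 0.05 * dt  # drifting at vx = 0.05 deg/day
    y = -3.0 + 0.02 * dt  # drifting at vy = 0.02 deg/day
    obs_ids = np.array(["obs_1", "obs_2", "obs_3", "obs_4", "obs_5"])
    # Shifting with the matching velocities should yield a single 5-member cluster.
    return clusterVelocity(
        obs_ids, x, y, dt,
        vx=0.05, vy=0.02,
        eps=0.005, min_obs=5, min_arc_length=1.0, alg="dbscan",
    )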
def clusterVelocity_worker(
vx,
vy,
obs_ids=None,
x=None,
y=None,
dt=None,
eps=None,
min_obs=None,
min_arc_length=None,
alg=None
):
"""
Helper function to multiprocess clustering.
"""
cluster_ids = clusterVelocity(
obs_ids,
x,
y,
dt,
vx,
vy,
eps=eps,
min_obs=min_obs,
min_arc_length=min_arc_length,
alg=alg
)
return cluster_ids
def rangeAndShift(
observations,
orbit,
cell_area=10,
backend="PYOORB",
backend_kwargs={},
num_jobs=1,
parallel_backend="mp"
):
"""
Propagate the orbit to all observation times in observations. At each epoch gather a circular region of observations of size cell_area
centered about the location of the orbit on the sky-plane. Transform and project each of the gathered observations into
the frame of motion of the test orbit.
Parameters
----------
observations : `~pandas.DataFrame`
DataFrame containing preprocessed observations.
Should contain the following columns:
obs_id : observation IDs
RA_deg : Right Ascension in degrees.
Dec_deg : Declination in degrees.
RA_sigma_deg : 1-sigma uncertainty for Right Ascension in degrees.
Dec_sigma_deg : 1-sigma uncertainty for Declination in degrees.
observatory_code : MPC observatory code
    orbit : `~thor.orbits.Orbits`
        Test orbit to propagate (a single orbit). If backend is 'THOR', the orbit must be expressed
        as heliocentric ecliptic cartesian elements. If backend is 'PYOORB', orbits may be
        expressed in keplerian, cometary or cartesian elements.
    cell_area : float, optional
        Cell's area in units of square degrees.
        [Default = 10]
    backend : {'THOR', 'PYOORB', 'FINDORB'}, optional
        Which ephemeris backend to use.
backend_kwargs : dict, optional
Settings and additional parameters to pass to selected
backend.
num_jobs : int, optional
Number of jobs to launch.
parallel_backend : str, optional
Which parallelization backend to use {'ray', 'mp'}. Defaults to using Python's multiprocessing
module ('mp').
Returns
-------
    projected_observations : `~pandas.DataFrame`
        Observations dataframe (from cell.observations) with columns containing
        projected coordinates. The dataframe is empty (but keeps the expected columns) if no
        observations fall within any cell.
"""
time_start = time.time()
logger.info("Running range and shift...")
logger.info("Assuming r = {} au".format(orbit.cartesian[0, :3]))
logger.info("Assuming v = {} au per day".format(orbit.cartesian[0, 3:]))
# Build observers dictionary: keys are observatory codes with exposure times (as astropy.time objects)
# as values
observers = {}
for code in observations["observatory_code"].unique():
observers[code] = Time(
observations[observations["observatory_code"].isin([code])]["mjd_utc"].unique(),
format="mjd",
scale="utc"
)
# Propagate test orbit to all times in observations
ephemeris = generateEphemeris(
orbit,
observers,
backend=backend,
backend_kwargs=backend_kwargs,
chunk_size=1,
num_jobs=1,
parallel_backend=parallel_backend
)
if backend == "FINDORB":
observer_states = []
for observatory_code, observation_times in observers.items():
observer_states.append(
getObserverState(
[observatory_code],
observation_times,
frame='ecliptic',
origin='heliocenter',
)
)
observer_states = pd.concat(observer_states)
observer_states.reset_index(
inplace=True,
drop=True
)
ephemeris = ephemeris.join(observer_states[["obs_x", "obs_y", "obs_z", "obs_vx", "obs_vy", "obs_vz"]])
velocity_cols = []
if backend != "PYOORB":
velocity_cols = ["obs_vx", "obs_vy", "obs_vz"]
observations = observations.merge(
ephemeris[["mjd_utc", "observatory_code", "obs_x", "obs_y", "obs_z"] + velocity_cols],
left_on=["mjd_utc", "observatory_code"],
right_on=["mjd_utc", "observatory_code"]
)
# Split the observations into a single dataframe per unique observatory code and observation time
# Basically split the observations into groups of unique exposures
observations_grouped = observations.groupby(by=["observatory_code", "mjd_utc"])
observations_split = [observations_grouped.get_group(g) for g in observations_grouped.groups]
# Do the same for the test orbit's ephemerides
ephemeris_grouped = ephemeris.groupby(by=["observatory_code", "mjd_utc"])
ephemeris_split = [ephemeris_grouped.get_group(g) for g in ephemeris_grouped.groups]
parallel, num_workers = _checkParallel(num_jobs, parallel_backend)
if parallel:
if parallel_backend == "ray":
import ray
if not ray.is_initialized():
ray.init(address="auto")
rangeAndShift_worker_ray = ray.remote(rangeAndShift_worker)
rangeAndShift_worker_ray = rangeAndShift_worker_ray.options(
num_returns=1,
num_cpus=1
)
p = []
for observations_i, ephemeris_i in zip(observations_split, ephemeris_split):
p.append(
rangeAndShift_worker_ray.remote(
observations_i,
ephemeris_i,
cell_area=cell_area
)
)
projected_dfs = ray.get(p)
else: # parallel_backend == "mp"
p = mp.Pool(
processes=num_workers,
initializer=_initWorker,
)
projected_dfs = p.starmap(
partial(
rangeAndShift_worker,
cell_area=cell_area
),
zip(
observations_split,
ephemeris_split,
)
)
p.close()
else:
projected_dfs = []
for observations_i, ephemeris_i in zip(observations_split, ephemeris_split):
projected_df = rangeAndShift_worker(
observations_i,
ephemeris_i,
cell_area=cell_area
)
projected_dfs.append(projected_df)
projected_observations = pd.concat(projected_dfs)
if len(projected_observations) > 0:
projected_observations.sort_values(by=["mjd_utc", "observatory_code"], inplace=True)
projected_observations.reset_index(inplace=True, drop=True)
else:
projected_observations = pd.DataFrame(
columns=[
'obs_id', 'mjd_utc', 'RA_deg', 'Dec_deg', 'RA_sigma_deg',
'Dec_sigma_deg', 'observatory_code', 'obs_x', 'obs_y', 'obs_z', 'obj_x',
'obj_y', 'obj_z', 'theta_x_deg', 'theta_y_deg'
]
)
time_end = time.time()
logger.info("Found {} observations.".format(len(projected_observations)))
logger.info("Range and shift completed in {:.3f} seconds.".format(time_end - time_start))
return projected_observations
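# Hedged usage sketch (an addition, not from the original source): `test_orbit` is assumed to
# already be a thor Orbits instance holding a single test orbit, and `preprocessed_observations`
# a DataFrame with the columns documented in the docstring above.
def _example_range_and_shift(preprocessed_observations, test_orbit):
    return rangeAndShift(
        preprocessed_observations,
        test_orbit,
        cell_area=10,
        backend="PYOORB",
        num_jobs=1,
    )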
def clusterAndLink(
observations,
vx_range=[-0.1, 0.1],
vy_range=[-0.1, 0.1],
vx_bins=100,
vy_bins=100,
vx_values=None,
vy_values=None,
eps=0.005,
min_obs=5,
min_arc_length=1.0,
alg="dbscan",
num_jobs=1,
parallel_backend="mp"
):
"""
Cluster and link correctly projected (after ranging and shifting)
detections.
Parameters
----------
observations : `~pandas.DataFrame`
DataFrame containing post-range and shift observations.
vx_range : {None, list or `~numpy.ndarray` (2)}
Maximum and minimum velocity range in x.
Will not be used if vx_values are specified.
[Default = [-0.1, 0.1]]
vy_range : {None, list or `~numpy.ndarray` (2)}
Maximum and minimum velocity range in y.
Will not be used if vy_values are specified.
[Default = [-0.1, 0.1]]
vx_bins : int, optional
Length of x-velocity grid between vx_range[0]
and vx_range[-1]. Will not be used if vx_values are
specified.
[Default = 100]
vy_bins: int, optional
Length of y-velocity grid between vy_range[0]
and vy_range[-1]. Will not be used if vy_values are
specified.
[Default = 100]
vx_values : {None, `~numpy.ndarray`}, optional
Values of velocities in x at which to cluster
and link.
[Default = None]
vy_values : {None, `~numpy.ndarray`}, optional
Values of velocities in y at which to cluster
and link.
[Default = None]
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
See: http://scikit-learn.org/stable/modules/generated/sklearn.cluster.dbscan.html
[Default = 0.005]
min_obs : int, optional
The number of samples (or total weight) in a neighborhood for a
point to be considered as a core point. This includes the point itself.
See: http://scikit-learn.org/stable/modules/generated/sklearn.cluster.dbscan.html
[Default = 5]
alg: str
Algorithm to use. Can be "dbscan" or "hotspot_2d".
num_jobs : int, optional
Number of jobs to launch.
parallel_backend : str, optional
Which parallelization backend to use {'ray', 'mp'}. Defaults to using Python's multiprocessing
module ('mp').
Returns
-------
clusters : `~pandas.DataFrame`
        DataFrame with the cluster ID, the x and y velocities, and the cluster's arc length in days.
cluster_members : `~pandas.DataFrame`
DataFrame containing the cluster ID and the observation IDs of its members.
Notes
-----
The algorithm chosen can have a big impact on performance and accuracy.
alg="dbscan" uses the DBSCAN algorithm of Ester et. al. It's relatively slow
but works with high accuracy; it is certain to find all clusters with at
least min_obs points that are separated by at most eps.
alg="hotspot_2d" is much faster (perhaps 10-20x faster) than dbscan, but it
may miss some clusters, particularly when points are spaced a distance of 'eps'
apart.
"""
time_start_cluster = time.time()
logger.info("Running velocity space clustering...")
# Extract useful quantities
obs_ids = observations["obs_id"].values
theta_x = observations["theta_x_deg"].values
theta_y = observations["theta_y_deg"].values
mjd = observations["mjd_utc"].values
# Select detections in first exposure
first = np.where(mjd == mjd.min())[0]
mjd0 = mjd[first][0]
dt = mjd - mjd0
if vx_values is None and vx_range is not None:
vx = np.linspace(*vx_range, num=vx_bins)
elif vx_values is None and vx_range is None:
raise ValueError("Both vx_values and vx_range cannot be None.")
else:
vx = vx_values
vx_range = [vx_values[0], vx_values[-1]]
vx_bins = len(vx)
if vy_values is None and vy_range is not None:
vy = np.linspace(*vy_range, num=vy_bins)
elif vy_values is None and vy_range is None:
raise ValueError("Both vy_values and vy_range cannot be None.")
else:
vy = vy_values
vy_range = [vy_values[0], vy_values[-1]]
vy_bins = len(vy)
if vx_values is None and vy_values is None:
vxx, vyy = np.meshgrid(vx, vy)
vxx = vxx.flatten()
vyy = vyy.flatten()
elif vx_values is not None and vy_values is not None:
vxx = vx
vyy = vy
else:
raise ValueError("")
logger.debug("X velocity range: {}".format(vx_range))
if vx_values is not None:
logger.debug("X velocity values: {}".format(vx_bins))
else:
logger.debug("X velocity bins: {}".format(vx_bins))
logger.debug("Y velocity range: {}".format(vy_range))
if vy_values is not None:
logger.debug("Y velocity values: {}".format(vy_bins))
else:
logger.debug("Y velocity bins: {}".format(vy_bins))
if vx_values is not None:
logger.debug("User defined x velocity values: True")
else:
logger.debug("User defined x velocity values: False")
if vy_values is not None:
logger.debug("User defined y velocity values: True")
else:
logger.debug("User defined y velocity values: False")
if vx_values is None and vy_values is None:
logger.debug("Velocity grid size: {}".format(vx_bins * vy_bins))
else:
logger.debug("Velocity grid size: {}".format(vx_bins))
logger.info("Max sample distance: {}".format(eps))
logger.info("Minimum samples: {}".format(min_obs))
possible_clusters = []
parallel, num_workers = _checkParallel(num_jobs, parallel_backend)
if parallel:
if parallel_backend == "ray":
import ray
if not ray.is_initialized():
ray.init(address="auto")
clusterVelocity_worker_ray = ray.remote(clusterVelocity_worker)
clusterVelocity_worker_ray = clusterVelocity_worker_ray.options(
num_returns=1,
num_cpus=1
)
# Put all arrays (which can be large) in ray's
# local object store ahead of time
obs_ids_oid = ray.put(obs_ids)
theta_x_oid = ray.put(theta_x)
theta_y_oid = ray.put(theta_y)
dt_oid = ray.put(dt)
p = []
for vxi, vyi in zip(vxx, vyy):
p.append(
clusterVelocity_worker_ray.remote(
vxi,
vyi,
obs_ids=obs_ids_oid,
x=theta_x_oid,
y=theta_y_oid,
dt=dt_oid,
eps=eps,
min_obs=min_obs,
min_arc_length=min_arc_length,
alg=alg
)
)
possible_clusters = ray.get(p)
else: # parallel_backend == "mp"
p = mp.Pool(
processes=num_workers,
initializer=_initWorker
)
possible_clusters = p.starmap(
partial(
clusterVelocity_worker,
obs_ids=obs_ids,
x=theta_x,
y=theta_y,
dt=dt,
eps=eps,
min_obs=min_obs,
min_arc_length=min_arc_length,
alg=alg
),
zip(vxx, vyy)
)
p.close()
else:
possible_clusters = []
for vxi, vyi in zip(vxx, vyy):
possible_clusters.append(
clusterVelocity(
obs_ids,
theta_x,
theta_y,
dt,
vxi,
vyi,
eps=eps,
min_obs=min_obs,
min_arc_length=min_arc_length,
alg=alg
)
)
time_end_cluster = time.time()
logger.info("Clustering completed in {:.3f} seconds.".format(time_end_cluster - time_start_cluster))
logger.info("Restructuring clusters...")
time_start_restr = time.time()
possible_clusters = pd.DataFrame({"clusters": possible_clusters})
# Remove empty clusters
possible_clusters = possible_clusters[~possible_clusters["clusters"].isna()]
if len(possible_clusters) != 0:
        ### The following code is a little messy: it's a lot of pandas dataframe manipulation.
        ### I have tried doing an overhaul wherein the clusters and cluster_members dataframes are created per
        ### velocity combination in the clusterVelocity function. However, this adds an overhead to that function
        ### of ~1 ms, so clustering 90,000 velocities takes 90 seconds longer, which on small datasets is problematic.
        ### On large datasets the effect is not as pronounced because the code below takes a while to run due to
        ### in-memory pandas dataframe restructuring.
# Make DataFrame with cluster velocities so we can figure out which
# velocities yielded clusters, add names to index so we can enable the join
cluster_velocities = pd.DataFrame({"vtheta_x": vxx, "vtheta_y": vyy})
cluster_velocities.index.set_names("velocity_id", inplace=True)
# Split lists of cluster ids into one column per cluster for each different velocity
# then stack the result
possible_clusters = pd.DataFrame(
possible_clusters["clusters"].values.tolist(),
index=possible_clusters.index
)
possible_clusters = pd.DataFrame(possible_clusters.stack())
possible_clusters.rename(
columns={0: "obs_ids"},
inplace=True
)
possible_clusters = pd.DataFrame(possible_clusters["obs_ids"].values.tolist(), index=possible_clusters.index)
# Drop duplicate clusters
possible_clusters.drop_duplicates(inplace=True)
# Set index names
possible_clusters.index.set_names(["velocity_id", "cluster_id"], inplace=True)
# Reset index
possible_clusters.reset_index(
"cluster_id",
drop=True,
inplace=True
)
possible_clusters["cluster_id"] = [str(uuid.uuid4().hex) for i in range(len(possible_clusters))]
# Make clusters DataFrame
clusters = possible_clusters.join(cluster_velocities)
clusters.reset_index(drop=True, inplace=True)
clusters = clusters[["cluster_id", "vtheta_x", "vtheta_y"]]
# Make cluster_members DataFrame
cluster_members = possible_clusters.reset_index(drop=True).copy()
cluster_members.index = cluster_members["cluster_id"]
cluster_members.drop("cluster_id", axis=1, inplace=True)
cluster_members = pd.DataFrame(cluster_members.stack())
cluster_members.rename(columns={0: "obs_id"}, inplace=True)
cluster_members.reset_index(inplace=True)
cluster_members.drop("level_1", axis=1, inplace=True)
# Calculate arc length and add it to the clusters dataframe
cluster_members_time = cluster_members.merge(
observations[["obs_id", "mjd_utc"]],
on="obs_id",
how="left"
)
clusters_time = cluster_members_time.groupby(
by=["cluster_id"])["mjd_utc"].apply(lambda x: x.max() - x.min()).to_frame()
clusters_time.reset_index(
inplace=True
)
clusters_time.rename(
columns={"mjd_utc" : "arc_length"},
inplace=True
)
clusters = clusters.merge(
clusters_time[["cluster_id", "arc_length"]],
on="cluster_id",
how="left",
)
else:
cluster_members = pd.DataFrame(columns=["cluster_id", "obs_id"])
        clusters = pd.DataFrame(columns=["cluster_id", "vtheta_x", "vtheta_y", "arc_length"])
from collections import OrderedDict
import datetime
from distutils.version import LooseVersion
import io
import os
import pathlib
import tempfile
import numpy as np
import pandas as pd
import fiona
import pytz
from pandas.testing import assert_series_equal
from shapely.geometry import Point, Polygon, box
import geopandas
from geopandas import GeoDataFrame, read_file
from geopandas.io.file import fiona_env, _detect_driver, _EXTENSION_TO_DRIVER
from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal
from geopandas.tests.util import PACKAGE_DIR, validate_boro_df
import pytest
FIONA_GE_1814 = str(fiona.__version__) >= LooseVersion("1.8.14") # datetime roundtrip
_CRS = "epsg:4326"
@pytest.fixture
def df_nybb():
nybb_path = geopandas.datasets.get_path("nybb")
df = read_file(nybb_path)
return df
@pytest.fixture
def df_null():
return read_file(
os.path.join(PACKAGE_DIR, "geopandas", "tests", "data", "null_geom.geojson")
)
@pytest.fixture
def file_path():
return os.path.join(PACKAGE_DIR, "geopandas", "tests", "data", "null_geom.geojson")
@pytest.fixture
def df_points():
N = 10
crs = _CRS
df = GeoDataFrame(
[
{"geometry": Point(x, y), "value1": x + y, "value2": x * y}
for x, y in zip(range(N), range(N))
],
crs=crs,
)
return df
# -----------------------------------------------------------------------------
# to_file tests
# -----------------------------------------------------------------------------
driver_ext_pairs = [
("ESRI Shapefile", ".shp"),
("GeoJSON", ".geojson"),
("GPKG", ".gpkg"),
(None, ".shp"),
(None, ""),
(None, ".geojson"),
(None, ".gpkg"),
]
def assert_correct_driver(file_path, ext):
# check the expected driver
expected_driver = "ESRI Shapefile" if ext == "" else _EXTENSION_TO_DRIVER[ext]
with fiona.open(str(file_path)) as fds:
assert fds.driver == expected_driver
@pytest.mark.parametrize("driver,ext", driver_ext_pairs)
def test_to_file(tmpdir, df_nybb, df_null, driver, ext):
"""Test to_file and from_file"""
tempfilename = os.path.join(str(tmpdir), "boros." + ext)
df_nybb.to_file(tempfilename, driver=driver)
# Read layer back in
df = GeoDataFrame.from_file(tempfilename)
assert "geometry" in df
assert len(df) == 5
assert np.alltrue(df["BoroName"].values == df_nybb["BoroName"])
# Write layer with null geometry out to file
tempfilename = os.path.join(str(tmpdir), "null_geom" + ext)
df_null.to_file(tempfilename, driver=driver)
# Read layer back in
df = GeoDataFrame.from_file(tempfilename)
assert "geometry" in df
assert len(df) == 2
assert np.alltrue(df["Name"].values == df_null["Name"])
# check the expected driver
assert_correct_driver(tempfilename, ext)
@pytest.mark.parametrize("driver,ext", driver_ext_pairs)
def test_to_file_pathlib(tmpdir, df_nybb, df_null, driver, ext):
"""Test to_file and from_file"""
temppath = pathlib.Path(os.path.join(str(tmpdir), "boros." + ext))
df_nybb.to_file(temppath, driver=driver)
# Read layer back in
df = GeoDataFrame.from_file(temppath)
assert "geometry" in df
assert len(df) == 5
assert np.alltrue(df["BoroName"].values == df_nybb["BoroName"])
# check the expected driver
assert_correct_driver(temppath, ext)
@pytest.mark.parametrize("driver,ext", driver_ext_pairs)
def test_to_file_bool(tmpdir, driver, ext):
"""Test error raise when writing with a boolean column (GH #437)."""
tempfilename = os.path.join(str(tmpdir), "temp.{0}".format(ext))
df = GeoDataFrame(
{
"a": [1, 2, 3],
"b": [True, False, True],
"geometry": [Point(0, 0), Point(1, 1), Point(2, 2)],
},
crs=4326,
)
df.to_file(tempfilename, driver=driver)
result = read_file(tempfilename)
if ext in (".shp", ""):
# Shapefile does not support boolean, so is read back as int
df["b"] = df["b"].astype("int64")
assert_geodataframe_equal(result, df)
# check the expected driver
assert_correct_driver(tempfilename, ext)
TEST_DATE = datetime.datetime(2021, 11, 21, 1, 7, 43, 17500)
eastern = pytz.timezone("US/Eastern")
datetime_type_tests = (TEST_DATE, eastern.localize(TEST_DATE))
@pytest.mark.parametrize(
"time", datetime_type_tests, ids=("naive_datetime", "datetime_with_timezone")
)
@pytest.mark.parametrize("driver,ext", driver_ext_pairs)
def test_to_file_datetime(tmpdir, driver, ext, time):
"""Test writing a data file with the datetime column type"""
if ext in (".shp", ""):
pytest.skip(f"Driver corresponding to ext {ext} doesn't support dt fields")
if time.tzinfo is not None and FIONA_GE_1814 is False:
# https://github.com/Toblerity/Fiona/pull/915
pytest.skip("Fiona >= 1.8.14 needed for timezone support")
tempfilename = os.path.join(str(tmpdir), f"test_datetime{ext}")
point = Point(0, 0)
df = GeoDataFrame(
{"a": [1, 2], "b": [time, time]}, geometry=[point, point], crs=4326
)
if FIONA_GE_1814:
fiona_precision_limit = "ms"
else:
fiona_precision_limit = "s"
df["b"] = df["b"].dt.round(freq=fiona_precision_limit)
df.to_file(tempfilename, driver=driver)
df_read = read_file(tempfilename)
assert_geodataframe_equal(df.drop(columns=["b"]), df_read.drop(columns=["b"]))
if df["b"].dt.tz is not None:
# US/Eastern becomes pytz.FixedOffset(-300) when read from file
# so compare fairly in terms of UTC
assert_series_equal(
df["b"].dt.tz_convert(pytz.utc), df_read["b"].dt.tz_convert(pytz.utc)
)
else:
assert_series_equal(df["b"], df_read["b"])
@pytest.mark.parametrize("driver,ext", driver_ext_pairs)
def test_to_file_with_point_z(tmpdir, ext, driver):
"""Test that 3D geometries are retained in writes (GH #612)."""
tempfilename = os.path.join(str(tmpdir), "test_3Dpoint" + ext)
point3d = Point(0, 0, 500)
point2d = Point(1, 1)
df = GeoDataFrame({"a": [1, 2]}, geometry=[point3d, point2d], crs=_CRS)
df.to_file(tempfilename, driver=driver)
df_read = GeoDataFrame.from_file(tempfilename)
assert_geoseries_equal(df.geometry, df_read.geometry)
# check the expected driver
assert_correct_driver(tempfilename, ext)
@pytest.mark.parametrize("driver,ext", driver_ext_pairs)
def test_to_file_with_poly_z(tmpdir, ext, driver):
"""Test that 3D geometries are retained in writes (GH #612)."""
tempfilename = os.path.join(str(tmpdir), "test_3Dpoly" + ext)
poly3d = Polygon([[0, 0, 5], [0, 1, 5], [1, 1, 5], [1, 0, 5]])
poly2d = Polygon([[0, 0], [0, 1], [1, 1], [1, 0]])
df = GeoDataFrame({"a": [1, 2]}, geometry=[poly3d, poly2d], crs=_CRS)
df.to_file(tempfilename, driver=driver)
df_read = GeoDataFrame.from_file(tempfilename)
assert_geoseries_equal(df.geometry, df_read.geometry)
# check the expected driver
assert_correct_driver(tempfilename, ext)
def test_to_file_types(tmpdir, df_points):
"""Test various integer type columns (GH#93)"""
tempfilename = os.path.join(str(tmpdir), "int.shp")
int_types = [
np.int8,
np.int16,
np.int32,
np.int64,
np.intp,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
]
geometry = df_points.geometry
data = dict(
(str(i), np.arange(len(geometry), dtype=dtype))
for i, dtype in enumerate(int_types)
)
df = GeoDataFrame(data, geometry=geometry)
df.to_file(tempfilename)
def test_to_file_int64(tmpdir, df_points):
tempfilename = os.path.join(str(tmpdir), "int64.shp")
geometry = df_points.geometry
df = GeoDataFrame(geometry=geometry)
df["data"] = pd.array([1, np.nan] * 5, dtype=pd.Int64Dtype())
df.to_file(tempfilename)
df_read = GeoDataFrame.from_file(tempfilename)
assert_geodataframe_equal(df_read, df, check_dtype=False, check_like=True)
def test_to_file_empty(tmpdir):
input_empty_df = GeoDataFrame(columns=["geometry"])
tempfilename = os.path.join(str(tmpdir), "test.shp")
with pytest.raises(ValueError, match="Cannot write empty DataFrame to file."):
input_empty_df.to_file(tempfilename)
def test_to_file_privacy(tmpdir, df_nybb):
tempfilename = os.path.join(str(tmpdir), "test.shp")
with pytest.warns(DeprecationWarning):
geopandas.io.file.to_file(df_nybb, tempfilename)
def test_to_file_schema(tmpdir, df_nybb):
"""
Ensure that the file is written according to the schema
if it is specified
"""
tempfilename = os.path.join(str(tmpdir), "test.shp")
properties = OrderedDict(
[
("Shape_Leng", "float:19.11"),
("BoroName", "str:40"),
("BoroCode", "int:10"),
("Shape_Area", "float:19.11"),
]
)
schema = {"geometry": "Polygon", "properties": properties}
# Take the first 2 features to speed things up a bit
df_nybb.iloc[:2].to_file(tempfilename, schema=schema)
with fiona.open(tempfilename) as f:
result_schema = f.schema
assert result_schema == schema
def test_to_file_column_len(tmpdir, df_points):
"""
Ensure that a warning about truncation is given when a geodataframe with
column names longer than 10 characters is saved to shapefile
"""
tempfilename = os.path.join(str(tmpdir), "test.shp")
df = df_points.iloc[:1].copy()
df["0123456789A"] = ["the column name is 11 characters"]
with pytest.warns(
UserWarning, match="Column names longer than 10 characters will be truncated"
):
df.to_file(tempfilename, driver="ESRI Shapefile")
@pytest.mark.parametrize("driver,ext", driver_ext_pairs)
def test_append_file(tmpdir, df_nybb, df_null, driver, ext):
"""Test to_file with append mode and from_file"""
from fiona import supported_drivers
tempfilename = os.path.join(str(tmpdir), "boros" + ext)
driver = driver if driver else _detect_driver(tempfilename)
if "a" not in supported_drivers[driver]:
return None
df_nybb.to_file(tempfilename, driver=driver)
df_nybb.to_file(tempfilename, mode="a", driver=driver)
# Read layer back in
df = GeoDataFrame.from_file(tempfilename)
assert "geometry" in df
assert len(df) == (5 * 2)
    expected = pd.concat([df_nybb] * 2, ignore_index=True)
import numpy as np
import pandas as pd
import plotly.figure_factory as ff
import plotly.graph_objects as go
from math import ceil, log
class Plot:
@staticmethod
def fdc(data, y_log_scale=True):
"""
Make a flow duration curve plot.
Parameters
----------
data : pandas DataFrame
            A pandas daily DataFrame with a DatetimeIndex where each column corresponds to a station.
        y_log_scale : boolean, default True
            Defines whether the plotting y-axis will be on a logarithmic scale.
Returns
-------
fig : plotly Figure
"""
fig = go.Figure()
y_max = 0
for name in data.columns:
series = data[name].dropna()
n = len(series)
y = np.sort(series)
y = y[::-1]
if y_max < y.max():
y_max = y.max()
x = (np.arange(1, n + 1) / n) * 100
fig.add_trace(go.Scatter(x=x, y=y, mode='lines', name=name))
if y_log_scale:
ticks = 10 ** np.arange(1, ceil(log(y_max, 10)) + 1, 1)
ticks[-1:] += 1
fig.update_layout(yaxis=dict(
tickmode='array', tickvals=ticks, dtick=2), yaxis_type="log")
fig.update_layout(xaxis=dict(tickmode='array', tickvals=np.arange(0, 101, step=10)))
return fig
@staticmethod
def gantt(data, monthly=True):
"""
Make a Gantt plot, which shows the temporal data availability for each station.
Parameters
----------
data : pandas DataFrame
            A pandas daily DataFrame with a DatetimeIndex where each column corresponds to a station.
        monthly : boolean, default True
            Defines whether data availability is counted monthly to obtain a smoother graph.
Returns
-------
fig : plotly Figure
"""
date_index = pd.date_range(data.index[0], data.index[-1], freq='D')
data = data.reindex(date_index)
periods = []
for column in data.columns:
series = data[column]
if monthly:
                missing = series.isnull().groupby(pd.Grouper(freq='1MS')).sum()
'''
Load data template. Includes load(), metadata, split_stations(),
remove_upcast() and locals().update().
Inputs:
load() - data/ctd/<DATE>.cnv
metadata() - data/csv/coordenadas_<DATE>.csv
'''
# Dependencies
import pandas as pd
from code.functions import *
saida1 = 'data/ctd/stations_25-01-2017_processed.cnv'
saida2 = 'data/ctd/stations_27-05-2017_processed.cnv'
saida3 = 'data/ctd/stations_08-07-2017_processed.cnv'
saida4 = 'data/ctd/stations_01-10-2017_processed.cnv'
df = pd.read_csv('data/csv/coordenadas.csv', sep=';')
dates = set(df['Data'])
dates = list(dates)
'''
Saída 1 - 25-01-2017
'''
# Loading the data
hd1, hd2, variables, datapoints, alldata = load(saida1)
# Loading metadata
today = dates[2]
stations = list(df.loc[df['Data'] == today]['Ponto'])
lat = list(df.loc[df['Data'] == today]['Lat'])
lon = list(df.loc[df['Data'] == today]['Lon'])
[i.insert(3, i[2]) for i in [stations, lat, lon]]
# Splitting data into different stations
d = split_stations(datapoints, stations, variables, lat, lon)
# Removing upcasts
for st in d:
d[st] = remove_upcast(d[st])
# Creating variables with stations from the dictionary
locals().update(d)
# Let's put them all into lists
st_list = list(d.values())
# Picking out surface and bottom temperatures
top, bot, names = [], [], []
for i in st_list:
top.append(i['sal00:'][0])
bot.append(i['sal00:'][len(i)-1])
names.append(i['STATION'][0])
top, bot, names = pd.Series(top), pd.Series(bot), pd.Series(names)
df2 = pd.DataFrame([top, bot, names])
df2 = df2.transpose()
df2.to_csv('./25-jan-sal.csv')
'''
Saída 2 - 27-05-2017
'''
# Loading the data
hd1, hd2, variables, datapoints, alldata = load(saida2)
# Loading metadata
today = dates[0]
stations = list(df.loc[df['Data'] == today]['Ponto'])
lat = list(df.loc[df['Data'] == today]['Lat'])
lon = list(df.loc[df['Data'] == today]['Lon'])
# This particular day (27-05) there was a test station before sampling.
stations, lat, lon = ['test'] + stations, ['test'] + lat, ['test'] + lon
# Splitting data into different stations
d = split_stations(datapoints, stations, variables, lat, lon)
# Removing upcasts
for st in d:
d[st] = remove_upcast(d[st])
# Creating variables with stations from the dictionary
locals().update(d)
# Let's put them all into lists
st_list = list(d.values())
top, bot, names = [], [], []
for i in st_list:
top.append(i['sal00:'][0])
bot.append(i['sal00:'][len(i)-1])
names.append(i['STATION'][0])
top, bot, names = pd.Series(top), pd.Series(bot), pd.Series(names)
df2 = pd.DataFrame([top, bot, names])
df2 = df2.transpose()
df2.to_csv('./27-may-sal.csv')
'''
Saída 3 - 08-07-2017
'''
# Loading the data
hd1, hd2, variables, datapoints, alldata = load(saida3)
# Loading metadata
today = dates[3]
stations = list(df.loc[df['Data'] == today]['Ponto'])
lat = list(df.loc[df['Data'] == today]['Lat'])
lon = list(df.loc[df['Data'] == today]['Lon'])
# Splitting data into different stations
d = split_stations(datapoints, stations, variables, lat, lon)
# Removing upcasts
for st in d:
d[st] = remove_upcast(d[st])
# Creating variables with stations from the dictionary
locals().update(d)
st_list = list(d.values())
top, bot, names = [], [], []
for i in st_list:
top.append(i['sal00:'][0])
bot.append(i['sal00:'][len(i)-1])
names.append(i['STATION'][0])
top, bot, names = pd.Series(top), pd.Series(bot), pd.Series(names)
df2 = pd.DataFrame([top, bot, names])
df2 = df2.transpose()
df2.to_csv('./08-jul-sal.csv')
'''
Saída 4 - 01-10-2017
'''
# Loading the data
hd1, hd2, variables, datapoints, alldata = load(saida4)
# Loading metadata
today = dates[1]
stations = list(df.loc[df['Data'] == today]['Ponto'])
lat = list(df.loc[df['Data'] == today]['Lat'])
lon = list(df.loc[df['Data'] == today]['Lon'])
[i.insert(3, i[2]) for i in [stations, lat, lon]]
# Splitting data into different stations
d = split_stations(datapoints, stations, variables, lat, lon)
# Removing upcasts
for st in d:
d[st] = remove_upcast(d[st])
# Creating variables with stations from the dictionary
locals().update(d)
st_list = list(d.values())
top, bot, names = [], [], []
for i in st_list:
top.append(i['sal00:'][0])
bot.append(i['sal00:'][len(i)-1])
names.append(i['STATION'][0])
top, bot, names = pd.Series(top), pd.Series(bot), pd.Series(names)
df2 = pd.DataFrame([top, bot, names])
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder
class PreprocessedDataset(object):
def __init__(self, path=None, df=None,
y_col=None, header='infer',
numeric_cols=[],
categorical_cols=[],
ordinal_cols={},
ignore_cols=[],
classification=True):
"""
--------------------------------------------------------------
Arguments:
--------------------------------------------------------------
path - string type, df will be ignored if path is provided
df - pandas dataframe, path and df cannot be both None
y_col - name of target variable column
ignore_cols - list of columns to be ignored
train_size + test_size must be equal to 1
classification - set to False if you are doing regression
numeric_cols - list of column names of numerical features
categorical_cols - list of column names of categorical features
ordinal_cols - a dictionary where each key is a column name, each value is a list of ordinal_values (ordered)
"""
# path and df cannot be both None
assert((path is not None) or (df is not None))
self.data_df_raw = None
if path is not None:
# read csv file as dataframe
self.data_df_raw = pd.read_csv(path, sep= ',', header=header)
elif df is not None:
# use dataframe as given
self.data_df_raw = df
# ignore columns that are not relevant
self.data_df = self.data_df_raw.drop(columns=ignore_cols)
# split dataset into features and output
self.X_df, self.y_df = PreprocessedDataset.feature_target_split(self.data_df, y_col)
# save the transformation of categorical and ordinal cols
self.encoded_cols = {}
self.encodings = {}
# one hot encode categorical columns
for col in categorical_cols:
self.X_df[col] = self.X_df[col].astype('category')
self.encodings[col] = dict(enumerate(self.X_df[col].cat.categories))
integer_encoded_col = self.X_df[col].cat.codes
one_hot_encoded_col = pd.get_dummies(integer_encoded_col, prefix=col)
self.encoded_cols[col] = one_hot_encoded_col
# encode ordinal columns
for col in ordinal_cols:
self.encodings[col] = dict(zip(ordinal_cols[col], range(len(ordinal_cols[col]))))
self.encoded_cols[col] = pd.DataFrame(self.X_df[col].replace(to_replace=self.encodings[col]), columns=[col])
# merge the encoded columns
self.X_df_encoded = pd.DataFrame(self.X_df[numeric_cols])
for col in self.encoded_cols:
self.X_df_encoded[self.encoded_cols[col].columns] = self.encoded_cols[col]
# df to numpy
self.X = self.X_df_encoded.to_numpy()
self.y = None
# encode the output column, if necessary
if y_col:
self.y_df_encoded = pd.DataFrame(self.y_df)
if classification:
self.y_df_encoded[y_col] = self.y_df[y_col].astype('category')
self.encodings[y_col] = dict(enumerate(self.y_df_encoded[y_col].cat.categories))
self.y_df_encoded[y_col] = self.y_df_encoded[y_col].cat.codes
else:
self.y_df_encoded[y_col] = self.y_df[y_col].astype('float')
self.y = self.y_df_encoded.to_numpy()
@staticmethod
def feature_target_split(data, y_col):
if y_col:
            return data.drop(columns=y_col), pd.DataFrame(data[y_col])
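# Hypothetical usage sketch (the CSV path, column names and category orderings are assumptions,
# not taken from a real dataset): numeric columns pass through, categorical columns are one-hot
# encoded and ordinal columns are mapped onto the given order before X and y are built.
def _example_preprocessed_dataset():
    dataset = PreprocessedDataset(
        path="data/example.csv",
        y_col="label",
        numeric_cols=["age", "income"],
        categorical_cols=["city"],
        ordinal_cols={"education": ["primary", "secondary", "tertiary"]},
        ignore_cols=["id"],
        classification=True,
    )
    return dataset.X, dataset.y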
from sales_analysis.data_pipeline import BASEPATH
from sales_analysis.data_pipeline._pipeline import SalesPipeline
import pytest
import os
import pandas as pd
# --------------------------------------------------------------------------
# Fixtures
@pytest.fixture
def pipeline():
FILEPATH = os.path.join(BASEPATH, "data")
DATA_FILES = [f for f in os.listdir(FILEPATH) if f.endswith('.csv')]
DATA = {f : pd.read_csv(os.path.join(FILEPATH, f)) for f in DATA_FILES}
return SalesPipeline(**DATA)
# --------------------------------------------------------------------------
# Data
data = {'customers': {pd.Timestamp('2019-08-01 00:00:00'): 9,
pd.Timestamp('2019-08-02 00:00:00'): 10,
pd.Timestamp('2019-08-03 00:00:00'): 10,
pd.Timestamp('2019-08-04 00:00:00'): 10,
pd.Timestamp('2019-08-05 00:00:00'): 9,
pd.Timestamp('2019-08-06 00:00:00'): 9,
pd.Timestamp('2019-08-07 00:00:00'): 10,
pd.Timestamp('2019-08-08 00:00:00'): 8,
pd.Timestamp('2019-08-09 00:00:00'): 5,
pd.Timestamp('2019-08-10 00:00:00'): 5,
pd.Timestamp('2019-08-11 00:00:00'): 10,
pd.Timestamp('2019-08-12 00:00:00'): 10,
pd.Timestamp('2019-08-13 00:00:00'): 6,
pd.Timestamp('2019-08-14 00:00:00'): 7,
pd.Timestamp('2019-08-15 00:00:00'): 10,
pd.Timestamp('2019-08-16 00:00:00'): 8,
pd.Timestamp('2019-08-17 00:00:00'): 7,
pd.Timestamp('2019-08-18 00:00:00'): 9,
pd.Timestamp('2019-08-19 00:00:00'): 5,
pd.Timestamp('2019-08-20 00:00:00'): 5},
'total_discount_amount': {pd.Timestamp('2019-08-01 00:00:00'): 15152814.736907512,
pd.Timestamp('2019-08-02 00:00:00'): 20061245.64408109,
pd.Timestamp('2019-08-03 00:00:00'): 26441693.751396574,
pd.Timestamp('2019-08-04 00:00:00'): 25783015.567048658,
pd.Timestamp('2019-08-05 00:00:00'): 16649773.993076814,
pd.Timestamp('2019-08-06 00:00:00'): 24744027.428384878,
pd.Timestamp('2019-08-07 00:00:00'): 21641181.771564845,
pd.Timestamp('2019-08-08 00:00:00'): 27012160.85245146,
pd.Timestamp('2019-08-09 00:00:00'): 13806814.237002019,
pd.Timestamp('2019-08-10 00:00:00'): 9722459.599448118,
pd.Timestamp('2019-08-11 00:00:00'): 20450260.26194652,
pd.Timestamp('2019-08-12 00:00:00'): 22125711.151501,
pd.Timestamp('2019-08-13 00:00:00'): 11444206.200090334,
pd.Timestamp('2019-08-14 00:00:00'): 17677326.65707852,
pd.Timestamp('2019-08-15 00:00:00'): 26968819.12338184,
pd.Timestamp('2019-08-16 00:00:00'): 22592246.991756547,
pd.Timestamp('2019-08-17 00:00:00'): 15997597.519811645,
pd.Timestamp('2019-08-18 00:00:00'): 17731498.506244037,
pd.Timestamp('2019-08-19 00:00:00'): 22127822.876592986,
pd.Timestamp('2019-08-20 00:00:00'): 5550506.789972418},
'items': {pd.Timestamp('2019-08-01 00:00:00'): 2895,
pd.Timestamp('2019-08-02 00:00:00'): 3082,
pd.Timestamp('2019-08-03 00:00:00'): 3559,
pd.Timestamp('2019-08-04 00:00:00'): 3582,
pd.Timestamp('2019-08-05 00:00:00'): 2768,
pd.Timestamp('2019-08-06 00:00:00'): 3431,
          pd.Timestamp('2019-08-07 00:00:00'):
# importing all the required libraries
import numpy as np
import pandas as pd
from datetime import datetime
import time, datetime
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler, LabelEncoder, MinMaxScaler
from chart_studio.plotly import plotly
import plotly.offline as offline
import plotly.graph_objs as go
offline.init_notebook_mode()
from collections import Counter
import pickle
from sklearn.model_selection import train_test_split
import lightgbm as lgb
import math
from tqdm import tqdm
# Reading all the files
air_visit_data = pd.read_csv('air_visit_data.csv')
air_store_info = pd.read_csv('air_store_info.csv')
air_reserve = pd.read_csv('air_reserve.csv')
hpg_store_info = pd.read_csv('hpg_store_info.csv')
hpg_reserve = pd.read_csv('hpg_reserve.csv')
date_info = pd.read_csv('date_info.csv')
store_id_relation = pd.read_csv('store_id_relation.csv')
sample_submission = pd.read_csv('sample_submission.csv')
# error metric
# kaggle
def root_mean_squared_logarithmic_error(p,a):
err=0
for i in range(len(p)):
err=err+((np.log(p[i]+1)-np.log(a[i]+1))**2)
total_error=(np.sqrt(err/len(p)))
return total_error
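# Hedged, vectorised equivalent of the metric above (an illustrative addition, not part of the
# original notebook); np.log1p(x) equals np.log(x + 1), so both versions agree on the same inputs.
def rmsle_vectorized(p, a):
    p, a = np.asarray(p, dtype=float), np.asarray(a, dtype=float)
    return np.sqrt(np.mean((np.log1p(p) - np.log1p(a)) ** 2))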
# code taken from,
# https://stackoverflow.com/questions/238260/how-to-calculate-the-bounding-box-for-a-given-lat-lng-location/238558#238558
# by <NAME> (https://stackoverflow.com/users/18770/federico-a-ramponi)
# This snippet of code takes a latitude/longitude pair and a half-side distance in km and, accounting for
# the spheroidal shape of the Earth (WGS-84), returns the 4 coordinates of a bounding box surrounding that point.
# degrees to radians
def deg2rad(degrees):
return math.pi*degrees/180.0
# radians to degrees
def rad2deg(radians):
return 180.0*radians/math.pi
# Semi-axes of WGS-84 geoidal reference
WGS84_a = 6378137.0 # Major semiaxis [m]
WGS84_b = 6356752.3 # Minor semiaxis [m]
# Earth radius at a given latitude, according to the WGS-84 ellipsoid [m]
def WGS84EarthRadius(lat):
# http://en.wikipedia.org/wiki/Earth_radius
An = WGS84_a*WGS84_a * math.cos(lat)
Bn = WGS84_b*WGS84_b * math.sin(lat)
Ad = WGS84_a * math.cos(lat)
Bd = WGS84_b * math.sin(lat)
return math.sqrt( (An*An + Bn*Bn)/(Ad*Ad + Bd*Bd) )
# Bounding box surrounding the point at given coordinates,
# assuming local approximation of Earth surface as a sphere
# of radius given by WGS84
def boundingBox(latitudeInDegrees, longitudeInDegrees, halfSideInKm):
lat = deg2rad(latitudeInDegrees)
lon = deg2rad(longitudeInDegrees)
halfSide = 1000*halfSideInKm
# Radius of Earth at given latitude
radius = WGS84EarthRadius(lat)
# Radius of the parallel at given latitude
pradius = radius*math.cos(lat)
latMin = lat - halfSide/radius
latMax = lat + halfSide/radius
lonMin = lon - halfSide/pradius
lonMax = lon + halfSide/pradius
return (rad2deg(latMin), rad2deg(lonMin), rad2deg(latMax), rad2deg(lonMax))
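# Illustrative check of boundingBox (the coordinates are demonstration assumptions): one degree
# of latitude is roughly 111 km, so a 1.5 km half-side should span about +/- 0.0135 degrees of
# latitude around the centre point.
def _example_bounding_box():
    lat_min, lon_min, lat_max, lon_max = boundingBox(35.68, 139.69, 1.5)
    return lat_min, lon_min, lat_max, lon_max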
def final_fun_2(air_visit_data, air_store_info, hpg_store_info, date_info, store_id_relation):
bounding_box_lat=[]
bounding_box_lon=[]
for i in range(len(air_store_info)):
bounding_box_lat.append(air_store_info['latitude'][i])
bounding_box_lon.append(air_store_info['longitude'][i])
neighbour=[]
lat_1=[]
lon_1=[]
lat_2=[]
lon_2=[]
for i in range(len(air_store_info)):
lat1, lon1, lat2, lon2=boundingBox(bounding_box_lat[i],bounding_box_lon[i],1.5)
lat_1.append(lat1)
lon_1.append(lon1)
lat_2.append(lat2)
lon_2.append(lon2)
for i in range(len(air_store_info)):
count=0
for j in range(len(air_store_info)):
if bounding_box_lat[j]>lat_1[i] and bounding_box_lat[j]<lat_2[i] and bounding_box_lon[j]>lon_1[i] and bounding_box_lon[j]<lon_2[i]:
count=count+1
neighbour.append(count-1)
air_store_info['nearest_neighbour']=neighbour
air_store_info=air_store_info.rename(columns={"air_genre_name":"genre_name","air_area_name":"area_name"})
hpg_store_info=hpg_store_info.rename(columns={"hpg_genre_name":"genre_name","hpg_area_name":"area_name"})
date_info=date_info.rename(columns={"calendar_date":"visit_date"})
total_data=pd.merge(air_visit_data,date_info,how='left',on=['visit_date'])
    total_data = pd.merge(total_data, store_id_relation, how='left', on=['air_store_id'])
#!/usr/bin/env python
"""
Requirements:
* Python >= 3.6.2
* Pandas
* NumPy
Copyright (c) 2020 <NAME> <<EMAIL>>
MIT License <http://opensource.org/licenses/MIT>
"""
RELEASE = False
__version_info__ = (
"0",
"3",
)
__version__ = ".".join(__version_info__)
__version__ += "-dev" if not RELEASE else ""
import argparse
import math
import os, sys
import pandas as pd
import numpy as np
def convert_to_df(pvacseq_1_tsv, pvacseq_2_tsv):
if not pvacseq_2_tsv:
pvacseq_1_reader = pd.read_csv(pvacseq_1_tsv, sep="\t")
merged_df = pd.DataFrame(pvacseq_1_reader)
elif not pvacseq_1_tsv:
        pvacseq_2_reader = pd.read_csv(pvacseq_2_tsv, sep="\t")
import dash
import dash_html_components as html
import dash_core_components as dcc
import pandas as pd
import simfin as sf
from simfin.names import *
import dash_table
from dash.dependencies import Output, Input, State
from flask import Flask
from flask.helpers import get_root_path
from flask_login import login_required
from config import BaseConfig
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import numpy as np
from flask_migrate import Migrate
from textwrap import dedent
def create_app():
server = Flask(__name__)
server.config.from_object(BaseConfig)
register_dashapps(server)
register_extensions(server)
register_blueprints(server)
return server
def register_dashapps(app):
sf.set_data_dir('~/simfin_data/')
api_key = "<KEY>"
df_income = sf.load(dataset='income', variant='annual', market='us', index=[TICKER])
df_income = df_income.drop(['Currency', 'SimFinId', 'Fiscal Period', 'Publish Date', 'Shares (Basic)',
'Abnormal Gains (Losses)', 'Net Extraordinary Gains (Losses)',
'Income (Loss) from Continuing Operations',
'Net Income (Common)', 'Pretax Income (Loss), Adj.', 'Report Date', 'Restated Date'],
axis=1)
df_income = df_income.fillna(0)
df_income = df_income.apply(lambda x: x / 1000000)
decimals = 0
df_income['Fiscal Year'] = df_income['Fiscal Year'].apply(lambda x: x * 1000000)
df_income['Fiscal Year'] = df_income['Fiscal Year'].apply(lambda x: round(x, decimals))
ticker = "AAPL"
df_income.rename(
columns={FISCAL_YEAR: 'Year', SHARES_DILUTED: 'Shares', SGA: 'SGA', RD: 'R&D', DEPR_AMOR: 'D&A',
OP_INCOME: 'Operating Income', NON_OP_INCOME: 'Non Operating Income',
INTEREST_EXP_NET: 'Interest Expense', PRETAX_INCOME_LOSS: 'Pretax Income',
INCOME_TAX: 'Income Tax'}, inplace=True)
# restated date
df_names = df_income.index.copy()
df_names = df_names.drop_duplicates()
# income signals
df_negative = df_income.copy()
df_negative[['Cost of Revenue', 'R&D', 'Operating Expenses', 'SGA', 'Income Tax', 'D&A', 'Interest Expense']] = \
df_negative[
['Cost of Revenue', 'R&D', 'Operating Expenses', 'SGA', 'Income Tax', 'D&A', 'Interest Expense']].apply(
lambda x: x * -1)
df_negative['Expenses'] = df_negative['Operating Expenses'] + df_negative['SGA'] + df_negative['R&D'] + df_negative[
'D&A']
df_signals = pd.DataFrame(index=df_negative.index)
df_signals['Year'] = df_negative['Year'].copy()
df_signals['Gross Profit Margin %'] = round((df_negative['Gross Profit'] / df_negative['Revenue']) * 100,
2).copy()
df_signals['SGA Of Gross Profit'] = round((df_negative['SGA'] / df_negative['Gross Profit']) * 100, 2).copy()
df_signals['R&D Of Gross Profit'] = round((df_negative['R&D'] / df_negative['Gross Profit']) * 100, 2).copy()
df_signals['D&A Of Gross Profit'] = round((df_negative['D&A'] / df_negative['Gross Profit']) * 100, 2).copy()
df_signals['Operating margin ratio'] = round((df_negative['Operating Income'] / df_negative['Revenue']) * 100,
2).copy()
df_signals['Interest to Operating Income %'] = round((df_negative['Interest Expense'] / df_negative['Operating Income'])
* 100, 2).copy()
df_signals['Taxes paid'] = round((df_negative['Income Tax'] / df_negative['Pretax Income']) * 100, 2).copy()
df_signals['Net income margin'] = round((df_negative['Net Income'] / df_negative['Revenue']) * 100, 2).copy()
df_signals['Interest to Operating Income %'] = df_signals['Interest to Operating Income %'].replace(-np.inf, 0)
df2_original = df_signals.loc[ticker]
# income growth per year
df1_growth = pd.DataFrame(index=df_income.index)
df1_growth['Year'] = df_income['Year'].copy()
df1_growth['Revenue Growth'] = df_income['Revenue'].pct_change().mul(100).round(2).copy()
df1_growth['Profit Growth'] = df_income['Gross Profit'].pct_change().mul(100).round(2).copy()
df1_growth['Operating Income Growth'] = df_income['Operating Income'].pct_change().mul(100).round(2).copy()
df1_growth['Pretax Income Growth'] = df_income['Pretax Income'].pct_change().mul(100).round(2).copy()
df1_growth['Net Income Growth'] = df_income['Net Income'].pct_change().mul(100).round(2).copy()
df1_growth = df1_growth.fillna(0)
# compounded income growth
df_income_compound_original = pd.DataFrame()
df_income_compound_original['Revenue %'] = []
df_income_compound_original['Inventory %'] = []
df_income_compound_original['Gross Profit %'] = []
df_income_compound_original['Operating Income %'] = []
df_income_compound_original['Pre tax %'] = []
df_income_compound_original['Net Income %'] = []
# balance sheet
df_balance = sf.load_balance(variant='annual', market='us', index=[TICKER])
df_balance = df_balance.drop(
['Currency', 'SimFinId', 'Fiscal Period', 'Publish Date', 'Shares (Basic)', 'Report Date',
'Shares (Diluted)', 'Total Liabilities & Equity', 'Restated Date'], axis=1)
df_balance = df_balance.fillna(0)
df_balance = df_balance.apply(lambda x: x / 1000000)
df_balance['Fiscal Year'] = df_balance['Fiscal Year'].apply(lambda x: x * 1000000)
df_balance['Fiscal Year'] = df_balance['Fiscal Year'].apply(lambda x: round(x, 0))
df_balance.rename(columns={FISCAL_YEAR: 'Year', CASH_EQUIV_ST_INVEST: 'Cash & Equivalent',
ACC_NOTES_RECV: 'Accounts Receivable', TOTAL_CUR_ASSETS: 'Current Assets',
PPE_NET: 'Prop Plant & Equipment', LT_INVEST_RECV: 'Long Term Investments',
OTHER_LT_ASSETS: 'Other Long Term Assets', TOTAL_NONCUR_ASSETS: 'Noncurrent assets',
PAYABLES_ACCRUALS: 'Accounts Payable', TOTAL_CUR_LIAB: 'Current Liabilities',
TOTAL_NONCUR_LIAB: 'Noncurrent Liabilities', SHARE_CAPITAL_ADD: 'C&APIC Stock',
ST_DEBT: 'ShortTerm debts', LT_DEBT: 'LongTerm Debts',
INVENTORIES: 'Inventory & Stock'}, inplace=True)
df3_original = df_balance.loc[ticker]
# balance signals
df_balance_signals = pd.DataFrame(index=df_balance.index)
df_balance_signals['Year'] = df_balance['Year'].copy()
df_balance_signals['Return on EquityT'] = round(
(df_income['Net Income'] / (df_balance['Total Equity'] + (-1 * df_balance['Treasury Stock']))), 2).copy()
df_balance_signals['Liabilities to EquityT'] = round(
(df_balance['Total Liabilities'] / (df_balance['Total Equity'] + (-1 * df_balance['Treasury Stock']))),
2).copy()
df_balance_signals['Debt (LS) to EquityT'] = round(
((df_balance['LongTerm Debts'] + df_balance['ShortTerm debts']) / (df_balance['Total Equity'] +
(-1 * df_balance['Treasury Stock']))), 2).copy()
df_balance_signals['Long Term Debt Coverage'] = round((df_income['Net Income'] / df_balance['LongTerm Debts']),
2).copy()
df_balance_signals['Long Term Debt Coverage'] = df_balance_signals['Long Term Debt Coverage'].replace([np.inf, -np.inf],
0)
df_balance_signals['Current Ratio'] = round((df_balance['Current Assets'] / df_balance['Current Liabilities']),
2).copy()
df_balance_signals['Return on Assets%'] = round((df_income['Net Income'] / df_balance['Total Assets']) * 100, 2).copy()
df_balance_signals['Retained Earning to Equity%'] = round(
(df_balance['Retained Earnings'] / df_balance['Total Equity']) * 100, 2).copy()
df_balance_signals['Receivables of Revenue%'] = round((df_balance['Accounts Receivable'] / df_income['Revenue']) * 100,
2).copy()
df_balance_signals['PP&E of Assets%'] = round((df_balance['Prop Plant & Equipment'] / df_balance['Total Assets']) * 100,
2).copy()
df_balance_signals['Inventory of Assets%'] = round((df_balance['Inventory & Stock'] / df_balance['Total Assets']) * 100,
2).copy()
df4_original = df_balance_signals.loc[ticker]
# balance growth per year
balance_growth = pd.DataFrame(index=df_balance.index)
balance_growth['Year'] = df_balance['Year'].copy()
balance_growth['Cash Growth'] = df_balance['Cash & Equivalent'].pct_change().mul(100).round(2).copy()
balance_growth['Inventory Growth'] = df_balance['Inventory & Stock'].pct_change().mul(100).round(2).copy()
balance_growth['Current Assets Growth'] = df_balance['Current Assets'].pct_change().mul(100).round(2).copy()
balance_growth['PP&E Growth'] = df_balance['Prop Plant & Equipment'].pct_change().mul(100).round(2).copy()
balance_growth['Investment Growth'] = df_balance['Long Term Investments'].pct_change().mul(100).round(2).copy()
balance_growth['Asset Growth'] = df_balance['Total Assets'].pct_change().mul(100).round(2).copy()
balance_growth['Liability Growth'] = df_balance['Total Liabilities'].pct_change().mul(100).round(2).copy()
balance_growth['Retained Earnings Growth'] = df_balance['Retained Earnings'].pct_change().mul(100).round(2).copy()
balance_growth['Equity Growth'] = df_balance['Total Equity'].pct_change().mul(100).round(2).copy()
balance_growth = balance_growth.fillna(0)
# balance compound growth
df_balance_compound_original = pd.DataFrame()
df_balance_compound_original['Cash %'] = []
df_balance_compound_original['Inventory %'] = []
df_balance_compound_original['Current Assets %'] = []
df_balance_compound_original['PP&E %'] = []
df_balance_compound_original['Long Term Investment%'] = []
df_balance_compound_original['Assets %'] = []
df_balance_compound_original['Liability %'] = []
df_balance_compound_original['Retained Earnings %'] = []
df_balance_compound_original['Equity %'] = []
# Meta tags for viewport responsiveness
meta_viewport = {"name": "viewport", "content": "width=device-width, initial-scale=1, shrink-to-fit=no"}
dashapp1 = dash.Dash(__name__,
server=app,
url_base_pathname='/dashboard/',
assets_folder=get_root_path(__name__) + '/assets/',
meta_tags=[meta_viewport])
#html.Img(src= dashapp1.get_asset_url('stock-icon.png'))
dashapp1.title = 'Financial Statements'
dashapp1.config['suppress_callback_exceptions'] = True
dashapp1.layout = html.Div([
html.Div([
html.H2('Fundemental Analysis'),
html.A(html.Button(id="logout-button", n_clicks=0, children="Log Out", className="logout2"),
href='https://financial8999.herokuapp.com/logout/'),
html.Img(src= dashapp1.get_asset_url('stock-icon.png')),
# html.Img(src= dashapp1.get_asset_url('stock-icon.png'))
], className="banner"),
html.Div([
dcc.Dropdown(id='drop-down', options=[
{'label': i, 'value': i} for i in df_names
], value=ticker, multi=False, placeholder='Enter a ticker'),
], className='drops'),
dcc.Tabs(id="tabs", value='Tab2', className='custom-tabs-container', children=[
dcc.Tab(label='Portfolio tracker', id='tab1', value='Tab1', selected_className='custom-tab--selected',
children=[]),
dcc.Tab(label='Financial Statements', id='tab2', value='Tab2', selected_className='custom-tab--selected',
children=[
dcc.Tabs(className='sub-tab-container', id='sub-tabs', value='tab-1', children=[
dcc.Tab(label='Income Statement', selected_className='sub-tab', value='tab-1'),
dcc.Tab(label='Balance Sheet', selected_className='sub-tab', value='tab-2'),
dcc.Tab(label='Cash Flow statement ', selected_className='sub-tab', value='tab-3'),
]),
html.Div(id='tabs-content')
]),
dcc.Tab(label='Intrinsic value estimations', id='tab3', value='Tab3', selected_className='custom-tab--selected',
children=["yo"]),
dcc.Tab(label='Machine learning', id='tab4', value='Tab4', selected_className='custom-tab--selected',
children=["yo"]),
]),
html.Div([ # modal div
html.Div([ # content div
html.Img(
id='modal-close-button',
src= dashapp1.get_asset_url('times-circle-solid.svg'),
# html.Img(src= dashapp1.get_asset_url('times-circle-solid.svg'))
n_clicks=0,
className='info-icon2',
style={'margin': 0},
),
html.Div(
children=[
dcc.Markdown(dedent('''
The Income Statement has been simplified by dividing by 1,000,000.
_**SGA**_ - Companies that do not have competitive advantage suffer from intense competition
showing wild variation in SGA (selling, general and administrative) costs as a percentage of
gross profit.
_**R&D**_ - Companies that spend heavily on R&D have an inherent flaw in their competitive
advantage that will always put their long term economics at risk since what seems like long
term competitive advantage is bestowed by a patent or technological advancement that will
expire or be replaced by newer technologies. Furthermore, since they constantly have to
invest in new products they must also redesign and update sales programs increasing
administrative costs.
                _**D&A**_ – Machinery and equipment eventually wear out over time, with the amount they
depreciate each year deducted from gross profit. Depreciation is a real cost of doing
business because at some point in the future the printing press will need to be replaced.
_**Interest Expense**_ – Interest paid out during the year is reflective of the total debt that
a company is carrying on its books. It can be very informative as to the level of economic
danger a company is in. Generally speaking, in any given industry, the company with the
lowest ratio of interest payments to operating income has some kind of competitive advantage.
                _**Pre Tax Income**_ – This is the number Warren Buffett uses when calculating the return
                he’ll be getting from a business, as all investments are quoted on a pre-tax basis. Since
                all investments compete with each other, it is easier to think about them on equal terms.
                _**Net Income**_ – Must have a historical uptrend with consistent earnings. Share
                repurchasing increases per-share earnings by decreasing the shares outstanding – while a lot
                of analysts look at per-share earnings, Warren Buffett looks at the business as a whole and
                its net earnings to see what is actually happening.
'''))]
),
],
style={'textAlign': 'center', },
className='modal-content',
),
], id='modal', className='modal', style={"display": "none"}),
html.Div([ # modal div
html.Div([ # content div
html.Img(
id='modal-close-button2',
src= dashapp1.get_asset_url('times-circle-solid.svg'),
# html.Img(src= dashapp1.get_asset_url('stock-icon.png'))
n_clicks=0,
className='info-icon2',
style={'margin': 0},
),
html.Div(
children=[
dcc.Markdown(dedent('''
_**Gross Profit Margin**_ - Companies with excellent economics and high profit margins tend to
have a durable competitive advantage as they have the freedom to price their products well in
                excess of the cost of goods sold. Without a competitive advantage, companies have to compete by
                lowering the prices of the products or services they are selling. As a general rule, companies
                with gross margins of 40% or better tend to have a durable competitive advantage.
                _**SGA of Gross Profit**_ – Anything under 30% of gross profit is considered fantastic. However,
                there are lots of companies with a durable competitive advantage whose SGA expenses fall in the 30-80% range.
_**D&A of Gross Profit**_ – Companies with durable competitive advantage have low depreciation
costs e.g. Coca Cola at 6% compared to GM at 22-57%.
                _**Interest of Operating Income**_ – Warren Buffett’s favourite durable competitive advantage
                holders in the consumer products category have interest pay-outs of less than 15% of operating
                income. This changes from industry to industry, e.g. Wells Fargo pays 30% of operating income in
                interest because it’s a bank.
                _**Tax**_ – Check how much a company pays in taxes. Businesses that are busy misleading the IRS
                are usually hard at work misleading their shareholders as well. Companies with a long term
                competitive advantage make so much money they don’t have to mislead anyone to look good.
                _**Net Income to Revenue**_ – A company showing a history of net earnings above 20% of revenue
                is likely to be benefitting from a durable competitive advantage long term. Under 10% it may not
                have a competitive advantage, but the 10-20% range holds lots of good businesses ripe for mining
                long term investment gold, e.g. Coca Cola with 21% and Moody’s with 31%, compared with Southwest
                Airlines at a meagre 7%, which reflects the highly competitive nature of the airline business.
                An exception is banks and financial institutions, where an abnormally high ratio is seen as the
                risk management department slacking off and an acceptance of greater risk for easier money.
'''))]
),
],
style={'textAlign': 'center', },
className='modal-content',
),
], id='modal2', className='modal', style={"display": "none"}),
html.Div([ # modal div
html.Div([ # content div
html.Img(
id='modal-close-button3',
src= dashapp1.get_asset_url('times-circle-solid.svg'),
# html.Img(src= dashapp1.get_asset_url('stock-icon.png'))
n_clicks=0,
className='info-icon2',
style={'margin': 0},
),
html.Div(
children=[
dcc.Markdown(dedent('''
_**Cash & Short-term Investments**_ – A low amount or lack of cash stockpile usually means that the
company has poor or mediocre economics. Companies that have a surplus of cash resulting from
ongoing business activities, little or no debt, no new sales of shares or assets and a history of
consistent earnings probably have excellent economics and competitive advantage working in their
favour. If we see a lot of cash and marketable securities with little to no debt, chances are the
business will sail through troubled times.
                _**Property plant and equipment**_ (net of accumulated depreciation) – Companies that are in constant
                competition have to keep updating their manufacturing facilities to try to stay competitive,
                often before the existing equipment is worn out. This creates an ongoing expense that is often quite
                substantial and keeps adding to the amount of plant and equipment the company lists on its
                balance sheet. A company with a durable competitive advantage doesn’t need to constantly upgrade
                its plant and equipment to stay competitive. Instead it replaces equipment as it wears out. PP&E
                depreciates in value over time.
                _**Short term debts**_ – Money owed and due within a year is historically cheaper than long term
                money. Institutions make money by borrowing short term and lending long term, but the problem is
                that money borrowed in the short term needs to be paid off. This works fine until short term
                rates jump above what was lent at long term, which leaves aggressive borrowers of short-term money at
                the mercy of sudden shifts in the credit market. The smartest and safest way to make money is to borrow
                money long term and lend it long term. Warren does not invest in companies with lots of
                short-term debt, e.g. Wells Fargo has $0.57 of short-term debt for every dollar of long-term debt,
                compared to Bank of America which has $2.09.
                _**Long term debt**_ – Some companies lump it in with short term debt, which creates the illusion
                that the company has more short-term debt than it actually does. As a rule, companies with a
                durable competitive advantage have little to no long-term debt.
                Sometimes an excellent business with a consumer monopoly will add large amounts of debt to
                finance the acquisition of another business; if so, check that the acquisition is also a consumer
                monopoly – when two combine, the surplus profits quickly reduce these debt mountains, but when a
                consumer monopoly acquires a commodity business, the commodity business only sucks out profits to
                support its poor economics.
                _**Treasury shares**_ – Shares set aside that can be bought back for additional funding; buybacks reduce
                the number of shares owned by private investors, lowering the amount that must be paid out in
                dividends. If a company feels the market has undervalued its business, it might buy back some
                shares, possibly reissuing them once the price has been corrected. Reducing the number of shares boosts
                certain ratios, such as earnings per share, as a form of financial engineering, which causes short
                term investors to flock back to the stock on seeing the improved ratios, increasing the share price.
                _**Retained Earnings**_ – Net Income can either be paid out as a dividend, used to buy back
                company shares, or retained to keep the business growing. When income is retained it is
                put on the balance sheet under shareholders’ equity, and when retained earnings are profitably used,
                they can greatly improve the long-term economic picture of the business.
                It is an accumulated number, which means each year new retained earnings are added to the total
                accumulated in prior years. This is one of the most important metrics when
                determining if a business has a durable competitive advantage – if a company is not adding to its
                retained earnings pool it is not growing its long term net worth and is unlikely to make you
                super rich long term.
                Not all growth in retained earnings is due to incremental increases in sales of existing
                products; some of it is due to the acquisition of other businesses. When two companies merge,
                their retained earnings pools are joined, which creates an even larger pool.
                _**Leverage**_ – Using debt to increase the earnings of a company can give off the illusion of
                competitive advantage. The problem is that while there seems to be some consistency in the income
                stream, the source funding the interest payments may not be able to maintain them – just
                look at the subprime lending crisis, where banks borrowed billions at 6% and loaned at 8% to
                homebuyers, but when the economy started to slip these buyers started to default on their mortgages.
                These subprime borrowers did not have a durable source of income, which ultimately meant the
                investment banks didn’t have one either.
                In assessing the quality and durability of a company’s competitive advantage, <NAME>
                avoids businesses that use a lot of leverage to generate earnings – in the short run they appear
                to be the goose that lays the golden egg, but at the end of the day they are not. _**“Only when
                the tide goes out do you discover who's been swimming naked.”**_
'''))]
),
],
style={'textAlign': 'center', },
className='modal-content',
),
], id='modal3', className='modal', style={"display": "none"}),
])
# callback
@dashapp1.callback(Output('tabs-content', 'children'),
[Input('sub-tabs', 'value')])
def render_content(tab):
if tab == 'tab-1':
return html.Div([
html.Div([
html.H6('Annual Income Statement'),
html.Img(
id='instructions-button',
src= dashapp1.get_asset_url('question-circle-solid.svg'),
# html.Img(src= dashapp1.get_asset_url('question-circle-solid.svg'))
n_clicks=0,
className='info-icon',
),
], className='annual-income'),
html.Div([
dash_table.DataTable(
style_cell={
'whiteSpace': 'normal',
'height': 'auto',
},
style_table={
'width': '95%',
'margin': '0px 20px 0px'
},
id='table',
columns=[{"name": i, "id": i} for i in df_income.columns]
)
]),
html.Div([
dcc.Graph(id='sales', config={'displayModeBar': False}, style={
"height": "40vh",
"width": "32vw",
"float": "left",
'display': 'inline-block',
"margin-left": "20px"
}),
dcc.Graph(id='costs', config={'displayModeBar': False}, style={
"height": "40vh",
"width": "32vw",
"float": "left",
'display': 'inline-block',
# "margin-left":"-100px"
}),
dcc.Graph(id='operating', config={'displayModeBar': False}, style={
"height": "40vh",
"width": "32vw",
"float": "left",
"display": "inline-block",
# "margin-left":"-100px"
}),
], style={"height": "50vh", "width": "98vw", "margin-top": "20px"}),
html.Div([
dcc.Graph(id='interest', config={'displayModeBar': False}, style={
"height": "40vh",
"width": "32vw",
"float": "left",
'display': 'inline-block',
"margin-left": "20px"
}),
dcc.Graph(id='tax', config={'displayModeBar': False}, style={
"height": "40vh",
"width": "32vw",
"float": "left",
'display': 'inline-block'
}),
dcc.Graph(id='shares', config={'displayModeBar': False}, style={
"height": "40vh",
"width": "30vw",
"float": "left",
'display': 'inline-block'
}),
], style={"height": "50vh", "width": "98vw", "margin-top": "-20px"}),
html.Div([
html.H6('Key Ratios %'),
html.Img(
id='instructions-button2',
src= dashapp1.get_asset_url('question-circle-solid.svg'),
# html.Img(src=dashapp1.get_asset_url('stock-icon.png'))
n_clicks=0,
className='info-icon3',
),
], className='text1'),
html.Div([
dash_table.DataTable(
style_table={
'width': '95%',
'margin': '0px 20px 0px'
},
id='table2',
columns=[{"name": i, "id": i} for i in df2_original.columns]
)
]),
html.Div([
dcc.Graph(id='profit-margin', config={'displayModeBar': False}, style={
"height": "40vh",
"width": "31vw",
"float": "left",
'display': 'inline-block',
"margin-left": "20px"
}),
dcc.Graph(id='SGA', config={'displayModeBar': False}, style={
"height": "40vh",
"width": "31vw",
"float": "left",
'display': 'inline-block',
"margin-left": "20px"
}),
dcc.Graph(id='R&D', config={'displayModeBar': False}, style={
"height": "40vh",
"width": "30vw",
"float": "left",
"display": "inline-block",
"margin-left": "20px"
}),
], style={"height": "50vh", "width": "98vw", "margin-top": "20px"}),
html.Div([
dcc.Graph(id='operating-margin-ratio', config={'displayModeBar': False},
style={
"height": "40vh",
"width": "32vw",
"float": "left",
'display': 'inline-block',
"margin-left": "20px"
}),
dcc.Graph(id='interest-coverage', config={'displayModeBar': False},
style={
"height": "40vh",
"width": "32vw",
"float": "left",
'display': 'inline-block'
}),
dcc.Graph(id='taxes-paid', config={'displayModeBar': False}, style={
"height": "40vh",
"width": "30vw",
"float": "left",
'display': 'inline-block'
}),
], style={"height": "50vh", "width": "98vw", "margin-top": "-20px"}),
html.Div([
html.H6('Growth Signals')
], className='text2'),
html.Div([
dash_table.DataTable(
# style_cell={
# 'whiteSpace': 'normal',
# 'height': 'auto',
# },
style_table={
'width': '95%',
'margin': '0px 20px 20px'
},
id='income_compound_table',
columns=[{"name": i, "id": i} for i in df_income_compound_original.columns],
)
]),
html.Div([
dash_table.DataTable(
style_table={
'width': '95%',
'margin': '0px 20px 0px'
},
id='table_growth',
columns=[{"name": i, "id": i} for i in df1_growth.columns]
)
]),
        ])
elif tab == 'tab-2':
return html.Div([
html.Div([
html.H6('Annual Balance Sheets'),
html.Img(
id='instructions-button3',
src= dashapp1.get_asset_url('question-circle-solid.svg'),
# html.Img(src= dashapp1.get_asset_url('stock-icon.png'))
n_clicks=0,
className='info-icon4',
),
], className='annual-income'),
html.Div([
dash_table.DataTable(
style_cell={
'whiteSpace': 'normal',
'height': 'auto',
},
style_table={
'width': '95%',
'margin': '0px 20px 0px'
},
id='table3',
columns=[{"name": i, "id": i} for i in df3_original.columns],
),
]),
html.Div([
dcc.Graph(id='balance', config={'displayModeBar': False}, style={
"height": "40vh",
"width": "32vw",
"float": "left",
'display': 'inline-block',
"margin-left": "20px"
}),
dcc.Graph(id='liquidity', config={'displayModeBar': False}, style={
"height": "40vh",
"width": "32vw",
"float": "left",
'display': 'inline-block',
# "margin-left":"-100px"
}),
dcc.Graph(id='long-term-assets', config={'displayModeBar': False}, style={
"height": "40vh",
"width": "32vw",
"float": "left",
"display": "inline-block",
# "margin-left":"-100px"
}),
], style={"height": "50vh", "width": "98vw", "margin-top": "20px"}),
html.Div([
dcc.Graph(id='current debts', config={'displayModeBar': False}, style={
"height": "40vh",
"width": "32vw",
"float": "left",
'display': 'inline-block',
"margin-left": "20px"
}),
dcc.Graph(id='non-current-debts', config={'displayModeBar': False},
style={
"height": "40vh",
"width": "32vw",
"float": "left",
'display': 'inline-block',
# "margin-left":"-100px"
}),
dcc.Graph(id='retained-earnings', config={'displayModeBar': False},
style={
"height": "40vh",
"width": "30vw",
"float": "left",
"display": "inline-block",
# "margin-left":"-100px"
}),
], style={"height": "50vh", "width": "98vw", "margin-top": "-20px"}),
html.Div([
html.H6('Balance Signals')
], className='text2'),
html.Div([
dash_table.DataTable(
style_cell={
'whiteSpace': 'normal',
'height': 'auto',
},
style_table={
'width': '95%',
'margin': '0px 20px 0px'
},
id='table4',
columns=[{"name": i, "id": i} for i in df4_original.columns],
# data=df4.to_dict('records'),
)
]),
html.Div([
dcc.Graph(id='equity_returns', config={'displayModeBar': False}, style={
"height": "40vh",
"width": "32vw",
"float": "left",
'display': 'inline-block',
"margin-left": "20px"
}),
dcc.Graph(id='retained_equity', config={'displayModeBar': False}, style={
"height": "40vh",
"width": "32vw",
"float": "left",
'display': 'inline-block',
# "margin-left":"-100px"
}),
dcc.Graph(id='assets_return', config={'displayModeBar': False}, style={
"height": "40vh",
"width": "32vw",
"float": "left",
"display": "inline-block",
# "margin-left":"-100px"
}),
], style={"height": "50vh", "width": "98vw", "margin-top": "20px"}),
html.Div([
html.H6('Growth Signals')
], className='text2'),
html.Div([
dash_table.DataTable(
# style_cell={
# 'whiteSpace': 'normal',
# 'height': 'auto',
# },
style_table={
'width': '95%',
'margin': '0px 20px 0px'
},
id='balance_compound_growth',
columns=[{"name": i, "id": i} for i in df_balance_compound_original.columns]
)
]),
html.Div([
dash_table.DataTable(
style_cell={
'whiteSpace': 'normal',
'height': 'auto',
},
style_table={
'width': '95%',
'margin': '20px 20px 0px'
},
id='balance_growth',
columns=[{"name": i, "id": i} for i in balance_growth.columns],
# data=df4.to_dict('records'),
)
])
])
@dashapp1.callback(
Output('table', 'data'),
[Input("drop-down", "value")])
def update_data(input_value):
try:
df1 = df_income.loc[input_value]
data = df1.to_dict("records")
return data
except TypeError:
pass
@dashapp1.callback(
Output('table_growth', 'data'),
[Input("drop-down", "value")])
def update_data(input_value):
try:
df1_growth.loc[input_value]['Revenue Growth'][0] = 0
df1_growth.loc[input_value]['Profit Growth'][0] = 0
df1_growth.loc[input_value]['Operating Income Growth'][0] = 0
df1_growth.loc[input_value]['Pretax Income Growth'][0] = 0
df1_growth.loc[input_value]['Net Income Growth'][0] = 0
growth = df1_growth.loc[input_value]
data = growth.to_dict("records")
return data
except TypeError:
pass
@dashapp1.callback(
Output('income_compound_table', 'data'),
[Input("drop-down", "value")])
def update_data(input_value):
try:
df1 = df_income.loc[input_value]
df3 = df_balance.loc[input_value]
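        # Each growth figure below is a compound annual growth rate (CAGR):
        # (last value / first value) ** (1 / number of years) - 1, shown as a %.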
years_data = df1['Year'][-1] - df1['Year'][0]
income_change = df1['Net Income'][-1] / df1['Net Income'][0]
income_growth_percent = round((((income_change ** (1 / years_data)) - 1) * 100), 2)
revenue_change = df1['Revenue'][-1] / df1['Revenue'][0]
revenue_growth_percent = round((((revenue_change ** (1 / years_data)) - 1) * 100), 2)
profit_change = df1['Gross Profit'][-1] / df1['Gross Profit'][0]
profit_growth_percent = round((((profit_change ** (1 / years_data)) - 1) * 100), 2)
operating_change = df1['Operating Income'][-1] / df1['Operating Income'][0]
operating_growth_percent = round((((operating_change ** (1 / years_data)) - 1) * 100), 2)
pretax_change = df1['Pretax Income'][-1] / df1['Pretax Income'][0]
pretax_growth_percent = round((((pretax_change ** (1 / years_data)) - 1) * 100), 2)
inventory_change = df3['Inventory & Stock'][-1] / df3['Inventory & Stock'][0]
inventory_growth_percent = round((((inventory_change ** (1 / years_data)) - 1) * 100), 2)
df_income_compound = pd.DataFrame()
df_income_compound['Revenue %'] = [revenue_growth_percent]
df_income_compound['Inventory %'] = [inventory_growth_percent]
df_income_compound['Gross Profit %'] = [profit_growth_percent]
df_income_compound['Operating Income %'] = [operating_growth_percent]
df_income_compound['Pre tax %'] = [pretax_growth_percent]
df_income_compound['Net Income %'] = [income_growth_percent]
data = df_income_compound.to_dict("records")
return data
except (TypeError, IndexError):
pass
@dashapp1.callback(
Output('table2', 'data'),
[Input("drop-down", "value")])
def update_data(input_value):
try:
df2 = df_signals.loc[input_value]
data = df2.to_dict("records")
return data
except TypeError:
pass
@dashapp1.callback(
Output('table3', 'data'),
[Input("drop-down", "value")])
def update_data(input_value):
try:
df3 = df_balance.loc[input_value]
data = df3.to_dict("records")
return data
except TypeError:
pass
@dashapp1.callback(
Output('sales', 'figure'),
[Input("drop-down", "value")])
def update_fig(input_value):
try:
df11 = df_negative.loc[input_value]
fig1 = make_subplots(specs=[[{"secondary_y": True}]])
fig1.add_trace(go.Scatter(x=list(df11['Year']), y=list(df11['Revenue']), name="Revenue"))
fig1.add_trace(go.Scatter(x=list(df11['Year']), y=list(df11['Cost of Revenue']), name="Cost of Revenue"))
fig1.add_trace(go.Scatter(x=list(df11['Year']), y=list(df11['Gross Profit']), name="Gross Profit"))
fig1.update_layout(legend=dict(x=0, y=1,
traceorder="normal",
font=dict(family="sans-serif", size=12, color="black"),
bgcolor="rgba(50, 50, 50, 0)", bordercolor="rgba(50, 50, 50, 0)", borderwidth=0))
# fig1.update_xaxes(title_text="Year")
fig1.update_layout(title={'text': "Sales", 'y': 0.96, 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'})
fig1.update_layout(margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
fig1.update_yaxes(rangemode="tozero")
return fig1
except TypeError:
pass
@dashapp1.callback(
Output('costs', 'figure'),
[Input("drop-down", "value")])
def update_fig(input_value):
try:
df11 = df_negative.loc[input_value]
fig2 = make_subplots(specs=[[{"secondary_y": True}]])
fig2.add_trace(go.Scatter(x=list(df11['Year']), y=list(df11['Operating Expenses']), name="Operating Expenses"))
fig2.add_trace(go.Scatter(x=list(df11['Year']), y=list(df11['SGA']), name="SGA"))
fig2.add_trace(go.Scatter(x=list(df11['Year']), y=list(df11['R&D']), name="R&D"))
fig2.add_trace(go.Scatter(x=list(df11['Year']), y=list(df11['D&A']), name="D&A"))
fig2.update_layout(legend=dict(x=0, y=1,
traceorder="normal",
font=dict(family="sans-serif", size=12, color="black"),
bgcolor="rgba(50, 50, 50, 0)", bordercolor="rgba(50, 50, 50, 0)", borderwidth=0))
fig2.update_layout(title={'text': "Costs", 'y': 0.96, 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'})
fig2.update_layout(margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
fig2.update_yaxes(rangemode="tozero")
return fig2
except TypeError:
pass
@dashapp1.callback(
Output('operating', 'figure'),
[Input("drop-down", "value")])
def update_fig(input_value):
try:
df11 = df_negative.loc[input_value]
fig3 = make_subplots(specs=[[{"secondary_y": True}]])
fig3.add_trace(go.Scatter(x=list(df11['Year']), y=list(df11['Expenses']), name="Expenses"))
fig3.add_trace(go.Scatter(x=list(df11['Year']), y=list(df11['Gross Profit']), name="Gross Profit"))
fig3.add_trace(go.Scatter(x=list(df11['Year']), y=list(df11['Operating Income']), name="Operating Income"))
fig3.update_layout(legend=dict(x=0, y=1,
traceorder="normal",
font=dict(family="sans-serif", size=12, color="black"),
bgcolor="rgba(50, 50, 50, 0)", bordercolor="rgba(50, 50, 50, 0)", borderwidth=0))
fig3.update_layout(title={'text': "Gross Profit to Operating Income", 'y': 0.96, 'x': 0.5, 'xanchor': 'center',
'yanchor': 'top'})
fig3.update_layout(margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
fig3.update_yaxes(rangemode="tozero")
return fig3
except TypeError:
pass
@dashapp1.callback(
Output('interest', 'figure'),
[Input("drop-down", "value")])
def update_fig(input_value):
try:
df11 = df_negative.loc[input_value]
fig4 = make_subplots(specs=[[{"secondary_y": True}]])
fig4.add_trace(go.Scatter(x=list(df11['Year']), y=list(df11['Operating Income']), name="Operating Income"))
fig4.add_trace(
go.Scatter(x=list(df11['Year']), y=list(df11['Non Operating Income']), name="Non Operating Income"))
fig4.add_trace(go.Scatter(x=list(df11['Year']), y=list(df11['Pretax Income']), name="Pretax Income"))
fig4.add_trace(go.Scatter(x=list(df11['Year']), y=list(df11['Interest Expense']), name="Interest Expense"))
fig4.update_layout(legend=dict(x=0, y=1,
traceorder="normal",
font=dict(family="sans-serif", size=12, color="black"),
bgcolor="rgba(50, 50, 50, 0)", bordercolor="rgba(50, 50, 50, 0)", borderwidth=0))
fig4.update_layout(
title={'text': "Measuring Interest Expense", 'y': 0.96, 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'})
fig4.update_layout(margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
fig4.update_yaxes(rangemode="tozero")
return fig4
except TypeError:
pass
@dashapp1.callback(
Output('tax', 'figure'),
[Input("drop-down", "value")])
def update_fig(input_value):
try:
df11 = df_negative.loc[input_value]
fig5 = make_subplots(specs=[[{"secondary_y": True}]])
fig5.add_trace(go.Scatter(x=list(df11['Year']), y=list(df11['Net Income']), name="Net Income"))
fig5.add_trace(go.Scatter(x=list(df11['Year']), y=list(df11['Income Tax']), name="Income Tax"))
fig5.add_trace(go.Scatter(x=list(df11['Year']), y=list(df11['Pretax Income']), name="Pretax Income"))
fig5.update_layout(legend=dict(x=0, y=1,
traceorder="normal",
font=dict(family="sans-serif", size=12, color="black"),
bgcolor="rgba(50, 50, 50, 0)", bordercolor="rgba(50, 50, 50, 0)", borderwidth=0))
fig5.update_layout(title={'text': "Measuring Tax", 'y': 0.96, 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'})
fig5.update_layout(margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
fig5.update_yaxes(rangemode="tozero")
return fig5
except TypeError:
pass
@dashapp1.callback(
Output('shares', 'figure'),
[Input("drop-down", "value")])
def update_fig(input_value):
try:
df11 = df_negative.loc[input_value]
fig6 = make_subplots()
fig6.add_trace(go.Scatter(x=list(df11['Year']), y=list(df11['Shares']), name="Shares"))
fig6.update_layout(legend=dict(x=0, y=1,
traceorder="normal",
font=dict(family="sans-serif", size=12, color="black"),
bgcolor="rgba(50, 50, 50, 0)", bordercolor="rgba(50, 50, 50, 0)", borderwidth=0))
fig6.update_layout(title={'text': "Shares", 'y': 0.96, 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'})
fig6.update_layout(margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
fig6.update_yaxes(rangemode="tozero")
return fig6
except TypeError:
pass
@dashapp1.callback(
Output('profit-margin', 'figure'),
[Input("drop-down", "value")])
def update_fig(input_value):
try:
df2 = df_signals.loc[input_value]
fig7 = make_subplots()
        fig7.add_trace(go.Scatter(x=list(df2['Year']), y=list(df2['Gross Profit Margin %']), name="profit-margin"))
fig7.update_layout(legend=dict(x=0, y=1,
traceorder="normal",
font=dict(family="sans-serif", size=12, color="black"),
bgcolor="rgba(50, 50, 50, 0)", bordercolor="rgba(50, 50, 50, 0)", borderwidth=0))
fig7.update_layout(
title={'text': "Gross Profit Margin %", 'y': 0.96, 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'})
fig7.update_layout(margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
fig7.update_yaxes(rangemode="tozero")
return fig7
except TypeError:
pass
@dashapp1.callback(
Output('SGA', 'figure'),
[Input("drop-down", "value")])
def update_fig(input_value):
try:
df2 = df_signals.loc[input_value]
fig8 = make_subplots()
fig8.add_trace(
go.Scatter(x=list(df2['Year']), y=list(df2['SGA Of Gross Profit']), name="SGA", line=dict(color="#EF553B")))
fig8.update_layout(legend=dict(x=0, y=1,
traceorder="normal",
font=dict(family="sans-serif", size=12, color="black"),
bgcolor="rgba(50, 50, 50, 0)", bordercolor="rgba(50, 50, 50, 0)", borderwidth=0))
fig8.update_layout(
title={'text': "SGA of Gross Profit % ", 'y': 0.96, 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'})
fig8.update_layout(margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
fig8.update_yaxes(rangemode="tozero")
return fig8
except TypeError:
pass
@dashapp1.callback(
Output('R&D', 'figure'),
[Input("drop-down", "value")])
def update_fig(input_value):
try:
df2 = df_signals.loc[input_value]
fig9 = make_subplots()
fig9.add_trace(
go.Scatter(x=list(df2['Year']), y=list(df2['R&D Of Gross Profit']), name="R&D", line=dict(color='#00cc96')))
fig9.update_layout(legend=dict(x=0, y=1,
traceorder="normal",
font=dict(family="sans-serif", size=12, color="black"),
bgcolor="rgba(50, 50, 50, 0)", bordercolor="rgba(50, 50, 50, 0)", borderwidth=0))
fig9.update_layout(
title={'text': "R&D of Gross Profit % ", 'y': 0.96, 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'})
fig9.update_layout(margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
fig9.update_yaxes(rangemode="tozero")
return fig9
except TypeError:
pass
@dashapp1.callback(
Output('operating-margin-ratio', 'figure'),
[Input("drop-down", "value")])
def update_fig(input_value):
try:
df2 = df_signals.loc[input_value]
fig10 = make_subplots(specs=[[{"secondary_y": True}]])
fig10.add_trace(go.Scatter(x=list(df2['Year']), y=list(df2['Operating margin ratio']), name="Operating Margin"))
fig10.add_trace(go.Scatter(x=list(df2['Year']), y=list(df2['Net income margin']), name="Net Income"))
fig10.update_layout(legend=dict(x=0, y=1,
traceorder="normal",
font=dict(family="sans-serif", size=12, color="black"),
bgcolor="rgba(50, 50, 50, 0)", bordercolor="rgba(50, 50, 50, 0)",
borderwidth=0))
fig10.update_layout(
title={'text': "Margin ratio % ", 'y': 0.96, 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'})
fig10.update_layout(margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
fig10.update_yaxes(rangemode="tozero")
return fig10
except TypeError:
pass
@dashapp1.callback(
Output('interest-coverage', 'figure'),
[Input("drop-down", "value")])
def update_fig(input_value):
try:
df2 = df_signals.loc[input_value]
fig11 = make_subplots()
fig11.add_trace(
go.Scatter(x=list(df2['Year']), y=list(df2['Interest to Operating Income %']), name="interest-coverage",
line=dict(color='#00cc96')))
fig11.update_layout(legend=dict(x=0, y=1,
traceorder="normal",
font=dict(family="sans-serif", size=12, color="black"),
bgcolor="rgba(50, 50, 50, 0)", bordercolor="rgba(50, 50, 50, 0)",
borderwidth=0))
fig11.update_layout(
title={'text': "Interest Coverage ratio % ", 'y': 0.96, 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'})
fig11.update_layout(margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
fig11.update_yaxes(rangemode="tozero")
return fig11
except TypeError:
pass
@dashapp1.callback(
Output('taxes-paid', 'figure'),
[Input("drop-down", "value")])
def update_fig(input_value):
try:
df2 = df_signals.loc[input_value]
fig12 = make_subplots()
fig12.add_trace(
go.Scatter(x=list(df2['Year']), y=list(df2['Taxes paid']), name="taxes", line=dict(color='#00cc96')))
fig12.update_layout(legend=dict(x=0, y=1,
traceorder="normal",
font=dict(family="sans-serif", size=12, color="black"),
bgcolor="rgba(50, 50, 50, 0)", bordercolor="rgba(50, 50, 50, 0)",
borderwidth=0))
fig12.update_layout(
title={'text': "Taxes % ", 'y': 0.96, 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'})
fig12.update_layout(margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
fig12.update_yaxes(rangemode="tozero")
return fig12
except TypeError:
pass
@dashapp1.callback(
Output('liquidity', 'figure'),
[Input("drop-down", "value")])
def update_fig(input_value):
try:
df3 = df_balance.loc[input_value]
fig13 = make_subplots(specs=[[{"secondary_y": True}]])
fig13.add_trace(go.Scatter(x=list(df3['Year']), y=list(df3['Cash & Equivalent']), name="Cash & Equivalent"))
fig13.add_trace(
go.Scatter(x=list(df3['Year']), y=list(df3['Accounts Receivable']), name="Accounts Receivables"))
fig13.add_trace(go.Scatter(x=list(df3['Year']), y=list(df3['Inventory & Stock']), name="Inventory"))
fig13.add_trace(go.Scatter(x=list(df3['Year']), y=list(df3['Current Assets']), name="Current_Assets"))
fig13.update_layout(legend=dict(x=0, y=1,
traceorder="normal",
font=dict(family="sans-serif", size=12, color="black"),
bgcolor="rgba(50, 50, 50, 0)", bordercolor="rgba(50, 50, 50, 0)",
borderwidth=0))
fig13.update_layout(title={'text': "Liquidity", 'y': 0.96, 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'})
fig13.update_layout(margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
fig13.update_yaxes(rangemode="tozero")
return fig13
except TypeError:
pass
@dashapp1.callback(
Output('long-term-assets', 'figure'),
[Input("drop-down", "value")])
def update_fig(input_value):
try:
df3 = df_balance.loc[input_value]
fig14 = make_subplots(specs=[[{"secondary_y": True}]])
fig14.add_trace(
go.Scatter(x=list(df3['Year']), y=list(df3['Prop Plant & Equipment']), name="Prop Plant & Equipment"))
fig14.add_trace(
go.Scatter(x=list(df3['Year']), y=list(df3['Long Term Investments']), name="Long Term Investments"))
fig14.add_trace(
go.Scatter(x=list(df3['Year']), y=list(df3['Other Long Term Assets']), name="Other Long Term Assets"))
fig14.add_trace(go.Scatter(x=list(df3['Year']), y=list(df3['Noncurrent assets']), name="Non current Assets"))
fig14.update_layout(legend=dict(x=0, y=1,
traceorder="normal",
font=dict(family="sans-serif", size=12, color="black"),
bgcolor="rgba(50, 50, 50, 0)", bordercolor="rgba(50, 50, 50, 0)",
borderwidth=0))
fig14.update_layout(
title={'text': "Non Current Assets", 'y': 0.96, 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'})
fig14.update_layout(margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
fig14.update_yaxes(rangemode="tozero")
return fig14
except TypeError:
pass
@dashapp1.callback(
Output('balance', 'figure'),
[Input("drop-down", "value")])
def update_fig(input_value):
try:
df3 = df_balance.loc[input_value]
fig15 = make_subplots(specs=[[{"secondary_y": True}]])
fig15.add_trace(go.Scatter(x=list(df3['Year']), y=list(df3['Total Assets']), name="Assets"))
fig15.add_trace(go.Scatter(x=list(df3['Year']), y=list(df3['Total Liabilities']), name="Liabilities"))
fig15.add_trace(go.Scatter(x=list(df3['Year']), y=list(df3['Total Equity']), name="Equity"))
fig15.update_layout(legend=dict(x=0, y=1,
traceorder="normal",
font=dict(family="sans-serif", size=12, color="black"),
bgcolor="rgba(50, 50, 50, 0)", bordercolor="rgba(50, 50, 50, 0)",
borderwidth=0))
fig15.update_layout(title={'text': "Balance", 'y': 0.96, 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'})
fig15.update_layout(margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
fig15.update_yaxes(rangemode="tozero")
return fig15
except TypeError:
pass
@dashapp1.callback(
Output('current debts', 'figure'),
[Input("drop-down", "value")])
def update_fig(input_value):
try:
df3 = df_balance.loc[input_value]
fig16 = make_subplots(specs=[[{"secondary_y": True}]])
fig16.add_trace(go.Scatter(x=list(df3['Year']), y=list(df3['Accounts Payable']), name="Accounts Payable"))
fig16.add_trace(go.Scatter(x=list(df3['Year']), y=list(df3['ShortTerm debts']), name="Short Term Debts"))
fig16.add_trace(
go.Scatter(x=list(df3['Year']), y=list(df3['Current Liabilities']), name="Current Liabilities"))
fig16.update_layout(legend=dict(x=0, y=1,
traceorder="normal",
font=dict(family="sans-serif", size=12, color="black"),
bgcolor="rgba(50, 50, 50, 0)", bordercolor="rgba(50, 50, 50, 0)",
borderwidth=0))
fig16.update_layout(
title={'text': "Current Debts", 'y': 0.96, 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'})
fig16.update_layout(margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
fig16.update_yaxes(rangemode="tozero")
return fig16
except TypeError:
pass
@dashapp1.callback(
Output('non-current-debts', 'figure'),
[Input("drop-down", "value")])
def update_fig(input_value):
try:
df3 = df_balance.loc[input_value]
fig17 = make_subplots(specs=[[{"secondary_y": True}]])
fig17.add_trace(go.Scatter(x=list(df3['Year']), y=list(df3['LongTerm Debts']), name="Long Term Debts"))
fig17.add_trace(
go.Scatter(x=list(df3['Year']), y=list(df3['Noncurrent Liabilities']), name="Non Current Liabilities"))
fig17.update_layout(legend=dict(x=0, y=1,
traceorder="normal",
font=dict(family="sans-serif", size=12, color="black"),
bgcolor="rgba(50, 50, 50, 0)", bordercolor="rgba(50, 50, 50, 0)",
borderwidth=0))
fig17.update_layout(
title={'text': "Non Current Debts", 'y': 0.96, 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'})
fig17.update_layout(margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
fig17.update_yaxes(rangemode="tozero")
return fig17
except TypeError:
pass
@dashapp1.callback(
Output('retained-earnings', 'figure'),
[Input("drop-down", "value")])
def update_fig(input_value):
try:
df3 = df_balance.loc[input_value]
fig18 = make_subplots()
fig18.add_trace(
go.Scatter(x=list(df3['Year']), y=list(df3['Retained Earnings']), name="retained",
line=dict(color='#00cc96')))
fig18.update_layout(legend=dict(x=0, y=1,
traceorder="normal",
font=dict(family="sans-serif", size=12, color="black"),
bgcolor="rgba(50, 50, 50, 0)", bordercolor="rgba(50, 50, 50, 0)",
borderwidth=0))
fig18.update_layout(
title={'text': "Retained Earnings", 'y': 0.96, 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'})
fig18.update_layout(margin={'t': 25, 'b': 0, 'l': 0, 'r': 0})
fig18.update_yaxes(rangemode="tozero")
return fig18
except TypeError:
pass
@dashapp1.callback(
Output('table4', 'data'),
[Input("drop-down", "value")])
def update_data(input_value):
try:
df4 = df_balance_signals.loc[input_value]
data = df4.to_dict("records")
return data
except TypeError:
pass
@dashapp1.callback(
Output('balance_growth', 'data'),
[Input("drop-down", "value")])
def update_data(input_value):
try:
balance_growth.loc[input_value]['Cash Growth'][0] = 0
balance_growth.loc[input_value]['Inventory Growth'][0] = 0
balance_growth.loc[input_value]['Current Assets Growth'][0] = 0
balance_growth.loc[input_value]['PP&E Growth'][0] = 0
balance_growth.loc[input_value]['Investment Growth'][0] = 0
balance_growth.loc[input_value]['Asset Growth'][0] = 0
balance_growth.loc[input_value]['Liability Growth'][0] = 0
balance_growth.loc[input_value]['Retained Earnings Growth'][0] = 0
balance_growth.loc[input_value]['Equity Growth'][0] = 0
growth_balance = balance_growth.loc[input_value]
data = growth_balance.to_dict("records")
return data
except TypeError:
pass
@dashapp1.callback(
Output('balance_compound_growth', 'data'),
[Input("drop-down", "value")])
def update_data(input_value):
try:
df3 = df_balance.loc[input_value]
years_data = df3['Year'][-1] - df3['Year'][0]
inventory_change = df3['Inventory & Stock'][-1] / df3['Inventory & Stock'][0]
inventory_growth_percent = round((((inventory_change ** (1 / years_data)) - 1) * 100), 2)
current_assets_change = df3['Current Assets'][-1] / df3['Current Assets'][0]
current_assets_growth_percent = round((((current_assets_change ** (1 / years_data)) - 1) * 100), 2)
ppe_change = df3['Prop Plant & Equipment'][-1] / df3['Prop Plant & Equipment'][0]
ppe_percent = round((((ppe_change ** (1 / years_data)) - 1) * 100), 2)
investment_change = df3['Long Term Investments'][-1] / df3['Long Term Investments'][0]
investment_percent = round((((investment_change ** (1 / years_data)) - 1) * 100), 2)
assets_change = df3['Total Assets'][-1] / df3['Total Assets'][0]
assets_percent = round((((assets_change ** (1 / years_data)) - 1) * 100), 2)
liability_change = df3['Total Liabilities'][-1] / df3['Total Liabilities'][0]
liability_percent = round((((liability_change ** (1 / years_data)) - 1) * 100), 2)
retained_earnings_change = df3['Retained Earnings'][-1] / df3['Retained Earnings'][0]
retained_earnings_percent = round((((retained_earnings_change ** (1 / years_data)) - 1) * 100), 2)
equity_change = df3['Total Equity'][-1] / df3['Total Equity'][0]
equity_percent = round((((equity_change ** (1 / years_data)) - 1) * 100), 2)
cash_equivalent_change = df3['Cash & Equivalent'][-1] / df3['Cash & Equivalent'][0]
cash_equivalent_duplicate = round((((cash_equivalent_change ** (1 / years_data)) - 1) * 100), 2)
        df_balance_compound = pd.DataFrame()
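        # Assumed completion: the source is truncated here. Mirroring the
        # income-compound callback above, fill the columns declared for
        # df_balance_compound_original and return the records.
        df_balance_compound['Cash %'] = [cash_equivalent_duplicate]
        df_balance_compound['Inventory %'] = [inventory_growth_percent]
        df_balance_compound['Current Assets %'] = [current_assets_growth_percent]
        df_balance_compound['PP&E %'] = [ppe_percent]
        df_balance_compound['Long Term Investment%'] = [investment_percent]
        df_balance_compound['Assets %'] = [assets_percent]
        df_balance_compound['Liability %'] = [liability_percent]
        df_balance_compound['Retained Earnings %'] = [retained_earnings_percent]
        df_balance_compound['Equity %'] = [equity_percent]
        data = df_balance_compound.to_dict("records")
        return data
    except (TypeError, IndexError):
        pass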
# -*- coding: utf-8 -*-
"""
Authors: <NAME>, <NAME>, <NAME>, and
<NAME>
IHE Delft 2017
Contact: <EMAIL>
Repository: https://github.com/gespinoza/hants
Module: hants
"""
from __future__ import division
import netCDF4
import pandas as pd
import math
from .davgis.functions import (Spatial_Reference, List_Datasets, Clip,
Resample, Raster_to_Array, NetCDF_to_Raster)
import os
import tempfile
from copy import deepcopy
import matplotlib.pyplot as plt
import warnings
def run_HANTS(rasters_path_inp, name_format,
start_date, end_date, latlim, lonlim, cellsize, nc_path,
nb, nf, HiLo, low, high, fet, dod, delta,
epsg=4326, fill_val=-9999.0,
rasters_path_out=None, export_hants_only=False):
'''
This function runs the python implementation of the HANTS algorithm. It
    takes a folder of GeoTIFF rasters as input, creates a netcdf file, and
    optionally exports the data back to GeoTIFFs.
'''
create_netcdf(rasters_path_inp, name_format, start_date, end_date,
latlim, lonlim, cellsize, nc_path,
epsg, fill_val)
HANTS_netcdf(nc_path, nb, nf, HiLo, low, high, fet, dod, delta,
fill_val)
#if rasters_path_out:
#export_tiffs(rasters_path_out, nc_path, name_format, export_hants_only)
return nc_path
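# Illustrative call (paths, extent and HANTS parameters below are assumptions,
# not values taken from this module):
# run_HANTS(rasters_path_inp=r'/data/ndvi_tifs', name_format='NDVI_{0}.tif',
#           start_date='2016-01-01', end_date='2016-12-31',
#           latlim=[-10.0, 10.0], lonlim=[30.0, 50.0], cellsize=0.01,
#           nc_path=r'/data/ndvi_hants.nc',
#           nb=365, nf=3, HiLo='Lo', low=-0.3, high=1.0,
#           fet=0.05, dod=1, delta=0.25)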
def create_netcdf(rasters_path, name_format, start_date, end_date,
latlim, lonlim, cellsize, nc_path,
epsg=4326, fill_val=-9999.0):
'''
This function creates a netcdf file from a folder with geotiffs rasters to
be used to run HANTS.
'''
# Latitude and longitude
lat_ls = pd.np.arange(latlim[0] + 0.5*cellsize, latlim[1] + 0.5*cellsize,
cellsize)
lat_ls = lat_ls[::-1] # ArcGIS numpy
lon_ls = pd.np.arange(lonlim[0] + 0.5*cellsize, lonlim[1] + 0.5*cellsize,
cellsize)
lat_n = len(lat_ls)
lon_n = len(lon_ls)
spa_ref = Spatial_Reference(epsg)
ll_corner = [lonlim[0], latlim[0]]
# Rasters
dates_dt = pd.date_range(start_date, end_date, freq='D')
dates_ls = [d.strftime('%Y%m%d') for d in dates_dt]
ras_ls = List_Datasets(rasters_path, 'tif')
# Cell code
temp_ll_ls = [pd.np.arange(x, x + lon_n)
for x in range(1, lat_n*lon_n, lon_n)]
code_ls = pd.np.array(temp_ll_ls)
empty_vec = pd.np.empty((lat_n, lon_n))
empty_vec[:] = fill_val
# Create netcdf file
print('Creating netCDF file...')
nc_file = netCDF4.Dataset(nc_path, 'w', format="NETCDF4")
# Create Dimensions
lat_dim = nc_file.createDimension('latitude', lat_n)
lon_dim = nc_file.createDimension('longitude', lon_n)
time_dim = nc_file.createDimension('time', len(dates_ls))
# Create Variables
crs_var = nc_file.createVariable('crs', 'i4')
crs_var.grid_mapping_name = 'latitude_longitude'
crs_var.crs_wkt = spa_ref
lat_var = nc_file.createVariable('latitude', 'f8', ('latitude'),
fill_value=fill_val)
lat_var.units = 'degrees_north'
lat_var.standard_name = 'latitude'
lon_var = nc_file.createVariable('longitude', 'f8', ('longitude'),
fill_value=fill_val)
lon_var.units = 'degrees_east'
lon_var.standard_name = 'longitude'
time_var = nc_file.createVariable('time', 'l', ('time'),
fill_value=fill_val)
time_var.standard_name = 'time'
time_var.calendar = 'gregorian'
code_var = nc_file.createVariable('code', 'i4', ('latitude', 'longitude'),
fill_value=fill_val)
outliers_var = nc_file.createVariable('outliers', 'i4',
('latitude', 'longitude', 'time'),
fill_value=fill_val)
outliers_var.long_name = 'outliers'
original_var = nc_file.createVariable('original_values', 'f8',
('latitude', 'longitude', 'time'),
fill_value=fill_val)
original_var.long_name = 'original values'
hants_var = nc_file.createVariable('hants_values', 'f8',
('latitude', 'longitude', 'time'),
fill_value=fill_val)
hants_var.long_name = 'hants values'
combined_var = nc_file.createVariable('combined_values', 'f8',
('latitude', 'longitude', 'time'),
fill_value=fill_val)
combined_var.long_name = 'combined values'
print('\tVariables created')
# Load data
lat_var[:] = lat_ls
lon_var[:] = lon_ls
time_var[:] = dates_ls
code_var[:] = code_ls
# temp folder
temp_dir = tempfile.mkdtemp()
bbox = [lonlim[0], latlim[0], lonlim[1], latlim[1]]
# Raster loop
print('\tExtracting data from rasters...')
for tt in range(len(dates_ls)):
# Raster
ras = name_format.format(dates_ls[tt])
if ras in ras_ls:
# Resample
ras_resampled = os.path.join(temp_dir, 'r_' + ras)
Resample(os.path.join(rasters_path, ras), ras_resampled, cellsize)
# Clip
ras_clipped = os.path.join(temp_dir, 'c_' + ras)
Clip(ras_resampled, ras_clipped, bbox)
# Raster to Array
array = Raster_to_Array(ras_resampled,
ll_corner, lon_n, lat_n,
values_type='float32')
# Store values
original_var[:, :, tt] = array
else:
# Store values
original_var[:, :, tt] = empty_vec
# Close file
nc_file.close()
print('NetCDF file created')
# Return
return nc_path
def HANTS_netcdf(nc_path, nb, nf, HiLo, low, high, fet, dod, delta,
fill_val=-9999.0):
'''
This function runs the python implementation of the HANTS algorithm. It
takes the input netcdf file and fills the 'hants_values',
'combined_values', and 'outliers' variables.
'''
# Read netcdfs
nc_file = netCDF4.Dataset(nc_path, 'r+')
time_var = nc_file.variables['time'][:]
original_values = nc_file.variables['original_values'][:]
[rows, cols, ztime] = original_values.shape
size_st = cols*rows
values_hants = pd.np.empty((rows, cols, ztime))
outliers_hants = pd.np.empty((rows, cols, ztime))
values_hants[:] = pd.np.nan
outliers_hants[:] = pd.np.nan
# Additional parameters
ni = len(time_var)
ts = range(ni)
# Loop
counter = 1
print('Running HANTS...')
for m in range(rows):
for n in range(cols):
print('\t{0}/{1}'.format(counter, size_st))
y = pd.np.array(original_values[m, n, :])
y[pd.np.isnan(y)] = fill_val
[yr, outliers] = HANTS(ni, nb, nf, y, ts, HiLo,
low, high, fet, dod, delta, fill_val)
values_hants[m, n, :] = yr
outliers_hants[m, n, :] = outliers
counter = counter + 1
nc_file.variables['hants_values'][:] = values_hants
nc_file.variables['outliers'][:] = outliers_hants
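    # Where a point was flagged as an outlier, the combined series takes the
    # HANTS-fitted value; elsewhere it keeps the original observation.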
nc_file.variables['combined_values'][:] = pd.np.where(outliers_hants,
values_hants,
original_values)
# Close netcdf file
nc_file.close()
def HANTS_singlepoint(nc_path, point, nb, nf, HiLo, low, high, fet, dod,
delta, fill_val=-9999.0):
'''
This function runs the python implementation of the HANTS algorithm for a
single point (lat, lon). It plots the fit and returns a data frame with
the 'original' and the 'hants' time series.
'''
# Location
lonx = point[0]
latx = point[1]
nc_file = netCDF4.Dataset(nc_path, 'r')
time = [pd.to_datetime(i, format='%Y%m%d')
for i in nc_file.variables['time'][:]]
lat = nc_file.variables['latitude'][:]
lon = nc_file.variables['longitude'][:]
# Check that the point falls within the extent of the netcdf file
lon_max = max(lon)
lon_min = min(lon)
lat_max = max(lat)
lat_min = min(lat)
if not (lon_min < lonx < lon_max) or not (lat_min < latx < lat_max):
        warnings.warn('The point lies outside the extent of the netcdf file. '
'The closest cell is plotted.')
if lonx > lon_max:
lonx = lon_max
elif lonx < lon_min:
lonx = lon_min
if latx > lat_max:
latx = lat_max
elif latx < lat_min:
latx = lat_min
# Get lat-lon index in the netcdf file
lat_closest = lat.flat[pd.np.abs(lat - latx).argmin()]
lon_closest = lon.flat[pd.np.abs(lon - lonx).argmin()]
lat_i = pd.np.where(lat == lat_closest)[0][0]
lon_i = pd.np.where(lon == lon_closest)[0][0]
# Read values
original_values = nc_file.variables['original_values'][lat_i, lon_i, :]
# Additional parameters
ni = len(time)
ts = range(ni)
# HANTS
y = pd.np.array(original_values)
y[pd.np.isnan(y)] = fill_val
[hants_values, outliers] = HANTS(ni, nb, nf, y, ts, HiLo, low, high, fet,
dod, delta, fill_val)
# Plot
top = 1.15*max(pd.np.nanmax(original_values),
pd.np.nanmax(hants_values))
bottom = 1.15*min(pd.np.nanmin(original_values),
pd.np.nanmin(hants_values))
ylim = [bottom, top]
plt.plot(time, hants_values, 'r-', label='HANTS')
plt.plot(time, original_values, 'b.', label='Original data')
plt.ylim(ylim[0], ylim[1])
plt.legend(loc=4)
plt.xlabel('time')
plt.ylabel('values')
plt.gcf().autofmt_xdate()
plt.axes().set_title('Point: lon {0:.2f}, lat {1:.2f}'.format(lon_closest,
lat_closest))
plt.axes().set_aspect(0.5*(time[-1] - time[0]).days/(ylim[1] - ylim[0]))
plt.show()
# Close netcdf file
nc_file.close()
# Data frame
df = pd.DataFrame({'time': time,
'original': original_values,
'hants': hants_values})
# Return
return df
def HANTS(ni, nb, nf, y, ts, HiLo, low, high, fet, dod, delta, fill_val):
'''
This function applies the Harmonic ANalysis of Time Series (HANTS)
algorithm originally developed by the Netherlands Aerospace Centre (NLR)
(http://www.nlr.org/space/earth-observation/).
This python implementation was based on two previous implementations
available at the following links:
https://codereview.stackexchange.com/questions/71489/harmonic-analysis-of-time-series-applied-to-arrays
http://nl.mathworks.com/matlabcentral/fileexchange/38841-matlab-implementation-of-harmonic-analysis-of-time-series--hants-
'''
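    # Parameter meanings below are as commonly documented for HANTS (assumed,
    # not stated in this file): ni = number of samples, nb = length of the base
    # period, nf = number of frequencies, y = data values, ts = sample times,
    # HiLo = reject 'Hi' or 'Lo' outliers, low/high = valid data range,
    # fet = fit error tolerance, dod = degree of overdeterminedness,
    # delta = small regularisation added to the normal equations.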
# Arrays
mat = pd.np.zeros((min(2*nf+1, ni), ni))
# amp = np.zeros((nf + 1, 1))
# phi = np.zeros((nf+1, 1))
yr = pd.np.zeros((ni, 1))
outliers = pd.np.zeros((1, len(y)))
# Filter
sHiLo = 0
if HiLo == 'Hi':
sHiLo = -1
elif HiLo == 'Lo':
sHiLo = 1
nr = min(2*nf+1, ni)
noutmax = ni - nr - dod
# dg = 180.0/math.pi
mat[0, :] = 1.0
ang = 2*math.pi*pd.np.arange(nb)/nb
cs = pd.np.cos(ang)
sn = pd.np.sin(ang)
i = pd.np.arange(1, nf+1)
for j in pd.np.arange(ni):
index = pd.np.mod(i*ts[j], nb)
mat[2 * i-1, j] = cs.take(index)
mat[2 * i, j] = sn.take(index)
p = pd.np.ones_like(y)
bool_out = (y < low) | (y > high)
p[bool_out] = 0
outliers[bool_out.reshape(1, y.shape[0])] = 1
nout = pd.np.sum(p == 0)
if nout > noutmax:
if pd.np.isclose(y, fill_val).any():
ready = pd.np.array([True])
yr = y
outliers = pd.np.zeros((y.shape[0]), dtype=int)
outliers[:] = fill_val
else:
raise Exception('Not enough data points.')
else:
ready = pd.np.zeros((y.shape[0]), dtype=bool)
nloop = 0
nloopmax = ni
while ((not ready.all()) & (nloop < nloopmax)):
nloop += 1
za = pd.np.matmul(mat, p*y)
A = pd.np.matmul(pd.np.matmul(mat, pd.np.diag(p)),
pd.np.transpose(mat))
A = A + pd.np.identity(nr)*delta
A[0, 0] = A[0, 0] - delta
            zr = pd.np.linalg.solve(A, za)
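            # Assumed completion: the source is truncated at this point. The
            # remainder of the outlier-rejection loop below is a sketch that
            # follows the reference HANTS implementations linked in the
            # docstring; details may differ from the original file.
            yr = pd.np.matmul(pd.np.transpose(mat), zr)
            # Signed deviation of the fit from the data; sHiLo selects which
            # side counts as an outlier.
            diff_vec = sHiLo * (yr - y)
            err = p * diff_vec
            rank_vec = pd.np.argsort(err)
            maxerr = diff_vec[rank_vec[-1]]
            ready = (maxerr <= fet) | (nout == noutmax)
            if not ready.all():
                # Drop the worst-fitting points (up to noutmax) and refit.
                i = ni - 1
                j = rank_vec[i]
                while (p[j] * diff_vec[j] > 0.5 * maxerr) & (nout < noutmax):
                    p[j] = 0
                    outliers[0, j] = 1
                    nout += 1
                    i -= 1
                    if i == 0:
                        j = 0
                    else:
                        j = rank_vec[i]
    return [yr, outliers]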
'''
SYNBIOCHEM (c) University of Manchester 2018
SYNBIOCHEM is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
# pylint: disable=invalid-name
# pylint: disable=wrong-import-order
from difflib import SequenceMatcher
import itertools
import os
import sys
from mscl_arfa.ena import get_start_end_comp
from mscl_arfa.uniprot import get_data, get_gen_dna_ids
import numpy as np
import pandas as pd
def run(out_dir):
'''Run script.'''
# Make output directory:
if not os.path.exists(out_dir):
os.makedirs(out_dir)
# Extract data from Uniprot:
arfa_query = 'database:(type:pfam id:PF03889)'
arfa_df = _get_uniprot_data('arfA', arfa_query, out_dir)
mscl_query = 'database:(type:pfam id:PF01741)'
mscl_df = _get_uniprot_data('mscL', mscl_query, out_dir)
df = pd.merge(arfa_df, mscl_df, left_index=True, right_index=True)
# Get start, end, is complement:
df = _get_start_ends(df, out_dir)
# Calculate is overlaps:
df = _calc_overlaps(df, out_dir)
df = _pair_genomic_dna_ids(df, out_dir)
_filter(df, out_dir)
def _get_uniprot_data(name, query, out_dir):
'''Get Uniprot data.'''
# Get Uniprot data from API:
df = get_data(name, query, out_dir)
# Get genomic DNA id from Uniprot XML:
gen_dna_csv = os.path.join(out_dir, name + '_gen_dna.csv')
if not os.path.exists(gen_dna_csv):
data = []
for _, row in df.iterrows():
uniprot_id = row['Entry']
data.extend([[uniprot_id, prot_seq_id]
for prot_seq_id in get_gen_dna_ids(uniprot_id)])
gen_dna_id_df = pd.DataFrame(data, columns=['Entry', 'genomic_dna_id'])
gen_dna_id_df.to_csv(gen_dna_csv, encoding='utf8', index=False)
else:
gen_dna_id_df = pd.read_csv(gen_dna_csv, encoding='utf8')
# Merge Uniprot data with genomic id data:
df = df.merge(gen_dna_id_df, on='Entry')
# Reset indices and columns:
df.set_index(['Organism', 'Organism ID'], inplace=True)
df.columns = pd.MultiIndex.from_tuples([[name, col] for col in df.columns])
return df
def _get_start_ends(df, out_dir):
'''Get all start, end, is complement for dataframe.'''
start_ends_csv = os.path.join(out_dir, 'start_ends.csv')
if not os.path.exists(start_ends_csv):
for level in df.columns.levels[0]:
data = [_get_start_end(genomic_dna_id)
for genomic_dna_id in df[level]['genomic_dna_id'].unique()]
cols = pd.MultiIndex.from_tuples([[level, column]
for column in ['genomic_dna_id',
'start',
'end',
'is_complement']])
start_end_df = pd.DataFrame(data, columns=cols)
df = df.reset_index().merge(start_end_df).set_index(df.index.names)
df.to_csv(start_ends_csv, encoding='utf-8')
else:
df = pd.read_csv(start_ends_csv, index_col=[0, 1], header=[0, 1])
return df
def _get_start_end(genomic_dna_id):
'''Get start, end, is complement from genomic DNA id.'''
return [genomic_dna_id] + list(get_start_end_comp(genomic_dna_id))
def _calc_overlaps(df, out_dir):
'''Calculate overlaps.'''
overlaps_csv = os.path.join(out_dir, 'overlaps.csv')
if not os.path.exists(overlaps_csv):
is_overlaps = []
levels = dict(zip(df.columns.get_level_values(0),
df.columns.get_level_values(1))).keys()
for _, row in df.iterrows():
vals = [[row[level]['start'],
row[level]['end'],
row[level]['is_complement']]
for level in levels]
is_overlaps.append(_calc_overlap(vals[0], vals[1]))
df['common', 'overlap'] = is_overlaps
df.to_csv(overlaps_csv, encoding='utf-8')
else:
df = pd.read_csv(overlaps_csv, index_col=[0, 1], header=[0, 1])
return df
def _calc_overlap(left, right):
'''Calculate overlap if genes are on complementary strands.'''
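    # Illustrative example (hypothetical coordinates): left = [100, 200, False]
    # and right = [150, 300, True] sit on opposite strands and share positions
    # 150-199, so 50 is returned; non-overlapping opposite-strand pairs return
    # the (negative) gap between them, and same-strand pairs return 0.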
if all(pd.notna(left)) and all(pd.notna(right)) and left[2] ^ right[2]:
range_left = range(int(left[0]), int(left[1]))
range_right = range(int(right[0]), int(right[1]))
intersection = set(range_left).intersection(range_right)
if intersection:
return len(intersection)
return -min(abs(int(left[1]) - int(right[0])),
abs(int(right[1]) - int(left[0])))
return 0
def _pair_genomic_dna_ids(df, out_dir):
'''Pair genomic_dna_ids.'''
paired_gen_ids_csv = os.path.join(out_dir, 'raw.csv')
if not os.path.exists(paired_gen_ids_csv):
df['common', 'gen_data_id_sim'] = \
df.apply(__score_gen_data_id_similarity, axis=1)
df.to_csv(paired_gen_ids_csv, encoding='utf-8')
else:
        df = pd.read_csv(paired_gen_ids_csv, index_col=[0, 1], header=[0, 1])
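    # Assumed completion: the source is truncated here; run() expects this
    # helper to return the frame with the similarity scores attached.
    return df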
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 26 15:39:02 2018
@author: joyce
"""
import pandas as pd
import numpy as np
from numpy.matlib import repmat
from stats import get_stockdata_from_sql,get_tradedate,Corr,Delta,Rank,Cross_max,\
Cross_min,Delay,Sum,Mean,STD,TsRank,TsMax,TsMin,DecayLinear,Count,SMA,Cov,DTM,DBM,\
Highday,Lowday,HD,LD,RegBeta,RegResi,SUMIF,get_indexdata_from_sql,timer,get_fama
class stAlpha(object):
def __init__(self,begin,end):
self.begin = begin
self.end = end
self.close = get_stockdata_from_sql(1,self.begin,self.end,'Close')
self.open = get_stockdata_from_sql(1,self.begin,self.end,'Open')
self.high = get_stockdata_from_sql(1,self.begin,self.end,'High')
self.low = get_stockdata_from_sql(1,self.begin,self.end,'Low')
self.volume = get_stockdata_from_sql(1,self.begin,self.end,'Vol')
self.amt = get_stockdata_from_sql(1,self.begin,self.end,'Amount')
self.vwap = get_stockdata_from_sql(1,self.begin,self.end,'Vwap')
self.ret = get_stockdata_from_sql(1,begin,end,'Pctchg')
self.close_index = get_indexdata_from_sql(1,begin,end,'close','000001.SH')
self.open_index = get_indexdata_from_sql(1,begin,end,'open','000001.SH')
# self.mkt = get_fama_from_sql()
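    # Illustrative usage (the date format below is an assumption): each
    # alphaN method returns a one-column DataFrame of factor values, e.g.
    #     st = stAlpha('2017-01-01', '2018-01-26')
    #     a1 = st.alpha1()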
@timer
def alpha1(self):
volume = self.volume
ln_volume = np.log(volume)
ln_volume_delta = Delta(ln_volume,1)
close = self.close
Open = self.open
price_temp = pd.concat([close,Open],axis = 1,join = 'outer')
price_temp['ret'] = (price_temp['Close'] - price_temp['Open'])/price_temp['Open']
del price_temp['Close'],price_temp['Open']
r_ln_volume_delta = Rank(ln_volume_delta)
r_ret = Rank(price_temp)
rank = pd.concat([r_ln_volume_delta,r_ret],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,6)
alpha = corr
alpha.columns = ['alpha1']
return alpha
@timer
def alpha2(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
temp['alpha'] = (2 * temp['Close'] - temp['Low'] - temp['High']) \
/ (temp['High'] - temp['Low'])
del temp['Close'],temp['Low'],temp['High']
alpha = -1 * Delta(temp,1)
alpha.columns = ['alpha2']
return alpha
@timer
def alpha3(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
close_delay = Delay(pd.DataFrame(temp['Close']),1)
close_delay.columns = ['close_delay']
temp = pd.concat([temp,close_delay],axis = 1,join = 'inner')
temp['min'] = Cross_max(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['Low']))
temp['max'] = Cross_min(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['High']))
temp['alpha_temp'] = 0
temp['alpha_temp'][temp['Close'] > temp['close_delay']] = temp['Close'] - temp['min']
temp['alpha_temp'][temp['Close'] < temp['close_delay']] = temp['Close'] - temp['max']
alpha = Sum(pd.DataFrame(temp['alpha_temp']),6)
alpha.columns = ['alpha3']
return alpha
@timer
def alpha4(self):
close = self.close
volume = self.volume
close_mean_2 = Mean(close,2)
close_mean_8 = Mean(close,8)
close_std = STD(close,8)
volume_mean_20 = Mean(volume,20)
data = pd.concat([close_mean_2,close_mean_8,close_std,volume_mean_20,volume],axis = 1,join = 'inner')
data.columns = ['close_mean_2','close_mean_8','close_std','volume_mean_20','volume']
data['alpha'] = -1
data['alpha'][data['close_mean_2'] < data['close_mean_8'] - data['close_std']] = 1
data['alpha'][data['volume']/data['volume_mean_20'] >= 1] = 1
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha4']
return alpha
@timer
def alpha5(self):
volume = self.volume
high = self.high
r1 = TsRank(volume,5)
r2 = TsRank(high,5)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(corr,5)
alpha.columns = ['alpha5']
return alpha
@timer
def alpha6(self):
Open = self.open
high = self.high
df = pd.concat([Open,high],axis = 1,join = 'inner')
df['price'] = df['Open'] * 0.85 + df['High'] * 0.15
df_delta = Delta(pd.DataFrame(df['price']),1)
alpha = Rank(np.sign(df_delta))
alpha.columns = ['alpha6']
return alpha
@timer
def alpha7(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_delta = Delta(volume,3)
data = pd.concat([close,vwap],axis = 1,join = 'inner')
data['diff'] = data['Vwap'] - data['Close']
r1 = Rank(TsMax(pd.DataFrame(data['diff']),3))
r2 = Rank(TsMin(pd.DataFrame(data['diff']),3))
r3 = Rank(volume_delta)
rank = pd.concat([r1,r2,r3],axis = 1,join = 'inner')
rank.columns = ['r1','r2','r3']
alpha = (rank['r1'] + rank['r2'])* rank['r3']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha7']
return alpha
@timer
def alpha8(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
        data_price = (data['High'] + data['Low'])/2 * 0.2 + data['Vwap'] * 0.8
data_price_delta = Delta(pd.DataFrame(data_price),4) * -1
alpha = Rank(data_price_delta)
alpha.columns = ['alpha8']
return alpha
@timer
def alpha9(self):
high = self.high
low = self.low
volume = self.volume
data = pd.concat([high,low,volume],axis = 1,join = 'inner')
data['price']= (data['High'] + data['Low'])/2
data['price_delay'] = Delay(pd.DataFrame(data['price']),1)
alpha_temp = (data['price'] - data['price_delay']) * (data['High'] - data['Low'])/data['Vol']
alpha_temp_unstack = alpha_temp.unstack(level = 'ID')
alpha = alpha_temp_unstack.ewm(span = 7, ignore_na = True, min_periods = 7).mean()
alpha_final = alpha.stack()
alpha = pd.DataFrame(alpha_final)
alpha.columns = ['alpha9']
return alpha
@timer
def alpha10(self):
ret = self.ret
close = self.close
ret_std = STD(pd.DataFrame(ret),20)
ret_std.columns = ['ret_std']
data = pd.concat([ret,close,ret_std],axis = 1, join = 'inner')
temp1 = pd.DataFrame(data['ret_std'][data['Pctchg'] < 0])
temp2 = pd.DataFrame(data['Close'][data['Pctchg'] >= 0])
temp1.columns = ['temp']
temp2.columns = ['temp']
temp = pd.concat([temp1,temp2],axis = 0,join = 'outer')
temp_order = pd.concat([data,temp],axis = 1)
temp_square = pd.DataFrame(np.power(temp_order['temp'],2))
alpha_temp = TsMax(temp_square,5)
alpha = Rank(alpha_temp)
alpha.columns = ['alpha10']
return alpha
@timer
def alpha11(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
data = pd.concat([high,low,close,volume],axis = 1,join = 'inner')
        data_temp = ((data['Close'] - data['Low']) - (data['High'] - data['Close']))\
            /(data['High'] - data['Low']) * data['Vol']
alpha = Sum(pd.DataFrame(data_temp),6)
alpha.columns = ['alpha11']
return alpha
@timer
def alpha12(self):
Open = self.open
vwap = self.vwap
close = self.close
data = pd.concat([Open,vwap,close],axis = 1, join = 'inner')
data['p1'] = data['Open'] - Mean(data['Open'],10)
data['p2'] = data['Close'] - data['Vwap']
r1 = Rank(pd.DataFrame(data['p1']))
r2 = Rank(pd.DataFrame(np.abs(data['p2'])))
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
alpha = rank['r1'] - rank['r2']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha12']
return alpha
@timer
def alpha13(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
alpha = (data['High'] + data['Low'])/2 - data['Vwap']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha13']
return alpha
@timer
def alpha14(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close'] - data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha14']
return alpha
@timer
def alpha15(self):
Open = self.open
close = self.close
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([Open,close_delay],axis = 1,join = 'inner')
alpha = data['Open']/data['close_delay'] - 1
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha15']
return alpha
@timer
def alpha16(self):
vwap = self.vwap
volume = self.volume
data = pd.concat([vwap,volume],axis = 1, join = 'inner')
r1 = Rank(pd.DataFrame(data['Vol']))
r2 = Rank(pd.DataFrame(data['Vwap']))
rank = pd.concat([r1,r2],axis = 1, join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(Rank(corr),5)
alpha.columns = ['alpha16']
return alpha
@timer
def alpha17(self):
vwap = self.vwap
close = self.close
data = pd.concat([vwap,close],axis = 1, join = 'inner')
data['vwap_max15'] = TsMax(data['Vwap'],15)
data['close_delta5'] = Delta(data['Close'],5)
temp = np.power(data['vwap_max15'],data['close_delta5'])
alpha = Rank(pd.DataFrame(temp))
alpha.columns = ['alpha17']
return alpha
@timer
def alpha18(self):
"""
        this one is similar to alpha14
"""
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close']/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha18']
return alpha
@timer
def alpha19(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
data['temp1'] = (data['Close'] - data['close_delay'])/data['close_delay']
data['temp2'] = (data['Close'] - data['close_delay'])/data['Close']
temp1 = pd.DataFrame(data['temp1'][data['Close'] < data['close_delay']])
temp2 = pd.DataFrame(data['temp2'][data['Close'] >= data['close_delay']])
temp1.columns = ['temp']
temp2.columns = ['temp']
temp = pd.concat([temp1,temp2],axis = 0)
data = pd.concat([data,temp],axis = 1,join = 'outer')
alpha = pd.DataFrame(data['temp'])
alpha.columns = ['alpha19']
return alpha
@timer
def alpha20(self):
close = self.close
close_delay = Delay(close,6)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha20']
return alpha
@timer
def alpha21(self):
close = self.close
close_mean = Mean(close,6)
alpha = RegBeta(0,close_mean,None,6)
alpha.columns = ['alpha21']
return alpha
@timer
def alpha22(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1,join = 'inner')
data.columns = ['close','close_mean']
temp = pd.DataFrame((data['close'] - data['close_mean'])/data['close_mean'])
temp_delay = Delay(temp,3)
data_temp = pd.concat([temp,temp_delay],axis = 1,join = 'inner')
data_temp.columns = ['temp','temp_delay']
temp2 = pd.DataFrame(data_temp['temp'] - data_temp['temp_delay'])
alpha = SMA(temp2,12,1)
alpha.columns = ['alpha22']
return alpha
@timer
def alpha23(self):
close = self.close
close_std = STD(close,20)
close_delay = Delay(close,1)
data = pd.concat([close,close_std,close_delay],axis = 1, join = 'inner')
data.columns = ['Close','close_std','close_delay']
data['temp'] = data['close_std']
data['temp'][data['Close'] <= data['close_delay']] = 0
temp = pd.DataFrame(data['temp'])
sma1 = SMA(temp,20,1)
sma2 = SMA(pd.DataFrame(data['close_std']),20,1)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'])
alpha.columns = ['alpha23']
return alpha
@timer
def alpha24(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis=1 ,join = 'inner' )
temp = data['Close'] - data['close_delay']
temp = pd.DataFrame(temp)
alpha = SMA(temp,5,1)
alpha.columns = ['alpha24']
return alpha
@timer
def alpha25(self):
close = self.close
close_delta = Delta(close,7)
ret = self.ret
r1 = Rank(close_delta)
r3 = Rank(Sum(ret,250))
volume = self.volume
volume_mean = Mean(pd.DataFrame(volume['Vol']),20)
volume_mean.columns = ['volume_mean']
data = pd.concat([volume,volume_mean],axis = 1,join = 'inner')
temp0 = pd.DataFrame(data['Vol']/data['volume_mean'])
temp = DecayLinear(temp0,9)
r2 = Rank(temp)
rank = pd.concat([r1,r2,r3],axis = 1, join = 'inner')
rank.columns = ['r1','r2','r3']
alpha = pd.DataFrame(-1 * rank['r1'] * (1 - rank['r2']) * rank['r3'])
alpha.columns = ['alpha25']
return alpha
@timer
def alpha26(self):
close = self.close
vwap = self.vwap
close_mean7 = Mean(close,7)
close_mean7.columns = ['close_mean7']
close_delay5 = Delay(close,5)
close_delay5.columns = ['close_delay5']
data = pd.concat([vwap,close_delay5],axis = 1,join = 'inner')
corr = Corr(data,230)
corr.columns = ['corr']
data_temp = pd.concat([corr,close_mean7,close],axis = 1,join = 'inner')
alpha = data_temp['close_mean7'] - data_temp['Close'] + data_temp['corr']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha26']
return alpha
@timer
def alpha27(self):
"""
        incomplete
"""
close = self.close
close_delay3 = Delay(close,3)
close_delay6 = Delay(close,6)
data = pd.concat([close,close_delay3,close_delay6],axis = 1,join = 'inner')
data.columns = ['close','close_delay3','close_delay6']
temp1 = pd.DataFrame((data['close'] - data['close_delay3'])/data['close_delay3'] * 100)
temp2 = pd.DataFrame((data['close'] - data['close_delay6'])/data['close_delay6'] * 100)
data_temp = pd.concat([temp1,temp2],axis = 1,join = 'inner')
data_temp.columns = ['temp1','temp2']
temp = pd.DataFrame(data_temp['temp1'] + data_temp['temp2'])
alpha = DecayLinear(temp,12)
alpha.columns = ['alpha27']
return alpha
@timer
def alpha28(self):
close = self.close
low = self.low
high = self.high
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['Close','low_min','high_max']
temp1 = pd.DataFrame((data['Close'] - data['low_min']) /(data['high_max'] - data['low_min']))
sma1 = SMA(temp1,3,1)
sma2 = SMA(sma1,3,1)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1'] * 2 - sma['sma2'] * 3)
alpha.columns = ['alpha28']
return alpha
@timer
def alpha29(self):
close = self.close
volume = self.volume
close_delay = Delay(close,6)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay'] * data['Vol']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha29']
return alpha
@timer
    def alpha30(self):
        """
        incomplete in the original source: no alpha value is computed or returned
        """
        close = self.close
        close_delay = Delay(close,1)
@timer
def alpha31(self):
close = self.close
close_delay = Delay(close,12)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha31']
return alpha
@timer
def alpha32(self):
volume = self.volume
high = self.high
r1 = Rank(volume)
r2 = Rank(high)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
corr = Corr(rank,3)
r = Rank(corr)
alpha = -1 * Sum(r,3)
alpha.columns = ['alpha32']
return alpha
@timer
def alpha33(self):
low = self.low
volume = self.volume
ret = self.ret
low_min = TsMin(low,5)
low_min_delay = Delay(low_min,5)
data1 = pd.concat([low_min,low_min_delay],axis = 1,join = 'inner')
data1.columns = ['low_min','low_min_delay']
ret_sum240 = Sum(ret,240)
ret_sum20 = Sum(ret,20)
ret_temp = pd.concat([ret_sum240,ret_sum20],axis = 1, join = 'inner')
ret_temp.columns = ['ret240','ret20']
temp1 = pd.DataFrame(data1['low_min_delay'] - data1['low_min'])
temp2 = pd.DataFrame((ret_temp['ret240'] - ret_temp['ret20'])/220)
r_temp2 = Rank(temp2)
r_volume = TsRank(volume,5)
temp = pd.concat([temp1,r_temp2,r_volume],axis = 1,join = 'inner')
temp.columns = ['temp1','r_temp2','r_volume']
alpha = temp['temp1'] * temp['r_temp2'] * temp['r_volume']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha33']
return alpha
@timer
def alpha34(self):
close = self.close
close_mean = Mean(close,12)
close_mean.columns = ['close_mean']
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
alpha = pd.DataFrame(data['close_mean']/data['Close'])
alpha.columns = ['alpha34']
return alpha
@timer
def alpha35(self):
volume = self.volume
Open = self.open
open_delay = Delay(Open,1)
open_delay.columns = ['open_delay']
open_linear = DecayLinear(Open,17)
open_linear.columns = ['open_linear']
open_delay_temp = DecayLinear(open_delay,15)
r1 = Rank(open_delay_temp)
data = pd.concat([Open,open_linear],axis = 1,join = 'inner')
Open_temp = data['Open'] * 0.65 + 0.35 * data['open_linear']
rank = pd.concat([volume,Open_temp],axis = 1, join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,7)
r2 = Rank(-1 * corr)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = Cross_min(pd.DataFrame(r['r1']),pd.DataFrame(r['r2']))
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha35']
return alpha
@timer
def alpha36(self):
volume = self.volume
vwap = self.vwap
r1 = Rank(volume)
r2 = Rank(vwap)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
corr = Corr(rank,6)
temp = Sum(corr,2)
alpha = Rank(temp)
alpha.columns = ['alpha36']
return alpha
@timer
def alpha37(self):
Open = self.open
ret = self.ret
open_sum = Sum(Open,5)
ret_sum = Sum(ret,5)
data = pd.concat([open_sum,ret_sum],axis = 1,join = 'inner')
data.columns = ['open_sum','ret_sum']
temp = data['open_sum'] * data['ret_sum']
temp_delay = Delay(temp,10)
data_temp = pd.concat([temp,temp_delay],axis = 1,join = 'inner')
data_temp.columns = ['temp','temp_delay']
alpha = -1 * Rank(pd.DataFrame(data_temp['temp'] - data_temp['temp_delay']))
alpha.columns = ['alpha37']
return alpha
@timer
def alpha38(self):
high = self.high
high_mean = Mean(high,20)
high_delta = Delta(high,2)
data = pd.concat([high,high_mean,high_delta],axis = 1,join = 'inner')
data.columns = ['high','high_mean','high_delta']
data['alpha'] = -1 * data['high_delta']
data['alpha'][data['high_mean'] >= data['high']] = 0
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha38']
return alpha
@timer
def alpha39(self):
close = self.close
Open = self.open
vwap = self.vwap
volume = self.volume
close_delta2 = Delta(close,2)
close_delta2_decay = DecayLinear(close_delta2,8)
r1 = Rank(close_delta2_decay)
price_temp = pd.concat([vwap,Open],axis = 1,join = 'inner')
price = pd.DataFrame(price_temp['Vwap'] * 0.3 + price_temp['Open'] * 0.7)
volume_mean = Mean(volume,180)
volume_mean_sum = Sum(volume_mean,37)
rank = pd.concat([price,volume_mean_sum],axis = 1,join = 'inner')
corr = Corr(rank,14)
corr_decay = DecayLinear(corr,12)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r2'] - r['r1'])
alpha.columns = ['alpha39']
return alpha
@timer
def alpha40(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
data = pd.concat([close,volume,close_delay],axis = 1, join = 'inner')
data.columns = ['close','volume','close_delay']
data['temp1'] = data['volume']
data['temp2'] = data['volume']
data['temp1'][data['close'] <= data['close_delay']] = 0
data['temp2'][data['close'] > data['close_delay']] = 0
s1 = Sum(pd.DataFrame(data['temp1']),26)
s2 = Sum(pd.DataFrame(data['temp2']),26)
s = pd.concat([s1,s2], axis = 1, join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(s['s1']/s['s2'] * 100)
alpha.columns = ['alpha40']
return alpha
@timer
def alpha41(self):
vwap = self.vwap
vwap_delta = Delta(vwap,3)
vwap_delta_max = TsMax(vwap_delta,5)
alpha = -1 * Rank(vwap_delta_max)
alpha.columns = ['alpha41']
return alpha
@timer
def alpha42(self):
high = self.high
volume = self.volume
high_std = STD(high,10)
r1 = Rank(high_std)
data = pd.concat([high,volume],axis = 1,join = 'inner')
corr = Corr(data,10)
r = pd.concat([r1,corr],axis = 1,join = 'inner')
r.columns = ['r1','corr']
alpha = pd.DataFrame(-1 * r['r1'] * r['corr'])
alpha.columns = ['alpha42']
return alpha
@timer
def alpha43(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,6)
alpha.columns = ['alpha43']
return alpha
@timer
def alpha44(self):
volume = self.volume
vwap = self.vwap
low = self.low
volume_mean = Mean(volume,10)
rank = pd.concat([low,volume_mean],axis = 1,join = 'inner')
corr = Corr(rank,7)
corr_decay = DecayLinear(corr,6)
r1 = TsRank(corr_decay,4)
vwap_delta = Delta(vwap,3)
vwap_delta_decay = DecayLinear(vwap_delta,10)
r2 = TsRank(vwap_delta_decay,15)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha44']
return alpha
@timer
def alpha45(self):
volume = self.volume
vwap = self.vwap
close = self.close
Open = self.open
price = pd.concat([close,Open],axis = 1,join = 'inner')
price['price'] = price['Close'] * 0.6 + price['Open'] * 0.4
price_delta = Delta(pd.DataFrame(price['price']),1)
r1 = Rank(price_delta)
volume_mean = Mean(volume,150)
data = pd.concat([vwap,volume_mean],axis = 1,join = 'inner')
corr = Corr(data,15)
r2 = Rank(corr)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha45']
return alpha
@timer
def alpha46(self):
close = self.close
close_mean3 = Mean(close,3)
close_mean6 = Mean(close,6)
close_mean12 = Mean(close,12)
close_mean24 = Mean(close,24)
data = pd.concat([close,close_mean3,close_mean6,close_mean12,close_mean24],axis = 1,join = 'inner')
data.columns = ['c','c3','c6','c12','c24']
alpha = (data['c3'] + data['c6'] + data['c12'] + data['c24'])/(4 * data['c'])
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha46']
return alpha
@timer
def alpha47(self):
close = self.close
low = self.low
high = self.high
high_max = TsMax(high,6)
low_min = TsMin(low,6)
data = pd.concat([high_max,low_min,close],axis = 1,join = 'inner')
data.columns = ['high_max','low_min','close']
temp = pd.DataFrame((data['high_max'] - data['close'])/(data['high_max'] - \
data['low_min']) * 100)
alpha = SMA(temp,9,1)
alpha.columns = ['alpha47']
return alpha
@timer
def alpha48(self):
close = self.close
volume = self.volume
temp1 = Delta(close,1)
temp1_delay1 = Delay(temp1,1)
temp1_delay2 = Delay(temp1,2)
data = pd.concat([temp1,temp1_delay1,temp1_delay2],axis = 1,join = 'inner')
data.columns = ['temp1','temp1_delay1','temp1_delay2']
temp2 = pd.DataFrame(np.sign(data['temp1']) + np.sign(data['temp1_delay1']) \
+ np.sign(data['temp1_delay2']))
volume_sum5 = Sum(volume,5)
volume_sum20 = Sum(volume,20)
data_temp = pd.concat([temp2,volume_sum5,volume_sum20],axis = 1,join = 'inner')
data_temp.columns = ['temp2','volume_sum5','volume_sum20']
temp3 = pd.DataFrame(data_temp['temp2'] * data_temp['volume_sum5']/\
data_temp['volume_sum20'])
alpha = -1 * Rank(temp3)
alpha.columns = ['alpha48']
return alpha
@timer
def alpha49(self):
low = self.low
high = self.high
data = pd.concat([low,high],axis = 1,join = 'inner')
price_sum = pd.DataFrame(data['Low'] + data['High'])
price_sum_delay = Delay(price_sum,1)
price = pd.concat([price_sum,price_sum_delay],axis = 1,join = 'inner')
price.columns = ['sum','sum_delay']
price['temp'] = 0
price['temp'][price['sum'] < price['sum_delay']] = 1
alpha = Sum(pd.DataFrame(price['temp']),12)
alpha.columns = ['alpha49']
return alpha
@timer
def alpha50(self):
low = self.low
high = self.high
data = pd.concat([low,high],axis = 1,join = 'inner')
price_sum = pd.DataFrame(data['Low'] + data['High'])
price_sum_delay = Delay(price_sum,1)
price = pd.concat([price_sum,price_sum_delay],axis = 1,join = 'inner')
price.columns = ['sum','sum_delay']
price['temp'] = 1
price['temp'][price['sum'] <= price['sum_delay']] = -1
alpha = Sum(pd.DataFrame(price['temp']),12)
alpha.columns = ['alpha50']
return alpha
@timer
def alpha51(self):
low = self.low
high = self.high
data = pd.concat([low,high],axis = 1,join = 'inner')
price_sum = pd.DataFrame(data['Low'] + data['High'])
price_sum_delay = Delay(price_sum,1)
price = pd.concat([price_sum,price_sum_delay],axis = 1,join = 'inner')
price.columns = ['sum','sum_delay']
price['temp'] = 1
price['temp'][price['sum'] <= price['sum_delay']] = 0
alpha = Sum(pd.DataFrame(price['temp']),12)
alpha.columns = ['alpha51']
return alpha
@timer
def alpha52(self):
low = self.low
high = self.high
close = self.close
data = pd.concat([low,high,close],axis = 1,join = 'inner')
data['sum_delay'] = Delay(pd.DataFrame((data['High'] + data['Low'] + data['Close'])/3),1)
temp1 = pd.DataFrame(data['High'] - data['sum_delay'])
temp1.columns = ['high_diff']
temp2 = pd.DataFrame(data['sum_delay'] - data['Low'])
temp2.columns = ['low_diff']
temp1['max'] = temp1['high_diff']
temp1['max'][temp1['high_diff'] < 0 ] = 0
temp2['max'] = temp2['low_diff']
temp2['max'][temp2['low_diff'] < 0 ] = 0
temp1_sum = Sum(pd.DataFrame(temp1['max']),26)
temp2_sum = Sum(pd.DataFrame(temp2['max']),26)
alpha_temp = pd.concat([temp1_sum,temp2_sum],axis = 1,join = 'inner')
alpha_temp.columns = ['s1','s2']
alpha = pd.DataFrame(alpha_temp['s1']/alpha_temp['s2'] * 100)
alpha.columns = ['alpha52']
return alpha
@timer
def alpha53(self):
close = self.close
close_delay = Delay(close,1)
count = Count(0,close,close_delay,12)
alpha = count/12.0 * 100
alpha.columns = ['alpha53']
return alpha
@timer
def alpha54(self):
Open = self.open
close = self.close
data = pd.concat([Open,close], axis = 1, join = 'inner')
data.columns = ['close','open']
temp = pd.DataFrame(data['close'] - data['open'])
temp_abs = pd.DataFrame(np.abs(temp))
df = pd.concat([temp,temp_abs], axis = 1, join= 'inner')
df.columns = ['temp','abs']
std = STD(pd.DataFrame(df['temp'] + df['abs']),10)
corr = Corr(data,10)
data1 = pd.concat([corr,std],axis = 1, join = 'inner')
data1.columns = ['corr','std']
alpha = Rank(pd.DataFrame(data1['corr'] + data1['std'])) * -1
alpha.columns = ['alpha54']
return alpha
@timer
def alpha55(self):
Open = self.open
close = self.close
low = self.low
high = self.high
close_delay = Delay(close,1)
open_delay = Delay(Open,1)
low_delay = Delay(low,1)
data = pd.concat([Open,close,low,high,close_delay,open_delay,low_delay], axis =1 ,join = 'inner')
data.columns= ['open','close','low','high','close_delay','open_delay','low_delay']
temp1 = pd.DataFrame((data['close'] - data['close_delay'] + (data['close'] - data['open'])/2\
+ data['close_delay'] - data['open_delay'])/ np.abs(data['high'] - data['close_delay']))
temp2 = pd.DataFrame(np.abs(data['high'] - data['close_delay']) + np.abs(data['low'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
temp3 = pd.DataFrame(np.abs(data['low'] - data['close_delay']) + np.abs(data['high'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
abs1 = pd.DataFrame(np.abs(data['high'] - data['close_delay']))
abs2 = pd.DataFrame(np.abs(data['low'] - data['close_delay']))
abs3 = pd.DataFrame(np.abs(data['high'] - data['low_delay']))
data1 = pd.concat([abs1,abs2,abs3], axis = 1, join = 'inner')
data1.columns = ['abs1','abs2','abs3']
data_temp = pd.concat([abs1,abs2],axis = 1, join = 'inner')
data_temp_max = pd.DataFrame(np.max(data_temp,axis = 1))
data_temp_max.columns = ['max']
data_temp1 = pd.concat([data,data_temp_max], axis = 1, join = 'inner')
temp4 = pd.DataFrame((np.abs(data_temp1['high'] - data_temp1['low_delay']) + \
np.abs(data_temp1['close_delay'] - data_temp1['open_delay'])) *\
data_temp1['max'])
data1['judge1'] = 0
data1['judge2'] = 0
data1['judge3'] = 0
data1['judge4'] = 0
data1['judge1'][data1['abs1'] > data1['abs2']] = 1
data1['judge2'][data1['abs1'] > data1['abs3']] = 1
data1['judge3'][data1['abs2'] > data1['abs3']] = 1
        data1['judge4'][data1['abs2'] > data1['abs1']] = 1
judge_1 = pd.DataFrame(data1['judge1'] * data1['judge2'])
judge_2 = pd.DataFrame(data1['judge3'] * data1['judge4'])
data2 = pd.concat([temp1,temp2,temp3,temp4,judge_1,judge_2], axis = 1, join = 'inner')
data2.columns = ['t1','t2','t3','t4','j1','j2']
data2['j3'] = 1
data2['j4'] = data2['j3'] - data2['j1'] - data2['j2']
data2['t5'] = data2['t2'] * data2['j1'] + data2['t3'] * data2['j2'] + \
data2['t4'] * data2['j4']
tep = pd.DataFrame(16 * data2['t5']/data2['t1'])
alpha = Sum(tep,20)
alpha.columns = ['alpha55']
return alpha
@timer
def alpha56(self):
low = self.low
high = self.high
volume = self.volume
Open = self.open
open_min = TsMin(Open,12)
data1 = pd.concat([Open,open_min], axis = 1, join = 'inner')
data1.columns = ['open','open_min']
r1 = Rank(pd.DataFrame(data1['open'] - data1['open_min']))
volume_mean = Mean(volume,40)
volume_mean_sum= Sum(volume_mean,19)
data2 = pd.concat([high,low],axis = 1, join = 'inner')
temp = pd.DataFrame((data2['High'] + data2['Low'])/2)
rank = pd.concat([temp,volume_mean_sum],axis = 1 , join = 'inner')
rank.columns = ['temp','volume_mean_sum']
corr = Corr(rank,13)
r2 = Rank(corr)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
r['alpha'] = 0
r['alpha'][r['r1'] >= r['r2']] = 1
alpha = pd.DataFrame(r['alpha'])
alpha.columns = ['alpha56']
return alpha
@timer
def alpha57(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = pd.DataFrame((data['close'] - data['low_min'])/(data['high_max'] \
- data['low_min']) * 100)
alpha = SMA(temp,3,1)
alpha.columns = ['alpha57']
return alpha
@timer
def alpha58(self):
close = self.close
close_delay = Delay(close,1)
count = Count(0,close,close_delay,20)
alpha = count/20.0 * 100
alpha.columns = ['alpha58']
return alpha
@timer
def alpha59(self):
low = self.low
high = self.high
close = self.close
close_delay = Delay(close,1)
max_temp = pd.concat([high,close_delay],axis = 1,join = 'inner')
min_temp = pd.concat([low,close_delay],axis = 1,join = 'inner')
max_temp1 = pd.DataFrame(np.max(max_temp,axis = 1))
min_temp1 = pd.DataFrame(np.min(min_temp,axis = 1))
data = pd.concat([close,close_delay,max_temp1,min_temp1],axis = 1,join = 'inner')
data.columns = ['close','close_delay','max','min']
data['max'][data['close'] > data['close_delay']] = 0
data['min'][data['close'] <= data['close_delay']] = 0
alpha = pd.DataFrame(data['max'] + data['min'])
alpha.columns = ['alpha59']
return alpha
@timer
def alpha60(self):
low = self.low
high = self.high
close = self.close
volume = self.volume
data = pd.concat([low,high,close,volume],axis = 1,join = 'inner')
temp = pd.DataFrame((2 * data['Close'] - data['Low'] - data['High'])/(data['Low'] + \
data['High']) * data['Vol'])
alpha = Sum(temp,20)
alpha.columns = ['alpha60']
return alpha
@timer
def alpha61(self):
low = self.low
volume = self.volume
vwap = self.vwap
vwap_delta = Delta(vwap,1)
vwap_delta_decay = DecayLinear(vwap_delta,12)
r1 = Rank(vwap_delta_decay)
volume_mean = Mean(volume,80)
data = pd.concat([low,volume_mean],axis = 1,join = 'inner')
corr = Corr(data,8)
corr_decay = DecayLinear(corr,17)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
alpha = pd.DataFrame(np.max(r,axis = 1) * -1)
alpha.columns = ['alpha61']
return alpha
@timer
def alpha62(self):
high = self.high
volume = self.volume
volume_r = Rank(volume)
data = pd.concat([high,volume_r],axis = 1,join = 'inner')
alpha = -1 * Corr(data,5)
alpha.columns = ['alpha62']
return alpha
@timer
def alpha63(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delay']
data['max'] = data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
sma1 = SMA(pd.DataFrame(data['max']),6,1)
sma2 = SMA(pd.DataFrame(data['abs']),6,1)
sma = pd.concat([sma1,sma2],axis = 1,join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'] * 100)
alpha.columns = ['alpha63']
return alpha
@timer
def alpha64(self):
vwap = self.vwap
volume = self.volume
close = self.close
vwap_r = Rank(vwap)
volume_r = Rank(volume)
data1 = pd.concat([vwap_r,volume_r],axis = 1,join = 'inner')
corr1 = Corr(data1,4)
corr1_decay = DecayLinear(corr1,4)
r1 = Rank(corr1_decay)
close_mean = Mean(close,60)
close_r = Rank(close)
close_mean_r = Rank(close_mean)
data2 = pd.concat([close_r,close_mean_r],axis = 1,join = 'inner')
corr2 = Corr(data2,4)
corr2_max = TsMax(corr2,13)
corr2_max_decay = DecayLinear(corr2_max,14)
r2 = Rank(corr2_max_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
alpha = pd.DataFrame(np.max(r,axis = 1) *-1)
alpha.columns = ['alpha64']
return alpha
@timer
def alpha65(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
alpha = pd.DataFrame(data['close_mean']/data['close'])
alpha.columns = ['alpha65']
return alpha
@timer
def alpha66(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
alpha = (data['close'] - data['close_mean'])/data['close_mean'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha66']
return alpha
@timer
def alpha67(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delay']
data['max'] = data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
sma1 = SMA(pd.DataFrame(data['max']),24,1)
sma2 = SMA(pd.DataFrame(data['abs']),24,1)
sma = pd.concat([sma1,sma2],axis = 1,join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'] * 100)
alpha.columns = ['alpha67']
return alpha
@timer
def alpha68(self):
high = self.high
volume = self.volume
low = self.low
data = pd.concat([high,low,volume],axis = 1,join = 'inner')
data['sum']= (data['High'] + data['Low'])/2
data['sum_delta'] = Delta(pd.DataFrame(data['sum']),1)
temp = data['sum_delta'] * (data['High'] - data['Low'])/data['Vol']
alpha = SMA(pd.DataFrame(temp),15,2)
alpha.columns = ['alpha68']
return alpha
@timer
def alpha69(self):
high = self.high
low = self.low
Open = self.open
dtm = DTM(Open,high)
dbm = DBM(Open,low)
dtm_sum = Sum(dtm,20)
dbm_sum = Sum(dbm,20)
data = pd.concat([dtm_sum,dbm_sum],axis = 1, join = 'inner')
data.columns = ['dtm','dbm']
data['temp1'] = (data['dtm'] - data['dbm'])/data['dtm']
data['temp2'] = (data['dtm'] - data['dbm'])/data['dbm']
data['temp1'][data['dtm'] <= data['dbm']] = 0
data['temp2'][data['dtm'] >= data['dbm']] = 0
alpha = pd.DataFrame(data['temp1'] + data['temp2'])
alpha.columns = ['alpha69']
return alpha
@timer
def alpha70(self):
amount = self.amt
alpha= STD(amount,6)
alpha.columns = ['alpha70']
return alpha
@timer
def alpha71(self):
close = self.close
close_mean = Mean(close,24)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
alpha = (data['close'] - data['close_mean'])/data['close_mean'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha71']
return alpha
@timer
def alpha72(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,6)
high_max = TsMax(high,6)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = (data['high_max'] - data['close'])/(data['high_max'] - data['low_min']) * 100
alpha = SMA(pd.DataFrame(temp),15,1)
alpha.columns = ['alpha72']
return alpha
@timer
def alpha73(self):
vwap = self.vwap
volume = self.volume
close = self.close
data1 = pd.concat([close,volume],axis = 1,join = 'inner')
corr1 = Corr(data1,10)
corr1_decay = DecayLinear(DecayLinear(corr1,16),4)
r1 = TsRank(corr1_decay,5)
volume_mean = Mean(volume,30)
data2 = pd.concat([vwap,volume_mean],axis = 1,join = 'inner')
corr2 = Corr(data2,4)
corr2_decay = DecayLinear(corr2,3)
r2 = Rank(corr2_decay)
r = pd.concat([r1,r2],axis = 1,join ='inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r2'] - r['r1'])
alpha.columns= ['alpha73']
return alpha
@timer
def alpha74(self):
vwap = self.vwap
volume = self.volume
low = self.low
volume_mean = Mean(volume,40)
volume_mean_sum = Sum(volume_mean,20)
data1 = pd.concat([low,vwap],axis = 1,join = 'inner')
data_sum = Sum(pd.DataFrame(data1['Low'] * 0.35 + data1['Vwap'] * 0.65),20)
data = pd.concat([volume_mean_sum,data_sum],axis = 1,join = 'inner')
corr = Corr(data,7)
r1 = Rank(corr)
vwap_r = Rank(vwap)
volume_r = Rank(volume)
data_temp = pd.concat([vwap_r,volume_r],axis = 1,join = 'inner')
corr2 = Corr(data_temp,6)
r2 = Rank(corr2)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha74']
return alpha
@timer
def alpha75(self):
close = self.close
Open = self.open
close_index = self.close_index
open_index = self.open_index
data1 = pd.concat([close,Open], axis = 1, join = 'inner')
data1.columns = ['close','open']
data1['temp'] = 1
data1['temp'][data1['close'] <= data1['open']] = 0
data2 = pd.concat([close_index,open_index], axis = 1, join = 'inner')
data2.columns = ['close','open']
data2['tep'] = 1
data2['tep'][data2['close'] > data2['open']] = 0
temp = data1['temp'].unstack()
tep = data2['tep'].unstack()
tep1 = repmat(tep,1,np.size(temp,1))
data3 = temp * tep1
temp_result = data3.rolling(50,min_periods = 50).sum()
tep_result = tep.rolling(50,min_periods = 50).sum()
        tep2_result = repmat(tep_result,1,np.size(temp,1))
result = temp_result/tep2_result
alpha = pd.DataFrame(result.stack())
alpha.columns = ['alpha75']
return alpha
@timer
def alpha76(self):
volume = self.volume
close = self.close
close_delay = Delay(close,1)
data = pd.concat([volume,close,close_delay],axis = 1,join = 'inner')
data.columns = ['volume','close','close_delay']
temp = pd.DataFrame(np.abs((data['close']/data['close_delay'] -1 )/data['volume']))
temp_std = STD(temp,20)
temp_mean = Mean(temp,20)
data_temp = pd.concat([temp_std,temp_mean],axis = 1,join = 'inner')
data_temp.columns = ['std','mean']
alpha = pd.DataFrame(data_temp['std']/data_temp['mean'])
alpha.columns = ['alpha76']
return alpha
@timer
def alpha77(self):
vwap = self.vwap
volume = self.volume
low = self.low
high = self.high
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
temp = pd.DataFrame((data['High'] + data['Low'])/2 - data['Vwap'])
temp_decay = DecayLinear(temp,20)
r1 = Rank(temp_decay)
temp1 = pd.DataFrame((data['High'] + data['Low'])/2)
volume_mean = Mean(volume,40)
data2 = pd.concat([temp1,volume_mean],axis = 1,join = 'inner')
corr = Corr(data2,3)
corr_decay = DecayLinear(corr,6)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
alpha = pd.DataFrame(np.min(r,axis = 1))
alpha.columns = ['alpha77']
return alpha
@timer
def alpha78(self):
low = self.low
high = self.high
close = self.close
data = pd.concat([low,high,close],axis = 1,join = 'inner')
temp = pd.DataFrame((data['Low'] + data['High'] + data['Close'])/3)
temp.columns = ['temp']
temp_mean = Mean(temp,12)
temp_mean.columns = ['temp_mean']
temp2 = pd.concat([temp,temp_mean],axis = 1,join = 'inner')
tmp = pd.DataFrame(temp2['temp'] - temp2['temp_mean'])
data1 = pd.concat([close,temp_mean],axis = 1,join = 'inner')
temp_abs = pd.DataFrame(np.abs(data1['Close'] - data1['temp_mean']))
temp_abs_mean = Mean(temp_abs,12)
df = pd.concat([tmp,temp_abs_mean],axis = 1,join = 'inner')
df.columns = ['df1','df2']
alpha = pd.DataFrame(df['df1']/(df['df2'] * 0.015))
alpha.columns = ['alpha78']
return alpha
@timer
def alpha79(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delay']
data['max'] = data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
sma1 = SMA(pd.DataFrame(data['max']),12,1)
sma2 = SMA(pd.DataFrame(data['abs']),12,1)
sma = pd.concat([sma1,sma2],axis = 1,join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'] * 100)
alpha.columns = ['alpha79']
return alpha
@timer
def alpha80(self):
volume = self.volume
volume_delay = Delay(volume,5)
volume_delay.columns = ['volume_delay']
data = pd.concat([volume,volume_delay],axis = 1,join = 'inner')
alpha = (data['Vol'] - data['volume_delay'])/data['volume_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha80']
return alpha
@timer
def alpha81(self):
volume = self.volume
alpha = SMA(volume,21,2)
alpha.columns = ['alpha81']
return alpha
@timer
def alpha82(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,6)
high_max = TsMax(high,6)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = (data['high_max'] - data['close'])/(data['high_max'] - data['low_min']) * 100
alpha = SMA(pd.DataFrame(temp),20,1)
alpha.columns = ['alpha82']
return alpha
@timer
def alpha83(self):
high = self.high
volume = self.volume
high_r = Rank(high)
volume_r = Rank(volume)
data = pd.concat([high_r,volume_r],axis = 1,join = 'inner')
corr = Corr(data,5)
alpha = -1 * Rank(corr)
alpha.columns = ['alpha83']
return alpha
@timer
def alpha84(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
data['sign'][data['Close'] == data['close_delay']] = 0
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,20)
alpha.columns = ['alpha84']
return alpha
@timer
def alpha85(self):
close = self.close
volume = self.volume
volume_mean = Mean(volume,20)
close_delta = Delta(close,7)
data1 = pd.concat([volume,volume_mean],axis = 1,join = 'inner')
data1.columns = ['volume','volume_mean']
temp1 = pd.DataFrame(data1['volume']/data1['volume_mean'])
r1 = TsRank(temp1,20)
r2 = TsRank(-1 * close_delta,8)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'])
alpha.columns = ['alpha85']
return alpha
@timer
def alpha86(self):
close = self.close
close_delay20 = Delay(close,20)
        close_delay10 = Delay(close,10)
data = pd.concat([close,close_delay20,close_delay10],axis = 1,join = 'inner')
data.columns = ['close','close_delay20','close_delay10']
temp = pd.DataFrame((data['close_delay20'] - data['close_delay10'])/10 - \
(data['close_delay10'] - data['close'])/10)
close_delta = Delta(close,1) * -1
data_temp = pd.concat([close_delta,temp],axis = 1,join = 'inner')
data_temp.columns = ['close_delta','temp']
data_temp['close_delta'][data_temp['temp'] > 0.25]= -1
data_temp['close_delta'][data_temp['temp'] < 0]= 1
alpha = pd.DataFrame(data_temp['close_delta'])
alpha.columns = ['alpha86']
return alpha
@timer
def alpha87(self):
vwap = self.vwap
high = self.high
low = self.low
Open = self.open
vwap_delta = Delta(vwap,4)
vwap_delta_decay = DecayLinear(vwap_delta,7)
r1 = Rank(vwap_delta_decay)
data = pd.concat([low,high,vwap,Open], axis = 1, join = 'inner')
temp = pd.DataFrame((data['Low'] * 0.1 + data['High'] * 0.9 - data['Vwap'])/\
(data['Open'] - 0.5 * (data['Low'] + data['High'])))
temp_decay = DecayLinear(temp,11)
r2 = TsRank(temp_decay,7)
r = pd.concat([r1,r2], axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(-1 * (r['r1'] + r['r2']))
alpha.columns = ['alpha87']
return alpha
@timer
def alpha88(self):
close = self.close
close_delay = Delay(close,20)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delta']
alpha = (data['close'] - data['close_delta'])/data['close_delta'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha88']
return alpha
@timer
def alpha89(self):
close = self.close
sma1 = SMA(close,13,2)
sma2 = SMA(close,27,2)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
temp = pd.DataFrame(sma['sma1'] - sma['sma2'])
sma3 = SMA(temp,10,2)
data = pd.concat([temp,sma3],axis = 1, join = 'inner')
data.columns = ['temp','sma']
alpha = pd.DataFrame(2 *(data['temp'] - data['sma']))
alpha.columns = ['alpha89']
return alpha
@timer
def alpha90(self):
volume = self.volume
vwap = self.vwap
r1 = Rank(volume)
r2 = Rank(vwap)
rank = pd.concat([r1,r2], axis = 1, join = 'inner')
corr = Corr(rank,5)
alpha = -1 * Rank(corr)
alpha.columns = ['alpha90']
return alpha
@timer
def alpha91(self):
close = self.close
volume = self.volume
low = self.low
close_max = TsMax(close,5)
data1 = pd.concat([close,close_max], axis = 1,join = 'inner')
data1.columns = ['close','close_max']
r1 = Rank(pd.DataFrame(data1['close'] - data1['close_max']))
volume_mean = Mean(volume,40)
data2 = pd.concat([volume_mean,low], axis = 1, join = 'inner')
corr = Corr(data2,5)
r2 = Rank(corr)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'] * -1)
alpha.columns = ['alpha91']
return alpha
@timer
def alpha92(self):
volume = self.volume
vwap = self.vwap
close = self.close
data = pd.concat([close,vwap],axis = 1, join = 'inner')
data['price'] = data['Close'] * 0.35 + data['Vwap'] * 0.65
price_delta = Delta(pd.DataFrame(data['price']),2)
price_delta_decay = DecayLinear(price_delta,3)
r1 = Rank(price_delta_decay)
volume_mean = Mean(volume,180)
rank = pd.concat([volume_mean,close],axis = 1,join = 'inner')
corr = Corr(rank,13)
temp = pd.DataFrame(np.abs(corr))
temp_decay = DecayLinear(temp,5)
r2 = TsRank(temp_decay,15)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
alpha = pd.DataFrame(-1 * np.max(r, axis = 1))
alpha.columns = ['alpha92']
return alpha
@timer
def alpha93(self):
low = self.low
Open = self.open
open_delay = Delay(Open,1)
data = pd.concat([low,Open,open_delay],axis = 1,join = 'inner')
data.columns = ['low','open','open_delay']
temp1 = pd.DataFrame(data['open'] - data['low'])
temp2 = pd.DataFrame(data['open'] - data['open_delay'])
data_temp = pd.concat([temp1,temp2],axis = 1 ,join = 'inner')
temp_max = pd.DataFrame(np.max(data_temp,axis = 1))
temp_max.columns = ['max']
data2 = pd.concat([data,temp_max],axis = 1,join = 'inner')
data2['temp'] = data2['max']
data2['temp'][data2['open'] >= data2['open_delay']] = 0
alpha = Sum(pd.DataFrame(data2['temp']),20)
alpha.columns = ['alpha93']
return alpha
@timer
def alpha94(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
data['sign'][data['Close'] == data['close_delay']] = 0
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,30)
alpha.columns = ['alpha94']
return alpha
@timer
def alpha95(self):
amt = self.amt
alpha = STD(amt,20)
alpha.columns = ['alpha95']
return alpha
@timer
def alpha96(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = ( data['close'] - data['low_min'])/(data['high_max'] - data['low_min']) * 100
alpha_temp = SMA(pd.DataFrame(temp),3,1)
alpha = SMA(alpha_temp,3,1)
alpha.columns = ['alpha96']
return alpha
@timer
def alpha97(self):
volume = self.volume
alpha = STD(volume,10)
alpha.columns = ['alpha97']
return alpha
@timer
def alpha98(self):
close = self.close
close_mean = Mean(close,100)
close_mean_delta = Delta(close_mean,100)
close_delay = Delay(close,100)
data = pd.concat([close_mean_delta,close_delay],axis = 1,join = 'inner')
data.columns = ['delta','delay']
temp = pd.DataFrame(data['delta']/ data['delay'])
close_delta = Delta(close,3)
close_min = TsMin(close,100)
data_temp = pd.concat([close,close_delta,close_min,temp],axis = 1,join = 'inner')
data_temp.columns = ['close','close_delta','close_min','temp']
data_temp['diff'] = (data_temp['close'] - data_temp['close_min']) * -1
data_temp['diff'][data_temp['temp'] < 0.05] = 0
data_temp['close_delta'] = data_temp['close_delta'] * -1
data_temp['close_delta'][data_temp['temp'] >= 0.05]= 0
alpha = pd.DataFrame(data_temp['close_delta'] + data_temp['diff'])
alpha.columns = ['alpha98']
return alpha
@timer
def alpha99(self):
close = self.close
volume = self.volume
r1 = Rank(close)
r2 = Rank(volume)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
cov = Cov(r,5)
alpha = -1 * Rank(cov)
alpha.columns = ['alpha99']
return alpha
@timer
def alpha100(self):
volume = self.volume
alpha = STD(volume,20)
alpha.columns = ['alpha100']
return alpha
@timer
def alpha101(self):
close = self.close
volume = self.volume
high = self.high
vwap = self.vwap
volume_mean = Mean(volume,30)
volume_mean_sum = Sum(volume_mean,37)
data1 = pd.concat([close,volume_mean_sum], axis = 1, join = 'inner')
corr1 = Corr(data1,15)
r1 = Rank(corr1)
data2 = pd.concat([high,vwap],axis = 1, join = 'inner')
temp = pd.DataFrame(data2['High'] * 0.1 + data2['Vwap'] * 0.9)
temp_r = Rank(temp)
volume_r = Rank(volume)
data3 = pd.concat([temp_r,volume_r], axis = 1, join = 'inner')
corr2 = Corr(data3,11)
r2 = Rank(corr2)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
r['alpha'] = 0
r['alpha'][r['r1'] < r['r2']] = -1
alpha = pd.DataFrame(r['alpha'])
alpha.columns = ['alpha101']
return alpha
@timer
def alpha102(self):
volume = self.volume
temp = Delta(volume,1)
temp.columns = ['temp']
temp['max'] = temp['temp']
temp['max'][temp['temp'] < 0 ] = 0
temp['abs'] = np.abs(temp['temp'])
sma1 = SMA(pd.DataFrame(temp['max']),6,1)
sma2 = SMA(pd.DataFrame(temp['abs']),6,1)
sma = pd.concat([sma1,sma2], axis = 1 ,join ='inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/ sma['sma2'] * 100)
alpha.columns = ['alpha102']
return alpha
@timer
def alpha103(self):
low = self.low
lowday = Lowday(low,20)
alpha = (20 - lowday)/20.0 * 100
alpha.columns = ['alpha103']
return alpha
@timer
def alpha104(self):
close = self.close
volume = self.volume
high = self.high
data = pd.concat([high,volume], axis = 1, join = 'inner')
corr = Corr(data,5)
corr_delta = Delta(corr,5)
close_std = STD(close,20)
r1 = Rank(close_std)
temp = pd.concat([corr_delta,r1], axis = 1, join = 'inner')
temp.columns = ['delta','r']
alpha = pd.DataFrame(-1 * temp['delta'] * temp['r'])
alpha.columns = ['alpha104']
return alpha
@timer
def alpha105(self):
volume = self.volume
Open = self.open
volume_r = Rank(volume)
open_r = Rank(Open)
rank = pd.concat([volume_r,open_r],axis = 1, join = 'inner')
alpha = -1 * Corr(rank,10)
alpha.columns = ['alpha105']
return alpha
@timer
def alpha106(self):
close = self.close
close_delay = Delay(close,20)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
alpha = pd.DataFrame(data['close'] - data['close_delay'])
alpha.columns = ['alpha106']
return alpha
@timer
def alpha107(self):
Open = self.open
high = self.high
close = self.close
low = self.low
high_delay = Delay(high,1)
close_delay = Delay(close,1)
low_delay = Delay(low,1)
data = pd.concat([high_delay,close_delay,low_delay,Open], axis = 1, join = 'inner')
data.columns = ['high_delay','close_delay','low_delay','open']
r1 = Rank(pd.DataFrame(data['open'] - data['high_delay']))
r2 = Rank(pd.DataFrame(data['open'] - data['close_delay']))
r3 = Rank(pd.DataFrame(data['open'] - data['low_delay']))
alpha = -1 * r1 * r2 * r3
alpha.columns = ['alpha107']
return alpha
@timer
def alpha108(self):
high = self.high
volume = self.volume
vwap = self.vwap
high_min = TsMin(high,2)
data1 = pd.concat([high,high_min], axis = 1, join = 'inner')
data1.columns = ['high','high_min']
r1 = Rank(pd.DataFrame(data1['high'] - data1['high_min']))
volume_mean = Mean(volume,120)
rank = pd.concat([vwap,volume_mean],axis = 1, join = 'inner')
corr = Corr(rank,6)
r2 = Rank(corr)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = r['r1'] * r['r2'] * -1
alpha.columns = ['alpha108']
return alpha
@timer
def alpha109(self):
high = self.high
low = self.low
data = pd.concat([high,low],axis = 1, join = 'inner')
temp = SMA(pd.DataFrame(data['High'] - data['Low']),10,2)
sma = SMA(temp,10,2)
sma_temp = pd.concat([temp,sma],axis = 1, join = 'inner')
sma_temp.columns = ['temp','sma']
alpha = pd.DataFrame(sma_temp['temp']/sma_temp['sma'])
alpha.columns = ['alpha109']
return alpha
@timer
def alpha110(self):
high = self.high
low = self.low
close = self.close
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([high,low,close_delay], axis = 1, join = 'inner')
data['max1'] = data['High'] - data['close_delay']
data['max2'] = data['close_delay'] - data['Low']
data['max1'][data['max1'] < 0] = 0
data['max2'][data['max2'] < 0] = 0
s1 = Sum(pd.DataFrame(data['max1']),20)
s2 = Sum(pd.DataFrame(data['max2']),20)
s = pd.concat([s1,s2], axis = 1 , join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(s['s1']/s['s2'])
alpha.columns = ['alpha110']
return alpha
@timer
def alpha111(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
data = pd.concat([high,low,close,volume], axis = 1, join = 'inner')
temp = pd.DataFrame(data['Vol'] * (2 * data['Close'] - data['Low'] - data['High'])\
/(data['High'] - data['Low']))
sma1 = SMA(temp,11,2)
sma2 = SMA(temp,4,2)
sma = pd.concat([sma1, sma2], axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1'] - sma['sma2'])
alpha.columns = ['alpha111']
return alpha
@timer
def alpha112(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close, close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
data['temp'] = 1
data['temp'][data['close'] > data['close_delay']] = 0
alpha = Sum(pd.DataFrame(data['temp']),12)
alpha.columns = ['alpha112']
return alpha
@timer
def alpha113(self):
close = self.close
volume = self.volume
close_delay = Delay(close,5)
close_delay_mean = Mean(close_delay,20)
data1 = pd.concat([close,volume],axis = 1, join = 'inner')
corr = Corr(data1,2)
r1 = Rank(close_delay_mean)
data2 = pd.concat([r1,corr], axis = 1, join = 'inner')
data2.columns = ['r1','corr']
r1 = pd.DataFrame(data2['r1'] * data2['corr'])
close_sum5 = Sum(close,5)
close_sum20 = Sum(close,20)
data3 = pd.concat([close_sum5,close_sum20],axis = 1, join = 'inner')
corr2 = Corr(data3,2)
r2 = Rank(corr2)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'] * -1)
alpha.columns = ['alpha113']
return alpha
@timer
def alpha114(self):
close = self.close
high = self.high
low = self.low
volume = self.volume
vwap = self.vwap
close_mean = Mean(close,5)
data = pd.concat([high,low,close_mean], axis = 1, join = 'inner')
data.columns = ['high','low','close_mean']
        temp = pd.DataFrame((data['high'] - data['low']) / data['close_mean'])
temp_delay = Delay(temp,2)
r1 = TsRank(temp_delay,5)
temp1 = pd.concat([temp,vwap,close], axis = 1, join = 'inner')
temp1.columns = ['temp','vwap','close']
tep = pd.DataFrame(temp1['temp']/(temp1['vwap'] - temp1['close']))
r2 = TsRank(volume,5)
data2 = pd.concat([r2,tep], axis = 1, join = 'inner')
data2.columns = ['r2','tep']
tep1 = pd.DataFrame(data2['r2']/data2['tep'])
r3 = TsRank(tep1,5)
r = pd.concat([r1,r3],axis = 1, join = 'inner')
r.columns = ['r1','r3']
alpha = pd.DataFrame(r['r1'] + r['r3'])
alpha.columns = ['alpha114']
return alpha
@timer
def alpha115(self):
high = self.high
low = self.low
volume = self.volume
volume_mean = Mean(volume,30)
price = pd.concat([high,low], axis = 1, join = 'inner')
price.columns = ['high','low']
price_temp = price['high'] * 0.9 + price['low'] * 0.1
data = pd.concat([price_temp,volume_mean],axis = 1, join = 'inner')
corr = Corr(data,10)
r1 = Rank(corr)
data2 = pd.concat([high,low], axis = 1, join = 'inner')
temp = pd.DataFrame((data2['High'] + data2['Low'])/2)
temp_r = TsRank(temp,4)
volume_r = TsRank(volume,10)
data3 = pd.concat([temp_r,volume_r], axis = 1, join = 'inner')
corr2 = Corr(data3,7)
r2 = Rank(corr2)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'])
alpha.columns = ['alpha115']
return alpha
@timer
def alpha116(self):
close = self.close
alpha = RegResi(0,close,None,20)
alpha.columns = ['alpha116']
return alpha
@timer
def alpha117(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
ret = self.ret
r1 = TsRank(volume,32)
data1 = pd.concat([close,high,low],axis = 1, join = 'inner')
r2 = TsRank(pd.DataFrame(data1['Close'] + data1['High'] - data1['Low']),16)
r3 = TsRank(ret,32)
r = pd.concat([r1,r2,r3], axis = 1, join = 'inner')
r.columns = ['r1','r2','r3']
alpha = pd.DataFrame(r['r1'] * (1 - r['r2']) * (1 - r['r3']))
alpha.columns = ['alpha117']
return alpha
@timer
def alpha118(self):
high = self.high
low = self.low
Open = self.open
data = pd.concat([high,low,Open], axis = 1, join = 'inner')
s1 = Sum(pd.DataFrame(data['High'] - data['Open']),20)
s2 = Sum(pd.DataFrame(data['Open'] - data['Low']),20)
s = pd.concat([s1,s2], axis = 1, join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(s['s1']/s['s2'] * 100)
alpha.columns = ['alpha118']
return alpha
@timer
def alpha119(self):
Open = self.open
volume = self.volume
vwap = self.vwap
volume_mean = Mean(volume,5)
volume_mean_sum = Sum(volume_mean,26)
data1 = pd.concat([vwap,volume_mean_sum],axis = 1, join = 'inner')
corr1 = Corr(data1,5)
corr1_decay = DecayLinear(corr1,7)
r1 = Rank(corr1_decay)
open_r = Rank(Open)
volume_mean2 = Mean(volume,15)
volume_mean2_r = Rank(volume_mean2)
data2 = pd.concat([open_r, volume_mean2_r], axis = 1, join = 'inner')
corr2 = Corr(data2,21)
corr2_min = TsMin(corr2,9)
corr2_min_r = TsRank(corr2_min,7)
corr_min_r_decay = DecayLinear(corr2_min_r,8)
r2 = Rank(corr_min_r_decay)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] - r['r2'])
alpha.columns = ['alpha119']
return alpha
@timer
def alpha120(self):
vwap = self.vwap
close = self.close
data = pd.concat([vwap,close], axis = 1, join = 'inner')
r1 = Rank(pd.DataFrame(data['Vwap'] - data['Close']))
        r2 = Rank(pd.DataFrame(data['Vwap'] + data['Close']))
        # NOTE: the lines below are an assumed completion following the pattern of the other
        # alphaN() methods: alpha120 = rank(vwap - close) / rank(vwap + close).
        r = pd.concat([r1,r2],axis = 1, join = 'inner')
        r.columns = ['r1','r2']
        alpha = pd.DataFrame(r['r1']/r['r2'])
        alpha.columns = ['alpha120']
        return alpha
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : tql-Python.
# @File : OOFlgb
# @Time : 2019-06-23 23:41
# @Author : yuanjie
# @Email : <EMAIL>
# @Software : PyCharm
# @Description :
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import StratifiedKFold, RepeatedStratifiedKFold, KFold, cross_val_predict, cross_validate
from sklearn.metrics import roc_auc_score
from lightgbm import LGBMClassifier
from tqdm.auto import tqdm
import os
import socket
if socket.gethostname() == 'yuanjie-Mac.local':
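    # avoid the duplicate OpenMP runtime crash on macOS when several libraries bundle libomp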
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
class LGBMClassifierCV(object):
"""cross_val_predict"""
def __init__(self, params=None, cv=5, cv_seed=None, n_repeats=None):
self.clf = LGBMClassifier()
self.cv = cv
if params:
self.clf.set_params(**params)
if n_repeats:
            self._kf = RepeatedStratifiedKFold(n_splits=cv, n_repeats=n_repeats, random_state=cv_seed)
self._num_preds = cv * n_repeats
else:
self._kf = StratifiedKFold(cv, shuffle=True, random_state=cv_seed)
self._num_preds = cv
def fit(self, X, y, X_test=None, feval=roc_auc_score, fix_valid_index=None, sample_weight=None, init_score=None,
eval_metric='auc', early_stopping_rounds=300, verbose=100, feature_name='auto', categorical_feature='auto',
callbacks=None):
"""
:param X: 数组
:param y:
:param X_test:
:param feval:
:param fix_valid_index: 默认折外为验证集,可添加验证集范围(指定其在X里的index)
:return:
"""
self.best_info = {}
self.feature_importances = 0
if X_test is None:
X_test = X[:1]
self.oof_train = np.zeros(len(X))
self.oof_test = np.zeros((len(X_test), self._num_preds))
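        # one column of test predictions per fold (times repeats); aggregated after the CV loop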
for n_fold, (train_index, valid_index) in enumerate(self._kf.split(X, y)):
if verbose:
print("\033[94mFold %s started at %s\033[0m" % (n_fold + 1, time.ctime()))
            # restrict the early-stopping validation set; indices refer to rows of the original X
if fix_valid_index is not None:
                valid_index = list(set(fix_valid_index) & set(valid_index))  # offline + online validation sets
X_train, y_train = X[train_index], y[train_index]
X_valid, y_valid = X[valid_index], y[valid_index]
eval_set = [(X_train, y_train), (X_valid, y_valid)]
########################################################################
self.clf.fit(X_train, y_train, sample_weight, init_score, eval_set, eval_names=('Train', 'Valid'),
eval_sample_weight=None, eval_class_weight=None, eval_init_score=None, eval_metric=eval_metric,
early_stopping_rounds=early_stopping_rounds, verbose=verbose, feature_name=feature_name,
categorical_feature=categorical_feature, callbacks=callbacks)
self.oof_train[valid_index] = self.clf.predict_proba(X_valid)[:, 1]
self.oof_test[:, n_fold] = self.clf.predict_proba(X_test)[:, 1]
# best info
self.best_info.setdefault('best_iteration', []).append(self.clf.best_iteration_)
            # todo: support multi-class
self.best_info.setdefault('best_score_train', []).append(self.clf.best_score_['Train']['auc'])
self.best_info.setdefault('best_score_valid', []).append(self.clf.best_score_['Valid']['auc'])
# feature importances
self.feature_importances += self.clf.feature_importances_ / self.cv
########################################################################
        # Output the test-set OOF predictions
        # (assumed completion: the original line is truncated; rank-average the per-fold test predictions)
        self.oof_test_rank = (pd.DataFrame(self.oof_test).rank().mean(1) / len(self.oof_test)).values
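# Usage sketch (illustrative only; X, y and X_test stand for hypothetical numpy arrays):
#   oof = LGBMClassifierCV(params={"n_estimators": 1000, "learning_rate": 0.05}, cv=5, cv_seed=42)
#   oof.fit(X, y, X_test, early_stopping_rounds=100, verbose=0)
#   test_pred = oof.oof_test.mean(1)   # simple average of the per-fold test predictions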
import functools
import pandas
import pandas.testing
import pytest
pytest.importorskip("google.cloud.bigquery", minversion="1.24.0")
@pytest.fixture
def method_under_test(credentials):
import pandas_gbq
return functools.partial(pandas_gbq.to_gbq, credentials=credentials)
def test_float_round_trip(
method_under_test, random_dataset_id, bigquery_client
):
"""Ensure that 64-bit floating point numbers are unchanged.
See: https://github.com/pydata/pandas-gbq/issues/326
"""
table_id = "{}.float_round_trip".format(random_dataset_id)
input_floats = pandas.Series(
[
0.14285714285714285,
0.4406779661016949,
1.05148,
1.05153,
1.8571428571428572,
2.718281828459045,
3.141592653589793,
2.0988936657440586e43,
],
name="float_col",
)
    df = pandas.DataFrame({"float_col": input_floats})
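    # Assumed remainder of the truncated test: upload the frame, read it back with the
    # BigQuery client and compare the column (list_rows(...).to_dataframe() is the standard
    # client call; the exact assertions in the original may differ).
    # method_under_test(df, table_id)
    # round_trip = bigquery_client.list_rows(table_id).to_dataframe()
    # pandas.testing.assert_series_equal(
    #     round_trip["float_col"].sort_values().reset_index(drop=True),
    #     input_floats.sort_values().reset_index(drop=True),
    #     check_names=False,
    # )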
import re
import numpy as np
import pandas as pd
from dateutil.tz import tzutc
from dateutil.parser import parse as parse_date
from datetime import datetime, timedelta, timezone
from qset.utils.numeric import custom_round
# NOTE: slow
def parse_human_timestamp_re(hts, min_date_str="2000"):
"""
:param hts: Human timestamp: 20180101/2018010112/201801011200/20180101120000/20180101120000123...
:return:
"""
ts_PATTERN = re.compile(r"(\d{4})(\d{2})(\d{2})(\d{2})?(\d{2})?(\d{2})?(\d+)?")
hts = str(hts)
if hts < min_date_str:
raise Exception("Min date test failed")
split = list(ts_PATTERN.match(hts).groups()[: int((len(hts) - 2) / 2)])
# adjust microseconds
if len(split) == 7:
split[-1] = split[-1].ljust(6, "0")[:6]
return datetime(*map(int, split))
def parse_human_timestamp(hts, min_date_str="2000"):
"""
:param hts: Human timestamp: 20180101/2018010112/201801011200/20180101120000/20180101120000123...
:return:
"""
hts = str(hts)
if hts < min_date_str:
raise Exception("Min date test failed")
slices = [
slice(0, 4),
slice(4, 6),
slice(6, 8),
slice(8, 10),
slice(10, 12),
slice(12, 14),
slice(14, None),
][: int((len(hts) - 2) / 2)]
split = [hts[sl] for sl in slices]
# adjust microseconds
if len(split) == 7:
split[-1] = split[-1].ljust(6, "0")[:6]
return datetime(*map(int, split))
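# Examples (follow directly from the slicing above):
#   parse_human_timestamp("2018010112")        -> datetime(2018, 1, 1, 12, 0)
#   parse_human_timestamp("20180101120000123") -> datetime(2018, 1, 1, 12, 0, 0, 123000)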
def cast_hts(dt_obj):
dt_obj = cast_datetime(dt_obj)
return int(dt_obj.strftime("%Y%m%d%H%M%S%f")[:-3])
def cast_datetime(dt_obj, none_invariant=True):
"""
:param dt_obj: datetime-like object: datetime.datetime or str
:return: datetime
NOTE: This is slow
"""
if isinstance(dt_obj, datetime):
return dt_obj
elif isinstance(dt_obj, str) and not is_freq(dt_obj):
try:
return parse_human_timestamp(dt_obj)
except:
pass
# '01.08.2019' type
search = re.search(r"(\d\d)\.(\d\d)\.(\d\d\d\d)", dt_obj)
if search:
d, m, y = search.groups()
return datetime(int(y), int(m), int(d))
# '01.08.20' type
for pat in [
r"^(\d\d)\.(\d\d)\.(\d\d)$",
r"^\d(\d\d)\.(\d\d)\.(\d\d)$",
r"^\d(\d\d)\.(\d\d)\.(\d\d)^\d",
r"^(\d\d)\.(\d\d)\.(\d\d)\d",
]:
search = re.search(pat, dt_obj)
if search:
d, m, y = search.groups()
return datetime(int(y) + 2000, int(m), int(d))
dt = parse_date(dt_obj)
if dt and dt.tzinfo:
dt = dt.astimezone(tzutc()).replace(tzinfo=None)
return dt
elif none_invariant and dt_obj is None:
return None
elif isinstance(dt_obj, (int, float, np.integer)):
try:
return parse_human_timestamp(dt_obj)
except:
pass
try:
return parse_date(str(dt_obj))
except:
pass
try:
return datetime.fromtimestamp(dt_obj, tz=timezone.utc).replace(tzinfo=None)
except:
pass
return datetime.fromtimestamp(dt_obj / 1000, tz=timezone.utc).replace(
tzinfo=None
)
else:
raise Exception("Unknown datetime-like object type")
cast_dt = cast_datetime
def cast_timestamp(dt_obj):
"""
:param dt_obj: naive datetime
:return:
"""
dt_obj = cast_datetime(dt_obj)
# timestamp is always in utc!
return dt_obj.replace(tzinfo=timezone.utc).timestamp()
cast_ts = cast_timestamp
def cast_mts(dt_obj):
return cast_timestamp(dt_obj) * 1000
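# Example: timestamps are always interpreted as UTC, so
#   cast_timestamp("20180101") -> 1514764800.0 and cast_mts("20180101") -> 1514764800000.0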
def cast_str(dt, format=None):
if isinstance(dt, str):
return dt
elif isinstance(dt, datetime):
if not format:
return str(dt)
return dt.strftime(format)
else:
raise Exception("Unsupported type")
def get_strptime_pattern(s):
"""
:param s: str
:return: get strptime pattern
NOTE: be careful with microseconds. It is not handled properly
"""
if len(s) > 20:
raise Exception("Too big string")
return "%Y%m%d%H%M%S%f"[: int(len(s) - 2)]
def cast_datetime_series(s):
"""
:param s: a series of datetime-like objects with the same prototype.
:return: a datetime series
"""
sample = s.iloc[0]
# process hts case
try:
parse_human_timestamp(sample)
except:
pass
else:
sample = str(sample)
pattern = get_strptime_pattern(sample)
s = s.astype(str)
# 20 is for full %Y%m%D%H%M%S%f and 17 is for the same format, but when %f is replaced with 3 digits, not 6 as is by default
if len(sample) > 20:
# crop to microseconds
s = s[:20]
elif len(sample) >= 17:
# add zeros for microseconds
s = s + "0" * (20 - len(sample))
        return pd.to_datetime(s, format=pattern)
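    # Assumed fallback for series that are not human timestamps (the original is truncated here):
    # return s.apply(cast_datetime)
    # Example: cast_datetime_series(pd.Series([20180101120000, 20180102093000])) -> datetime64[ns] series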
"""
Load the VQA question json and attach image captions
"""
import json
import pandas as pd
path = './datasets/vqa/v2_OpenEnded_mscoco_train2014_questions.json'
with open(path) as question:
question = json.load(question)
# question['questions'][0]
# question['questions'][1]
# question['questions'][2]
df = pd.DataFrame(question['questions'])
df
caption_path = './datasets/caption/vis_st_trainval.json'
with open(caption_path) as cap:
cap = json.load(cap)
df_cap = pd.DataFrame(cap)
df_cap
df_addcap = pd.merge(df, df_cap, how='left', on='image_id')
del df_addcap['file_path']
########################################################################################################################
"""
pandas to json
"""
df_addcap.to_json('./datasets/caption/train_cap2.json', orient='table')
with open('./datasets/caption/train_cap2.json') as train_cap:
train_cap = json.load(train_cap)
########################################################################################################################
########################################################################################################################
"""
answer + cap
"""
path = '/home/nextgen/Desktop/mcan-vqa/datasets/vqa/v2_mscoco_train2014_annotations.json'
path = './datasets/vqa/v2_mscoco_val2014_annotations.json'
with open(path) as answer:
answer = json.load(answer)
answer['annotations'][0]
df_ans = pd.DataFrame(answer['annotations'])
df_ans[:0]
del df_ans['question_type']
del df_ans['answers']
del df_ans['answer_type']
del df_ans['image_id']
df_ans[df_ans['question_id']==458752000]
df_addcap2 = pd.merge(df_addcap, df_ans, how='left', on='question_id')
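# Assumed final step (the script is truncated here), mirroring the train export above;
# the output path is hypothetical:
# df_addcap2.to_json('./datasets/caption/val_cap2.json', orient='table')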
"""===========================
Pipeline transcriptdiffexpression
===========================
To do: update documentation and pipeline.ini. e.g supports sailfish too!
Overview
========
RNA-Seq differential expression analysis can, broadly speaking, be
performed at two levels. Gene-level and transcript-level.
As transcripts are the true unit of expression, differential
expression at the transcript-level is more ideal. However,
quantification of transcript-level expression is complicated by reads
which align to multiple transcripts from the same gene, especially
with short read techonologies. In addition transcript-level
quantification may be hindered by inadequate genome annotation.
Kallisto and Salmon are transcript quantification tools which attempt
to quantify transcripts directly from the sequence reads by
lightweight alignment algorithms - referred to as
"pseduoaligning". This avoids the time-consuming step of aligning
genes to the reference genome but depends heavily on the quality of
the reference transcript geneset.
Kallisto and Salmon can bootstrap the transcript abundance
estimates. In order to identify differentially expression transcripts,
Sleuth uses these bootstrap values to estimate the transcript-wise
technical variance which is subtracted from the total variance, thus
leaving an estimate of the remaining biological variance. Sleuth then
allows the user to fit a transcript-wise general linear model to the
expression data to identify transcripts which are significantly
differentially expressed.
These tools require a reference transcript geneset. The easiest way to
generate this is to use the 'auto-generate' method (see pipeline.ini)
which uses the output of pipeline_annotations.py with user-defined
filtering. Alternatively, the geneset may be user-supplied (must be
called 'geneset.fa'). If you're not using the 'auto-generate' option
and you want to perform the simulation with a pre-mRNA fraction
included, you must also include a 'geneset_pre_mrna.fa' geneset with
pre-mRNA sequences.
To generate a geneset multi-fasta from a gtf, use the following:
zcat geneset.gtf |
awk '$3=="exon"'|
cgat gff2fasta
--is-gtf --genome-file=genome.fa --fold-at=60 -v 0
--log=geneset.fa.log > geneset.fa;
samtools faidx geneset.fa
To generate a geneset multi-fasta of pre-mRNAs from a gtf, use the following:
zcat geneset.gtf |
awk '$3 == "transcript"'|
cgat gff2fasta
--is-gtf --genome-file=genome.fa --fold-at 60 -v 0
--log=geneset_pre_mrna.fa.log > geneset_pre_mrna.fa;
samtools faidx geneset_pre_mrna.fa
Prior to the sample quantification, reads are simulated from the gene
set. This is a naive RNA-Seq simulation which does not simulate the
well known but viable biases from library preparation
sequencing. Reads are sampled uniformly at random across the
transcript model and sequencing errors introduced at random uniformly
across the reads, with the fragment length sampled from a user-defined
normal distribution. The main purpose of the simulation is to flag
transcripts which cannot be accurately quantified with "near-perfect"
RNA-Seq reads, although it may also be used to compare the accuracy of
the tools selected, with the caveat that the simulation does not model
real RNA-Seq samples well. The user should check the performance with
the simulated data to establish whether the geneset used is
suitable. For instance, it has been noted that inclusion of poorly
supported transcripts leads to poorer quantification of well-supported
transcripts.
Note: If the transcripts.fa is not being generated within the
pipeline, you must ensure the supplied geneset.fa is sorted in gene_id
order (gtf2gtf --method=sort --sort-order=gene) and you must supply a
file mapping transcript ids to gene ids called transcript2gene.tsv
with the following form:
transcript_1 g_1
transcript_2 g_1
transcript_3 g_2
transcript_4 g_3
Principal targets
-----------------
simulation
perform the simulation only
quantification
compute all quantifications
full
compute all quantifications and perform differential transcript
expression testing
Usage
=====
See :ref:`PipelineSettingUp` and :ref:`PipelineRunning` on general
information how to use CGAT pipelines.
Configuration
-------------
The pipeline requires a configured :file:`pipeline.ini` file.
CGATReport report requires a :file:`conf.py` and optionally a
:file:`cgatreport.ini` file (see :ref:`PipelineReporting`).
Default configuration files can be generated by executing:
python <srcdir>/pipeline_transcriptdiffexpression.py config
Input files
-----------
Sequence read input. Can be fastq or sra, single or paired end.
Design_files ("*.design.tsv") are used to specify sample variates. The
minimal design file is shown below, where include specifies if the
sample should be included in the analysis, group specifies the sample
group and pair specifies whether the sample is paired. Note, multiple
design files may be included, for example so that multiple models can
be fitted to different subsets of the data
(tab-separated values)
sample include group pair
WT-1-1 1 WT 0
WT-1-2 1 WT 0
Mutant-1-1 1 Mutant 0
Mutant-1-2 1 Mutant 0
If further variates need to be given, e.g the General linear model is
defined as ~ group + replicate, these can be specified in further columns:
sample include group pair replicate
WT-1-1 1 WT 0 1
WT-1-2 1 WT 0 2
Mutant-1-1 1 Mutant 0 1
Mutant-1-2 1 Mutant 0 2
For each design file, the pipeline.ini must specify a model and contrasts
Requirements
------------
The pipeline requires the results from
:doc:`pipeline_annotations`. Set the configuration variable
:py:data:`annotations_database` and :py:data:`annotations_dir`.
On top of the default CGAT setup, the pipeline requires the following
software to be in the path:
.. Add any additional external requirements such as 3rd party software
or R modules below:
Requirements:
* kallisto >= 0.42.1
* salmon >= 0.5.0
* sleuth >= 0.27.1
Pipeline output
===============
The main outputs of the pipeline are results tables and plots from the
differential expression analysis. Outputs are generated for each
*.design.tsv file and each contrast specified and placed in DEresults.dir
`results_[design]_counts.tsv"`
counts table for all samples within the design
`results_[design]_tpm.tsv"`
Transcripts Per Million (tpm) table for all samples within the design
`results_[design]_[contrast]_sleuth_ma.png`
MA plot using sleuth function
DEresults.dir contains further plots summarising the sleuth analysis
`results_[design]_[contrast]_sleuth_vars.png`
technical vs. observed variance plot from sleuth
`results_[design]_[contrast]_sleuth_mean_var.png`
mean-variance plot from sleuth
`results_[design]_[contrast]_MA_plot.png`
MA plot from sleuth results table
(for direct comparison with other methods)
`results_[design]_[contrast]_volcano_plot.png`
volcano plot from sleuth results table
(for direct comparison with other methods)
The summary_plots directory contains further plots summarising the
expression estimates across the samples
# Mention Simulation results too!
Glossary
========
.. glossary::
Code
====
"""
# To do:
# Once Kallisto is upgraded > 0.42.2, include alignment stats from parsing sam
# Once sleuth is capable of performing within gene comparisons of
# transcript expression, add this analysis here too
# Add power test using counts2counts.py?
# add option to remove flagged transcripts from gene set
from ruffus import *
from ruffus.combinatorics import *
import sys
import os
import sqlite3
import glob
import pandas as pd
import numpy as np
import itertools
import CGATCore.Experiment as E
import CGATCore.IOTools as IOTools
import CGAT.Expression as Expression
from CGATCore import Pipeline as P
import CGATPipelines.PipelineMapping as PipelineMapping
import CGATPipelines.PipelineTracks as PipelineTracks
import CGATPipelines.PipelineTranscriptDiffExpression as TranscriptDiffExpression
# load options from the config file
PARAMS = P.getParameters(
["%s/pipeline.ini" % os.path.splitext(__file__)[0],
"../pipeline.ini",
"pipeline.ini"])
# Helper functions mapping tracks to conditions, etc
# determine the location of the input files (reads).
try:
PARAMS["input"]
except KeyError:
DATADIR = "."
else:
if PARAMS["input"] == 0:
DATADIR = "."
elif PARAMS["input"] == 1:
DATADIR = "data.dir"
else:
DATADIR = PARAMS["input"] # not recommended practise.
# add configuration values from associated pipelines
#
# 1. pipeline_annotations: any parameters will be added with the
# prefix "annotations_". The interface will be updated with
# "annotations_dir" to point to the absolute path names.
PARAMS.update(P.peekParameters(
PARAMS["annotations_dir"],
"pipeline_annotations.py",
on_error_raise=__name__ == "__main__",
prefix="annotations_",
update_interface=True,
restrict_interface=True))
# if necessary, update the PARAMS dictionary in any modules file.
# e.g.:
#
# import CGATPipelines.PipelineGeneset as PipelineGeneset
# PipelineGeneset.PARAMS = PARAMS
#
# Note that this is a hack and deprecated, better pass all
# parameters that are needed by a function explicitely.
# -----------------------------------------------
# Utility functions
def connect():
'''utility function to connect to database.
Use this method to connect to the pipeline database.
Additional databases can be attached here as well.
Returns an sqlite3 database handle.
'''
dbh = sqlite3.connect(PARAMS["database"])
statement = '''ATTACH DATABASE '%s' as annotations''' % (
PARAMS["annotations_database"])
cc = dbh.cursor()
cc.execute(statement)
cc.close()
return dbh
SEQUENCESUFFIXES = ("*.fastq.1.gz",
"*.fastq.gz",
"*.sra")
SEQUENCEFILES = tuple([os.path.join(DATADIR, suffix_name)
for suffix_name in SEQUENCESUFFIXES])
Sample = PipelineTracks.AutoSample
DESIGNS = PipelineTracks.Tracks(Sample).loadFromDirectory(
glob.glob("*.design.tsv"), "(\S+).design.tsv")
###############################################################################
# load designs
###############################################################################
@transform(["%s.design.tsv" % x.asFile() for x in DESIGNS],
suffix(".tsv"),
".load")
def loadDesigns(infile, outfile):
'''load design files into database'''
# note group column needs renaming
tmpfile = P.getTempFilename("/ifs/scratch")
statement = "sed 's/group/_group/g' %(infile)s > %(tmpfile)s"
P.run()
P.load(tmpfile, outfile)
os.unlink(tmpfile)
###############################################################################
# Create geneset
###############################################################################
if PARAMS["geneset_auto_generate"]:
# TS: move to module file?
@mkdir("index.dir")
@originate("index.dir/transcript_ids.tsv")
def identifyTranscripts(outfile):
'''output a list of gene identifiers where biotype matches filter'''
dbh = connect()
table = os.path.basename(
PARAMS["annotations_interface_table_transcript_info"])
where_cmd = "WHERE (%s)" % " OR ".join(
["gene_biotype = '%s'" % x for x in
PARAMS["geneset_gene_biotypes"].split(",")])
if PARAMS["geneset_transcript_biotypes"]:
t_biotypes = PARAMS["geneset_transcript_biotypes"].split(",")
where_cmd += " AND (%s)" % " OR ".join(
["transcript_biotype = '%s'" % x for x in t_biotypes])
if PARAMS["geneset_transcript_support"]:
if PARAMS["geneset_random_removal"]:
# if using random removal we don't want to filter on
# transcript support here
pass
else:
# TS: TSL is not given for all transcripts. Filtering here
# will retain transcripts without TSL annotation
# TS: I'm using substr because the tsl values also describe
# the previous tsl and we only want the current tsl
support_cmd = " OR ".join(
["substr(transcript_support,1,4) = 'tsl%s'" % x
for x in range(1, PARAMS["geneset_transcript_support"] + 1)])
# ensembl transcript support not given (e.g "NA") for
# pseudogenes, single exon transcripts, HLA, T-cell
# receptor, Ig transcripts. Do we want to keep these in?
na_support_cmd = "substr(transcript_support,1,5) = 'tslNA' "
null_support_cmd = "transcript_support IS NULL"
where_cmd += " AND (%s OR %s OR %s )" % (support_cmd,
na_support_cmd,
null_support_cmd)
if PARAMS["geneset_random_removal"]:
# TS:this section is for testing null random removal of transcripts
# perform random removal of transcripts
# remove the equivalent number as would be removed by
# transcript support level filtering
# note: in line with above, transcripts with tsl = NA are retained
select_cmd = """ SELECT DISTINCT gene_id, transcript_id,
transcript_support FROM annotations.%(table)s %(where_cmd)s
""" % locals()
select = dbh.execute(select_cmd)
previous_gene = ""
transcript_ids = []
tsls = []
# TS: remove these counts when this section has been checked...
n_entries = 0
n_transcripts = 0
n_genes = 0
n_low_support = 0
n_high_support = 0
tsl_NA = 0
tsl_NULL = 0
with IOTools.openFile(outfile, "w") as outf:
outf.write("transcript_id\n")
for entry in select:
n_entries += 1
gene_id, transcript_id, tsl = entry
if gene_id == previous_gene:
n_transcripts += 1
transcript_ids.append(transcript_id)
# some transcripts do not have a tsl
try:
tsls.append(
int(tsl.strip().split()[0].replace("tsl", "")))
except:
if tsl is None:
tsl_NULL += 1
else:
if tsl.strip().split()[0] == "tslNA":
tsl_NA += 1
else:
count_below_threshold = len(
[x for x in tsls if x >
PARAMS["geneset_transcript_support"]])
count_above_threshold = len(
[x for x in tsls if x <=
PARAMS["geneset_transcript_support"]])
n_low_support += count_below_threshold
n_high_support += count_above_threshold
if count_below_threshold > 0:
# randomly remove transcripts
transcript_ids = np.random.choice(
transcript_ids,
size=len(transcript_ids) -
count_below_threshold,
replace=False)
# for some gene_ids, all transcripts may be removed!
if len(transcript_ids) > 0:
outf.write(
"\n".join((x for x in transcript_ids)) + "\n")
previous_gene = gene_id
transcript_ids = [transcript_id]
try:
tsls = [
int(tsl.strip().split()[0].replace("tsl", ""))]
except:
tsls = []
if tsl is None:
tsl_NULL += 1
else:
if tsl.strip().split()[0] == "tslNA":
tsl_NA += 1
n_transcripts += 1
n_genes += 1
# random select again for the last gene_id
count_below_threshold = len(
[x for x in tsls if x >
PARAMS["geneset_transcript_support"]])
count_above_threshold = len(
[x for x in tsls if x <=
PARAMS["geneset_transcript_support"]])
n_low_support += count_below_threshold
n_high_support += count_above_threshold
if count_below_threshold > 0:
transcript_ids = np.random.choice(
transcript_ids,
size=len(transcript_ids) - count_below_threshold,
replace=False)
if len(transcript_ids) > 0:
outf.write("\n".join((x for x in transcript_ids)))
print(("# entries %i, # transcripts %i, # genes %i,"
"# low support %i, # high support %i,"
" # NA tsl %i, # NULL tsl %i" % (
n_entries, n_transcripts, n_genes, n_low_support,
n_high_support, tsl_NA, tsl_NULL)))
else:
# select all distinct transcript_ids passing filters
select_cmd = """ SELECT DISTINCT transcript_id
FROM annotations.%(table)s %(where_cmd)s""" % locals()
select = dbh.execute(select_cmd)
with IOTools.openFile(outfile, "w") as outf:
outf.write("transcript_id\n")
outf.write("\n".join((x[0] for x in select)) + "\n")
@transform(identifyTranscripts,
regex("index.dir/transcript_ids.tsv"),
"index.dir/transcripts.gtf.gz")
def buildGeneSet(mapfile, outfile):
''' build a gene set with only transcripts which pass filter '''
geneset = PARAMS['annotations_interface_geneset_all_gtf']
statement = '''
zcat %(geneset)s
| cgat gtf2gtf
--method=filter
--filter-method=transcript
--map-tsv-file=%(mapfile)s
--log=%(outfile)s.log
| cgat gtf2gtf
--method=sort
--sort-order=gene+transcript
--log=%(outfile)s.log
| gzip
> %(outfile)s
'''
P.run()
@transform(buildGeneSet,
suffix(".gtf.gz"),
".fa")
def buildReferenceTranscriptome(infile, outfile):
''' build reference transcriptome from geneset'''
genome_file = os.path.abspath(
os.path.join(PARAMS["genome_dir"], PARAMS["genome"] + ".fa"))
statement = '''
zcat %(infile)s |
awk '$3=="exon"'|
cgat gff2fasta
--is-gtf --genome-file=%(genome_file)s --fold-at=60 -v 0
--log=%(outfile)s.log > %(outfile)s;
samtools faidx %(outfile)s
'''
P.run()
@transform(buildGeneSet,
suffix(".gtf.gz"),
".pre_mRNA.fa")
def buildReferencePreTranscriptome(infile, outfile):
''' build a reference transcriptome for pre-mRNAs'''
if PARAMS['simulation_pre_mrna_fraction']:
genome_file = os.path.abspath(
os.path.join(PARAMS["genome_dir"], PARAMS["genome"] + ".fa"))
statement = '''
zcat %(infile)s |
awk '$3 == "transcript"'|
cgat gff2fasta
--is-gtf --genome-file=%(genome_file)s --fold-at 60 -v 0
--log=%(outfile)s.log > %(outfile)s;
samtools faidx %(outfile)s
'''
P.run()
else:
P.touch(outfile)
else:
# if a reference gtf is provided, just soft link to this
assert os.path.exists("geneset.fa") > 0, (
"if not auto generating a geneset, you must"
"provide a geneset in a geneset.fa file")
@mkdir("index.dir")
@files("geneset.fa", "index.dir/transcripts.fa")
def buildReferenceTranscriptome(infile, outfile):
''' link to the geneset provided'''
P.clone(os.path.abspath(infile), os.path.abspath(outfile))
@mkdir("index.dir")
@files("geneset.fa", "index.dir/transcripts.pre_mRNA.fa")
def buildReferencePreTranscriptome(infile, outfile):
''' build a reference transcriptome for pre-mRNAs'''
if PARAMS['simulation_pre_mrna_fraction']:
assert os.path.exists("geneset_pre_mRNA.fa") > 0, (
"if not auto generating a geneset and simulating with"
" a pre-mRNA fraction, you must provide a 'pre-mrna'"
" geneset in a 'geneset_pre_mRNA.fa' file")
P.clone(os.path.abspath(infile), os.path.abspath(outfile))
else:
P.touch(outfile)
###############################################################################
# build indexes
###############################################################################
@transform(buildReferenceTranscriptome,
suffix(".fa"),
".kallisto.index")
def buildKallistoIndex(infile, outfile):
''' build a kallisto index'''
job_memory = "12G"
statement = '''
kallisto index -i %(outfile)s -k %(kallisto_kmer)s %(infile)s
'''
P.run()
@transform(buildReferenceTranscriptome,
suffix(".fa"),
".salmon.index")
def buildSalmonIndex(infile, outfile):
''' build a salmon index'''
job_memory = "2G"
statement = '''
salmon index %(salmon_index_options)s -t %(infile)s -i %(outfile)s
-k %(salmon_kmer)s
'''
P.run()
@transform(buildReferenceTranscriptome,
suffix(".fa"),
".sailfish.index")
def buildSailfishIndex(infile, outfile):
''' build a sailfish index'''
# sailfish indexing is more memory intensive than Salmon/Kallisto
job_memory = "6G"
statement = '''
sailfish index --transcripts=%(infile)s --out=%(outfile)s
--kmerSize=%(sailfish_kmer)s
%(sailfish_index_options)s
'''
P.run()
@follows(mkdir("index.dir"),
buildReferencePreTranscriptome,
buildKallistoIndex,
buildSalmonIndex,
buildSailfishIndex)
def index():
pass
###############################################################################
# Simulation
###############################################################################
# if not simulating, final task ('simulation') is empty
if PARAMS['simulation_run']:
@mkdir("simulation.dir")
@transform(buildReferenceTranscriptome,
suffix(".fa"),
"_transcript_kmers.tsv",
output_dir="simulation.dir")
def countTranscriptKmers(infile, outfile):
''' count the number of unique and non-unique kmers per transcript '''
job_memory = PARAMS["simulation_kmer_memory"]
statement = '''
cgat fasta2unique_kmers
--input-fasta=%(infile)s
--method=transcript
--kmer-size=%(kallisto_kmer)s
-L %(outfile)s.log
> %(outfile)s '''
P.run()
@transform(countTranscriptKmers,
suffix(".tsv"),
".load")
def loadTranscriptKmers(infile, outfile):
''' load the kmer counts'''
options = "--add-index=id"
P.load(infile, outfile, options=options)
@mkdir("simulation.dir")
@transform(buildReferenceTranscriptome,
suffix(".fa"),
"_gene_kmers.tsv",
output_dir="simulation.dir")
def countGeneKmers(infile, outfile):
''' count the number of unique and non-unique kmers per gene '''
job_memory = PARAMS["simulation_kmer_memory"]
if PARAMS["geneset_auto_generate"]:
genemap = P.getTempFilename(shared=True)
dbh = connect()
select = dbh.execute('''
SELECT DISTINCT transcript_id, gene_id FROM transcript_info''')
with IOTools.openFile(genemap, "w") as outf:
for line in select:
outf.write("%s\t%s\n" % (line[0], line[1]))
else:
assert os.path.exists("transcript2gene.tsv"), (
"if you want to run the simulation on a user-supplied "
"geneset, you need to supply a file mapping "
"transcripts to genes " "called transcript2gene.tsv")
genemap = "transcript2gene.tsv"
statement = '''
cgat fasta2unique_kmers
--input-fasta=%(infile)s
--method=gene
--genemap=%(genemap)s
--kmer-size=%(kallisto_kmer)s
-L %(outfile)s.log > %(outfile)s '''
P.run()
if PARAMS["geneset_auto_generate"]:
os.unlink(genemap)
@transform(countGeneKmers,
suffix(".tsv"),
".load")
def loadGeneKmers(infile, outfile):
''' load the kmer counts'''
options = "--add-index=id"
P.load(infile, outfile, options=options)
@mkdir("simulation.dir")
@follows(buildReferenceTranscriptome,
buildReferencePreTranscriptome)
@files([(["index.dir/transcripts.fa",
"index.dir/transcripts.pre_mRNA.fa"],
("simulation.dir/simulated_reads_%i.fastq.1.gz" % x,
"simulation.dir/simulated_read_counts_%i.tsv" % x))
for x in range(0, PARAMS["simulation_iterations"])])
def simulateRNASeqReads(infiles, outfiles):
''' simulate RNA-Seq reads from the transcripts fasta file
and transcripts pre-mRNA fasta file'''
# TS: to do: add option to learn parameters from real RNA-Seq data
# TS: move to module file. the statement is complicated by
    # necessity for random order for some simulations
infile, premrna_fasta = infiles
outfile, outfile_counts = outfiles
single_end_random_cmd = ""
paired_end_random_cmd = ""
if PARAMS["simulation_paired"]:
outfile2 = outfile.replace(".1.gz", ".2.gz")
options = '''
--output-paired-end
--output-fastq2=%(outfile2)s ''' % locals()
if PARAMS["simulation_random"]:
# need to randomised order but keep pairs in same position
tmp_fastq1 = P.getTempFilename()
tmp_fastq2 = P.getTempFilename()
# randomise fastqs, gzip and replace
paired_end_random_cmd = '''
; checkpoint ;
paste <(zcat %(outfile)s) <(zcat %(outfile2)s) |
paste - - - - | sort -R |
awk -F'\\t' '{OFS="\\n"; print $1,$3,$5,$7 > "%(tmp_fastq1)s";
print $2,$4,$6,$8 > "%(tmp_fastq2)s"}'; checkpoint ;
rm -rf %(outfile)s %(outfile2)s; checkpoint;
gzip -c %(tmp_fastq1)s > %(outfile)s; checkpoint;
gzip -c %(tmp_fastq2)s > %(outfile2)s
''' % locals()
os.unlink(tmp_fastq1)
os.unlink(tmp_fastq2)
else:
options = ""
if PARAMS["simulation_random"]:
single_end_random_cmd = '''
paste - - - - | sort -R | sed 's/\\t/\\n/g'| '''
if PARAMS["simulation_random"]:
# random shuffling requires all the reads to be held in memory!
# should really estimate whether 4G will be enough
job_memory = "4G"
else:
job_memory = "1G"
job_threads = 2
statement = '''
cat %(infile)s |
cgat fasta2fastq
--premrna-fraction=%(simulation_pre_mrna_fraction)s
--infile-premrna-fasta=%(premrna_fasta)s
--output-read-length=%(simulation_read_length)s
--insert-length-mean=%(simulation_insert_mean)s
--insert-length-sd=%(simulation_insert_sd)s
--counts-method=copies
--counts-min=%(simulation_copies_min)s
--counts-max=%(simulation_copies_max)s
--sequence-error-phred=%(simulation_phred)s
--output-counts=%(outfile_counts)s
--output-quality-format=33 -L %(outfile)s.log
%(options)s | %(single_end_random_cmd)s
gzip > %(outfile)s %(paired_end_random_cmd)s'''
P.run()
@mkdir("simulation.dir/quant.dir/kallisto")
@transform(simulateRNASeqReads,
regex("simulation.dir/simulated_reads_(\d+).fastq.1.gz"),
add_inputs(buildKallistoIndex),
r"simulation.dir/quant.dir/kallisto/simulated_reads_\1/abundance.h5")
def quantifyWithKallistoSimulation(infiles, outfile):
        ''' quantify transcript abundance from simulated reads with kallisto'''
# TS more elegant way to parse infiles and index?
infiles, index = infiles
infile, counts = infiles
# multithreading not supported until > v0.42.1
job_threads = PARAMS["kallisto_threads"]
job_memory = "8G"
kallisto_options = PARAMS["kallisto_options"]
if PARAMS["simulation_bootstrap"]:
kallisto_bootstrap = PARAMS["kallisto_bootstrap"]
else:
kallisto_bootstrap = 0
m = PipelineMapping.Kallisto()
statement = m.build((infile,), outfile)
P.run()
@transform(quantifyWithKallistoSimulation,
suffix(".h5"),
".tsv")
def extractKallistoCountSimulation(infile, outfile):
        ''' run kallisto h5dump to extract txt file'''
outfile_dir = os.path.dirname(os.path.abspath(infile))
statement = '''kallisto h5dump -o %(outfile_dir)s %(infile)s'''
P.run()
@mkdir("simulation.dir/quant.dir/salmon")
@transform(simulateRNASeqReads,
regex("simulation.dir/simulated_reads_(\d+).fastq.1.gz"),
add_inputs(buildSalmonIndex),
r"simulation.dir/quant.dir/salmon/simulated_reads_\1/quant.sf")
def quantifyWithSalmonSimulation(infiles, outfile):
# TS more elegant way to parse infiles and index?
infiles, index = infiles
infile, counts = infiles
job_threads = PARAMS["salmon_threads"]
job_memory = "8G"
salmon_options = PARAMS["salmon_options"]
if PARAMS["salmon_bias_correct"]:
salmon_options += " --biascorrect"
salmon_libtype = "ISF"
if PARAMS["simulation_bootstrap"]:
salmon_bootstrap = PARAMS["salmon_bootstrap"]
else:
salmon_bootstrap = 0
m = PipelineMapping.Salmon(PARAMS["salmon_bias_correct"])
statement = m.build((infile,), outfile)
P.run()
@mkdir("simulation.dir/quant.dir/sailfish")
@transform(simulateRNASeqReads,
regex("simulation.dir/simulated_reads_(\d+).fastq.1.gz"),
add_inputs(buildSailfishIndex),
r"simulation.dir/quant.dir/sailfish/simulated_reads_\1/quant.sf")
def quantifyWithSailfishSimulation(infiles, outfile):
# TS more elegant way to parse infiles and index?
infiles, index = infiles
infile, counts = infiles
job_threads = PARAMS["sailfish_threads"]
job_memory = "8G"
sailfish_options = PARAMS["sailfish_options"]
sailfish_libtype = "ISF"
if PARAMS["simulation_bootstrap"]:
sailfish_bootstrap = PARAMS["sailfish_bootstrap"]
else:
sailfish_bootstrap = 0
m = PipelineMapping.Sailfish()
statement = m.build((infile,), outfile)
P.run()
@transform(quantifyWithSalmonSimulation,
regex("(\S+)/quant.sf"),
r"\1/abundance.tsv")
def extractSalmonCountSimulation(infile, outfile):
''' rename columns and remove comment to keep file format the same
as kallisto'''
# note: this expects column order to stay the same
with IOTools.openFile(infile, "r") as inf:
lines = inf.readlines()
with IOTools.openFile(outfile, "w") as outf:
outf.write("%s\n" % "\t".join(
("target_id", "length", "tpm", "est_counts")))
for line in lines:
if not line.startswith("# "):
outf.write(line)
@transform(quantifyWithSailfishSimulation,
regex("(\S+)/quant.sf"),
r"\1/abundance.tsv")
def extractSailfishCountSimulation(infile, outfile):
''' rename columns and remove comment to keep file format the same
as kallisto'''
# note: this expects column order to stay the same
with IOTools.openFile(infile, "r") as inf:
lines = inf.readlines()
with IOTools.openFile(outfile, "w") as outf:
outf.write("%s\n" % "\t".join(
("target_id", "length", "tpm", "est_counts")))
for line in lines:
if not line.startswith("# "):
outf.write(line)
# define simulation targets
SIMTARGETS = []
mapToSimulationTargets = {'kallisto': (extractKallistoCountSimulation, ),
'salmon': (extractSalmonCountSimulation, ),
'sailfish': (extractSailfishCountSimulation, )}
for x in P.asList(PARAMS["quantifiers"]):
SIMTARGETS.extend(mapToSimulationTargets[x])
@follows(*SIMTARGETS)
def quantifySimulation():
pass
@transform(SIMTARGETS,
regex(
"simulation.dir/quant.dir/(\S+)/simulated_reads_(\d+)/abundance.tsv"),
r"simulation.dir/quant.dir/\1/simulated_reads_\2/results.tsv",
r"simulation.dir/simulated_read_counts_\2.tsv")
def mergeAbundanceCounts(infile, outfile, counts):
''' merge the abundance and simulation counts files for
each simulation '''
TranscriptDiffExpression.mergeAbundanceCounts(
infile, outfile, counts, job_memory="2G", submit=True)
@collate(mergeAbundanceCounts,
regex("simulation.dir/quant.dir/(\S+)/simulated_reads_\d+/results.tsv"),
r"simulation.dir/\1_simulation_results.tsv")
def concatSimulationResults(infiles, outfile):
''' concatenate all simulation results '''
df = pd.DataFrame()
for inf in infiles:
df_tmp = pd.read_table(inf, sep="\t")
        df = pd.concat([df, df_tmp], ignore_index=True)
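    # Assumed final step (the function is truncated here): write the concatenated table
    # df.to_csv(outfile, sep="\t", index=False)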
#**************************************************************************************#
# Project: River Node
# Authors: <NAME>
# Department: CIDSE
# Semester: Fall 2016/Spring 2017
# Course Number and Name: CSE 492/493 Honors Thesis
# Supervisors: Dr. <NAME> & Dr. <NAME>
#**************************************************************************************#
# STANDARD LIBRARIES
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import pandas as pd
# MY FILES
from data_calc import *
from list_conversions import *
from map_data import map_points
dataList=[]
#**************************************************************************************#
# Functions #
#**************************************************************************************#
def update_data():
"""
Description:
Updates the data list as well as the feed data.
"""
global feed_data
feed_data = pd.read_json('https://io.adafruit.com/api/v2/specialKody/feeds/river-node-location-ph/data')
feed_data['created_at'] = pd.to_datetime(feed_data['created_at'], infer_datetime_format=True)
#This removes the unused data columns
feed_data.drop(feed_data.columns[[0,2,4,5,6,9,11]], axis=1, inplace=True)
lat = feed_data['lat']
lon = feed_data['lon']
dist = calculated_distance(lat,lon)
speedSeries = list_to_series(calculate_speeds(feed_data['created_at'], dist))
global dataList
dataList= [lat, lon, feed_data['ele'], feed_data['value'], feed_data['created_at'], speedSeries]
def map_ph(high_contrast):
"""
:param high_contrast: This specifies the color contrast of the map. 0=regular contrast, 1=heightened contrast
Description:
Maps the pH values on the Basemap map through the map_points function call.
"""
map_points((dataList[1]).tolist(), (dataList[0]).tolist(), (pd.Series(dataList[3])).tolist(), high_contrast)
def elev_update(dump, line, ax, high_contrast):
"""
:param dump: Believe this is needed as garbage data goes into first parameter
:param line: The line to be updated
:param ax: The plot the line is currently on
:param high_contrast: This specifies the color contrast of the map. 0=regular contrast, 1=heightened contrast
Description:
Updates the elevation line plot after pulling new data.
"""
plt.cla()
update_data()
elevation = pd.Series(dataList[2])
if(high_contrast):
line = ax.plot(elevation, linewidth=3.0)
else:
line = ax.plot(elevation)
return line
def plot_elev(real_time, high_contrast):
"""
:param real_time: This specifies if real time updates are to occur. 0=static data, 1=updating data
:param high_contrast: This specifies the color contrast of the map. 0=regular contrast, 1=heightened contrast
Description:
    This function plots the elevation data.
"""
if(real_time):
elevation = pd.Series(dataList[2])
fig, ax = plt.subplots()
fig.canvas.set_window_title("Node Elevation")
ax.set_ylabel("Elevation (Meters)")
ax.set_xlabel("Measurment")
line = ax.plot(elevation)
ani = animation.FuncAnimation(fig, elev_update, interval=1000, fargs=(line, ax, high_contrast), blit=True)
else:
plt.figure("Node Elevation")
elevation = pd.Series(dataList[2])
if(high_contrast == 1):
elevation.plot(linewidth=3.0)
else:
elevation.plot()
plt.ylabel("Elevation (Meters)")
plt.xlabel("Measurment")
plt.show()
def ph_update(dump, line, ax, high_contrast):
"""
:param dump: Believe this is needed as garbage data goes into first parameter
:param line: The line to be updated
:param ax: The plot the line is currently on
:param high_contrast: This specifies the color contrast of the map. 0=regular contrast, 1=heightened contrast
Description:
Updates the ph line plot after pulling new data.
"""
plt.cla()
update_data()
values = pd.Series(dataList[3])
if(high_contrast):
line = ax.plot(values, linewidth=3.0)
else:
line = ax.plot(values)
return line
def plot_ph(real_time, high_contrast):
"""
:param real_time: This specifies if real time updates are to occur. 0=static data, 1=updating data
:param high_contrast: This specifies the color contrast of the map. 0=regular contrast, 1=heightened contrast
Description:
This function plots the PH data. The PH data is stored as 'value' by the Adafruit IOT website.
"""
if(real_time):
values = pd.Series(dataList[3])
fig, ax = plt.subplots()
fig.canvas.set_window_title("Node pH Recordings")
ax.set_ylabel("PH")
ax.set_xlabel("Measurment")
line = ax.plot(values)
ani = animation.FuncAnimation(fig, ph_update, interval=20000, fargs=(line, ax, high_contrast), blit=True)
else:
plt.figure("Node PH Recordings")
values = pd.Series(dataList[3])
if(high_contrast == 1):
values.plot(linewidth=3.0)
else:
values.plot()
plt.ylabel("PH")
plt.xlabel("Measurment")
plt.show()
def speed_update(dump, line, ax, high_contrast):
"""
:param dump: Believe this is needed as garbage data goes into first parameter
:param line: The line to be updated
:param ax: The plot the line is currently on
:param high_contrast: This specifies the color contrast of the map. 0=regular contrast, 1=heightened contrast
Description:
Updates the speed line plot after pulling new data.
"""
plt.cla()
update_data()
speed = dataList[5]
if(high_contrast):
line = ax.plot(speed, linewidth=3.0)
else:
line = ax.plot(speed)
return line
def plot_speed(real_time, high_contrast):
"""
:param real_time: This specifies if real time updates are to occur. 0=static data, 1=updating data
:param high_contrast: This specifies the color contrast of the map. 0=regular contrast, 1=heightened contrast
Description:
    This function plots the calculated speed values. This requires a call to the list_to_series function and the
calculate_speeds function.
"""
if(real_time):
speed = dataList[5]
fig, ax = plt.subplots()
fig.canvas.set_window_title("Node Speed")
ax.set_ylabel("Speed (Meters/Second)")
ax.set_xlabel("Measurment")
line = ax.plot(speed)
ani = animation.FuncAnimation(fig, speed_update, interval=20000, fargs=(line, ax, high_contrast), blit=True)
else:
plt.figure("Node Speed")
speedSeries = dataList[5]
if(high_contrast == 1):
speedSeries.plot(linewidth=3.0)
else:
speedSeries.plot()
plt.ylabel("Speed (Meters/Second)")
plt.xlabel("Measurment")
plt.show()
def export_data():
"""
Description:
Exports the feed data to a text file. This feed data has unused columns trimmed.
"""
global feed_data
a = feed_data.to_string(buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, line_width=None, max_rows=None, max_cols=None, show_dimensions=False)
f = open('dashboard_export.txt', 'w')
f.write(a)
def elev_update_nr(dump, line, ax, high_contrast):
"""
:param dump: Believe this is needed as garbage data goes into first parameter
:param line: The line to be updated
:param ax: The plot the line is currently on
:param high_contrast: This specifies the color contrast of the map. 0=regular contrast, 1=heightened contrast
Description:
Updates the elevation plot without updating data.
"""
plt.cla()
elevation = pd.Series(dataList[2])
if(high_contrast):
line = ax.plot(elevation, linewidth=3.0)
else:
line = ax.plot(elevation)
return line
def speed_update_nr(dump, line, ax, high_contrast):
"""
:param dump: Believe this is needed as garbage data goes into first parameter
:param line: The line to be updated
:param ax: The plot the line is currently on
:param high_contrast: This specifies the color contrast of the map. 0=regular contrast, 1=heightened contrast
Description:
Updates the speed plot without updating data.
"""
plt.cla()
speed = dataList[5]
if(high_contrast):
line = ax.plot(speed, linewidth=3.0)
else:
line = ax.plot(speed)
return line
def plot_combined(real_time, high_contrast):
"""
:param real_time: This specifies if real time updates are to occur. 0=static data, 1=updating data
:param high_contrast: This specifies the color contrast of the map. 0=regular contrast, 1=heightened contrast
Description:
This function places all three line plots (pH, elevation, speed) on a single window one above another.
This is a seperate button as the stacking creates smaller graphing windows.
"""
if(real_time):
        elevation = pd.Series(dataList[2])
#!/usr/bin/env python
# coding: utf-8
from install import *
from solvers import *
from params import *
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import rayleigh, norm, kstest
def plot_maxwell(vel, label=None, draw=True):
speed = (vel*vel).sum(1)**0.5
loc, scale = rayleigh.fit(speed, floc=0)
dist = rayleigh(scale=scale)
if draw:
        plt.hist(speed, 20, density=True)
x = np.linspace(dist.ppf(0.01), dist.ppf(0.99), 1000)
plt.plot(x, dist.pdf(x), label=label)
if label:
plt.legend()
return kstest(speed, dist.cdf)[0]
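# Usage sketch (hypothetical data): velocity components drawn from a 2-D normal distribution
# give Rayleigh-distributed speeds, so the returned KS statistic should be small.
#   vel = np.random.normal(size=(2000, 2))
#   ks_stat = plot_maxwell(vel, label="demo"); plt.show()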
def plot_maxwell_x(vel, label=None, draw=True):
loc, scale = norm.fit(vel[:, 0], floc=0)
dist = norm(scale=scale)
if draw:
        plt.hist(vel[:, 0], 20, density=True)
x = np.linspace(dist.ppf(0.01), dist.ppf(0.99), 1000)
plt.plot(x, dist.pdf(x), label=label)
if label:
plt.legend()
return kstest(vel[:, 0], dist.cdf)[0]
def plot_particles(pos, vel):
plt.xlabel("X")
plt.ylabel("Y")
plt.quiver(pos[:, 0], pos[:, 1], vel[:, 0], vel[:, 1])
def multi_particles(f, title=None, n=None, width=4.5, height=4):
if n is None:
n = min(5, len(f.data))
fig = plt.figure(figsize=(width*n, height))
if title:
fig.suptitle(title)
for i in range(n):
plt.subplot(1, n, i+1)
j = math.floor(i*(len(f.data)-1)/(n-1))
plt.title(f"t = {f.time[j]:.1f}")
r = f.data[j]
plot_particles(r.pos, r.vel)
def multi_maxwell(f, title=None, n=None, draw=True, width=20, height=2):
if n is None:
n = len(f.data)
if draw:
fig = plt.figure(figsize=(width, height*n))
if title:
fig.suptitle(title)
max_vel = max((r.vel*r.vel).sum(1).max() for r in f.data)**0.5
max_x = max((np.abs(r.vel[:,0])).max() for r in f.data)
fits = []
for i in range(n):
        j = math.floor(i*(len(f.data)-1)/(n-1))
r = f.data[j]
if draw:
plt.subplot(n, 2, 2*i+1)
plt.xlim(0, max_vel)
f1 = plot_maxwell(r.vel, f"t = {f.time[j]:.1f}", draw)
if draw:
plt.subplot(n, 2, 2*i+2)
plt.xlim(-max_x, max_x)
f2 = plot_maxwell_x(r.vel, f"t = {f.time[j]:.1f}", draw)
fits.append({"t": f.time[j], "speed_stat":f1, "xvel_stat":f2})
    return pd.DataFrame.from_records(fits, index='t')
import pandas as pd
import copy
import argparse
import helper
env_data = helper.fetch_maze()
def is_move_valid_visited(env_data,visit_map,loc,act):
"""
Judge wether the robot can take action act
at location loc.
Keyword arguments:
env -- list, the environment data
loc -- tuple, robots current location
act -- string, robots meant action
"""
nextloc=list(loc)
if act=='u':
nextloc[0]=nextloc[0]-1
elif act=='d':
nextloc[0]=nextloc[0]+1
elif act=='r':
nextloc[1]=nextloc[1]+1
elif act=='l':
nextloc[1]=nextloc[1]-1
else:
return False
if (nextloc[0] in range(len(env_data))) and (nextloc[1] in range(len(env_data[0]))):
if env_data[nextloc[0]][nextloc[1]]==0 or env_data[nextloc[0]][nextloc[1]]==1 or env_data[nextloc[0]][nextloc[1]]==3:
if visit_map[nextloc[0]][nextloc[1]]==0 or visit_map[nextloc[0]][nextloc[1]]==1 or visit_map[nextloc[0]][nextloc[1]]==3:
return True
else:
return False
else:
return False
else:
return False
def valid_novisit_actions(env_data,visit_map,loc):
valid_action=[]
'''
Follow u,d,r,l direction to move around
'''
for i in ['u','d','r','l']:
if is_move_valid_visited(env_data,visit_map,loc,i):
valid_action.append(i)
return valid_action
def get_valid_neighbor_loc(loc,action_list):
neighbor_list=list()
'''
Follow u,d,r,l direction to move around
'''
for i in action_list:
new_loc=list(loc)
if i=='u':
new_loc[0]=new_loc[0]-1
elif i=='d':
new_loc[0]=new_loc[0]+1
elif i=='r':
new_loc[1]=new_loc[1]+1
elif i=='l':
new_loc[1]=new_loc[1]-1
neighbor_list.append((new_loc[0],new_loc[1]))
return neighbor_list
def move_robot(loc, act):
move_dict ={
'u': (-1,0),
'd': (1,0),
'l': (0,-1),
'r': (0,1)
}
return loc[0] + move_dict[act][0], loc[1] + move_dict[act][1]
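# Example: with the move encoding above, move_robot((3, 4), 'u') -> (2, 4)
# and move_robot((3, 4), 'r') -> (3, 5).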
def bfs_move_robot(env_data,visit_map,loc,act_list,route_table):
#algorithm reference: https://blog.csdn.net/raphealguo/article/details/7523411
for act in act_list:
new_loc=list(loc)
if act=='u':
new_loc[0]=new_loc[0]-1
elif act=='d':
new_loc[0]=new_loc[0]+1
elif act=='r':
new_loc[1]=new_loc[1]+1
elif act=='l':
new_loc[1]=new_loc[1]-1
mark_visit(visit_map,(new_loc[0],new_loc[1]),'gray')
route_table=route_table.append(pd.DataFrame(data={'source_loc':[(list(loc)[0],list(loc)[1])],'move_direct':act,'next_loc':[(new_loc[0],new_loc[1])],'route_type':'forward'}),ignore_index=True)
if env_data[new_loc[0]][new_loc[1]]==3:
return route_table
else:
Source_loc=new_loc
new_loc=move_back_robot(new_loc,act)
act=roll_back_direction(act)
route_table=route_table.append(pd.DataFrame(data={'source_loc':[(Source_loc[0],Source_loc[1])],'move_direct':act,'next_loc':[(new_loc[0],new_loc[1])],'route_type':'backward'}),ignore_index=True)
continue
return route_table
def move_back_robot(loc,act):
'''Rollback need not check visit_map'''
new_loc=list(loc)
if act=='u':
new_loc[0]=new_loc[0]+1
elif act=='d':
new_loc[0]=new_loc[0]-1
elif act=='r':
new_loc[1]=new_loc[1]-1
elif act=='l':
new_loc[1]=new_loc[1]+1
return (new_loc[0],new_loc[1])
def roll_back_direction(act):
'''Rollback need not check visit_map'''
if act=='u':
new_act='d'
elif act=='d':
new_act='u'
elif act=='l':
new_act='r'
elif act=='r':
new_act='l'
return new_act
def mark_visit(visit_map,loc,color):
new_loc=list(loc)
if color=='dark':
visit_map[new_loc[0]][new_loc[1]]=4
elif color=='gray':
visit_map[new_loc[0]][new_loc[1]]=5
else:
print('Only accept color:dark or gray!')
def trace_route(route_table,initial_loc,from_loc,to_loc):
back_route=pd.DataFrame(columns=['source_loc','move_direct','next_loc','route_type'])
forward_route=pd.DataFrame(columns=['source_loc','move_direct','next_loc','route_type'])
forward_route=forward_route.append(route_table[route_table.route_type=='forward'],ignore_index=True)
s_flag=0
d_flag=0
if from_loc==to_loc:
return
else:
s_loc=from_loc
d_loc=to_loc
while 1:
if s_loc==initial_loc or d_loc==initial_loc:
if s_loc==initial_loc and d_loc==initial_loc:
break
elif s_loc==initial_loc:
d_route=pd.DataFrame(columns=['source_loc','move_direct','next_loc','route_type']).append(forward_route[forward_route.next_loc==d_loc],ignore_index=True)
if s_flag==0:
s_route=pd.DataFrame(columns=['source_loc','move_direct','next_loc','route_type']).append(forward_route[(forward_route.source_loc==s_loc) & (forward_route.next_loc==d_loc)],ignore_index=True)
back_route=back_route.append(pd.DataFrame(data={'source_loc':[s_route.loc[0]['next_loc']],'move_direct':roll_back_direction(s_route.loc[0]['move_direct']),'next_loc':[s_route.loc[0]['source_loc']],'route_type':'backward'}),ignore_index=True)
s_flag+=1
back_route=back_route.append(pd.DataFrame(data={'source_loc':[d_route.loc[0]['source_loc']],'move_direct':d_route.loc[0]['move_direct'],'next_loc':[d_route.loc[0]['next_loc']],'route_type':'backward'}),ignore_index=True)
d_loc=d_route.loc[0]['source_loc']
elif d_loc==initial_loc:
if d_flag==0:
d_route=pd.DataFrame(columns=['source_loc','move_direct','next_loc','route_type']).append(forward_route[(forward_route.source_loc==s_loc) & (forward_route.next_loc==d_loc)],ignore_index=True)
back_route=back_route.append(pd.DataFrame(data={'source_loc':[d_route.loc[0]['source_loc']],'move_direct':d_route.loc[0]['move_direct'],'next_loc':[d_route.loc[0]['next_loc']],'route_type':'backward'}),ignore_index=True)
d_flag+=1
s_route=pd.DataFrame(columns=['source_loc','move_direct','next_loc','route_type']).append(forward_route[forward_route.next_loc==s_loc],ignore_index=True)
back_route=back_route.append(pd.DataFrame(data={'source_loc':[s_route.loc[0]['next_loc']],'move_direct':roll_back_direction(s_route.loc[0]['move_direct']),'next_loc':[s_route.loc[0]['source_loc']],'route_type':'backward'}),ignore_index=True)
s_loc=s_route.loc[0]['source_loc']
elif s_loc==d_loc:
break
else:
s_route=pd.DataFrame(columns=['source_loc','move_direct','next_loc','route_type']).append(forward_route[forward_route.next_loc==s_loc],ignore_index=True)
                d_route=pd.DataFrame(columns=['source_loc','move_direct','next_loc','route_type']).append(forward_route[forward_route.next_loc==d_loc],ignore_index=True)  # assumed completion, mirroring the branches above
"""
module that analyse user's routines
Copyright (c) 2021 Idiap Research Institute, https://www.idiap.ch/
Written by <NAME> <<EMAIL>>,
"""
import json
import pickle
from copy import deepcopy
from datetime import datetime, timedelta
from functools import lru_cache
from os.path import join
from typing import Any, Callable, List, Optional
from uuid import uuid4
import numpy as np
import pandas as pd # type: ignore
from regions_builder.algorithms import closest_locations # type: ignore
from regions_builder.data_loading import MockWenetSourceLocations # type: ignore
from regions_builder.models import GPSPoint # type: ignore
from regions_builder.models import (
LabelledStayRegion,
LocationPoint,
StayRegion,
UserLocationPoint,
)
from scipy import spatial # type: ignore
from personal_context_builder import config
from personal_context_builder.wenet_realtime_user_db import (
DatabaseRealtimeLocationsHandler,
DatabaseRealtimeLocationsHandlerMock,
)
from personal_context_builder.wenet_user_profile_db import (
DatabaseProfileHandler,
DatabaseProfileHandlerMock,
)
def compare_routines(
source_user: str,
users: List[str],
model: Any,
function: Callable = spatial.distance.cosine,
is_mock: bool = False,
):
"""
compare routines of users
Args:
source_user: the user that will be compared to the users
users: list of users to compare to
model: on which model the comparison should be applied
function: the similarity function to use
is_mock: if true, use mocked data
"""
model_num = config.MAP_MODEL_TO_DB[model]
if is_mock:
db = DatabaseProfileHandlerMock.get_instance(db_index=model_num)
else:
db = DatabaseProfileHandler.get_instance(db_index=model_num)
source_routine = db.get_profile(source_user)
if source_routine is None:
return dict()
routines = [db.get_profile(u) for u in users]
users, routines = zip(
*[
(user, routine)
for (user, routine) in zip(users, routines)
            if routine is not None
]
)
routines_dist = [function(source_routine, r) for r in routines]
res = list(zip(users, routines_dist))
res = sorted(res, key=lambda x: -x[1])
return dict(res)
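# Usage sketch (illustrative; the user ids and model name are hypothetical and must exist in config/DB):
#   scores = compare_routines("user_a", ["user_b", "user_c"], model="some_model", is_mock=True)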
def closest_users(lat: float, lng: float, N: int, is_mock: bool = False):
"""
give the N closest users to the point (lat, lng)
Args:
lat: the latitude
lng: the longitude
N: how many users in output
is_mock: if true, use mocked data
"""
point = GPSPoint(lat, lng)
if is_mock:
db = DatabaseRealtimeLocationsHandlerMock.get_instance()
fake_locations = [
MockWenetSourceLocations._create_fake_locations(str(uuid4()), nb=1)[0]
for _ in range(3000)
]
db.update(fake_locations)
else:
db = DatabaseRealtimeLocationsHandler.get_instance()
users_locations = db.get_all_users().values()
sorted_users_locations = closest_locations(point, users_locations, N=N)
return sorted_users_locations
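# Usage sketch (mocked data; the coordinates are arbitrary):
#   nearby = closest_users(46.5197, 6.6323, N=5, is_mock=True)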
@lru_cache(maxsize=None)
def _loads_regions(regions_mapping_file: str):
"""loads regions mapping file
this function is cached to avoid unnecessary disk accesses
Args:
regions_mapping_file: the filename where the json mapping file is
Return:
dict created from the json file
"""
with open(regions_mapping_file, "r") as f:
return json.load(f)
class BagOfWordsVectorizer(object):
def __init__(
self,
labelled_stay_regions: Optional[List[LabelledStayRegion]],
stay_regions: Optional[List[StayRegion]],
regions_mapping_file: str = config.PCB_REGION_MAPPING_FILE,
):
if labelled_stay_regions is not None:
self._labelled_stay_regions = labelled_stay_regions
else:
self._labelled_stay_regions = []
if stay_regions is not None:
self._stay_regions = stay_regions
else:
self._stay_regions = []
self._regions_mapping = _loads_regions(regions_mapping_file)
self._inner_vector_size = max(self._regions_mapping.values())
@classmethod
def group_by_days(
cls,
locations: List[LocationPoint],
user: str = "unknown",
start_day: str = "00:00:00",
dt_hours: float = 23.5,
freq: str = "30T",
):
"""class method to group the locations by days
Args:
locations: list of location to use
user: user to use to create UserLocationPoint
start_day: "HH:MM:SS" to define the start of a day
dt_hours: how many hours we use from the start_day to define the day
freq: at which freqency the data will be sample
Return:
List of list of location, each sublist is a day
"""
data = [l.__dict__ for l in locations]
df = pd.DataFrame.from_records(data)
df["_pts_t"] = | pd.to_datetime(df["_pts_t"]) | pandas.to_datetime |
#!/usr/bin/env python3
# _*_coding:utf-8 _*_
# @Time :Created on Dec 04 4:39 PM 2018
# @Author :<NAME>
import os,sys
import numpy as np
import pandas as pd
import glob
import math
def compute_time_difference(time_to_seconds_list):
'''calculate the delta time
Input: time_to_seconds_list.
Output: new list for store delta time.'''
save_time_difference = []
for i in range(0, len(time_to_seconds_list) - 1):
save_time_difference.append(abs(time_to_seconds_list[i + 1] - time_to_seconds_list[i]))
save_time_difference.insert(0, 0)
return save_time_difference
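# Example: compute_time_difference([0, 10, 25]) -> [0, 10, 15]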
def compute_speed_difference(Speed_list):
'''Calculate the delta speed.
Input: Speed_list
Output: new list for store delta speed.'''
save_speed_difference = []
for i in range(0, len(Speed_list) - 1):
difference = math.fabs(Speed_list[i + 1] - Speed_list[i])
save_speed_difference.append(difference)
save_speed_difference.insert(0, 0.0)
save_speed_difference1 = [round(j, 2) for j in save_speed_difference]
return save_speed_difference1
def compute_heading_difference(Heading_list):
'''Calculate the delta speed.
Input: Heading_list
Output: new list for store delta heading.'''
save_heading_difference = []
for i in range(0,len(Heading_list)-1):
difference = math.fabs(Heading_list[i+1]-Heading_list[i])
save_heading_difference.append(difference)
save_heading_difference.insert(0,0)
return save_heading_difference
def save_data_into_file(MMSI_list,
Longitude_list,
Latitude_list,
Speed_list,
Heading_list,
Day_list,
time_to_seconds_list,
delta_time,
delta_speed,
delta_heading):
    '''Store the data in a DataFrame and output it to a CSV file.'''
# dictionary for storing the list and transfer it to dataframe
save_dict = {'MMSI':MMSI_list,
'Longitude':Longitude_list,
'Latitude':Latitude_list,
'Speed':Speed_list,
'Heading':Heading_list,
'Day':Day_list,
'time_to_seconds':time_to_seconds_list,
'delta_time':delta_time,
'delta_speed':delta_speed,
'delta_heading':delta_heading}
data = pd.DataFrame(save_dict)
# output the file
name_mmsi = int(data.iloc[0]['MMSI'])
name_day = int(data.iloc[0]['Day'])
data.to_csv(r'C:\Users\LPT-ucesxc0\AIS-Data\Danish_AIS_data_process\aisdk_20180901\%d-%d.csv' % (name_mmsi, name_day),
index=False)
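# Illustrative wiring of the helpers above (a sketch; the per-vessel lists such as
# MMSI_list and time_to_seconds_list are assumed to have been built from the grouped
# AIS records, which happens in a part of the original script not shown here):
#
#     delta_time = compute_time_difference(time_to_seconds_list)
#     delta_speed = compute_speed_difference(Speed_list)
#     delta_heading = compute_heading_difference(Heading_list)
#     save_data_into_file(MMSI_list, Longitude_list, Latitude_list, Speed_list,
#                         Heading_list, Day_list, time_to_seconds_list,
#                         delta_time, delta_speed, delta_heading)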
file_names = glob.glob(r"C:\Users\LPT-ucesxc0\AIS-Data\Danish_AIS_data_process\aisdk_20180901\test\*.csv")
threshold_heading_max_value = 20
for file in file_names:
file_load = pd.read_csv(file)
file_load['Timestamp']=pd.to_datetime(file_load['Timestamp'], format='%d/%m/%Y %H:%M:%S')
file_load['Day'] = pd.to_datetime(file_load['Timestamp']).dt.day
file_load['Hour'] = (pd.to_datetime(file_load['Timestamp']).dt.hour).apply(lambda x:x*3600)
    file_load['Minute'] = (pd.to_datetime(file_load['Timestamp']).dt.minute).apply(lambda x: x*60)
import pandas as pd
import ssl # Used if pandas gives a SSLError
ssl._create_default_https_context = ssl._create_unverified_context
import pprint
from datetime import datetime
class CryptoDataDownload:
url = "https://www.cryptodatadownload.com/cdd/"
# For trades/ticks, not candles
tick_symbol_list = {
'Binance': [
'BTC/USDT', 'ETH/USDT', 'LTC/USDT', 'LINK/USDT',
'BNB/USDT', 'XRP/USDT', 'EOS/USDT', 'TRX/USDT',
'NEO/USDT', 'ETC/USDT', 'XLM/USDT', 'BAT/USDT',
'QTUM/USDT', 'ADA/USDT', 'XMR/USDT', 'ZEC/USDT',
'DASH/USDT', 'BTT/USDT', 'MATIC/USDT', 'PAX/USDT',
'CELR/USDT', 'ONE/USDT'
],
'Bitstamp': [
'BTC/USD','BTC/EUR',
'BCH/USD','BCH/EUR','BCH/BTC',
'ETH/USD','ETH/EUR','ETH/BTC',
'LTC/USD','LTC/EUR','LTC/BTC',
'XRP/USD','XRP/EUR','XRP/BTC'
]
}
@classmethod
def fetch_default(cls,
exchange_name,
base_symbol,
quote_symbol,
timeframe,
include_all_volumes=False):
filename = "{}_{}{}_{}.csv".format(exchange_name,
base_symbol,
quote_symbol,
timeframe)
base_vc = "Volume {}".format(base_symbol)
new_base_vc = "volume_base"
quote_vc = "Volume {}".format(quote_symbol)
new_quote_vc = "volume_quote"
df = pd.read_csv(cls.url + filename, skiprows=1)
df = df[::-1]
df = df.drop(["Symbol"], axis=1)
df = df.rename({base_vc: new_base_vc,
quote_vc: new_quote_vc,
"Date": "date"}, axis=1)
if "d" in timeframe:
df["date"] = pd.to_datetime(df["date"])
elif "h" in timeframe:
df["date"] = pd.to_datetime(df["date"],
format="%Y-%m-%d %I-%p")
df = df.set_index("date")
df.columns = [name.lower() for name in df.columns]
df = df.reset_index()
if not include_all_volumes:
df = df.drop([new_quote_vc], axis=1)
df = df.rename({new_base_vc: "volume"}, axis=1)
return df
return df
@classmethod
def fetch_gemini(cls, base_symbol, quote_symbol, timeframe):
exchange_name = "gemini"
if timeframe.lower() in ['1d', 'd']:
exchange_name = "Gemini"
timeframe = 'd'
elif timeframe.lower() == 'h':
timeframe = timeframe[:-1] + "hr"
filename = "{}_{}{}_{}.csv".format(exchange_name,
base_symbol,
quote_symbol,
timeframe)
df = pd.read_csv(cls.url + filename,
skiprows=1)
df = df[::-1]
df = df.drop(["Symbol", "Unix Timestamp"], axis=1)
df.columns = [name.lower() for name in df.columns]
df = df.set_index("date")
df = df.reset_index()
return df
@classmethod
def fetch_candles(cls,
exchange_name = 'Coinbase',
base_symbol = 'BTC',
quote_symbol = 'USD',
timeframe = '1d',
include_all_volumes = False):
"""
Fetch CSVs of Candle/OHLCV Data from CDD
Only 1d and 1h time frames are available
There may be errors getting data from untested exchanges
Check this link to see all the available exchanges:
https://www.cryptodatadownload.com/data/
Example Usage:
from tensortrade.utils import CryptoDataDownload as cdd
cdd.fetch_candles(exchange_name = 'Coinbase',
base_symbol = 'BTC',
quote_symbol = 'USD',
timeframe = '1h',
include_all_volumes = False)
"""
if 'd' in timeframe.lower():
timeframe = 'd'
if exchange_name.lower() == "gemini":
return cls.fetch_gemini(base_symbol,
quote_symbol,
timeframe)
return cls.fetch_default(exchange_name,
base_symbol,
quote_symbol,
timeframe,
include_all_volumes)
@classmethod
def fetch_trades(cls,
exchange = None,
symbol = 'BTC/USDT',
month = 'aug'):
"""
Fetch CSVs of Tick/Trade Data from CDD
        Quickly gets 300,000+ trades (about 36 MB).
Binance and Bitstamp are the only exchanges available.
Check these links to see all the available pairs:
https://www.cryptodatadownload.com/data/binance/
https://www.cryptodatadownload.com/data/bitstamp/
Example Usage:
from tensortrade.utils.cryptodatadownload import CryptoDataDownload as cdd
cdd.fetch_trades(exchange='binance', # or Bitstamp
symbol='BTC/USDT', # run cdd.all() to see all
month='aug') # Aug - Sep
"""
# Parse date input
months = ['August', 'September', 'October', 'November', 'December', 'January']
month = month.strip(' ').lower() if month not in months else month
for month_ in months:
month_L = month_.lower()
if month_L.startswith(month) or month_L.endswith(month) or month_L.find(month) >= 0:
month = month_
# Parse input symbol
        base = quote = None
        for delim in ['/', '-', '_', ' ']:
try:
base, quote = symbol.strip(' ').upper().split(delim)
break
except:
continue
if not base or not quote:
print(f'Please input a symbol with tick data available')
            pprint.pprint(cls.tick_symbol_list)
return
# Correct USD/T if Exchange is explicit
if exchange:
exchange_ = exchange.lower()
if ('binance'.startswith(exchange_) or
'binance'.endswith(exchange_) or
'binance'.find(exchange_) >= 0) and quote == 'USD':
quote += 'T'
elif ('bitstamp'.startswith(exchange_) or
'bitstamp'.endswith(exchange_) or
'bitstamp'.find(exchange_) >= 0) and quote == 'USDT':
quote = 'USD'
# Get proper exchange name
for ex, ex_data in cls.tick_symbol_list.items():
if base+'/'+quote in ex_data:
exchange = ex
break
year = '2020' if month == 'January' else '2019' # Deduce Year
exch_date = f'{month}{year}_{exchange}' if exchange == 'Binance' else f'{exchange}_{month}{year}'
filename = f'tradeprints/{base}{quote}_{exch_date}_prints.csv'
        df = pd.read_csv(cls.url + filename, skiprows=1 if exchange == 'Binance' else 0)
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import itertools
import statsmodels.api as sm
import sklearn
import sklearn.ensemble
from sklearn.model_selection import StratifiedKFold, cross_val_score, LeaveOneOut, LeavePOut, GridSearchCV
import sklearn.linear_model
import warnings
sns.set(style='darkgrid', palette='muted', font_scale=1.5)
__all__ = ['plotROC', 'plotROCObj',
'plotProb',
'plotLogisticL1Paths',
'plotLogisticL1Vars',
'logisticL1NestedCV',
'plotLogisticL1NestedTuning',
'nestedCVClassifier',
'computeROC',
'computeCVROC',
'captureStandardization',
'smLogisticRegression',
'rocStats',
'compute2x2',
'plotNestedCVParams',
'plotNestedCVScores']
def plotROCObj(**objD):
fprL = [o['fpr'] for o in objD.values()]
tprL = [o['tpr'] for o in objD.values()]
aucL = [o['AUC'].mean() for o in objD.values()]
accL = [o['ACC'].mean() for o in objD.values()]
labelL = objD.keys()
outcomeVar = [o['Yvar'] for o in objD.values()][0]
plotROC(fprL, tprL, aucL, accL, labelL, outcomeVar)
def plotROC(fprL, tprL, aucL=None, accL=None, labelL=None, outcomeVar=''):
if labelL is None and aucL is None and accL is None:
labelL = ['Model %d' % i for i in range(len(fprL))]
else:
if not accL is None:
labelL = ['%s (AUC = %0.2f; ACC = %0.2f)' % (label, auc, acc) for label, auc, acc in zip(labelL, aucL, accL)]
else:
labelL = ['%s (AUC = %0.2f)' % (label, auc) for label, auc in zip(labelL, aucL)]
colors = sns.color_palette('Set1', n_colors=len(fprL))
plt.clf()
plt.gca().set_aspect('equal')
for i, (fpr, tpr, label) in enumerate(zip(fprL, tprL, labelL)):
plt.plot(fpr, tpr, color=colors[i], lw=2, label=label)
plt.plot([0, 1], [0, 1], '--', color='gray', label='Chance')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
if outcomeVar == '':
plt.title('ROC')
else:
plt.title('ROC for %s' % outcomeVar)
plt.legend(loc="lower right", fontsize=10)
plt.show()
def plotProb(outcome, prob, **kwargs):
"""Scatter plot of probabilities for one outcome.
Parameters
----------
outcome : pd.Series
prob : pd.Series
Predicted probabilities returned from computeROC or computeCVROC"""
colors = sns.color_palette('Set1', n_colors=2)
tmp = pd.concat((outcome, prob), join='inner', axis=1)
tmp = tmp.sort_values(by=[outcome.name, 'Prob'])
tmp['x'] = np.arange(tmp.shape[0])
plt.clf()
for color, val in zip(colors, tmp[outcome.name].unique()):
ind = tmp[outcome.name] == val
lab = '%s = %1.0f (%d)' % (outcome.name, val, ind.sum())
plt.scatter(tmp.x.loc[ind], tmp.Prob.loc[ind], label=lab, color=color, **kwargs)
plt.plot([0, tmp.shape[0]], [0.5, 0.5], 'k--', lw=1)
plt.legend(loc='upper left')
plt.ylabel('Predicted Pr(%s)' % outcome.name)
plt.ylim((-0.05, 1.05))
plt.xlim(-1, tmp.shape[0])
plt.show()
def plotLogisticL1Paths(lo):
tmp = lo['paths'].mean(axis=0)
if len(lo['Xvars']) == (tmp.shape[1] - 1):
predVars = np.concatenate((np.array(lo['Xvars']), ['Intercept']))
else:
predVars = np.array(lo['Xvars'])
plt.clf()
plt.plot(np.log10(lo['Cs']), tmp, '-')
yl = plt.ylim()
xl = plt.xlim()
plt.plot(np.log10([lo['optimalCs'].mean()]*2), yl, '--k')
plt.ylabel('Coefficient')
plt.xlabel('Regularization parameter ($log_{10} C$)\n(lower is more regularized)')
topi = np.nonzero(lo['finalResult'].coef_.ravel() != 0)[0]
plt.annotate(s='$N_{vars}=%d$' % len(topi),
xy=(np.log10(lo['finalResult'].C), yl[1]),
ha='left', va='top', size=10)
for i in topi:
a = predVars[i]
cInd = np.where(tmp[:, i] != 0)[0][0]
y = tmp[cInd+2, i]
x = np.log10(lo['Cs'][cInd+2])
plt.annotate(a, xy=(x, y), ha='left', va='center', size=7)
y = tmp[-1, i]
x = np.log10(lo['Cs'][-1])
plt.annotate(a, xy=(x, y), ha='left', va='center', size=7)
plt.show()
def plotLogisticL1NestedTuning(lo):
plt.clf()
colors = sns.color_palette('Set1', n_colors=10)
for outi in range(lo['scores'].shape[0]):
sc = lo['scores'][outi, :, :].mean(axis=0)
plt.plot(np.log10(lo['Cs']), sc, '-', color=colors[outi])
mnmx = sc.min(), sc.max()
plt.plot(np.log10([lo['optimalCs'][outi]]*2), mnmx, '--', color=colors[outi])
plt.xlim(np.log10(lo['Cs'][[0, -1]]))
plt.ylabel('Score (log-likelihood)')
plt.xlabel('Regularization parameter ($log_{10} C$)\n(lower is more regularized)')
plt.title('Regularization tuning in nested CV')
plt.show()
def plotLogisticL1Vars(lo):
pctSelection = 100 * (lo['coefs'] != 0).mean(axis=0)
finalInd = (lo['finalResult'].coef_ != 0).ravel()
x = np.arange(len(pctSelection))
plt.clf()
plt.barh(width=pctSelection[finalInd], bottom=x[finalInd], align='center', color='red', label='Yes')
plt.barh(width=pctSelection[~finalInd], bottom=x[~finalInd], align='center', color='blue', label='No')
plt.yticks(range(len(pctSelection)), lo['Xvars'], size=8)
plt.ylabel('Predictors')
plt.xlabel('% times selected in 10-fold CV')
plt.legend(loc=0, title='Final model?')
def logisticL1NestedCV(df, outcomeVar, predVars, nFolds=10, LPO=None, Cs=10, n_jobs=1):
"""Apply logistic regression with L1-regularization (LASSO) to df.
Uses nested cross-validation framework with inner folds to optimize C
and outer test folds to evaluate performance.
Parameters
----------
df : pd.DataFrame
Must contain outcome and predictor variables.
outcomeVar : str
predVars : ndarray or list
Predictor variables in the model.
nFolds : int
N-fold stratified cross-validation
LPO : int or None
Use Leave-P-Out cross-validation instead of StratifiedNFoldCV
Cs : int or list
Each of the values in Cs describes the inverse of regularization strength.
If Cs is as an int, then a grid of Cs values are chosen in a logarithmic
scale between 1e-4 and 1e4. Smaller values specify stronger regularization.
Returns
-------
results : dict
Contains results as keys below:
fpr: (100, ) average FPR for ROC
tpr: (100, ) average TPR for ROC
AUC: (outerFolds, ) AUC of ROC for each outer test fold
        mAUC: (1, ) AUC of the average ROC
ACC: (outerFolds, ) accuracy across outer test folds
scores: (outerFolds, innerFolds, Cs) log-likelihood for each C across inner and outer CV folds
optimalCs: (outerFolds, ) optimal C from each set of inner CV
finalResult: final fitted model with predict() exposed
prob: (N,) pd.Series of predicted probabilities avg over outer folds
varList: (Nvars, ) list of vars with non-zero coef in final model
Cs: (Cs, ) pre-specified grid of Cs
coefs: (outerFolds, predVars) refit with optimalC in each fold
paths: (outerFolds, Cs, predVars + intercept) avg across inner folds
        Xvars: list of all vars in X
        Yvar: name of outcome variable
N: total number of rows/instances in the model"""
if not isinstance(predVars, list):
predVars = list(predVars)
tmp = df[[outcomeVar] + predVars].dropna()
X,y = tmp[predVars].astype(float), tmp[outcomeVar].astype(float)
if LPO is None:
innerCV = StratifiedKFold(n_splits=nFolds, shuffle=True)
outerCV = StratifiedKFold(n_splits=nFolds, shuffle=True)
else:
innerCV = LeavePOut(LPO)
outerCV = LeavePOut(LPO)
scorerFunc = sklearn.metrics.make_scorer(sklearn.metrics.log_loss,
greater_is_better=False,
needs_proba=True,
needs_threshold=False,
labels=[0, 1])
fpr = np.linspace(0, 1, 100)
tpr = np.nan * np.zeros((fpr.shape[0], nFolds))
acc = np.nan * np.zeros(nFolds)
auc = np.nan * np.zeros(nFolds)
paths = []
coefs = []
probs = []
optimalCs = np.nan * np.zeros(nFolds)
scores = []
for outi, (trainInd, testInd) in enumerate(outerCV.split(X=X, y=y)):
Xtrain, Xtest = X.iloc[trainInd], X.iloc[testInd]
ytrain, ytest = y.iloc[trainInd], y.iloc[testInd]
model = sklearn.linear_model.LogisticRegressionCV(Cs=Cs,
cv=innerCV,
penalty='l1',
solver='liblinear',
scoring=scorerFunc,
refit=True,
n_jobs=n_jobs)
"""With refit = True, the scores are averaged across all folds,
and the coefs and the C that corresponds to the best score is taken,
and a final refit is done using these parameters."""
results = model.fit(X=Xtrain, y=ytrain)
prob = results.predict_proba(Xtest)
class1Ind = np.nonzero(results.classes_ == 1)[0][0]
fprTest, tprTest, _ = sklearn.metrics.roc_curve(ytest, prob[:, class1Ind])
tpr[:, outi] = np.interp(fpr, fprTest, tprTest)
auc[outi] = sklearn.metrics.auc(fprTest, tprTest)
acc[outi] = sklearn.metrics.accuracy_score(ytest, np.round(prob[:, class1Ind]), normalize=True)
optimalCs[outi] = results.C_[0]
scores.append(results.scores_[1])
paths.append(results.coefs_paths_[1])
coefs.append(results.coef_)
probs.append(pd.Series(prob[:, class1Ind], index=Xtest.index))
meanTPR = np.mean(tpr, axis=1)
meanTPR[0], meanTPR[-1] = 0, 1
meanACC = np.mean(acc)
meanAUC = sklearn.metrics.auc(fpr, meanTPR)
meanC = 10**np.mean(np.log10(optimalCs))
paths = np.concatenate([p.mean(axis=0, keepdims=True) for p in paths], axis=0)
scores = np.concatenate([s[None, :, :] for s in scores], axis=0)
"""Compute mean probability over test predictions in CV"""
probS = pd.concat(probs).groupby(level=0).agg(np.mean)
probS.name = 'Prob'
"""Refit all the data with the optimal C for variable selection and
classification of holdout data"""
model = sklearn.linear_model.LogisticRegression(C=meanC,
penalty='l1',
solver='liblinear')
result = model.fit(X=X, y=y)
varList = np.array(predVars)[result.coef_.ravel() != 0].tolist()
rocRes = rocStats(y, np.round(probS))
outD = {'fpr':fpr, # (100, ) average FPR for ROC
'tpr':meanTPR, # (100, ) average TPR for ROC
'AUC':auc, # (outerFolds, ) AUC of ROC for each outer test fold
'mAUC': meanAUC, # (1, ) AUC of the average ROC
'ACC':acc, # (outerFolds, ) accuracy across outer test folds
'mACC':np.mean(acc),
'scores': scores, # (outerFolds, innerFolds, Cs) score for each C across inner and outer CV folds
'optimalCs':optimalCs, # (outerFolds, ) optimal C from each set of inner CV
'C':meanC,
'finalResult': result, # final fitted model with predict() exposed
'prob':probS, # (N,) pd.Series of predicted probabilities avg over outer folds
'varList':varList, # list of vars with non-zero coef in final model
'Cs':Cs, # pre-specified grid of Cs
'coefs':np.concatenate(coefs), # (outerFolds, predVars) refit with optimalC in each fold
'paths':paths, # (outerFolds, Cs, predVars + intercept) avg across inner folds
'Xvars':predVars,
'Yvar':outcomeVar,
'N':tmp.shape[0]}
outD.update(rocRes[['Sensitivity', 'Specificity']].to_dict())
return outD
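# Minimal illustrative call of logisticL1NestedCV (a sketch, not part of the original
# module): synthetic data with a binary outcome and four numeric predictors.
if __name__ == "__main__":
    _rng = np.random.RandomState(0)
    _demo = pd.DataFrame(_rng.randn(200, 4), columns=['x1', 'x2', 'x3', 'x4'])
    _demo['outcome'] = (_demo['x1'] + 0.5 * _demo['x2'] + _rng.randn(200) > 0).astype(int)
    _res = logisticL1NestedCV(_demo, outcomeVar='outcome',
                              predVars=['x1', 'x2', 'x3', 'x4'],
                              nFolds=5, Cs=10)
    print('Mean AUC: %0.2f' % _res['mAUC'], '| selected vars:', _res['varList'])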
def nestedCVClassifier(df, outcomeVar, predVars, model, params={}, nFolds=10, LPO=None, scorer='log_loss', n_jobs=1):
"""Apply model to df in nested cross-validation framework
with inner folds to optimize hyperparameters.
and outer test folds to evaluate performance.
Parameters
----------
df : pd.DataFrame
Must contain outcome and predictor variables.
outcomeVar : str
predVars : ndarray or list
Predictor variables in the model.
model : sklearn model
nFolds : int
N-fold stratified cross-validation
LPO : int or None
Use Leave-P-Out cross-validation instead of StratifiedNFoldCV
params : dict
Keys of model hyperparameters withe values to try in
a grid search.
Returns
-------
results : dict
Contains results as keys below:
        fpr: (100, ) average FPR for ROC
        tpr: (100, ) average TPR for ROC
        AUC: (outerFolds, ) AUC of ROC for each outer test fold
        mAUC: (1, ) AUC of the average ROC
        ACC: (outerFolds, ) accuracy across outer test folds
        mACC: (1, ) mean accuracy across outer folds
        CVres: list of cv_results_ dicts from the inner grid search of each outer fold
        optimalScores: (outerFolds, ) best inner-CV score for each outer fold
        optimalParams: list of best hyperparameter dicts, one per outer fold
        finalParams: hyperparameters (geometric mean across outer folds) used for the final refit
        finalResult: final fitted model with predict() exposed
        prob: (N,) pd.Series of predicted probabilities avg over outer folds
        params: the hyperparameter grid that was searched
        Xvars: list of all vars in X
        Yvar: name of outcome variable
        N: total number of rows/instances in the model"""
if not isinstance(predVars, list):
predVars = list(predVars)
tmp = df[[outcomeVar] + predVars].dropna()
X,y = tmp[predVars].astype(float), tmp[outcomeVar].astype(float)
if LPO is None:
innerCV = StratifiedKFold(n_splits=nFolds, shuffle=True)
outerCV = StratifiedKFold(n_splits=nFolds, shuffle=True)
else:
innerCV = LeavePOut(LPO)
outerCV = LeavePOut(LPO)
if scorer == 'log_loss':
scorerFunc = sklearn.metrics.make_scorer(sklearn.metrics.log_loss,
greater_is_better=False,
needs_proba=True,
needs_threshold=False,
labels=[0, 1])
elif scorer == 'accuracy':
scorerFunc = sklearn.metrics.make_scorer(sklearn.metrics.accuracy_score,
greater_is_better=True,
needs_proba=False,
needs_threshold=False)
fpr = np.linspace(0, 1, 100)
tpr = np.nan * np.zeros((fpr.shape[0], nFolds))
acc = np.nan * np.zeros(nFolds)
auc = np.nan * np.zeros(nFolds)
probs = []
optimalParams = []
optimalScores = []
cvResults = []
for outi, (trainInd, testInd) in enumerate(outerCV.split(X=X, y=y)):
Xtrain, Xtest = X.iloc[trainInd], X.iloc[testInd]
ytrain, ytest = y.iloc[trainInd], y.iloc[testInd]
clf = GridSearchCV(estimator=model, param_grid=params, cv=innerCV, refit=True, scoring=scorerFunc, n_jobs=n_jobs)
clf.fit(Xtrain, ytrain)
cvResults.append(clf.cv_results_)
optimalParams.append(clf.best_params_)
optimalScores.append(clf.best_score_)
prob = clf.predict_proba(Xtest)
fprTest, tprTest, _ = sklearn.metrics.roc_curve(ytest, prob[:, 1])
tpr[:, outi] = np.interp(fpr, fprTest, tprTest)
auc[outi] = sklearn.metrics.auc(fprTest, tprTest)
acc[outi] = sklearn.metrics.accuracy_score(ytest, np.round(prob[:, 1]), normalize=True)
probs.append(pd.Series(prob[:, 1], index=Xtest.index))
meanTPR = np.mean(tpr, axis=1)
meanTPR[0], meanTPR[-1] = 0, 1
meanACC = np.mean(acc)
meanAUC = sklearn.metrics.auc(fpr, meanTPR)
"""Compute mean probability over test predictions in CV"""
probS = pd.concat(probs).groupby(level=0).agg(np.mean)
probS.name = 'Prob'
"""Select "outer" optimal param for final model"""
avgFunc = lambda v: 10**np.mean(np.log10(v))
# avgFunc = lambda v: np.mean(v)
optP = {k:avgFunc([o[k] for o in optimalParams]) for k in optimalParams[0].keys()}
for k,v in optP.items():
setattr(model, k, v)
result = model.fit(X=X, y=y)
rocRes = rocStats(y, np.round(probS))
outD = {'fpr':fpr,
'tpr':meanTPR,
'AUC':auc,
'mAUC': meanAUC,
'mACC':np.mean(acc),
'ACC':acc,
'CVres':cvResults,
'optimalScores': np.array(optimalScores),
'optimalParams': optimalParams,
'finalParams':optP,
'finalResult': result, # final fitted model with predict() exposed
'prob':probS, # (N,) pd.Series of predicted probabilities avg over outer folds
'Xvars':predVars,
'Yvar':outcomeVar,
'N':tmp.shape[0],
'params':params}
outD.update(rocRes[['Sensitivity', 'Specificity']].to_dict())
return outD
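# Minimal illustrative call of nestedCVClassifier (a sketch, not part of the original
# module): a gradient boosting classifier tuned over a small two-parameter grid, with
# both grids log-spaced so the geometric averaging used for finalParams is meaningful.
if __name__ == "__main__":
    _rng = np.random.RandomState(1)
    _demo2 = pd.DataFrame(_rng.randn(150, 3), columns=['a', 'b', 'c'])
    _demo2['y'] = (_demo2['a'] - _demo2['b'] + _rng.randn(150) > 0).astype(int)
    _gbc = sklearn.ensemble.GradientBoostingClassifier(n_estimators=50)
    _grid = {'learning_rate': 10**np.linspace(-3, 0, 4),
             'subsample': 10**np.linspace(-0.3, 0, 4)}
    _out = nestedCVClassifier(_demo2, outcomeVar='y', predVars=['a', 'b', 'c'],
                              model=_gbc, params=_grid, nFolds=5, scorer='accuracy')
    print('Mean AUC: %0.2f' % _out['mAUC'])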
def captureStandardization(df, columns=None):
"""A function factory that creates a function for standardizing all columns
in df with each columns mean and standard deviation."""
if columns is None:
columns = df.columns
stdParams = {}
for c in columns:
mu = df[c].mean()
sigma2 = df[c].std()
stdParams[c] = (mu, sigma2)
def stdFunc(df):
df = df.copy()
for c in columns:
df.loc[:, c] = (df[c] - stdParams[c][0]) / stdParams[c][1]
return df
return stdFunc
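# Illustrative use of the returned closure (a sketch, not part of the original module):
# fit the standardization on training data, then apply the same means/stds to new data.
if __name__ == "__main__":
    _train = pd.DataFrame({'age': [30., 40., 50.], 'bmi': [20., 25., 30.]})
    _new = pd.DataFrame({'age': [45.], 'bmi': [27.]})
    _standardize = captureStandardization(_train)
    print(_standardize(_train))  # each column standardized by its own mean/std
    print(_standardize(_new))    # uses the training means/stds, not those of _new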
def plotNestedCVScores(lo):
scores = _reshape(lo, 'mean_test_score').mean(axis=0)
paramKeys = sorted(lo['params'].keys())
plt.clf()
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
"""plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=0.2, midpoint=0.92))"""
plt.pcolormesh(scores)
plt.xlabel('$log_{10} %s$' % paramKeys[1])
plt.ylabel('$log_{10} %s$' % paramKeys[0])
plt.colorbar()
plt.yticks(np.arange(len(lo['params'][paramKeys[0]]))[::2] + 0.5,
np.round(np.log10(lo['params'][paramKeys[0]])[::2], 2))
plt.xticks(np.arange(len(lo['params'][paramKeys[1]]))[::2] + 0.5,
np.round(np.log10(lo['params'][paramKeys[1]])[::2], 2))
plt.title('Mean score over outer CV')
plt.show()
def _reshape(lo, key):
paramKeys = sorted(lo['params'].keys())
paramL = [len(lo['params'][k]) for k in paramKeys]
tmp = [lo['CVres'][i][key][None, :] for i in range(len(lo['CVres']))]
folds = len(tmp)
tmp = [np.array(t, dtype=float) for t in tmp]
tmp = np.concatenate(tmp, axis=0)
rs = (folds, paramL[0], paramL[1])
return tmp.reshape(rs)
def plotNestedCVParams(lo):
"""Shows variability in the outer folds"""
scores = _reshape(lo, 'mean_test_score')
paramKeys = sorted(lo['params'].keys())
nFolds = scores.shape[0]
colors = sns.color_palette('Set1', n_colors=nFolds)
plt.clf()
ax1 = plt.subplot(1,2,1)
for foldi in range(nFolds):
y = scores.mean(axis=2)[foldi,:]
plt.plot(np.log10(lo['params'][paramKeys[0]]), y, color=colors[foldi])
plt.plot(np.log10([lo['optimalParams'][foldi][paramKeys[0]]]*2), [np.min(y), np.max(y)], '--', color=colors[foldi])
x = np.log10([lo['finalParams'][paramKeys[0]]]*2)
yl = plt.ylim()
plt.plot(x, yl, '--k')
plt.xlabel('$log_{10} %s$' % paramKeys[0])
plt.ylabel('Score')
ax2 = plt.subplot(1,2,2)
for foldi in range(nFolds):
y = scores.mean(axis=1)[foldi,:]
plt.plot(np.log10(lo['params'][paramKeys[1]]), y, color=colors[foldi])
plt.plot(np.log10([lo['optimalParams'][foldi][paramKeys[1]]]*2), [np.min(y), np.max(y)], '--', color=colors[foldi])
x = np.log10([lo['finalParams'][paramKeys[1]]]*2)
yl = plt.ylim()
plt.plot(x, yl, '--k')
plt.xlabel('$log_{10} %s$' % paramKeys[1])
ylim1 = ax1.get_ylim()
ylim2 = ax2.get_ylim()
yl = (min(ylim1[0], ylim2[0]), max(ylim1[1], ylim2[1]))
ax1.set_ylim(yl)
ax2.set_ylim(yl)
plt.show()
def computeROC(df, model, outcomeVar, predVars):
"""Apply model to df and return performance metrics.
Parameters
----------
df : pd.DataFrame
Must contain outcome and predictor variables.
model : sklearn or other model
Model must have fit and predict methods.
outcomeVar : str
predVars : ndarray or list
Predictor variables in the model.
Returns
-------
fpr : np.ndarray
False-positive rate
tpr : np.ndarray
True-positive rate
auc : float
Area under the ROC curve
acc : float
Accuracy score
results : returned by model.fit()
Model results object for test prediction in CV
prob : pd.Series
Predicted probabilities with index from df"""
if not isinstance(predVars, list):
predVars = list(predVars)
tmp = df[[outcomeVar] + predVars].dropna()
try:
results = model.fit(X=tmp[predVars], y=tmp[outcomeVar])
if hasattr(results, 'predict_proba'):
prob = results.predict_proba(tmp[predVars])[:, 1]
else:
prob = results.predict(tmp[predVars])
results.predict_proba = results.predict
fpr, tpr, thresholds = sklearn.metrics.roc_curve(tmp[outcomeVar].values, prob)
acc = sklearn.metrics.accuracy_score(tmp[outcomeVar].values, np.round(prob), normalize=True)
auc = sklearn.metrics.auc(fpr, tpr)
tpr[0], tpr[-1] = 0, 1
except:
print('PerfectSeparationError: %s (N = %d; %d predictors)' % (outcomeVar, tmp.shape[0], len(predVars)))
acc = 1.
fpr = np.zeros(5)
tpr = np.ones(5)
tpr[0], tpr[-1] = 0, 1
prob = df[outcomeVar].values.astype(float)
auc = 1.
results = None
assert acc <= 1
outD = {'fpr':fpr,
'tpr':tpr,
'AUC':auc,
'ACC':acc,
'result':results,
            'probs': pd.Series(prob, index=tmp.index, name='Prob')}
    return outD
#%%
#importing...
import yfinance as yf
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
from datetime import datetime as dt
from sklearn.model_selection import train_test_split
import numpy as np
import matplotlib.pyplot as plt
Scaler = MinMaxScaler(feature_range=(0,1))
from sklearn.linear_model import LinearRegression
#imports for model
from keras.models import Sequential
from keras.layers import LSTM, Dense, Dropout
from sklearn.model_selection import train_test_split
import math
from sklearn.metrics import mean_squared_error,accuracy_score
import sys
#sys.path.append('../DLpart/')
#from PredictStock import Technicals
import datetime
class LSTMPrediction:
def __init__(self,symbol,look_back):
self.symbol = symbol
self.timeframe = look_back
def fetchFromYahoo(self):
yobj = yf.Ticker(self.symbol)
tickerDict = yobj.info
#print(yobj.info.keys())
df = yobj.history(period=self.timeframe)
df = df.drop(['Stock Splits','Dividends'],axis=1)
df.index = pd.to_datetime(df.index)
#print('\n'+tickerDict['longBusinessSummary'])
print(df.tail())
plt.plot(df['Close'])
return df,tickerDict
def get_train_test_dataset(self,df,training_size=0.70,testing_size=0.30):
try:
print('this will return a training and test data')
print('\n'+'Recent Data' + '\n',df.tail())
print('MEAN CLOSE: ',df['Close'].mean())
print('MAX CLOSE: ',df['Close'].max())
print('MIN CLOSE: ',df['Close'].min())
close_price = df.reset_index()['Close']
close_price = Scaler.fit_transform(np.array(close_price).reshape(-1,1))
train_size = int(len(close_price)*training_size)
            test_size = int(len(close_price)*testing_size)
train_data = close_price[0:train_size,:]
test_data = close_price[train_size:len(close_price),:1]
return train_data,test_data
except ValueError:
print('Try a different Scrip')
def prepare_data_for_LSTM_krish(self,dataset,timestep=1):
dataX, dataY = [], []
for i in range(len(dataset)- timestep-1):
record = dataset[i:(i+timestep),0]
dataX.append(record)
dataY.append(dataset[i + timestep, 0])
return np.array(dataX), np.array(dataY)
def prepare_data_for_LSTM_kaggle(self,dataset):
dataX = []
dataY = []
for i in range(60, len(dataset)):
dataX.append(dataset[i-60:i, 0])
dataY.append(dataset[i, 0])
if i<=61 :
print(dataX)
print(dataY)
print()
dataX, dataY = np.array(dataX), np.array(dataY)
return dataX, dataY
def reshape_for_LSTM(self,train_data, test_data):
train_data = train_data.reshape(train_data.shape[0],train_data.shape[1],1)
test_data = test_data.reshape(test_data.shape[0],test_data.shape[1],1)
return train_data, test_data
def create_LSTM_model(self,lstm_layers_after_main=0,lstm_units=32,shape=(),loss='mean_squared_error',optimizer='adam'):
dropout = 0.0
model = Sequential()
model.add(LSTM(lstm_units,return_sequences=True,input_shape=shape))
if lstm_layers_after_main > 2 and lstm_layers_after_main < 5:
dropout = 0.4
elif lstm_layers_after_main <= 2:
dropout = 0.1
for i in range(lstm_layers_after_main):
model.add(LSTM(lstm_units,return_sequences=True))
if i % 2 == 0:
continue
model.add(Dropout(dropout))
model.add(LSTM(lstm_units))
model.add(Dense(1))
print('Dropping out ' + str(dropout*100) + '%')
model.summary()
model.compile(loss=loss,optimizer=optimizer)
return model
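# Illustrative end-to-end sketch for LSTMPrediction (not part of the original file;
# the ticker, look-back window and hyperparameters are arbitrary examples, and the
# fetch step requires network access through yfinance):
if __name__ == "__main__":
    predictor = LSTMPrediction('AAPL', look_back='1y')
    df, info = predictor.fetchFromYahoo()
    train_data, test_data = predictor.get_train_test_dataset(df)
    X_train, y_train = predictor.prepare_data_for_LSTM_krish(train_data, timestep=60)
    X_test, y_test = predictor.prepare_data_for_LSTM_krish(test_data, timestep=60)
    X_train, X_test = predictor.reshape_for_LSTM(X_train, X_test)
    model = predictor.create_LSTM_model(lstm_layers_after_main=2, lstm_units=32,
                                        shape=(X_train.shape[1], 1))
    model.fit(X_train, y_train, epochs=1, batch_size=64, verbose=1)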
class LinearRegPrediction:
def get_preds_lin_reg(self, df, target_col='Close'):
regressor = LinearRegression()
x = df.drop(target_col, axis=1)
y = df[target_col]
xtrain, xtest, ytrain, ytest = train_test_split(x,y,test_size=0.1, random_state=0)
regressor.fit(xtrain, ytrain)
y_pred = regressor.predict(xtest)
ytest = np.array(ytest).reshape(-1,1)
y_pred = np.array(y_pred).reshape(-1,1)
        print(regressor.score(xtest, ytest))
#pred_min = min(y_pred)
#print(pred_min)
valid = pd.DataFrame()
valid['Valid'] = ytest
valid['Prediction'] = y_pred
print('Standard Deviation: ',np.std(y_pred))
print('RMSE: ' , np.sqrt(mean_squared_error(ytest,y_pred)))
class Technicals:
def __init__(self,symbol):
self.symbol = symbol
def EMA(self,timeframe=9,on_field='Close',plot=False, period = "1y", interval = "1d"):
df = yf.Ticker(self.symbol).history(period=period, interval=interval)
df = df.drop(['Stock Splits','Dividends'],axis=1)
df.index = pd.to_datetime(df.index)
EMA = df[on_field].ewm(span=timeframe, adjust=False).mean()
df_new = df[[on_field]]
df_new.reset_index(level=0, inplace=True)
df_new.columns=['ds','y']
if plot == True:
plt.figure(figsize=(16,8))
plt.plot(df_new.ds, df_new.y, label='price')
plt.plot(df_new.ds, EMA, label='EMA line',color='red')
plt.show()
#print('Latest EMA on '+on_field+': ',EMA[len(EMA)-1],'\n')
#return EMA
return EMA[len(EMA)-1]
def MACD(self,on_field='Close',plot=False):
df = yf.Ticker(self.symbol).history(period="1y")
df = df.drop(['Stock Splits','Dividends'],axis=1)
df.index = pd.to_datetime(df.index)
df_new = df[[on_field]]
df_new.reset_index(level=0, inplace=True)
df_new.columns=['ds','y']
#df_new.head()
EMA12 = df_new.y.ewm(span=12, adjust=False).mean()
EMA26 = df_new.y.ewm(span=26, adjust=False).mean()
MACD = EMA12-EMA26
EMA9 = MACD.ewm(span=9, adjust=False).mean()
#plt.plot(df_new.ds, df_new.y, label='price')
if plot == True:
plt.figure(figsize=(16,8))
plt.plot(df_new.ds, MACD, label=self.symbol+' MACD', color='blue')
plt.plot(df_new.ds, EMA9, label=self.symbol+' Signal Line', color='red')
plt.legend(loc='upper left')
plt.show()
#print('\n')
#print(EMA9[len(EMA9)-1], MACD[len(MACD)-1])
if MACD[len(MACD)-1] > MACD[len(MACD)-2]:
return True
else:
return False
# if MACD[len(MACD)-1]-EMA9[len(EMA9)-1] <= 4 and MACD[len(MACD)-1]-EMA9[len(EMA9)-1] >= 0:
# print('ALERT: MACD crossover about to occur, Sell side')
# elif MACD[len(MACD)-1]-EMA9[len(EMA9)-1] >= -4 and MACD[len(MACD)-1]-EMA9[len(EMA9)-1] <= 0:
# print('ALERT: MACD crossover about to occur, Buy side')
# else:
# print('No MACD crossovers')
#return EMA9[len(EMA9)-1], MACD[len(MACD)-1] #latest value of EMA9 line and MACD value
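    # Illustrative usage of the Technicals helpers above (a sketch, not part of the
    # original file; the ticker is an arbitrary example):
    #
    #     tech = Technicals('AAPL')
    #     latest_ema = tech.EMA(timeframe=20)   # latest 20-period EMA of the Close price
    #     macd_rising = tech.MACD()             # True if MACD ticked up versus the previous bar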
def RSI_backUpCode(self, period = 14):
# If the RSI value is over 70, the security is considered overbought, if the value is lower than 30,
# it is considered to be oversold
# Using a conservative approach, sell when the RSI value intersects the overbought line
# buy when the value intersects the oversold line (for blue chip stocks)
yobj = yf.Ticker(self.symbol)
df = yobj.history(period="1y")
df = df.drop(['Stock Splits','Dividends'],axis=1)
df_index = pd.to_datetime(df.index)
change = []
gain = []
loss = []
AvgGain = []
AvgLoss = []
RS = []
RSI = []
df_new = pd.DataFrame(df['Close'], index=df.index)
change.insert(0,0)
#change calc
for i in range(1,len(df_new)):
diff = df_new.Close[i] - df_new.Close[i-1]
change.append(diff)
df_new['Change'] = change
#Gain and loss
for i in range(len(df_new)):
if df_new.Change[i] > 0:
gain.append(df_new.Change[i])
loss.append(0)
elif df_new.Change[i] < 0:
loss.append(abs(df_new.Change[i]))
gain.append(0)
else:
gain.append(0)
loss.append(0)
df_new['Gain'] = gain
df_new['Loss'] = loss
#average gain/loss
averageSum_forgain = 0
averageSum_forloss = 0
averageGain = 0
averageLoss = 0
count = 1
for i in range(0,len(df_new)):
averageSum_forgain = averageSum_forgain + df_new.Gain[i]
averageGain = averageSum_forgain/count
AvgGain.insert(i,round(averageGain,4))
averageSum_forloss = averageSum_forloss + df_new.Loss[i]
averageLoss = averageSum_forloss/count
AvgLoss.insert(i,round(averageLoss,4))
count+=1
if averageGain == 0 or averageLoss == 0:
RS.append(0.0)
else:
RS.append(averageGain/averageLoss)
df_new['AvgGain'] = AvgGain
df_new['AvgLoss'] = AvgLoss
df_new['RS'] = RS
rsi = 0
for i in range(0,len(df_new)):
rsi = 100 - 100/(1+df_new.RS[i])
RSI.append(round(rsi,2))
df_new['RSI'] = RSI
plt.figure(figsize=(16,8))
plt.plot(df_index[len(df_new)-period:len(df_new)],df_new.iloc[len(df_new)-period:len(df_new),-1], label='RSI value')
plt.legend(loc='upper left')
plt.show()
print('\nCurrent RSI value: ' , df_new['RSI'][-1])
Latest_RSI_value = float(df_new['RSI'][-1])
return df_new, Latest_RSI_value
def RSI(self,period = 14, plot = False):
df = yf.Ticker(self.symbol).history(period="1y")
df = df.drop(['Stock Splits','Dividends'],axis=1)
        df_index = pd.to_datetime(df.index)
# Module for plotting and fitting EIS data
# (C) <NAME> 2020
import os
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
import warnings
from .utils import polar_from_complex
# ---------------------
# File loading
# ---------------------
def source_extension(source):
"""Get file extension for source"""
extensions = {'gamry': '.DTA', 'zplot': '.z'}
return extensions[source]
def get_file_source(file):
"""Determine file source"""
try:
with open(file, 'r') as f:
txt = f.read()
except UnicodeDecodeError:
with open(file, 'r', encoding='latin1') as f:
txt = f.read()
# determine format
if txt.split('\n')[0] == 'EXPLAIN':
source = 'gamry'
elif txt.split('\n')[0] == 'ZPLOT2 ASCII':
source = 'zplot'
return source
def get_timestamp(file):
"""Get experiment start timestamp from file"""
try:
with open(file, 'r') as f:
txt = f.read()
except UnicodeDecodeError:
with open(file, 'r', encoding='latin1') as f:
txt = f.read()
source = get_file_source(file)
if source == 'gamry':
date_start = txt.find('DATE')
date_end = txt[date_start:].find('\n') + date_start
date_line = txt[date_start:date_end]
date = date_line.split('\t')[2]
time_start = txt.find('TIME')
time_end = txt[time_start:].find('\n') + time_start
time_line = txt[time_start:time_end]
time = time_line.split('\t')[2]
timestr = date + ' ' + time
dt = datetime.strptime(timestr, "%m/%d/%Y %H:%M:%S")
elif source == 'zplot':
date_start = txt.find('Date')
date_end = txt[date_start:].find('\n') + date_start
date_line = txt[date_start:date_end]
date = date_line.split()[1]
time_start = txt.find('Time')
time_end = txt[time_start:].find('\n') + time_start
time_line = txt[time_start:time_end]
time = time_line.split()[1]
timestr = date + ' ' + time
dt = datetime.strptime(timestr, "%m-%d-%Y %H:%M:%S")
return dt
def read_eis(file, warn=True):
"""read EIS zcurve data from Gamry .DTA file"""
try:
with open(file, 'r') as f:
txt = f.read()
except UnicodeDecodeError:
with open(file, 'r', encoding='latin1') as f:
txt = f.read()
source = get_file_source(file)
if source == 'gamry':
# find start of zcurve data
zidx = txt.find('ZCURVE')
# check for experiment aborted flag
if txt.find('EXPERIMENTABORTED') > -1:
skipfooter = len(txt[txt.find('EXPERIMENTABORTED'):].split('\n')) - 1
else:
skipfooter = 0
# preceding text
pretxt = txt[:zidx]
# zcurve data
ztable = txt[zidx:]
# column headers are next line after ZCURVE TABLE line
header_start = ztable.find('\n') + 1
header_end = header_start + ztable[header_start:].find('\n')
header = ztable[header_start:header_end].split('\t')
# units are next line after column headers
unit_end = header_end + 1 + ztable[header_end + 1:].find('\n')
units = ztable[header_end + 1:unit_end].split('\t')
# determine # of rows to skip by counting line breaks in preceding text
skiprows = len(pretxt.split('\n')) + 2
# if table is indented, ignore empty left column
if header[0] == '':
usecols = header[1:]
else:
usecols = header
# if extra tab at end of data rows, add an extra column to header to match (for Igor data)
first_data_row = ztable[unit_end + 1: unit_end + 1 + ztable[unit_end + 1:].find('\n')]
if first_data_row.split('\t')[-1] == '':
header = header + ['extra_tab']
# read data to DataFrame
# python engine required to use skipfooter
data = pd.read_csv(file, sep='\t', skiprows=skiprows, header=None, names=header, usecols=usecols,
skipfooter=skipfooter, engine='python')
# add timestamp
try:
dt = get_timestamp(file)
time_col = np.intersect1d(['Time', 'T'], data.columns)[
0] # EIS files in Repeating jv-EIS files have column named 'Time' instead of 'T'
data['timestamp'] = [dt + timedelta(seconds=t) for t in data[time_col]]
except Exception:
if warn:
warnings.warn(f'Reading timestamp failed for file {file}')
elif source == 'zplot':
# find start of zcurve data
zidx = txt.find('End Comments')
# preceding text
pretxt = txt[:zidx]
# z data
ztable = txt[zidx:]
# column headers are in line above "End Comments"
header = pretxt.split('\n')[-2].strip().split('\t')
# determine # of rows to skip by counting line breaks in preceding text
skiprows = len(pretxt.split('\n'))
# if table is indented, ignore empty left column
if header[0] == '':
usecols = header[1:]
else:
usecols = header
# read data to DataFrame
data = pd.read_csv(file, sep='\t', skiprows=skiprows, header=None, names=header, usecols=usecols)
# rename to standard format
rename = {"Z'(a)": "Zreal", "Z''(b)": "Zimag", "Freq(Hz)": "Freq"}
data = data.rename(rename, axis=1)
# calculate Zmod and Zphz
Zmod, Zphz = polar_from_complex(data)
data['Zmod'] = Zmod
data['Zphz'] = Zphz
return data
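# Illustrative usage (a sketch, not part of the original module; the file name is a
# hypothetical example — read_eis detects Gamry .DTA and ZPlot .z formats automatically):
#
#     data = read_eis('cell1_600C_EIS.DTA')
#     print(get_timestamp('cell1_600C_EIS.DTA'))   # experiment start as a datetime
#     print(data[['Freq', 'Zreal', 'Zimag', 'Zmod', 'Zphz']].head())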
def read_jv(file, source='gamry'):
"""read from manual jV txt file"""
try:
with open(file, 'r') as f:
txt = f.read()
except UnicodeDecodeError:
with open(file, 'r', encoding='latin1') as f:
txt = f.read()
if source == 'manual':
"""Manually created j-V txt file"""
jv_idx = txt.find('Current')
pretxt = txt[:jv_idx]
skiprows = len(pretxt.split('\n')) - 1
        data = pd.read_csv(file, sep='\t', skiprows=skiprows)
import json
from unittest.mock import MagicMock, patch
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from evalml.exceptions import PipelineScoreError
from evalml.model_understanding.prediction_explanations.explainers import (
abs_error,
cross_entropy,
explain_prediction,
explain_predictions,
explain_predictions_best_worst
)
from evalml.problem_types import ProblemTypes
def compare_two_tables(table_1, table_2):
assert len(table_1) == len(table_2)
for row, row_answer in zip(table_1, table_2):
assert row.strip().split() == row_answer.strip().split()
test_features = [[1], np.ones((15, 1)), pd.DataFrame({"a": [1, 2, 3], "b": [1, 2, 3]}).iloc[0],
pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}), pd.DataFrame()]
@pytest.mark.parametrize("test_features", test_features)
def test_explain_prediction_value_error(test_features):
with pytest.raises(ValueError, match="features must be stored in a dataframe or datatable with exactly one row."):
explain_prediction(None, input_features=test_features, training_data=None)
explain_prediction_answer = """Feature Name Feature Value Contribution to Prediction
=========================================================
d 40.00 +++++
b 20.00 -----""".splitlines()
explain_prediction_regression_dict_answer = {
"explanations": [{
"feature_names": ["d", "b"],
"feature_values": [40, 20],
"qualitative_explanation": ["+++++", "-----"],
"quantitative_explanation": [None, None],
"class_name": None
}]
}
explain_predictions_regression_df_answer = pd.DataFrame({'feature_names': ['d', 'b'],
'feature_values': [40, 20],
'qualitative_explanation': ['+++++', '-----'],
"quantitative_explanation": [None, None]})
explain_prediction_binary_dict_answer = {
"explanations": [{
"feature_names": ["d", "b"],
"feature_values": [40, 20],
"qualitative_explanation": ["+++++", "-----"],
"quantitative_explanation": [None, None],
"class_name": "class_1"
}]
}
explain_prediction_binary_df_answer = pd.DataFrame({
"feature_names": ["d", "b"],
"feature_values": [40, 20],
"qualitative_explanation": ["+++++", "-----"],
"quantitative_explanation": [None, None],
"class_name": ["class_1", "class_1"]
})
explain_prediction_multiclass_answer = """Class: class_0
Feature Name Feature Value Contribution to Prediction
=========================================================
a 10.00 +++++
c 30.00 ---
Class: class_1
Feature Name Feature Value Contribution to Prediction
=========================================================
a 10.00 +++
b 20.00 ++
Class: class_2
Feature Name Feature Value Contribution to Prediction
=========================================================
c 30.00 ---
d 40.00 ---
""".splitlines()
explain_prediction_multiclass_dict_answer = {
"explanations": [
{"feature_names": ["a", "c"],
"feature_values": [10, 30],
"qualitative_explanation": ["+++++", "---"],
"quantitative_explanation": [None, None],
"class_name": "class_0"},
{"feature_names": ["a", "b"],
"feature_values": [10, 20],
"qualitative_explanation": ["+++", "++"],
"quantitative_explanation": [None, None],
"class_name": "class_1"},
{"feature_names": ["c", "d"],
"feature_values": [30, 40],
"qualitative_explanation": ["---", "---"],
"quantitative_explanation": [None, None],
"class_name": "class_2"},
]
}
explain_prediction_multiclass_df_answer = pd.DataFrame({
"feature_names": ["a", "c", "a", "b", "c", "d"],
"feature_values": [10, 30, 10, 20, 30, 40],
"qualitative_explanation": ["+++++", "---", "+++", "++", "---", "---"],
"quantitative_explanation": [None, None, None, None, None, None],
"class_name": ['class_0', 'class_0', 'class_1', 'class_1', 'class_2', 'class_2']
})
@pytest.mark.parametrize("problem_type, output_format, shap_values, normalized_shap_values, answer",
[(ProblemTypes.REGRESSION,
"text",
{"a": [1], "b": [-2.1], "c": [-0.25], "d": [2.3]},
{"a": [0.5], "b": [-2.1], "c": [-0.25], "d": [2.3]},
explain_prediction_answer),
(ProblemTypes.REGRESSION,
"dict",
{"a": [1], "b": [-2.1], "c": [-0.25], "d": [2.3]},
{"a": [0.5], "b": [-2.1], "c": [-0.25], "d": [2.3]},
explain_prediction_regression_dict_answer
),
(ProblemTypes.REGRESSION,
"dataframe",
{"a": [1], "b": [-2.1], "c": [-0.25], "d": [2.3]},
{"a": [0.5], "b": [-2.1], "c": [-0.25], "d": [2.3]},
explain_predictions_regression_df_answer
),
(ProblemTypes.BINARY,
"text",
[{}, {"a": [0.5], "b": [-0.89], "c": [0.33], "d": [0.89]}],
[{}, {"a": [0.5], "b": [-0.89], "c": [-0.25], "d": [0.89]}],
explain_prediction_answer),
(ProblemTypes.BINARY,
"dict",
[{}, {"a": [0.5], "b": [-0.89], "c": [0.33], "d": [0.89]}],
[{}, {"a": [0.5], "b": [-0.89], "c": [-0.25], "d": [0.89]}],
explain_prediction_binary_dict_answer),
(ProblemTypes.BINARY,
"dataframe",
[{}, {"a": [0.5], "b": [-0.89], "c": [0.33], "d": [0.89]}],
[{}, {"a": [0.5], "b": [-0.89], "c": [-0.25], "d": [0.89]}],
explain_prediction_binary_df_answer),
(ProblemTypes.MULTICLASS,
"text",
[{}, {}, {}],
[{"a": [1.1], "b": [0.09], "c": [-0.53], "d": [-0.06]},
{"a": [0.53], "b": [0.24], "c": [-0.15], "d": [-0.22]},
{"a": [0.03], "b": [0.02], "c": [-0.42], "d": [-0.47]}],
explain_prediction_multiclass_answer),
(ProblemTypes.MULTICLASS,
"dict",
[{}, {}, {}],
[{"a": [1.1], "b": [0.09], "c": [-0.53], "d": [-0.06]},
{"a": [0.53], "b": [0.24], "c": [-0.15], "d": [-0.22]},
{"a": [0.03], "b": [0.02], "c": [-0.42], "d": [-0.47]}],
explain_prediction_multiclass_dict_answer),
(ProblemTypes.MULTICLASS,
"dataframe",
[{}, {}, {}],
[{"a": [1.1], "b": [0.09], "c": [-0.53], "d": [-0.06]},
{"a": [0.53], "b": [0.24], "c": [-0.15], "d": [-0.22]},
{"a": [0.03], "b": [0.02], "c": [-0.42], "d": [-0.47]}],
explain_prediction_multiclass_df_answer)
])
@pytest.mark.parametrize("input_type", ["pd", "ww"])
@patch("evalml.model_understanding.prediction_explanations._user_interface._compute_shap_values")
@patch("evalml.model_understanding.prediction_explanations._user_interface._normalize_shap_values")
def test_explain_prediction(mock_normalize_shap_values,
mock_compute_shap_values,
problem_type, output_format, shap_values, normalized_shap_values, answer,
input_type):
mock_compute_shap_values.return_value = shap_values
mock_normalize_shap_values.return_value = normalized_shap_values
pipeline = MagicMock()
pipeline.problem_type = problem_type
pipeline.classes_ = ["class_0", "class_1", "class_2"]
# By the time we call transform, we are looking at only one row of the input data.
pipeline.compute_estimator_features.return_value = ww.DataTable(pd.DataFrame({"a": [10], "b": [20], "c": [30], "d": [40]}))
features = pd.DataFrame({"a": [1], "b": [2]})
training_data = pd.DataFrame()
if input_type == "ww":
features = ww.DataTable(features)
training_data = ww.DataTable(training_data)
table = explain_prediction(pipeline, features, output_format=output_format, top_k=2, training_data=training_data)
if isinstance(table, str):
compare_two_tables(table.splitlines(), answer)
elif isinstance(table, pd.DataFrame):
pd.testing.assert_frame_equal(table, answer)
else:
assert table == answer
def test_error_metrics():
pd.testing.assert_series_equal(abs_error(pd.Series([1, 2, 3]), pd.Series([4, 1, 0])), pd.Series([3, 1, 3]))
pd.testing.assert_series_equal(cross_entropy(pd.Series([1, 0]),
pd.DataFrame({"a": [0.1, 0.2], "b": [0.9, 0.8]})),
pd.Series([-np.log(0.9), -np.log(0.2)]))
input_features_and_y_true = [([[1]], pd.Series([1]), "^Input features must be a dataframe with more than 10 rows!"),
(pd.DataFrame({"a": [1]}), pd.Series([1]), "^Input features must be a dataframe with more than 10 rows!"),
(pd.DataFrame({"a": range(15)}), pd.Series(range(12)), "^Parameters y_true and input_features must have the same number of data points.")
]
@pytest.mark.parametrize("input_features,y_true,error_message", input_features_and_y_true)
def test_explain_predictions_best_worst_value_errors(input_features, y_true, error_message):
with pytest.raises(ValueError, match=error_message):
explain_predictions_best_worst(None, input_features, y_true)
def test_explain_predictions_raises_pipeline_score_error():
with pytest.raises(PipelineScoreError, match="Division by zero!"):
def raise_zero_division(input_features):
raise ZeroDivisionError("Division by zero!")
pipeline = MagicMock()
pipeline.problem_type = ProblemTypes.BINARY
pipeline.predict_proba.side_effect = raise_zero_division
explain_predictions_best_worst(pipeline, pd.DataFrame({"a": range(15)}), pd.Series(range(15)))
def test_explain_predictions_value_errors():
with pytest.raises(ValueError, match="Parameter input_features must be a non-empty dataframe."):
explain_predictions(None, pd.DataFrame())
def test_output_format_checked():
input_features, y_true = pd.DataFrame(data=[range(15)]), pd.Series(range(15))
with pytest.raises(ValueError, match="Parameter output_format must be either text, dict, or dataframe. Received bar"):
explain_predictions(None, input_features, output_format="bar")
with pytest.raises(ValueError, match="Parameter output_format must be either text, dict, or dataframe. Received xml"):
explain_prediction(None, input_features=input_features, training_data=None, output_format="xml")
input_features, y_true = pd.DataFrame(data=range(15)), pd.Series(range(15))
with pytest.raises(ValueError, match="Parameter output_format must be either text, dict, or dataframe. Received foo"):
explain_predictions_best_worst(None, input_features, y_true=y_true, output_format="foo")
regression_best_worst_answer = """Test Pipeline Name
Parameters go here
Best 1 of 1
Predicted Value: 1
Target Value: 2
Absolute Difference: 1.0
Index ID: {index_0}
table goes here
Worst 1 of 1
Predicted Value: 2
Target Value: 3
Absolute Difference: 4.0
Index ID: {index_1}
table goes here
"""
regression_best_worst_answer_dict = {
"explanations": [
{"rank": {"prefix": "best", "index": 1},
"predicted_values": {"probabilities": None, "predicted_value": 1, "target_value": 2,
"error_name": "Absolute Difference", "error_value": 1.},
"explanations": ["explanation_dictionary_goes_here"]},
{"rank": {"prefix": "worst", "index": 1},
"predicted_values": {"probabilities": None, "predicted_value": 2, "target_value": 3,
"error_name": "Absolute Difference", "error_value": 4.},
"explanations": ["explanation_dictionary_goes_here"]}
]
}
regression_best_worst_answer_df = pd.DataFrame({
"feature_names": [0, 0],
"feature_values": [0, 0],
"qualitative_explanation": [0, 0],
"quantitative_explanation": [0, 0],
"rank": [1, 1],
"predicted_value": [1, 2],
"target_value": [2, 3],
"error_name": ["Absolute Difference"] * 2,
"error_value": [1., 4.],
"prefix": ["best", "worst"],
})
no_best_worst_answer = """Test Pipeline Name
Parameters go here
1 of 2
table goes here
2 of 2
table goes here
"""
no_best_worst_answer_dict = {
"explanations": [
{"explanations": ["explanation_dictionary_goes_here"]},
{"explanations": ["explanation_dictionary_goes_here"]}
]
}
no_best_worst_answer_df = pd.DataFrame({
"feature_names": [0, 0],
"feature_values": [0, 0],
"qualitative_explanation": [0, 0],
"quantitative_explanation": [0, 0],
"prediction_number": [0, 1]
})
binary_best_worst_answer = """Test Pipeline Name
Parameters go here
Best 1 of 1
Predicted Probabilities: [benign: 0.05, malignant: 0.95]
Predicted Value: malignant
Target Value: malignant
Cross Entropy: 0.2
Index ID: {index_0}
table goes here
Worst 1 of 1
Predicted Probabilities: [benign: 0.1, malignant: 0.9]
Predicted Value: malignant
Target Value: benign
Cross Entropy: 0.78
Index ID: {index_1}
table goes here
"""
binary_best_worst_answer_dict = {
"explanations": [
{"rank": {"prefix": "best", "index": 1},
"predicted_values": {"probabilities": {"benign": 0.05, "malignant": 0.95},
"predicted_value": "malignant", "target_value": "malignant",
"error_name": "Cross Entropy", "error_value": 0.2},
"explanations": ["explanation_dictionary_goes_here"]},
{"rank": {"prefix": "worst", "index": 1},
"predicted_values": {"probabilities": {"benign": 0.1, "malignant": 0.9},
"predicted_value": "malignant", "target_value": "benign",
"error_name": "Cross Entropy", "error_value": 0.78},
"explanations": ["explanation_dictionary_goes_here"]}
]
}
binary_best_worst_answer_df = pd.DataFrame({
"feature_names": [0, 0],
"feature_values": [0, 0],
"qualitative_explanation": [0, 0],
"quantitative_explanation": [0, 0],
"rank": [1, 1],
"prefix": ["best", "worst"],
"label_benign_probability": [0.05, 0.1],
"label_malignant_probability": [0.95, 0.9],
"predicted_value": ["malignant", "malignant"],
"target_value": ["malignant", "benign"],
"error_name": ["Cross Entropy"] * 2,
"error_value": [0.2, 0.78]
})
multiclass_table = """Class: setosa
table goes here
Class: versicolor
table goes here
Class: virginica
table goes here"""
multiclass_best_worst_answer = """Test Pipeline Name
Parameters go here
Best 1 of 1
Predicted Probabilities: [setosa: 0.8, versicolor: 0.1, virginica: 0.1]
Predicted Value: setosa
Target Value: setosa
Cross Entropy: 0.15
Index ID: {{index_0}}
{multiclass_table}
Worst 1 of 1
Predicted Probabilities: [setosa: 0.2, versicolor: 0.75, virginica: 0.05]
Predicted Value: versicolor
Target Value: versicolor
Cross Entropy: 0.34
Index ID: {{index_1}}
{multiclass_table}
""".format(multiclass_table=multiclass_table)
multiclass_best_worst_answer_dict = {
"explanations": [
{"rank": {"prefix": "best", "index": 1},
"predicted_values": {"probabilities": {"setosa": 0.8, "versicolor": 0.1, "virginica": 0.1},
"predicted_value": "setosa", "target_value": "setosa",
"error_name": "Cross Entropy", "error_value": 0.15},
"explanations": ["explanation_dictionary_goes_here"]},
{"rank": {"prefix": "worst", "index": 1},
"predicted_values": {"probabilities": {"setosa": 0.2, "versicolor": 0.75, "virginica": 0.05},
"predicted_value": "versicolor", "target_value": "versicolor",
"error_name": "Cross Entropy", "error_value": 0.34},
"explanations": ["explanation_dictionary_goes_here"]}
]
}
multiclass_best_worst_answer_df = pd.DataFrame({
"feature_names": [0, 0],
"feature_values": [0, 0],
"qualitative_explanation": [0, 0],
"quantitative_explanation": [0, 0],
"rank": [1, 1],
"prefix": ["best", "worst"],
"label_setosa_probability": [0.8, 0.2],
"label_versicolor_probability": [0.1, 0.75],
"label_virginica_probability": [0.1, 0.05],
"predicted_value": ["setosa", "versicolor"],
"target_value": ["setosa", "versicolor"],
"error_name": ["Cross Entropy"] * 2,
"error_value": [0.15, 0.34]
})
multiclass_no_best_worst_answer = """Test Pipeline Name
Parameters go here
1 of 2
{multiclass_table}
2 of 2
{multiclass_table}
""".format(multiclass_table=multiclass_table)
@pytest.mark.parametrize("problem_type,output_format,answer,explain_predictions_answer,custom_index",
[(ProblemTypes.REGRESSION, "text", regression_best_worst_answer, no_best_worst_answer, [0, 1]),
(ProblemTypes.REGRESSION, "dict", regression_best_worst_answer_dict, no_best_worst_answer_dict, [0, 1]),
(ProblemTypes.REGRESSION, "dataframe", regression_best_worst_answer_df, no_best_worst_answer_df, [0, 1]),
(ProblemTypes.REGRESSION, "text", regression_best_worst_answer, no_best_worst_answer, [4, 23]),
(ProblemTypes.REGRESSION, "dict", regression_best_worst_answer_dict, no_best_worst_answer_dict, [4, 10]),
(ProblemTypes.REGRESSION, "dataframe", regression_best_worst_answer_df, no_best_worst_answer_df, [4, 10]),
(ProblemTypes.REGRESSION, "text", regression_best_worst_answer, no_best_worst_answer, ["foo", "bar"]),
(ProblemTypes.REGRESSION, "dict", regression_best_worst_answer_dict, no_best_worst_answer_dict, ["foo", "bar"]),
(ProblemTypes.REGRESSION, "dataframe", regression_best_worst_answer_df, no_best_worst_answer_df, ["foo", "bar"]),
(ProblemTypes.BINARY, "text", binary_best_worst_answer, no_best_worst_answer, [0, 1]),
(ProblemTypes.BINARY, "dict", binary_best_worst_answer_dict, no_best_worst_answer_dict, [0, 1]),
(ProblemTypes.BINARY, "dataframe", binary_best_worst_answer_df, no_best_worst_answer_df, [0, 1]),
(ProblemTypes.BINARY, "text", binary_best_worst_answer, no_best_worst_answer, [7, 11]),
(ProblemTypes.BINARY, "dict", binary_best_worst_answer_dict, no_best_worst_answer_dict, [7, 11]),
(ProblemTypes.BINARY, "dataframe", binary_best_worst_answer_df, no_best_worst_answer_df, [7, 11]),
(ProblemTypes.BINARY, "text", binary_best_worst_answer, no_best_worst_answer, ["first", "second"]),
(ProblemTypes.BINARY, "dict", binary_best_worst_answer_dict, no_best_worst_answer_dict, ["first", "second"]),
(ProblemTypes.BINARY, "dataframe", binary_best_worst_answer_df, no_best_worst_answer_df, ["first", "second"]),
(ProblemTypes.MULTICLASS, "text", multiclass_best_worst_answer, multiclass_no_best_worst_answer, [0, 1]),
(ProblemTypes.MULTICLASS, "dict", multiclass_best_worst_answer_dict, no_best_worst_answer_dict, [0, 1]),
(ProblemTypes.MULTICLASS, "dataframe", multiclass_best_worst_answer_df, no_best_worst_answer_df, [0, 1]),
(ProblemTypes.MULTICLASS, "text", multiclass_best_worst_answer, multiclass_no_best_worst_answer, [19, 103]),
(ProblemTypes.MULTICLASS, "dict", multiclass_best_worst_answer_dict, no_best_worst_answer_dict, [17, 235]),
(ProblemTypes.MULTICLASS, "dataframe", multiclass_best_worst_answer_df, no_best_worst_answer_df, [17, 235]),
(ProblemTypes.MULTICLASS, "text", multiclass_best_worst_answer, multiclass_no_best_worst_answer, ["2020-10", "2020-11"]),
(ProblemTypes.MULTICLASS, "dict", multiclass_best_worst_answer_dict, no_best_worst_answer_dict, ["2020-15", "2020-15"]),
(ProblemTypes.MULTICLASS, "dataframe", multiclass_best_worst_answer_df, no_best_worst_answer_df, ["2020-15", "2020-15"]),
])
@patch("evalml.model_understanding.prediction_explanations.explainers.DEFAULT_METRICS")
@patch("evalml.model_understanding.prediction_explanations._user_interface._make_single_prediction_shap_table")
def test_explain_predictions_best_worst_and_explain_predictions(mock_make_table, mock_default_metrics,
problem_type, output_format, answer,
explain_predictions_answer, custom_index):
if output_format == "text":
mock_make_table.return_value = "table goes here"
elif output_format == "dataframe":
shap_table = pd.DataFrame({
"feature_names": [0],
"feature_values": [0],
"qualitative_explanation": [0],
"quantitative_explanation": [0],
})
# Use side effect so that we always get a new copy of the dataframe
mock_make_table.side_effect = lambda *args, **kwargs: shap_table.copy()
else:
mock_make_table.return_value = {"explanations": ["explanation_dictionary_goes_here"]}
pipeline = MagicMock()
pipeline.parameters = "Parameters go here"
input_features = pd.DataFrame({"a": [3, 4]}, index=custom_index)
pipeline.problem_type = problem_type
pipeline.name = "Test Pipeline Name"
def _add_custom_index(answer, index_best, index_worst, output_format):
if output_format == "text":
answer = answer.format(index_0=index_best, index_1=index_worst)
elif output_format == "dataframe":
col_name = "prefix" if "prefix" in answer.columns else "rank"
n_repeats = answer[col_name].value_counts().tolist()[0]
answer['index_id'] = [index_best] * n_repeats + [index_worst] * n_repeats
else:
answer["explanations"][0]["predicted_values"]["index_id"] = index_best
answer["explanations"][1]["predicted_values"]["index_id"] = index_worst
return answer
if problem_type == ProblemTypes.REGRESSION:
abs_error_mock = MagicMock(__name__="abs_error")
abs_error_mock.return_value = pd.Series([4., 1.], dtype="float64")
mock_default_metrics.__getitem__.return_value = abs_error_mock
pipeline.predict.return_value = ww.DataColumn(pd.Series([2, 1]))
y_true = pd.Series([3, 2], index=custom_index)
answer = _add_custom_index(answer, index_best=custom_index[1],
index_worst=custom_index[0], output_format=output_format)
elif problem_type == ProblemTypes.BINARY:
pipeline.classes_.return_value = ["benign", "malignant"]
cross_entropy_mock = MagicMock(__name__="cross_entropy")
mock_default_metrics.__getitem__.return_value = cross_entropy_mock
cross_entropy_mock.return_value = pd.Series([0.2, 0.78])
pipeline.predict_proba.return_value = ww.DataTable(pd.DataFrame({"benign": [0.05, 0.1], "malignant": [0.95, 0.9]}))
pipeline.predict.return_value = ww.DataColumn(pd.Series(["malignant"] * 2))
y_true = pd.Series(["malignant", "benign"], index=custom_index)
answer = _add_custom_index(answer, index_best=custom_index[0],
index_worst=custom_index[1], output_format=output_format)
else:
# Multiclass text output is formatted slightly different so need to account for that
if output_format == "text":
mock_make_table.return_value = multiclass_table
pipeline.classes_.return_value = ["setosa", "versicolor", "virginica"]
cross_entropy_mock = MagicMock(__name__="cross_entropy")
mock_default_metrics.__getitem__.return_value = cross_entropy_mock
cross_entropy_mock.return_value = pd.Series([0.15, 0.34])
pipeline.predict_proba.return_value = ww.DataTable(pd.DataFrame({"setosa": [0.8, 0.2], "versicolor": [0.1, 0.75],
"virginica": [0.1, 0.05]}))
pipeline.predict.return_value = ww.DataColumn(pd.Series(["setosa", "versicolor"]))
y_true = pd.Series(["setosa", "versicolor"], index=custom_index)
answer = _add_custom_index(answer, index_best=custom_index[0],
index_worst=custom_index[1], output_format=output_format)
best_worst_report = explain_predictions_best_worst(pipeline, input_features, y_true=y_true,
num_to_explain=1, output_format=output_format)
if output_format == "text":
compare_two_tables(best_worst_report.splitlines(), answer.splitlines())
elif output_format == "dataframe":
# Check dataframes equal without caring about column order
assert sorted(best_worst_report.columns.tolist()) == sorted(answer.columns.tolist())
pd.testing.assert_frame_equal(best_worst_report, answer[best_worst_report.columns])
else:
assert best_worst_report == answer
report = explain_predictions(pipeline, input_features, output_format=output_format,
training_data=input_features)
if output_format == "text":
compare_two_tables(report.splitlines(), explain_predictions_answer.splitlines())
elif output_format == "dataframe":
assert report.columns.tolist() == explain_predictions_answer.columns.tolist()
pd.testing.assert_frame_equal(report, explain_predictions_answer[report.columns])
else:
assert report == explain_predictions_answer
regression_custom_metric_answer = """Test Pipeline Name
Parameters go here
Best 1 of 1
Predicted Value: 1
Target Value: 2
sum: 3
Index ID: 1
table goes here
Worst 1 of 1
Predicted Value: 2
Target Value: 3
sum: 5
Index ID: 0
table goes here
"""
regression_custom_metric_answer_dict = {
"explanations": [
{"rank": {"prefix": "best", "index": 1},
"predicted_values": {"probabilities": None, "predicted_value": 1, "target_value": 2,
"error_name": "sum", "error_value": 3,
"index_id": 1},
"explanations": ["explanation_dictionary_goes_here"]},
{"rank": {"prefix": "worst", "index": 1},
"predicted_values": {"probabilities": None, "predicted_value": 2, "target_value": 3,
"error_name": "sum", "error_value": 5,
"index_id": 0},
"explanations": ["explanation_dictionary_goes_here"]}
]
}
@pytest.mark.parametrize("output_format,answer",
[("text", regression_custom_metric_answer),
("dict", regression_custom_metric_answer_dict)])
@patch("evalml.model_understanding.prediction_explanations._user_interface._make_single_prediction_shap_table")
def test_explain_predictions_best_worst_custom_metric(mock_make_table, output_format, answer):
mock_make_table.return_value = "table goes here" if output_format == "text" else {"explanations": ["explanation_dictionary_goes_here"]}
pipeline = MagicMock()
pipeline.parameters = "Parameters go here"
input_features = pd.DataFrame({"a": [5, 6]})
pipeline.problem_type = ProblemTypes.REGRESSION
pipeline.name = "Test Pipeline Name"
pipeline.predict.return_value = ww.DataColumn(pd.Series([2, 1]))
y_true = pd.Series([3, 2])
def sum(y_true, y_pred):
return y_pred + y_true
best_worst_report = explain_predictions_best_worst(pipeline, input_features, y_true=y_true,
num_to_explain=1, metric=sum, output_format=output_format)
if output_format == "text":
compare_two_tables(best_worst_report.splitlines(), regression_custom_metric_answer.splitlines())
else:
assert best_worst_report == answer
@pytest.mark.parametrize("problem_type", [ProblemTypes.REGRESSION, ProblemTypes.BINARY, ProblemTypes.MULTICLASS])
def test_json_serialization(problem_type, X_y_regression, linear_regression_pipeline_class,
X_y_binary, logistic_regression_binary_pipeline_class,
X_y_multi, logistic_regression_multiclass_pipeline_class):
if problem_type == problem_type.REGRESSION:
X, y = X_y_regression
y = pd.Series(y)
pipeline = linear_regression_pipeline_class(parameters={"Linear Regressor": {"n_jobs": 1}})
elif problem_type == problem_type.BINARY:
X, y = X_y_binary
y = pd.Series(y)
import pandas as pd
from fbprophet import Prophet
from fbprophet.plot import plot_plotly, plot_components_plotly
from sklearn.metrics import mean_squared_error
from math import sqrt
# Reading the parquet files
df_customer=pd.read_parquet('gs://stack-labs-list/processing/df_customer')
df_geolocation=pd.read_parquet('gs://stack-labs-list/processing/df_geolocation')
df_order_items=pd.read_parquet('gs://stack-labs-list/processing/df_order_items')
df_order_payments = pd.read_parquet('gs://stack-labs-list/processing/df_order_payments')
def concat_networks(p_in_dir_data
, p_in_dir_pred
, p_out_file
, file_suffix
, flag_matrix
, p_in_reg
, p_in_target
, flag_method
, l_p_in_net
, nbr_fold
):
from pandas import read_csv, concat, DataFrame, pivot_table
from json import load
if flag_method == 'with_and_without_de':
df_net_with_de = read_csv(l_p_in_net[0], header=None, sep='\t')
df_net_with_de.index = [(reg, target) for reg, target in zip(list(df_net_with_de.iloc[:, 0])
, list(df_net_with_de.iloc[:, 1]))]
df_net_without_de = read_csv(l_p_in_net[1], header=None, sep='\t')
df_net_without_de.index = [(reg, target) for reg, target in zip(list(df_net_without_de.iloc[:, 0]), list(df_net_without_de.iloc[:, 1]))]
# remove edges that were predicted using DE network
df_net_without_de_filtered = df_net_without_de.loc[~df_net_without_de.index.isin(df_net_with_de.index), :]
df_net_all = concat([df_net_with_de, df_net_without_de_filtered], axis='index')
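# Toy illustration (standalone, hypothetical data) of the filtering step above: edges
# already present in the "with DE" network are dropped from the "without DE" network
# before the two are concatenated, using (regulator, target) tuples as the index.
def _example_edge_filtering():
    from pandas import DataFrame, concat
    with_de = DataFrame({0: ["r1", "r2"], 1: ["t1", "t2"], 2: [0.9, 0.8]})
    with_de.index = [(reg, target) for reg, target in zip(with_de[0], with_de[1])]
    without_de = DataFrame({0: ["r2", "r3"], 1: ["t2", "t3"], 2: [0.7, 0.6]})
    without_de.index = [(reg, target) for reg, target in zip(without_de[0], without_de[1])]
    # keep only edges not already predicted with the DE network
    filtered = without_de.loc[~without_de.index.isin(with_de.index), :]
    return concat([with_de, filtered], axis='index')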
import time
import geocoder
from textblob import TextBlob
import sys
import tweepy
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os
import nltk
import pycountry
import re
import string
# from wordcloud import WordCloud, STOPWORDS
from PIL import Image
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from langdetect import detect
from nltk.stem import SnowballStemmer
from sklearn.feature_extraction.text import CountVectorizer
import random
nltk.downloader.download('vader_lexicon')
# beareer = AAAAAAAAAAAAAAAAAAAAABBQPQEAAAAAmsn7HGaleQTElAetFKdr7W%2BPdCk%3DAgEYLIUXSKpBGYs19PIgSAov1C5ypX6xxFM1QmAnv8JoTete1j
# Authentication
consumerKey = "W3cEabcwQkp7evTrmP5XoHASj"
consumerSecret = "<KEY>"
accessToken = "<KEY>"
accessTokenSecret = "<KEY>"
auth = tweepy.OAuthHandler(consumerKey, consumerSecret)
auth.set_access_token(accessToken, accessTokenSecret)
api = tweepy.API(auth)
def percentage(part, whole):
return 100 * float(part) / float(whole)
loc = "india"#sys.argv[1] # location as argument variable
g = geocoder.osm(loc) # getting object that has location's latitude and longitude
closest_loc = api.trends_closest(g.lat, g.lng, lang="en")
trends = api.trends_place(closest_loc[0]['woeid'])
keyword = trends[0]["trends"][random.randint(1, len(trends[0]["trends"]))]["name"]#"#dogetothemoon" # input("Please enter keyword or hashtag to search: ")
noOfTweet = 100 # int(input ("Please enter how many tweets to analyze: "))
tweets = tweepy.Cursor(api.search, q=keyword, lang="en", tweet_mode='extended').items(noOfTweet)
positive = 0
negative = 0
neutral = 0
polarity = 0
tweet_list = []
neutral_list = []
negative_list = []
positive_list = []
for tweet in tweets:
tweet_list.append(tweet.full_text)
analysis = TextBlob(tweet.full_text)
score = SentimentIntensityAnalyzer().polarity_scores(tweet.full_text)
neg = score['neg']
neu = score['neu']
pos = score['pos']
comp = score['compound']
polarity += analysis.sentiment.polarity
if neg > pos:
negative_list.append(tweet.full_text)
negative += 1
elif pos > neg:
positive_list.append(tweet.full_text)
positive += 1
elif pos == neg:
neutral_list.append(tweet.full_text)
neutral += 1
positive = percentage(positive, noOfTweet)
negative = percentage(negative, noOfTweet)
neutral = percentage(neutral, noOfTweet)
polarity = percentage(polarity, noOfTweet)
positive = format(positive, '.1f')
negative = format(negative, '.1f')
neutral = format(neutral, '.1f')
tweet_list = pd.DataFrame(tweet_list)
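# Hedged continuation sketch (not part of the original script): wrap the other sentiment
# buckets in DataFrames as well and print the overall split computed above. The "tweet"
# column name is an arbitrary choice for illustration.
neutral_list = pd.DataFrame(neutral_list, columns=["tweet"])
negative_list = pd.DataFrame(negative_list, columns=["tweet"])
positive_list = pd.DataFrame(positive_list, columns=["tweet"])
print("total tweets analysed:", len(tweet_list))
print("positive %:", positive, " negative %:", negative, " neutral %:", neutral)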
import os
import fnmatch
import pandas
def load_config_yml(config_file, individual=False):
# loads a configuration YAML file
#
# input
# config_file: full filepath to YAML (.yml) file
#
# output
# config: Configuration object
import os
import yaml
import yamlordereddictloader
from CPAC.utils import Configuration
try:
config_path = os.path.realpath(config_file)
config_dict = yaml.safe_load(open(config_path, 'r'))
config = Configuration(config_dict)
except Exception as e:
err = "\n\n[!] CPAC says: Could not load or read the configuration " \
"YAML file:\n%s\nDetails: %s\n\n" % (config_file, e)
raise Exception(err)
if individual:
config.pipeline_setup['log_directory']['path'] = os.path.abspath(config.pipeline_setup['log_directory']['path'])
config.pipeline_setup['working_directory']['path'] = os.path.abspath(config.pipeline_setup['working_directory']['path'])
config.pipeline_setup['output_directory']['path'] = os.path.abspath(config.pipeline_setup['output_directory']['path'])
config.pipeline_setup['crash_log_directory']['path'] = os.path.abspath(config.pipeline_setup['crash_log_directory']['path'])
return config
def load_text_file(filepath, label="file"):
# loads a text file and returns the lines in a list
#
# input
# filepath: full filepath to the text file
#
# output
# lines_list: list of lines from text file
if not filepath.endswith(".txt"):
err = "\n\n[!] CPAC says: The %s should be a text file (.txt).\n" \
"Path provided: %s\n\n" % (label, filepath)
raise Exception(err)
try:
with open(filepath,"r") as f:
lines_list = f.readlines()
except Exception as e:
err = "\n\n[!] CPAC says: Could not load or read the %s:\n%s\n" \
"Details: %s\n\n" % (label, filepath, e)
raise Exception(err)
# get rid of those \n's that love to show up everywhere
lines_list = [i.rstrip("\n") for i in lines_list]
return lines_list
def grab_pipeline_dir_subs(pipeline_dir, ses=False):
import os
inclusion_list = []
if ses:
pipeline_list = [x for x in os.listdir(pipeline_dir) if os.path.isdir(os.path.join(pipeline_dir, x))]
else:
pipeline_list = [x.split('_')[0] for x in os.listdir(pipeline_dir) if os.path.isdir(os.path.join(pipeline_dir, x))]
for sub_id in pipeline_list:
if sub_id not in inclusion_list:
inclusion_list.append(sub_id)
inclusion_list = sorted(inclusion_list)
return inclusion_list
def read_pheno_csv_into_df(pheno_csv, id_label=None):
"""Read the phenotypic file CSV or TSV into a Pandas DataFrame."""
import pandas as pd
with open(pheno_csv, "r") as f:
if id_label:
if '.tsv' in pheno_csv or '.TSV' in pheno_csv:
pheno_df = pd.read_table(f, dtype={id_label: object})
else:
pheno_df = pd.read_csv(f, dtype={id_label: object})
else:
if '.tsv' in pheno_csv or '.TSV' in pheno_csv:
pheno_df = pd.read_table(f)
else:
pheno_df = pd.read_csv(f)
return pheno_df
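# Minimal usage sketch for read_pheno_csv_into_df; the file name and the id column
# below are hypothetical placeholders, not files shipped with C-PAC.
def _example_read_pheno():
    pheno_df = read_pheno_csv_into_df("pheno.csv", id_label="participant_id")
    # the id column is read as object dtype so leading zeros are preserved
    print(pheno_df.dtypes)
    return pheno_df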
def gather_nifti_globs(pipeline_output_folder, resource_list,
pull_func=False):
# the number of directory levels under each participant's output folder
# can vary depending on what preprocessing strategies were chosen, and
# there may be several output filepaths with varying numbers of directory
# levels
# this parses them quickly while also catching each preprocessing strategy
import os
import glob
import pandas as pd
import pkg_resources as p
from __builtin__ import any as b_any
ext = ".nii"
nifti_globs = []
keys_csv = p.resource_filename('CPAC', 'resources/cpac_outputs.csv')
try:
keys = pd.read_csv(keys_csv)
except Exception as e:
err = "\n[!] Could not access or read the cpac_outputs.csv " \
"resource file:\n{0}\n\nError details {1}\n".format(keys_csv, e)
raise Exception(err)
derivative_list = list(
keys[keys['Derivative'] == 'yes'][keys['Space'] == 'template'][
keys['Values'] == 'z-score']['Resource'])
derivative_list = derivative_list + list(
keys[keys['Derivative'] == 'yes'][keys['Space'] == 'template'][
keys['Values'] == 'z-stat']['Resource'])
if pull_func:
derivative_list = derivative_list + list(
keys[keys['Functional timeseries'] == 'yes']['Resource'])
if len(resource_list) == 0:
err = "\n\n[!] No derivatives selected!\n\n"
raise Exception(err)
# remove any extra /'s
pipeline_output_folder = pipeline_output_folder.rstrip("/")
print("\n\nGathering the output file paths from "
"{0}...".format(pipeline_output_folder))
# this is just to keep the fsl feat config file derivative_list entries
# nice and lean
dirs_to_grab = []
for derivative_name in derivative_list:
for resource_name in resource_list:
if resource_name in derivative_name:
dirs_to_grab.append(derivative_name)
# grab MeanFD_Jenkinson just in case
dirs_to_grab.append("power_params")
for resource_name in dirs_to_grab:
glob_string = os.path.join(pipeline_output_folder, "*",
resource_name, "*", "*")
# get all glob strings that result in a list of paths where every path
# ends with a NIFTI file
prog_string = ".."
while len(glob.glob(glob_string)) != 0:
if b_any(ext in x for x in glob.glob(glob_string)) == True:
nifti_globs.append(glob_string)
glob_string = os.path.join(glob_string, "*")
prog_string = prog_string + "."
print(prog_string)
if len(nifti_globs) == 0:
err = "\n\n[!] No output filepaths found in the pipeline output " \
"directory provided for the derivatives selected!\n\nPipeline " \
"output directory provided: %s\nDerivatives selected:%s\n\n" \
% (pipeline_output_folder, resource_list)
raise Exception(err)
return nifti_globs
def grab_raw_score_filepath(filepath, resource_id):
# this lives in the output path collector
import os
import glob
if "vmhc" in resource_id:
raw_score_path = filepath.replace(resource_id,"vmhc_raw_score")
raw_score_path = raw_score_path.replace(raw_score_path.split("/")[-1],"")
raw_score_path = glob.glob(os.path.join(raw_score_path,"*"))[0]
else:
raw_score_path = filepath.replace("_zstd","")
raw_score_path = raw_score_path.replace("_fisher","")
raw_score_path = raw_score_path.replace("_zstat","")
if "sca_roi_files_to_standard" in resource_id:
sub_folder = raw_score_path.split("/")[-2] + "/"
if "z_score" in sub_folder:
raw_score_path = raw_score_path.replace(sub_folder,"")
elif "sca_tempreg_maps_zstat" in resource_id:
sca_filename = raw_score_path.split("/")[-1]
globpath = raw_score_path.replace(sca_filename, "*")
globpath = os.path.join(globpath, sca_filename)
raw_score_path = glob.glob(globpath)[0]
elif "dr_tempreg_maps" in resource_id:
raw_score_path = raw_score_path.replace("map_z_","map_")
raw_filename = raw_score_path.split("/")[-1]
raw_score_path = raw_score_path.replace(raw_filename,"")
raw_score_path = glob.glob(os.path.join(raw_score_path,"*",raw_filename))[0]
else:
# in case filenames are different between z-standardized and raw
raw_score_path = raw_score_path.replace(raw_score_path.split("/")[-1],"")
try:
raw_score_path = glob.glob(os.path.join(raw_score_path,"*"))[0]
except:
raw_score_path = os.path.join(raw_score_path,"*")
if (raw_score_path is None) or (not os.path.exists(raw_score_path)):
err = "\n\n[!] The filepath for the raw score of " \
"%s can not be found.\nFilepath: %s\n\nThis " \
"is needed for the Measure Mean calculation." \
"\n\n" % (resource_id, raw_score_path)
raise Exception(err)
return raw_score_path
def find_power_params_file(filepath, resource_id, series_id):
import os
try:
power_path = filepath.replace(resource_id, "power_params", 1)
series_id_string = "_scan_%s" % series_id
power_first_half = power_path.split(series_id_string)[0]
power_first_half = os.path.join(power_first_half, series_id_string)
participant_id = power_first_half.split("/")[-3]
except Exception as e:
err = "\n\n[!] Something went wrong with finding the power " \
"parameters file for at least one of the participants.\n\n" \
"Error details: %s\n\n" % e
raise Exception(err)
power_params_file = None
for root, dirs, files in os.walk(power_first_half):
for filename in files:
filepath = os.path.join(root, filename)
if "pow_params.txt" in filepath:
power_params_file = filepath
if not power_params_file:
err = "\n\n[!] Could not find the power parameters file for the " \
"following participant and series..\nParticipant: %s\n" \
"Series: %s\n\nIt should be available here: %s\n\n" \
% (participant_id, series_id, power_first_half)
raise Exception(err)
return power_params_file
def extract_power_params(power_params_lines, power_params_filepath):
# check formatting
if len(power_params_lines) != 2:
err = "\n\n[!] There is something wrong with the formatting of the " \
"power parameters file.\nFilepath: %s\n\n" \
% power_params_filepath
raise Exception(err)
names_list = power_params_lines[0].split(",")
values_list = power_params_lines[1].split(",")
# let's make extra sure
if (values_list[0].replace(" ", "") not in power_params_filepath) or \
(values_list[1].replace(" ", "") not in power_params_filepath):
err = "\n\n[!] There is a mismatch between the contents of the " \
"power parameters file and where it is located!\n" \
"Filepath: %s\n\n" % power_params_filepath
raise Exception(err)
if (names_list[2].replace(" ", "") != "MeanFD_Power") or \
(names_list[3].replace(" ", "") != "MeanFD_Jenkinson") or \
(names_list[-1].replace(" ", "") != "MeanDVARS"):
err = "\n\n[!] There is a mismatch between the power parameters " \
"format and what is expected!!\nFilepath: %s\n\n" \
% power_params_filepath
raise Exception(err)
meanfd_power = values_list[2]
meanfd_jenk = values_list[3]
meandvars = values_list[-1]
return meanfd_power, meanfd_jenk, meandvars
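# Synthetic illustration of extract_power_params: the two lines mimic the expected
# "pow_params.txt" layout (a names row and a values row), and the fake filepath embeds
# the first two values so the consistency checks above pass. All values are made up.
def _example_extract_power_params():
    lines = ["Subject,Scan,MeanFD_Power,MeanFD_Jenkinson,MeanDVARS",
             "sub-01,scan-1,0.12,0.10,1.05"]
    fake_path = "/outputs/sub-01/scan-1/pow_params.txt"
    # returns ("0.12", "0.10", "1.05")
    return extract_power_params(lines, fake_path)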
def create_output_dict_list(nifti_globs, pipeline_output_folder,
resource_list, get_motion=False,
get_raw_score=False, pull_func=False,
derivatives=None, exts=['nii', 'nii.gz']):
import os
import glob
import itertools
import pandas as pd
import pkg_resources as p
if len(resource_list) == 0:
err = "\n\n[!] No derivatives selected!\n\n"
raise Exception(err)
if derivatives is None:
keys_csv = p.resource_filename('CPAC', 'resources/cpac_outputs.csv')
try:
keys = pd.read_csv(keys_csv)
except Exception as e:
err = "\n[!] Could not access or read the cpac_outputs.csv " \
"resource file:\n{0}\n\nError details {1}\n".format(keys_csv, e)
raise Exception(err)
derivatives = list(
keys[keys['Derivative'] == 'yes'][keys['Space'] == 'template'][
keys['Values'] == 'z-score']['Resource'])
derivatives = derivatives + list(
keys[keys['Derivative'] == 'yes'][keys['Space'] == 'template'][
keys['Values'] == 'z-stat']['Resource'])
if pull_func:
derivatives = derivatives + list(keys[keys['Functional timeseries'] == 'yes']['Resource'])
# remove any extra /'s
pipeline_output_folder = pipeline_output_folder.rstrip("/")
print("\n\nGathering the output file paths from "
"{0}...".format(pipeline_output_folder))
# this is just to keep the fsl feat config file derivatives entries
# nice and lean
search_dirs = []
for derivative_name in derivatives:
for resource_name in resource_list:
if resource_name in derivative_name:
search_dirs.append(derivative_name)
'''
search_dirs = [
resource_name
for resource_name in resource_list
if any([resource_name in derivative_name
for derivative_name in derivatives])
]
'''
# grab MeanFD_Jenkinson just in case
search_dirs += ["power_params"]
exts = ['.' + ext.lstrip('.') for ext in exts]
# parse each result of each "valid" glob string
output_dict_list = {}
for root, _, files in os.walk(pipeline_output_folder):
for filename in files:
filepath = os.path.join(root, filename)
if not any(fnmatch.fnmatch(filepath, pattern) for pattern in nifti_globs):
continue
if not any(filepath.endswith(ext) for ext in exts):
continue
relative_filepath = filepath.split(pipeline_output_folder)[1]
filepath_pieces = [_f for _f in relative_filepath.split("/") if _f]
resource_id = filepath_pieces[1]
if resource_id not in search_dirs:
continue
series_id_string = filepath_pieces[2]
strat_info = "_".join(filepath_pieces[3:])[:-len(ext)]
unique_resource_id = (resource_id, strat_info)
if unique_resource_id not in output_dict_list.keys():
output_dict_list[unique_resource_id] = []
unique_id = filepath_pieces[0]
series_id = series_id_string.replace("_scan_", "")
series_id = series_id.replace("_rest", "")
new_row_dict = {}
new_row_dict["participant_session_id"] = unique_id
new_row_dict["participant_id"], new_row_dict["Sessions"] = \
unique_id.split('_')
new_row_dict["Series"] = series_id
new_row_dict["Filepath"] = filepath
print('{0} - {1} - {2}'.format(
unique_id,
series_id,
resource_id
))
if get_motion:
# if we're including motion measures
power_params_file = find_power_params_file(filepath,
resource_id, series_id)
power_params_lines = load_text_file(power_params_file,
"power parameters file")
meanfd_p, meanfd_j, meandvars = \
extract_power_params(power_params_lines,
power_params_file)
new_row_dict["MeanFD_Power"] = meanfd_p
new_row_dict["MeanFD_Jenkinson"] = meanfd_j
new_row_dict["MeanDVARS"] = meandvars
if get_raw_score:
# grab raw score for measure mean just in case
raw_score_path = grab_raw_score_filepath(filepath,
resource_id)
new_row_dict["Raw_Filepath"] = raw_score_path
# unique_resource_id is tuple (resource_id,strat_info)
output_dict_list[unique_resource_id].append(new_row_dict)
return output_dict_list
def create_output_df_dict(output_dict_list, inclusion_list=None):
import pandas as pd
output_df_dict = {}
# unique_resource_id is tuple (resource_id,strat_info)
for unique_resource_id in output_dict_list.keys():
# NOTE: this dataframe reflects what was found in the C-PAC output
# directory for individual-level analysis outputs,
# NOT what is in the pheno file
new_df = pd.DataFrame(output_dict_list[unique_resource_id])
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/7/14 21:21
Desc: China - Hong Kong - macro indicators
https://data.eastmoney.com/cjsj/foreign_8_0.html
"""
import demjson
import pandas as pd
import requests
def marco_china_hk_cpi() -> pd.DataFrame:
"""
Eastmoney - economic data overview - China (Hong Kong) - consumer price index
https://data.eastmoney.com/cjsj/foreign_8_0.html
:return: consumer price index
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "8",
"stat": "0",
"pageNo": "1",
"pageNum": "1",
"_": "1621332091873",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
temp_df['现值'] = pd.to_numeric(temp_df['现值'])
temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date
temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date
return temp_df
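# Illustrative consumer of marco_china_hk_cpi (requires network access to the Eastmoney
# endpoint above); it relies only on the columns assigned in the function.
def _example_marco_china_hk_cpi():
    cpi_df = marco_china_hk_cpi()
    latest = cpi_df.sort_values("时间").iloc[-1]
    print("latest HK CPI:", latest["现值"], "released on", latest["发布日期"])
    return cpi_df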
def marco_china_hk_cpi_ratio() -> pd.DataFrame:
"""
Eastmoney - economic data overview - China (Hong Kong) - CPI year-on-year rate
https://data.eastmoney.com/cjsj/foreign_8_1.html
:return: CPI year-on-year rate
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "8",
"stat": "1",
"pageNo": "1",
"pageNum": "1",
"_": "1621332091873",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
temp_df['现值'] = pd.to_numeric(temp_df['现值'])
temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date
temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date
return temp_df
def marco_china_hk_rate_of_unemployment() -> pd.DataFrame:
"""
Eastmoney - economic data overview - China (Hong Kong) - unemployment rate
https://data.eastmoney.com/cjsj/foreign_8_2.html
:return: unemployment rate
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "8",
"stat": "2",
"pageNo": "1",
"pageNum": "1",
"_": "1621332091873",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
import pandas as pd
import numpy as np
import csv
import matplotlib.pyplot as plt
import datetime as dt
import math
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score, confusion_matrix
import seaborn as sns
from sklearn import tree
import graphviz
from sklearn.naive_bayes import GaussianNB
data = pd.read_csv('ODI-2019-csv.csv', sep=";")
#############################################################################
# CLEAN UP
data.columns = ["time",
"programme",
"exp_ML",
"exp_IR",
"exp_stat",
"exp_DB",
"gender",
"chocolate",
"birthday",
"num_neighbours",
"stand_up",
"DM_competition",
"random_num",
"bedtime",
"good_day_1",
"good_day_2",
"stress_level"]
# COMMA SEP
data = data.stack().str.replace(',','.').unstack()
#DATETIME
data.time = pd.to_datetime(data.time)
#PROGRAMME
data.exp_ML = data.exp_ML.map({"yes": 1, "no": 0})
data.exp_ML = pd.Categorical(data.exp_ML)
data.exp_IR = data.exp_IR.map({"1": 1, "0": 0})
data.exp_stat = data.exp_stat.map({"mu": 1, "sigma": 0})
data.exp_DB = data.exp_DB.map({"ja": 1, "nee": 0})
data.exp_DB = pd.Categorical(data.exp_DB)
data.exp_IR = pd.Categorical(data.exp_IR)
data.exp_stat = pd.Categorical(data.exp_stat)
data.exp_IR.fillna(data.exp_IR.mode(),inplace=True)
data.programme[data.programme.str.contains("AI|Artificial|Intelligence|artificial|intelligence|ai")] = "AI"
data.programme[data.programme.str.contains("CS|Computer Science|cs|Computer science|computer science")] = "CS"
data.programme[data.programme.str.contains("CLS|cls|CLs|Cls|Computational Science")] = "CLS"
data.programme[data.programme.str.contains("Bioinformatics|bioinformatics")] = "Bioinformatics"
data.programme[data.programme.str.contains("business analytics|Business Analytics|Business analystics|BA")] = "BA"
data.programme[data.programme.str.contains("Finance")] = "Finance"
data.programme[data.programme.str.contains("Econometrics|econometrics")] = "Econometrics"
data.programme[data.programme.str.contains("Data Science|Data science|data science")] = "DS"
data.programme = pd.Categorical(data.programme)
#data.programme.value_counts()[(data.programme.value_counts()) != 0].plot(kind="bar")
print(data.programme.value_counts())
# SUBSET PROGRAMME
data["programme_subset"] = np.nan
for idx, row in data.iterrows():
data.programme_subset[idx] = row.programme if row.programme == "AI" \
or row.programme == "CS" else np.nan
data.programme_subset = pd.Categorical(data.programme_subset)
prog_mask = data.programme_subset.notna()
#CHOCOLATE
data.chocolate = pd.Categorical(data.chocolate)
# STRESS LEVEL
pattern_stress = "([0-9]{0,3})"
data.stress_level = data.stress_level.str.extract(pattern_stress, expand=False)
data.stress_level = pd.to_numeric(data.stress_level)
data.stress_level.loc[(data.stress_level > 100)] = 100
print("stress notna", sum(data.stress_level.notna()))
data.stress_level.fillna(data.stress_level.mean(),inplace=True)
labels = ["low", "med", "high"]
data["stress_cat"] = pd.cut(data.stress_level, [0, 33, 66, 101], right=False, labels=labels)
data.stress_cat.value_counts().plot(kind="bar")
plt.savefig("stress_barplot.png")
# RANDOM NUMBER
data.random_num = pd.to_numeric(data.random_num, errors="coerce")
# BED TIME
# problem with different formats (hh:mm and h:mm), at the moment only include hh:mm
#pattern_bed = "((2[0-3]|[01][0-9]):([0,3]?0))$"
pattern_bed = "((0[0-9]|1[0-9]|2[0-3]|[0-9]):[0-5][0-9])$"
#pattern_bed = "(/^([0-9]|0[0-9]|1[0-9]|2[0-3]):[0-5][0-9]$/)"
data.bedtime = data.bedtime.str.extract(pattern_bed)
print("sum na bedtime" , sum(data.bedtime.notna()))
data.bedtime = pd.Categorical(data.bedtime, categories=[u"21:00", u"21:30", u"22:00", u"22:30", u"23:00", u"23:30", u"00:00",
u"00:30", u"01:00", u"01:30", u"02:00", u"02:30", u"03:00", u"03:30",
u"12:30", u"18:00"], ordered=True)
data["bedtime_cat"] = np.nan
data["bedtime_cat"][data.bedtime < u"00:00"] = "before_mid"
data["bedtime_cat"][data.bedtime >= u"00:00"] = "after_midnight"
data.bedtime_cat = data.bedtime_cat.astype("category")
print(data.bedtime_cat)
# GENDER
data.gender = data.gender.astype("category")
# BIRTHDAY
# patter accepts either format dd-mm-yyyy or dd/mm/yyyy
pattern_birthday = "((0[1-9]|[12]\d|3[01])[-/.](0[1-9]|1[0-2])[-/.][12]\d{3})|$"
data.birthday = data.birthday.str.extract(pattern_birthday)
data.birthday = pd.to_datetime(data.birthday, dayfirst=True, errors="coerce")
birthday_mask = data.birthday.notna()
# AGE
data["age"] = 0
now = dt.datetime.now()
for idx,entry in enumerate(data.birthday):
if isinstance(entry, dt.datetime):
data.age[idx] = round((now - entry).days / 365.25)
# NUMBER OF NEIGHBOURS
data.num_neighbours = pd.to_numeric(data.num_neighbours, errors="coerce")
# STANDUP
data.stand_up = pd.Categorical(data.stand_up)
# MONEY DESERVED
data.DM_competition = pd.to_numeric(data.DM_competition, errors="coerce")
####################################################################################################
# PLOTS
#SCATTERPLOT STRESS/RANDOM
#plt.scatter(data.stress_level, data.random_num)
#plt.xlim(0, 100)
#plt.ylim(0, 100)
#HIST STRESS
"""
plt.subplot(1,2,1)
data["stress_level_sqrt"] = 0
for idx, row in data.iterrows():
data.stress_level_sqrt[idx] = math.sqrt(row.stress_level) if row.stress_level > 0 else 0
sns.distplot(data.stress_level_sqrt, hist=True)
plt.xlabel("sqrt(stress_level)")
plt.yticks([])
"""
# AGE STRESS LEVEL PLOT
"""
plt.subplot(2,1,1)
sns.distplot(data.age.dropna(), hist=True)
plt.xlabel("Age)")
plt.subplot(2,1,2)
sns.distplot(data.stress_level, hist=True)
plt.xlabel("stress_level")
plt.gcf().set_size_inches(3,6)
plt.savefig("hists_age_stress.png")
"""
"""
#BARPLOT GENDER
data.gender.value_counts().plot(kind="bar")
plt.show()
"""
"""
#HIST STRESS BY GENDER
data.hist("stress_level", by="gender", range=(0,100))
"""
"""
#BARPLOT BEDTIME
plt.subplot(1,2,1)
data.bedtime.value_counts(sort=False).plot.bar()
plt.gcf().autofmt_xdate()
plt.xlabel("Bedtime")
################################################################# BARPLOT BEDTIME & STRESS
plt.subplot(1,2,2)
sns.boxplot(data.bedtime, data.stress_level)
plt.gcf().autofmt_xdate()
plt.xlabel("Bedtime")
plt.ylabel("Stress level")
plt.gcf().set_size_inches(8,4)
plt.savefig("bedtime.png")
"""
"""
#HIST AGE
print(data.age)
data.age.dropna(inplace=True)
sns.distplot(data.age)
plt.xlabel("Age")
plt.savefig("age_hist.png")
"""
"""
plt.subplot(2,2,1)
sns.countplot(x="exp_ML", hue="programme_subset", data=data)
plt.xlabel("Prior ML course")
plt.subplot(2,2,2)
sns.countplot(x="exp_IR", hue="programme_subset", data=data)
plt.xlabel("Prior IR course")
plt.subplot(2,2,3)
sns.countplot(x="exp_DB", hue="programme_subset", data=data)
plt.xlabel("Prior DB course")
plt.subplot(2,2,4)
sns.countplot(x="exp_stat", hue="programme_subset", data=data)
plt.xlabel("Prior stat course")
plt.savefig("prior_count.png")
"""
############################################################################################
# DESCRIPTIVE STATS
#print(data.describe().round())
print(data.programme.value_counts())
print(data.exp_ML.value_counts())
print(data.exp_DB.value_counts())
print(data.exp_IR.value_counts())
print(data.exp_stat.value_counts())
print(data.stress_cat.value_counts())
print(data.bedtime_cat.value_counts())
print("age mean", data.age.mean())
print("age sd ", data.age.std())
print(data.stress_level.mean())
print(data.stress_level.std())
cordummies = pd.get_dummies(data[["programme_subset", "exp_ML", "exp_IR", "exp_stat", "exp_DB", "stress_cat", "bedtime_cat"]])
cordf = data[["age", "stress_level"]].join(cordummies)
cordf = cordf.loc[prog_mask,]
print(cordf.corr().to_string())
###########################################################################################
# CLASSIFICATION/REGRESSION
##################################################################### TREE
x_tree = pd.get_dummies(data[["exp_ML", "exp_stat", "exp_IR", "exp_DB"]])
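# Hedged continuation sketch (not in the original script): fit a shallow decision tree
# predicting the AI/CS programme subset from the prior-course dummies built above.
# max_depth=3 is an arbitrary illustrative choice.
y_tree = data.programme_subset[prog_mask]
clf_tree = tree.DecisionTreeClassifier(max_depth=3)
clf_tree.fit(x_tree.loc[prog_mask], y_tree)
print("decision tree training accuracy:", clf_tree.score(x_tree.loc[prog_mask], y_tree))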
"""
concavity_automator comports multiple scripts automating concavity constraining method for landscape
"""
import lsdtopytools as lsd
import numpy as np
import numba as nb
import pandas as pd
from matplotlib import pyplot as plt
import sys
import matplotlib
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import math
from lsdtopytools.numba_tools import travelling_salesman_algortihm, remove_outliers_in_drainage_divide
import random
import matplotlib.gridspec as gridspec
from multiprocessing import Pool, current_process
from scipy import spatial,stats
import numba as nb
import copy
from pathlib import Path
import pylab as pl
from scipy.signal import savgol_filter
from scipy.interpolate import interp1d
def norm_by_row(A):
"""
Subfunction used for vectorised normalisation of disorder by the max of each row via np.apply_along_axis
B.G
"""
return A/A.max()
def norm_by_row_by_range(A):
"""
Subfunction used for vectorised normalisation of disorder by the range (max - min) of each row via np.apply_along_axis
B.G
"""
return (A - A.min())/(A.max() - A.min())
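# Quick illustration of how the two normalisers above are applied row-wise with
# np.apply_along_axis (the same pattern used in the post-processing functions below).
def _example_row_normalisation():
    demo = np.array([[1.0, 2.0, 4.0],
                     [10.0, 5.0, 20.0]])
    by_max = np.apply_along_axis(norm_by_row, 1, demo)             # each row divided by its row max
    by_range = np.apply_along_axis(norm_by_row_by_range, 1, demo)  # each row rescaled to [0, 1]
    print(by_max)
    print(by_range)
    return by_max, by_range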
def numfmt(x, pos):
"""
Plotting subfunction to automate tick formatting from metres to kilometres
B.G
"""
s = '{:d}'.format(int(round(x / 1000.0)))
return s
def get_best_bit_and_err_from_Dstar(thetas, medD, fstD, thdD):
"""
Takes ouput from concavity calculation to calculate the best-fit theta and its error
"""
# Calculating the index of minimum medium disorder to get the best-fit
index_of_BF = np.argmin(medD)
# Getting the Dstar value of the best-fit
dstar_val = medD[index_of_BF]
# Getting the acutal best-fit
BF = thetas[index_of_BF]
# Preformatting 2 arrays for calculating the error: I am just interested by the first half for the first error and the second for the second
A = np.copy(fstD)
A[index_of_BF+1:] = 9999
B = np.copy(fstD)
B[:index_of_BF] = 9999
# calculating the error by extracting the closest theta with a Dstar close to the median best fit ones
err = ( thetas[np.abs(A - dstar_val).argmin()] , thetas[np.abs(B - dstar_val).argmin()] )
# REturning a tuple with [0] being the best fit and [1] another tuple f error
return BF,err
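# Minimal synthetic check of get_best_bit_and_err_from_Dstar (illustrative only): a D*
# curve shaped like a parabola centred on theta = 0.45 should return ~0.45 as the best
# fit, with error bounds where the first-quartile curve crosses that minimum value.
# The last argument (thdD) is not used by the function above, so 0 is passed.
def _example_best_fit_from_Dstar():
    thetas_demo = np.arange(0.05, 1.0, 0.025)
    med_demo = (thetas_demo - 0.45) ** 2 + 0.1   # median D* curve
    fst_demo = med_demo - 0.02                   # first-quartile D* curve, slightly lower
    return get_best_bit_and_err_from_Dstar(thetas_demo, med_demo, fst_demo, 0)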
def process_basin(ls, **kwargs):
"""
Main function processing the concavity. It looks a bit convoluted but it is required for clean multiprocessing.
Takes at least one argument: ls, which is a list of arguments
ls[0] -> the number of the basin (heavily used by automatic multiprocessing)
ls[1] -> the X coordinate of the basin outlet
ls[2] -> the Y coordinate of the basin outlet
ls[3] -> area_threshold used for the analysis
ls[4] -> prefix before the number of the basin to read the file input
Also takes optional kwargs arguments:
ignore_numbering: just use the prefix as the name for the DEM
extension: if your extension is not .tif, you can give it here WITHOUT THE DOT
overwrite_dem_name: used if you want to use the function from outside the automations: you need to provide the dem name WITH THE EXTENSION
"""
number = ls[0]
X = ls[1]
Y = ls[2]
area_threshold = ls[3]
prefix = ls[4]
print("Processing basin ", number, " with proc ", current_process())
if("ignore_numbering" not in kwargs):
kwargs["ignore_numbering"] = False
if("extension" not in kwargs):
kwargs["extension"] = "tif"
if("n_tribs_by_combo" not in kwargs):
kwargs["n_tribs_by_combo"] = 4
if(kwargs["ignore_numbering"] == True):
name = prefix
else:
name = prefix + "%s"%(number)
if(kwargs["precipitation_raster"] == ""):
precipitation = False
else:
precipitation = True
# I spent a significant amount of time preprocessing it, see SM
n_rivers = 0
dem_name ="%s.%s"%(name,kwargs["extension"])
if("overwrite_dem_name" in kwargs):
dem_name = kwargs["overwrite_dem_name"]
MD = lsd.LSDDEM(file_name = dem_name, already_preprocessed = True)
# Extracting basins
if(precipitation):
MD.CommonFlowRoutines( ingest_precipitation_raster = kwargs["precipitation_raster"], precipitation_raster_multiplier = 1, discharge = True)
else:
MD.CommonFlowRoutines()
MD.ExtractRiverNetwork( method = "area_threshold", area_threshold_min = area_threshold)
print("River extracted")
MD.DefineCatchment( method="from_XY", X_coords = [X], Y_coords = [Y], coord_search_radius_nodes = 10 )#, X_coords = [X_coordinates_outlets[7]], Y_coords = [Y_coordinates_outlets[7]])
print("CAtchment defined")
MD.GenerateChi(theta = 0.4, A_0 = 1)
print("River_network_generated")
n_rivers = MD.df_base_river.source_key.unique().shape[0]
print("You have", n_rivers, "rivers and",MD.df_base_river.shape[0],"river pixels")
MD.df_base_river.to_feather("%s_rivers.feather"%(name))
print("Starting the movern calculation")
MD.cppdem.calculate_movern_disorder(0.05, 0.025, 38, 1, area_threshold, kwargs["n_tribs_by_combo"])
print("DONE with movern, let's format the output")
OVR_dis = MD.cppdem.get_disorder_dict()[0]
OVR_tested = MD.cppdem.get_disorder_vec_of_tested_movern()
pd.DataFrame({"overall_disorder":OVR_dis, "tested_movern":OVR_tested }).to_feather("%s_overall_test.feather"%(name))
normalizer = MD.cppdem.get_n_pixels_by_combinations()[0]
np.save("%s_disorder_normaliser.npy"%(name), normalizer)
all_disorder = MD.cppdem.get_best_fits_movern_per_BK()
np.save("%s_concavity_tot.npy"%(name), all_disorder[0])
print("Getting results")
results = np.array(MD.cppdem.get_all_disorder_values()[0])
np.save("%s_disorder_tot.npy"%(name), results)
XY = MD.cppdem.query_xy_for_each_basin()["0"]
tdf = pd.DataFrame(XY)
tdf.to_feather("%s_XY.feather"%(name))
return 0
def theta_quick_constrain_single_basin(MD,X_coordinate_outlet = 0, Y_coordinate_outlet = 0, area_threshold = 1500):
"""
Quick disorder-based concavity constraint for a single basin.
Takes an LSDDEM object (MD) with the flow routines already run, plus:
X_coordinate_outlet, Y_coordinate_outlet -> outlet coordinates (list-like, in the raster's coordinate system)
area_threshold -> minimum drainage area (in pixels) used to extract the river network
Returns the best-fit concavities per basin key from MD.cppdem.get_best_fits_movern_per_BK().
"""
# number = ls[0]
# X = ls[1]
# Y = ls[2]
# area_threshold = ls[3]
# prefix = ls[4]
# print("Processing basin ", number, " with proc ", current_process())
# if("ignore_numbering" not in kwargs):
# kwargs["ignore_numbering"] = False
# if("extension" not in kwargs):
# kwargs["extension"] = "tif"
# if("n_tribs_by_combo" not in kwargs):
# kwargs["n_tribs_by_combo"] = 4
# if(kwargs["ignore_numbering"] == True):
# name = prefix
# else:
# name = prefix + "%s"%(number)
# if(kwargs["precipitation_raster"] == ""):
# precipitation = False
# else:
# precipitation = True
# I spent a significant amount of time preprocessing it, see SM
n_rivers = 0
# dem_name ="%s.%s"%(name,kwargs["extension"])
# if("overwrite_dem_name" in kwargs):
# dem_name = kwargs["overwrite_dem_name"]
# MD = lsd.LSDDEM(file_name = dem_name, already_preprocessed = True)
# # Extracting basins
# if(precipitation):
# MD.CommonFlowRoutines( ingest_precipitation_raster = kwargs["precipitation_raster"], precipitation_raster_multiplier = 1, discharge = True)
# else:
# MD.CommonFlowRoutines()
# print("Experimental function (Gailleton et al., submitted), if it crashes restart from a clean LSDDEM object with only the flow routines processed.")
MD.ExtractRiverNetwork( method = "area_threshold", area_threshold_min = area_threshold)
# print("River pre-extracted")
MD.DefineCatchment( method="from_XY", X_coords = X_coordinate_outlet, Y_coords = Y_coordinate_outlet, coord_search_radius_nodes = 10 )#, X_coords = [X_coordinates_outlets[7]], Y_coords = [Y_coordinates_outlets[7]])
# print("CAtchment defined")
MD.GenerateChi(theta = 0.4, A_0 = 1)
# print("River_network_generated")
n_rivers = MD.df_base_river.source_key.unique().shape[0]
print("DEBUG::You have", n_rivers, "rivers and",MD.df_base_river.shape[0],"river pixels \n")
# MD.df_base_river.to_feather("%s_rivers.feather"%(name))
# print("Starting the movern calculation")
MD.cppdem.calculate_movern_disorder(0.05, 0.025, 38, 1, area_threshold, 4)
# print("DONE with movern, let's format the output")
OVR_dis = MD.cppdem.get_disorder_dict()[0]
OVR_tested = MD.cppdem.get_disorder_vec_of_tested_movern()
# pd.DataFrame({"overall_disorder":OVR_dis, "tested_movern":OVR_tested }).to_feather("%s_overall_test.feather"%(name))
normalizer = MD.cppdem.get_n_pixels_by_combinations()[0]
# np.save("%s_disorder_normaliser.npy"%(name), normalizer)
all_disorder = MD.cppdem.get_best_fits_movern_per_BK()
# np.save("%s_concavity_tot.npy"%(name), all_disorder[0])
# print("Getting results")
results = np.array(MD.cppdem.get_all_disorder_values()[0])
# np.save("%s_disorder_tot.npy"%(name), results)
# XY = MD.cppdem.query_xy_for_each_basin()["0"]
# tdf = pd.DataFrame(XY)
# tdf.to_feather("%s_XY.feather"%(name))
# print("\n\n")
try:
from IPython.display import display, Markdown, Latex
todusplay = r"""
**Thanks for constraining** $\theta$ with the disorder algorithm from _Mudd et al., 2018_ and _Gailleton et al., submitted_.
Keep in mind that it is not straightforward and that the "best fit" we suggest is most of the time the "least worst" value maximising the collinearity in $\chi$ space.
Especially in large, complex basins, several $\theta$ values actually fit different areas, and the best fit is just an attempt to reconcile them where that is not necessarily possible.
$\theta$ constraining results:
median $\theta$ | $1^{st}$ Q | $3^{rd}$ Q
--- | --- | ---
%s | %s | %s
"""%(round(np.nanmedian(all_disorder[0]),3), round(np.nanpercentile(all_disorder[0],25),3), round(np.nanpercentile(all_disorder[0],75),3))
display(Markdown(todusplay))
except:
pass
return all_disorder
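# Hypothetical driver for theta_quick_constrain_single_basin: the DEM file name and the
# outlet coordinates below are placeholders. Only the flow routines are run beforehand,
# as expected by the function above.
def _example_quick_constrain():
    MD = lsd.LSDDEM(file_name="example_dem.tif", already_preprocessed=True)
    MD.CommonFlowRoutines()
    return theta_quick_constrain_single_basin(MD, X_coordinate_outlet=[532000],
                                              Y_coordinate_outlet=[4175000],
                                              area_threshold=1500)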
def get_median_first_quartile_Dstar(ls):
"""
Function which post-process results from one analysis to return the median and first quartile curve of all best-fits
param:
ls: full prefix (= including basin number if needed)
B.G
"""
print("Normalising D* for ", ls)
name_to_load = ls
# loading the file containng ALL the data
all_data = np.load(name_to_load + "_disorder_tot.npy")
if(all_data.shape[0]>1):
# normalise by max each row
all_data = np.apply_along_axis(norm_by_row,1,all_data)
# Median by column
ALLDmed = np.apply_along_axis(np.median,0,all_data)
# Percentile by column
ALLDfstQ = np.apply_along_axis(lambda z: np.percentile(z,25),0,all_data)
else:
return name_to_load
return ALLDmed, ALLDfstQ, ls
def get_median_first_quartile_Dstar_r(ls):
"""
Function which post-process results from one analysis to return the median and first quartile curve of all best-fits
param:
ls: full prefix (= including basin number if needed)
B.G
"""
print("Normalising D*_r for ", ls)
name_to_load = ls
# loading the file containng ALL the data
all_data = np.load(name_to_load + "_disorder_tot.npy")
if(all_data.shape[0]>1):
# normalise by max each row
all_data = np.apply_along_axis(norm_by_row_by_range,1,all_data)
# Median by column
ALLDmed = np.apply_along_axis(np.median,0,all_data)
# Percentile by column
ALLDfstQ = np.apply_along_axis(lambda z: np.percentile(z,25),0,all_data)
else:
return name_to_load
return ALLDmed, ALLDfstQ, ls
def plot_single_theta(ls, **kwargs):
"""
For a multiple analysis on the same DEM this plot the global with each basins colored by D^*
Need post-processing function to pre-analyse the ouputs.
The layout of this function might seems a bit convoluted, but that's making multiprocessing easy, as they take time to plot
param
"""
this_theta = ls[0]
prefix = ls[1]
# Loading the small summary df
df = pd.read_csv(prefix +"summary_results.csv")
# Loading the HillShade
HS = lsd.raster_loader.load_raster(prefix + "HS.tif")
# Formatting ticks
import matplotlib.ticker as tkr # has classes for tick-locating and -formatting
yfmt = tkr.FuncFormatter(numfmt)
xfmt = tkr.FuncFormatter(numfmt)
print("plotting D* for theta", this_theta)
# Getting the Figure and the ticks right
fig,ax = plt.subplots()
ax.yaxis.set_major_formatter(yfmt)
ax.xaxis.set_major_formatter(xfmt)
# Normalising the Hillshade and taking care of the no data
HS["array"] = HS["array"]/HS["array"].max()
HS["array"][HS["array"]<0] = np.nan
# Plotting the hillshade
ax.imshow(HS["array"], extent = HS["extent"], cmap = "gray", vmin= 0.2, vmax = 0.8)
# Building the array of concavity
A = np.zeros(HS["array"].shape)
A[:,:] = np.nan
# For each raster, I am reading rows and col corresponding to the main raster and potting it with the requested value
for name in df["raster_name"]:
row = np.load(name + "_row.npy")
col = np.load(name + "_col.npy")
val = df["D*_%s"%this_theta][df["raster_name"] == name].values[0] # A wee convoluted but it work and it is fast so...
A[row,col] = val
# PLOTTING THE D*
ax.imshow(A, extent = HS["extent"], cmap= "gnuplot2", zorder = 2, alpha = 0.75, vmin = 0.1, vmax = 0.9)
# You may want to change the extents of the plot
if("xlim" in kwargs):
ax.set_xlim(kwargs["xlim"])
if("ylim" in kwargs):
ax.set_ylim(kwargs["ylim"])
ax.set_xlabel("Easting (km)")
ax.set_ylabel("Northing (km)")
# Saving the figure
plt.tight_layout()#
plt.savefig(prefix + "MAP_disorder_%s.png"%(this_theta), dpi = 500)
plt.close(fig)
print("plotting D*_r for theta", this_theta)
# Getting the Figure and the ticks right
fig,ax = plt.subplots()
ax.yaxis.set_major_formatter(yfmt)
ax.xaxis.set_major_formatter(xfmt)
# Plotting the hillshade
ax.imshow(HS["array"], extent = HS["extent"], cmap = "gray", vmin= 0.2, vmax = 0.8)
# Building the array of concavity
A = np.zeros(HS["array"].shape)
A[:,:] = np.nan
# For each raster, I am reading rows and col corresponding to the main raster and potting it with the requested value
for name in df["raster_name"]:
row = np.load(name + "_row.npy")
col = np.load(name + "_col.npy")
val = df["D*_r_%s"%this_theta][df["raster_name"] == name].values[0] # A wee convoluted but it work and it is fast so...
A[row,col] = val
# PLOTTING THE D*
ax.imshow(A, extent = HS["extent"], cmap= "gnuplot2", zorder = 2, alpha = 0.75, vmin = 0.1, vmax = 0.9)
# You may want to change the extents of the plot
if("xlim" in kwargs):
ax.set_xlim(kwargs["xlim"])
if("ylim" in kwargs):
ax.set_ylim(kwargs["ylim"])
ax.set_xlabel("Easting (km)")
ax.set_ylabel("Northing (km)")
# Saving the figure
plt.tight_layout()#
plt.savefig(prefix + "MAP_disorder_by_range_%s.png"%(this_theta), dpi = 500)
plt.close(fig)
def plot_min_D_star_map(ls, **kwargs):
"""
For a multiple analysis on the same DEM this plot the global with each basins colored by D^*
Need post-processing function to pre-analyse the ouputs.
The layout of this function might seems a bit convoluted, but that's making multiprocessing easy, as they take time to plot
param
"""
this_theta = ls[0]
prefix = ls[1]
# Loading the small summary df
df = pd.read_csv(prefix +"summary_results.csv")
# Loading the HillShade
HS = lsd.raster_loader.load_raster(prefix + "HS.tif")
# Formatting ticks
import matplotlib.ticker as tkr # has classes for tick-locating and -formatting
yfmt = tkr.FuncFormatter(numfmt)
xfmt = tkr.FuncFormatter(numfmt)
print("plotting D* for theta", this_theta)
# Getting the Figure and the ticks right
fig,ax = plt.subplots()
ax.yaxis.set_major_formatter(yfmt)
ax.xaxis.set_major_formatter(xfmt)
# Normalising the Hillshade and taking care of the no data
HS["array"] = HS["array"]/HS["array"].max()
HS["array"][HS["array"]<0] = np.nan
# Plotting the hillshade
ax.imshow(HS["array"], extent = HS["extent"], cmap = "gray", vmin= 0.2, vmax = 0.8)
# Building the array of concavity
A = np.zeros(HS["array"].shape)
A[:,:] = np.nan
df_theta = pd.read_csv(prefix + "all_raster_names.csv")
thetas = np.round(pd.read_feather(df["raster_name"].iloc[0] + "_overall_test.feather")["tested_movern"].values,decimals = 3)
# For each raster, I am reading rows and col corresponding to the main raster and potting it with the requested value
for name in df["raster_name"]:
row = np.load(name + "_row.npy")
col = np.load(name + "_col.npy")
val = 1e12
for tval in thetas:
valtest = df["D*_%s"%tval][df["raster_name"] == name].values[0] # A wee convoluted but it work and it is fast so...
if(valtest<val):
val=valtest
A[row,col] = val
# PLOTTING THE D*
ax.imshow(A, extent = HS["extent"], cmap= "gnuplot2", zorder = 2, alpha = 0.75, vmin = 0.1, vmax = 0.9)
# You may want to change the extents of the plot
if("xlim" in kwargs):
ax.set_xlim(kwargs["xlim"])
if("ylim" in kwargs):
ax.set_ylim(kwargs["ylim"])
ax.set_xlabel("Easting (km)")
ax.set_ylabel("Northing (km)")
# Saving the figure
plt.tight_layout()#
plt.savefig(prefix + "MAP_minimum_disorder_across_theta_%s.png"%(this_theta), dpi = 500)
plt.close(fig)
def post_process_analysis_for_Dstar(prefix, n_proc = 1, base_raster_full_name = "SEC_PP.tif"):
# Loading the list of raster
df = pd.read_csv(prefix + "all_raster_names.csv")
# Preparing the multiprocessing
d_of_med = {}
d_of_fst = {}
d_of_med_r = {}
d_of_fst_r = {}
params = df["raster_name"].tolist()
ras_to_ignore = {}
ras_to_ignore_list = []
for i in params:
ras_to_ignore[i] = False
# running the multiprocessing
with Pool(n_proc) as p:
fprocesses = []
for i in params:
fprocesses.append(p.apply_async(get_median_first_quartile_Dstar, args = (i,)))
for gut in fprocesses:
gut.wait()
# getting the results in the right dictionaries
for gut in fprocesses:
# print(gut.get())
if(isinstance(gut.get(),tuple)):
d_of_med[gut.get()[2]] = gut.get()[0]
d_of_fst[gut.get()[2]] = gut.get()[1]
else:
# print("IGNORING",gut.get() )
ras_to_ignore[gut.get()] = True
ras_to_ignore_list.append(gut.get())
# running the multiprocessing
with Pool(n_proc) as p:
fprocesses = []
for i in params:
fprocesses.append(p.apply_async(get_median_first_quartile_Dstar_r, args = (i,)))
for gut in fprocesses:
gut.wait()
# getting the results in the right dictionaries
for gut in fprocesses:
# print(gut.get())
if(isinstance(gut.get(),tuple)):
d_of_med_r[gut.get()[2]] = gut.get()[0]
d_of_fst_r[gut.get()[2]] = gut.get()[1]
else:
# print("IGNORING",gut.get() )
ras_to_ignore[gut.get()] = True
ras_to_ignore_list.append(gut.get())
# Getting the list of thetas tested
thetas = np.round(pd.read_feather(params[0] + "_overall_test.feather")["tested_movern"].values,decimals = 3)
df["best_fit"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["err_neg"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["err_pos"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["best_fit_norm_by_range"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["err_neg_norm_by_range"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["err_pos_norm_by_range"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
# Preparing my dataframe to ingest
for t in thetas:
df["D*_%s"%t] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["D*_r_%s"%t] = pd.Series(np.zeros(df.shape[0]), index = df.index)
# Ingesting hte results
for i in range(df.shape[0]):
if(ras_to_ignore[df["raster_name"].iloc[i]]):
continue
BF,err = get_best_bit_and_err_from_Dstar(thetas, d_of_med[df["raster_name"].iloc[i]], d_of_fst[df["raster_name"].iloc[i]], 10)
BF_r,err_r = get_best_bit_and_err_from_Dstar(thetas, d_of_med_r[df["raster_name"].iloc[i]], d_of_fst_r[df["raster_name"].iloc[i]], 10)
df["best_fit"].iloc[i] = BF
df["err_neg"].iloc[i] = err[0]
df["err_pos"].iloc[i] = err[1]
df["best_fit_norm_by_range"].iloc[i] = BF_r
df["err_neg_norm_by_range"].iloc[i] = err_r[0]
df["err_pos_norm_by_range"].iloc[i] = err_r[1]
for t in range(thetas.shape[0]):
df["D*_%s"%thetas[t]].iloc[i] = d_of_med[df["raster_name"].iloc[i]][t]
df["D*_r_%s"%thetas[t]].iloc[i] = d_of_med_r[df["raster_name"].iloc[i]][t]
# Getting the hillshade
mydem = lsd.LSDDEM(file_name = base_raster_full_name,already_preprocessed = True)
HS = mydem.get_hillshade(altitude = 45, angle = 315, z_exageration = 1)
mydem.save_array_to_raster_extent( HS, name = prefix + "HS", save_directory = "./")
# will add X-Y to the sumarry dataframe
df["X_median"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["X_firstQ"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["X_thirdtQ"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["Y_median"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["Y_firstQ"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["Y_thirdtQ"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
# I do not mutiprocess here: it would require load the mother raster for each process and would eat a lot of memory
for i in params:
if(ras_to_ignore[i]):
continue
XY = pd.read_feather(i + "_XY.feather")
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime
from numpy import random
import numpy as np
from pandas.compat import lrange, lzip, u
from pandas import (compat, DataFrame, Series, Index, MultiIndex,
date_range, isnull)
import pandas as pd
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
from pandas.core.common import PerformanceWarning
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameSelectReindex(tm.TestCase, TestData):
# These are specific reindex-based tests; other indexing tests should go in
# test_indexing
_multiprocess_can_split_ = True
def test_drop_names(self):
df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=['a', 'b', 'c'],
columns=['d', 'e', 'f'])
df.index.name, df.columns.name = 'first', 'second'
df_dropped_b = df.drop('b')
df_dropped_e = df.drop('e', axis=1)
df_inplace_b, df_inplace_e = df.copy(), df.copy()
df_inplace_b.drop('b', inplace=True)
df_inplace_e.drop('e', axis=1, inplace=True)
for obj in (df_dropped_b, df_dropped_e, df_inplace_b, df_inplace_e):
self.assertEqual(obj.index.name, 'first')
self.assertEqual(obj.columns.name, 'second')
self.assertEqual(list(df.columns), ['d', 'e', 'f'])
self.assertRaises(ValueError, df.drop, ['g'])
self.assertRaises(ValueError, df.drop, ['g'], 1)
# errors = 'ignore'
dropped = df.drop(['g'], errors='ignore')
expected = Index(['a', 'b', 'c'], name='first')
self.assert_index_equal(dropped.index, expected)
dropped = df.drop(['b', 'g'], errors='ignore')
expected = Index(['a', 'c'], name='first')
self.assert_index_equal(dropped.index, expected)
dropped = df.drop(['g'], axis=1, errors='ignore')
expected = Index(['d', 'e', 'f'], name='second')
self.assert_index_equal(dropped.columns, expected)
dropped = df.drop(['d', 'g'], axis=1, errors='ignore')
expected = Index(['e', 'f'], name='second')
self.assert_index_equal(dropped.columns, expected)
def test_drop_col_still_multiindex(self):
arrays = [['a', 'b', 'c', 'top'],
['', '', '', 'OD'],
['', '', '', 'wx']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(np.random.randn(3, 4), columns=index)
del df[('a', '', '')]
assert(isinstance(df.columns, MultiIndex))
def test_drop(self):
simple = DataFrame({"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]})
assert_frame_equal(simple.drop("A", axis=1), simple[['B']])
assert_frame_equal(simple.drop(["A", "B"], axis='columns'),
simple[[]])
assert_frame_equal(simple.drop([0, 1, 3], axis=0), simple.ix[[2], :])
assert_frame_equal(simple.drop(
[0, 3], axis='index'), simple.ix[[1, 2], :])
self.assertRaises(ValueError, simple.drop, 5)
self.assertRaises(ValueError, simple.drop, 'C', 1)
self.assertRaises(ValueError, simple.drop, [1, 5])
self.assertRaises(ValueError, simple.drop, ['A', 'C'], 1)
# errors = 'ignore'
assert_frame_equal(simple.drop(5, errors='ignore'), simple)
assert_frame_equal(simple.drop([0, 5], errors='ignore'),
simple.ix[[1, 2, 3], :])
assert_frame_equal(simple.drop('C', axis=1, errors='ignore'), simple)
assert_frame_equal(simple.drop(['A', 'C'], axis=1, errors='ignore'),
simple[['B']])
# non-unique - wheee!
nu_df = DataFrame(lzip(range(3), range(-3, 1), list('abc')),
columns=['a', 'a', 'b'])
assert_frame_equal(nu_df.drop('a', axis=1), nu_df[['b']])
assert_frame_equal(nu_df.drop('b', axis='columns'), nu_df['a'])
nu_df = nu_df.set_index(pd.Index(['X', 'Y', 'X']))
nu_df.columns = list('abc')
assert_frame_equal(nu_df.drop('X', axis='rows'), nu_df.ix[["Y"], :])
assert_frame_equal(nu_df.drop(['X', 'Y'], axis=0), nu_df.ix[[], :])
# inplace cache issue
# GH 5628
df = pd.DataFrame(np.random.randn(10, 3), columns=list('abc'))
expected = df[~(df.b > 0)]
df.drop(labels=df[df.b > 0].index, inplace=True)
assert_frame_equal(df, expected)
def test_drop_multiindex_not_lexsorted(self):
# GH 11640
# define the lexsorted version
lexsorted_mi = MultiIndex.from_tuples(
[('a', ''), ('b1', 'c1'), ('b2', 'c2')], names=['b', 'c'])
lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)
self.assertTrue(lexsorted_df.columns.is_lexsorted())
# define the non-lexsorted version
not_lexsorted_df = DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3],
[1, 'b2', 'c2', 4]])
not_lexsorted_df = not_lexsorted_df.pivot_table(
index='a', columns=['b', 'c'], values='d')
not_lexsorted_df = not_lexsorted_df.reset_index()
self.assertFalse(not_lexsorted_df.columns.is_lexsorted())
# compare the results
tm.assert_frame_equal(lexsorted_df, not_lexsorted_df)
expected = lexsorted_df.drop('a', axis=1)
with tm.assert_produces_warning(PerformanceWarning):
result = not_lexsorted_df.drop('a', axis=1)
tm.assert_frame_equal(result, expected)
def test_merge_join_different_levels(self):
# GH 9455
# first dataframe
df1 = DataFrame(columns=['a', 'b'], data=[[1, 11], [0, 22]])
# second dataframe
columns = MultiIndex.from_tuples([('a', ''), ('c', 'c1')])
df2 = DataFrame(columns=columns, data=[[1, 33], [0, 44]])
# merge
columns = ['a', 'b', ('c', 'c1')]
expected = | DataFrame(columns=columns, data=[[1, 11, 33], [0, 22, 44]]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 17 14:22:41 2019
@author: snoone
"""
import os
import glob
import pandas as pd
import csv
import datetime
import numpy as np
##import all csv files in the current dir whose timezone needs changing to GMT based on the hour offset
os.chdir("D:/EEC_canadian_hourly/2000_10/rh")
extension = 'csv'
all_filenames = [i for i in glob.glob('*.{}'.format(extension))]
#combine all files in the list
df= pd.concat([pd.read_csv(f,delimiter=',') for f in all_filenames])
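# the source files are wide (one column per hour of the day); each hour column is copied
# into its own long-format dataframe below, and all 24 frames are concatenated at the end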
df1=df.copy()
df1["Station_name"]=df1["Station_Name"]
df1["Observed_value"]=df1["1"]
df1["Hour"]="1"
df1["Minute"]="00"
df1["Alias_station_name"]=""
df1["Source_QC_flag"]=""
df1['Original_observed_value_units']="perc"
df1['Measurement_code_1']=''
df1['Measurement_code_2']=''
df1['Report_type_code']=''
df1['Original_observed_value']=df1["1"]
df1['Observed_value'].replace('', np.nan, inplace=True)
df1.dropna(subset=['Observed_value'], inplace=True)
df1 = df1[["Source_ID",'Station_ID',"Station_name","Alias_station_name",
"Year","Month","Day","Hour","Minute",
"Latitude","Longitude","Elevation","Observed_value",
"Source_QC_flag","Original_observed_value",
"Original_observed_value_units",
"Report_type_code","Measurement_code_1",
"Measurement_code_2"]]
df2=df.copy()
df2["Station_name"]=df2["Station_Name"]
df2["Observed_value"]=df["2"]
df2["Hour"]="2"
df2["Minute"]="00"
df2["Alias_station_name"]=""
df2["Source_QC_flag"]=""
df2['Original_observed_value_units']="perc"
df2['Measurement_code_1']=''
df2['Measurement_code_2']=''
df2['Report_type_code']=''
df2['Original_observed_value']=df["2"]
df2['Observed_value'].replace('', np.nan, inplace=True)
df2.dropna(subset=['Observed_value'], inplace=True)
df2 = df2[["Source_ID",'Station_ID',"Station_name","Alias_station_name",
"Year","Month","Day","Hour","Minute",
"Latitude","Longitude","Elevation","Observed_value",
"Source_QC_flag","Original_observed_value",
"Original_observed_value_units",
"Report_type_code","Measurement_code_1",
"Measurement_code_2"]]
df3=df.copy()
df3["Station_name"]=df3["Station_Name"]
df3["Observed_value"]=df["3"]
df3["Hour"]="3"
df3["Minute"]="00"
df3["Alias_station_name"]=""
df3["Source_QC_flag"]=""
df3['Original_observed_value_units']="perc"
df3['Measurement_code_1']=''
df3['Measurement_code_2']=''
df3['Report_type_code']=''
df3['Original_observed_value']=df["3"]
df3['Observed_value'].replace('', np.nan, inplace=True)
df3.dropna(subset=['Observed_value'], inplace=True)
df3 = df3[["Source_ID",'Station_ID',"Station_name","Alias_station_name",
"Year","Month","Day","Hour","Minute",
"Latitude","Longitude","Elevation","Observed_value",
"Source_QC_flag","Original_observed_value",
"Original_observed_value_units",
"Report_type_code","Measurement_code_1",
"Measurement_code_2"]]
df4=df.copy()
df4["Station_name"]=df4["Station_Name"]
df4["Observed_value"]=df["4"]
df4["Hour"]="4"
df4["Minute"]="00"
df4["Alias_station_name"]=""
df4["Source_QC_flag"]=""
df4['Original_observed_value_units']="perc"
df4['Measurement_code_1']=''
df4['Measurement_code_2']=''
df4['Report_type_code']=''
df4['Original_observed_value']=df["4"]
df4['Observed_value'].replace('', np.nan, inplace=True)
df4.dropna(subset=['Observed_value'], inplace=True)
df4 = df4[["Source_ID",'Station_ID',"Station_name","Alias_station_name",
"Year","Month","Day","Hour","Minute",
"Latitude","Longitude","Elevation","Observed_value",
"Source_QC_flag","Original_observed_value",
"Original_observed_value_units",
"Report_type_code","Measurement_code_1",
"Measurement_code_2"]]
df5=df.copy()
df5["Station_name"]=df5["Station_Name"]
df5["Observed_value"]=df["5"]
df5["Hour"]="5"
df5["Minute"]="00"
df5["Alias_station_name"]=""
df5["Source_QC_flag"]=""
df5['Original_observed_value_units']="perc"
df5['Measurement_code_1']=''
df5['Measurement_code_2']=''
df5['Report_type_code']=''
df5['Original_observed_value']=df["5"]
df5['Observed_value'].replace('', np.nan, inplace=True)
df5.dropna(subset=['Observed_value'], inplace=True)
df5 = df5[["Source_ID",'Station_ID',"Station_name","Alias_station_name",
"Year","Month","Day","Hour","Minute",
"Latitude","Longitude","Elevation","Observed_value",
"Source_QC_flag","Original_observed_value",
"Original_observed_value_units",
"Report_type_code","Measurement_code_1",
"Measurement_code_2"]]
df6=df.copy()
df6["Station_name"]=df6["Station_Name"]
df6["Observed_value"]=df["6"]
df6["Hour"]="6"
df6["Minute"]="00"
df6["Alias_station_name"]=""
df6["Source_QC_flag"]=""
df6['Original_observed_value_units']="perc"
df6['Measurement_code_1']=''
df6['Measurement_code_2']=''
df6['Report_type_code']=''
df6['Original_observed_value']=df["6"]
df6['Observed_value'].replace('', np.nan, inplace=True)
df6.dropna(subset=['Observed_value'], inplace=True)
df6 = df6[["Source_ID",'Station_ID',"Station_name","Alias_station_name",
"Year","Month","Day","Hour","Minute",
"Latitude","Longitude","Elevation","Observed_value",
"Source_QC_flag","Original_observed_value",
"Original_observed_value_units",
"Report_type_code","Measurement_code_1",
"Measurement_code_2"]]
df7=df.copy()
df7["Station_name"]=df7["Station_Name"]
df7["Observed_value"]=df["7"]
df7["Hour"]="7"
df7["Minute"]="00"
df7["Alias_station_name"]=""
df7["Source_QC_flag"]=""
df7['Original_observed_value_units']="perc"
df7['Measurement_code_1']=''
df7['Measurement_code_2']=''
df7['Report_type_code']=''
df7['Original_observed_value']=df["7"]
df7['Observed_value'].replace('', np.nan, inplace=True)
df7.dropna(subset=['Observed_value'], inplace=True)
df7 = df7[["Source_ID",'Station_ID',"Station_name","Alias_station_name",
"Year","Month","Day","Hour","Minute",
"Latitude","Longitude","Elevation","Observed_value",
"Source_QC_flag","Original_observed_value",
"Original_observed_value_units",
"Report_type_code","Measurement_code_1",
"Measurement_code_2"]]
df8=df.copy()
df8["Station_name"]=df8["Station_Name"]
df8["Observed_value"]=df["8"]
df8["Hour"]="8"
df8["Minute"]="00"
df8["Alias_station_name"]=""
df8["Source_QC_flag"]=""
df8['Original_observed_value_units']="perc"
df8['Measurement_code_1']=''
df8['Measurement_code_2']=''
df8['Report_type_code']=''
df8['Original_observed_value']=df["8"]
df8['Observed_value'].replace('', np.nan, inplace=True)
df8.dropna(subset=['Observed_value'], inplace=True)
df8 = df8[["Source_ID",'Station_ID',"Station_name","Alias_station_name",
"Year","Month","Day","Hour","Minute",
"Latitude","Longitude","Elevation","Observed_value",
"Source_QC_flag","Original_observed_value",
"Original_observed_value_units",
"Report_type_code","Measurement_code_1",
"Measurement_code_2"]]
df9=df.copy()
df9["Station_name"]=df9["Station_Name"]
df9["Observed_value"]=df["9"]
df9["Hour"]="9"
df9["Minute"]="00"
df9["Alias_station_name"]=""
df9["Source_QC_flag"]=""
df9['Original_observed_value_units']="perc"
df9['Measurement_code_1']=''
df9['Measurement_code_2']=''
df9['Report_type_code']=''
df9['Original_observed_value']=df["9"]
df9['Observed_value'].replace('', np.nan, inplace=True)
df9.dropna(subset=['Observed_value'], inplace=True)
df9 = df9[["Source_ID",'Station_ID',"Station_name","Alias_station_name",
"Year","Month","Day","Hour","Minute",
"Latitude","Longitude","Elevation","Observed_value",
"Source_QC_flag","Original_observed_value",
"Original_observed_value_units",
"Report_type_code","Measurement_code_1",
"Measurement_code_2"]]
df10=df.copy()
df10["Station_name"]=df10["Station_Name"]
df10["Observed_value"]=df["10"]
df10["Hour"]="10"
df10["Minute"]="00"
df10["Alias_station_name"]=""
df10["Source_QC_flag"]=""
df10['Original_observed_value_units']="perc"
df10['Measurement_code_1']=''
df10['Measurement_code_2']=''
df10['Report_type_code']=''
df10['Original_observed_value']=df["10"]
df10['Observed_value'].replace('', np.nan, inplace=True)
df10.dropna(subset=['Observed_value'], inplace=True)
df10 = df10[["Source_ID",'Station_ID',"Station_name","Alias_station_name",
"Year","Month","Day","Hour","Minute",
"Latitude","Longitude","Elevation","Observed_value",
"Source_QC_flag","Original_observed_value",
"Original_observed_value_units",
"Report_type_code","Measurement_code_1",
"Measurement_code_2"]]
df11=df.copy()
df11["Station_name"]=df["Station_Name"]
df11["Observed_value"]=df["11"]
df11["Hour"]="11"
df11["Minute"]="00"
df11["Alias_station_name"]=""
df11["Source_QC_flag"]=""
df11['Original_observed_value_units']="perc"
df11['Measurement_code_1']=''
df11['Measurement_code_2']=''
df11['Report_type_code']=''
df11['Original_observed_value']=df["11"]
df11['Observed_value'].replace('', np.nan, inplace=True)
df11.dropna(subset=['Observed_value'], inplace=True)
df11 = df11[["Source_ID",'Station_ID',"Station_name","Alias_station_name",
"Year","Month","Day","Hour","Minute",
"Latitude","Longitude","Elevation","Observed_value",
"Source_QC_flag","Original_observed_value",
"Original_observed_value_units",
"Report_type_code","Measurement_code_1",
"Measurement_code_2"]]
df12=df.copy()
df12["Station_name"]=df["Station_Name"]
df12["Observed_value"]=df["12"]
df12["Hour"]="12"
df12["Minute"]="00"
df12["Alias_station_name"]=""
df12["Source_QC_flag"]=""
df12['Original_observed_value_units']="perc"
df12['Measurement_code_1']=''
df12['Measurement_code_2']=''
df12['Report_type_code']=''
df12['Original_observed_value']=df["12"]
df12['Observed_value'].replace('', np.nan, inplace=True)
df12.dropna(subset=['Observed_value'], inplace=True)
df12 = df12[["Source_ID",'Station_ID',"Station_name","Alias_station_name",
"Year","Month","Day","Hour","Minute",
"Latitude","Longitude","Elevation","Observed_value",
"Source_QC_flag","Original_observed_value",
"Original_observed_value_units",
"Report_type_code","Measurement_code_1",
"Measurement_code_2"]]
df13=df.copy()
df13["Station_name"]=df["Station_Name"]
df13["Observed_value"]=df["13"]
df13["Hour"]="13"
df13["Minute"]="00"
df13["Alias_station_name"]=""
df13["Source_QC_flag"]=""
df13['Original_observed_value_units']="perc"
df13['Measurement_code_1']=''
df13['Measurement_code_2']=''
df13['Report_type_code']=''
df13['Original_observed_value']=df["13"]
df13['Observed_value'].replace('', np.nan, inplace=True)
df13.dropna(subset=['Observed_value'], inplace=True)
df13 = df13[["Source_ID",'Station_ID',"Station_name","Alias_station_name",
"Year","Month","Day","Hour","Minute",
"Latitude","Longitude","Elevation","Observed_value",
"Source_QC_flag","Original_observed_value",
"Original_observed_value_units",
"Report_type_code","Measurement_code_1",
"Measurement_code_2"]]
df14=df.copy()
df14["Station_name"]=df["Station_Name"]
df14["Observed_value"]=df["14"]
df14["Hour"]="14"
df14["Minute"]="00"
df14["Alias_station_name"]=""
df14["Source_QC_flag"]=""
df14['Original_observed_value_units']="perc"
df14['Measurement_code_1']=''
df14['Measurement_code_2']=''
df14['Report_type_code']=''
df14['Original_observed_value']=df["14"]
df14['Observed_value'].replace('', np.nan, inplace=True)
df14.dropna(subset=['Observed_value'], inplace=True)
df14 = df14[["Source_ID",'Station_ID',"Station_name","Alias_station_name",
"Year","Month","Day","Hour","Minute",
"Latitude","Longitude","Elevation","Observed_value",
"Source_QC_flag","Original_observed_value",
"Original_observed_value_units",
"Report_type_code","Measurement_code_1",
"Measurement_code_2"]]
df15=df.copy()
df15["Station_name"]=df["Station_Name"]
df15["Observed_value"]=df["15"]
df15["Hour"]="15"
df15["Minute"]="00"
df15["Alias_station_name"]=""
df15["Source_QC_flag"]=""
df15['Original_observed_value_units']="perc"
df15['Measurement_code_1']=''
df15['Measurement_code_2']=''
df15['Report_type_code']=''
df15['Original_observed_value']=df["15"]
df15['Observed_value'].replace('', np.nan, inplace=True)
df15.dropna(subset=['Observed_value'], inplace=True)
df15 = df15[["Source_ID",'Station_ID',"Station_name","Alias_station_name",
"Year","Month","Day","Hour","Minute",
"Latitude","Longitude","Elevation","Observed_value",
"Source_QC_flag","Original_observed_value",
"Original_observed_value_units",
"Report_type_code","Measurement_code_1",
"Measurement_code_2"]]
df16=df.copy()
df16["Station_name"]=df["Station_Name"]
df16["Observed_value"]=df["16"]
df16["Hour"]="16"
df16["Minute"]="00"
df16["Alias_station_name"]=""
df16["Source_QC_flag"]=""
df16['Original_observed_value_units']="perc"
df16['Measurement_code_1']=''
df16['Measurement_code_2']=''
df16['Report_type_code']=''
df16['Original_observed_value']=df["16"]
df16['Observed_value'].replace('', np.nan, inplace=True)
df16.dropna(subset=['Observed_value'], inplace=True)
df16 = df16[["Source_ID",'Station_ID',"Station_name","Alias_station_name",
"Year","Month","Day","Hour","Minute",
"Latitude","Longitude","Elevation","Observed_value",
"Source_QC_flag","Original_observed_value",
"Original_observed_value_units",
"Report_type_code","Measurement_code_1",
"Measurement_code_2"]]
df17=df.copy()
df17["Station_name"]=df["Station_Name"]
df17["Observed_value"]=df["17"]
df17["Hour"]="17"
df17["Minute"]="00"
df17["Alias_station_name"]=""
df17["Source_QC_flag"]=""
df17['Original_observed_value_units']="perc"
df17['Measurement_code_1']=''
df17['Measurement_code_2']=''
df17['Report_type_code']=''
df17['Original_observed_value']=df["17"]
df17['Observed_value'].replace('', np.nan, inplace=True)
df17.dropna(subset=['Observed_value'], inplace=True)
df17 = df17[["Source_ID",'Station_ID',"Station_name","Alias_station_name",
"Year","Month","Day","Hour","Minute",
"Latitude","Longitude","Elevation","Observed_value",
"Source_QC_flag","Original_observed_value",
"Original_observed_value_units",
"Report_type_code","Measurement_code_1",
"Measurement_code_2"]]
df18=df.copy()
df18["Station_name"]=df["Station_Name"]
df18["Observed_value"]=df["18"]
df18["Hour"]="18"
df18["Minute"]="00"
df18["Alias_station_name"]=""
df18["Source_QC_flag"]=""
df18['Original_observed_value_units']="perc"
df18['Measurement_code_1']=''
df18['Measurement_code_2']=''
df18['Report_type_code']=''
df18['Original_observed_value']=df["18"]
df18['Observed_value'].replace('', np.nan, inplace=True)
df18.dropna(subset=['Observed_value'], inplace=True)
df18 = df18[["Source_ID",'Station_ID',"Station_name","Alias_station_name",
"Year","Month","Day","Hour","Minute",
"Latitude","Longitude","Elevation","Observed_value",
"Source_QC_flag","Original_observed_value",
"Original_observed_value_units",
"Report_type_code","Measurement_code_1",
"Measurement_code_2"]]
df19=df.copy()
df19["Station_name"]=df["Station_Name"]
df19["Observed_value"]=df["19"]
df19["Hour"]="19"
df19["Minute"]="00"
df19["Alias_station_name"]=""
df19["Source_QC_flag"]=""
df19['Original_observed_value_units']="perc"
df19['Measurement_code_1']=''
df19['Measurement_code_2']=''
df19['Report_type_code']=''
df19['Original_observed_value']=df["19"]
df19['Observed_value'].replace('', np.nan, inplace=True)
df19.dropna(subset=['Observed_value'], inplace=True)
df19 = df19[["Source_ID",'Station_ID',"Station_name","Alias_station_name",
"Year","Month","Day","Hour","Minute",
"Latitude","Longitude","Elevation","Observed_value",
"Source_QC_flag","Original_observed_value",
"Original_observed_value_units",
"Report_type_code","Measurement_code_1",
"Measurement_code_2"]]
df20=df.copy()
df20["Station_name"]=df["Station_Name"]
df20["Observed_value"]=df["20"]
df20["Hour"]="20"
df20["Minute"]="00"
df20["Alias_station_name"]=""
df20["Source_QC_flag"]=""
df20['Original_observed_value_units']="perc"
df20['Measurement_code_1']=''
df20['Measurement_code_2']=''
df20['Report_type_code']=''
df20['Original_observed_value']=df["20"]
df20['Observed_value'].replace('', np.nan, inplace=True)
df20.dropna(subset=['Observed_value'], inplace=True)
df20 = df20[["Source_ID",'Station_ID',"Station_name","Alias_station_name",
"Year","Month","Day","Hour","Minute",
"Latitude","Longitude","Elevation","Observed_value",
"Source_QC_flag","Original_observed_value",
"Original_observed_value_units",
"Report_type_code","Measurement_code_1",
"Measurement_code_2"]]
df21=df.copy()
df21["Station_name"]=df["Station_Name"]
df21["Observed_value"]=df["21"]
df21["Hour"]="21"
df21["Minute"]="00"
df21["Alias_station_name"]=""
df21["Source_QC_flag"]=""
df21['Original_observed_value_units']="perc"
df21['Measurement_code_1']=''
df21['Measurement_code_2']=''
df21['Report_type_code']=''
df21['Original_observed_value']=df["21"]
df21['Observed_value'].replace('', np.nan, inplace=True)
df21.dropna(subset=['Observed_value'], inplace=True)
df21 = df21[["Source_ID",'Station_ID',"Station_name","Alias_station_name",
"Year","Month","Day","Hour","Minute",
"Latitude","Longitude","Elevation","Observed_value",
"Source_QC_flag","Original_observed_value",
"Original_observed_value_units",
"Report_type_code","Measurement_code_1",
"Measurement_code_2"]]
df22=df.copy()
df22["Station_name"]=df["Station_Name"]
df22["Observed_value"]=df["22"]
df22["Hour"]="22"
df22["Minute"]="00"
df22["Alias_station_name"]=""
df22["Source_QC_flag"]=""
df22['Original_observed_value_units']="perc"
df22['Measurement_code_1']=''
df22['Measurement_code_2']=''
df22['Report_type_code']=''
df22['Original_observed_value']=df["22"]
df22['Observed_value'].replace('', np.nan, inplace=True)
df22.dropna(subset=['Observed_value'], inplace=True)
df22 = df22[["Source_ID",'Station_ID',"Station_name","Alias_station_name",
"Year","Month","Day","Hour","Minute",
"Latitude","Longitude","Elevation","Observed_value",
"Source_QC_flag","Original_observed_value",
"Original_observed_value_units",
"Report_type_code","Measurement_code_1",
"Measurement_code_2"]]
df23=df.copy()
df23["Station_name"]=df["Station_Name"]
df23["Observed_value"]=df["23"]
df23["Hour"]="23"
df23["Minute"]="00"
df23["Alias_station_name"]=""
df23["Source_QC_flag"]=""
df23['Original_observed_value_units']="perc"
df23['Measurement_code_1']=''
df23['Measurement_code_2']=''
df23['Report_type_code']=''
df23['Original_observed_value']=df["23"]
df23['Observed_value'].replace('', np.nan, inplace=True)
df23.dropna(subset=['Observed_value'], inplace=True)
df23 = df23[["Source_ID",'Station_ID',"Station_name","Alias_station_name",
"Year","Month","Day","Hour","Minute",
"Latitude","Longitude","Elevation","Observed_value",
"Source_QC_flag","Original_observed_value",
"Original_observed_value_units",
"Report_type_code","Measurement_code_1",
"Measurement_code_2"]]
df24=df.copy()
df24["Station_name"]=df["Station_Name"]
df24["Observed_value"]=df["00"]
df24["Hour"]="24"
df24["Minute"]="00"
df24["Alias_station_name"]=""
df24["Source_QC_flag"]=""
df24['Original_observed_value_units']="perc"
df24['Measurement_code_1']=''
df24['Measurement_code_2']=''
df24['Report_type_code']=''
df24['Original_observed_value']=df["00"]
df24['Observed_value'].replace('', np.nan, inplace=True)
df24.dropna(subset=['Observed_value'], inplace=True)
df24 = df24[["Source_ID",'Station_ID',"Station_name","Alias_station_name",
"Year","Month","Day","Hour","Minute",
"Latitude","Longitude","Elevation","Observed_value",
"Source_QC_flag","Original_observed_value",
"Original_observed_value_units",
"Report_type_code","Measurement_code_1",
"Measurement_code_2"]]
del df
#concatenate all the hourly dataframes together
df_final=pd.concat([df1,df2,df3,df4,df5,df6,df7,df8,df9,df10,df11,df12,df13,df14,df15,df16,
df17,df18,df19,df20,df21,df22,df23,df24], axis=0)
#remove the individual hourly dataframes
del df1,df2,df3,df4,df5,df6,df7,df8,df9,df10,df11,df12,df13,df14,df15,df16
del df17,df18,df19,df20,df21,df22,df23,df24
#change observed value to a numeric value
#df_final["Observed_value"] = pd.to_numeric(df_final["Observed_value"],errors='coerce')
#df_final["Original_observed_value"] = pd.to_numeric(df_final["Original_observed_value"],errors='coerce')
##unit conversion left commented out (relative humidity values are already in percent)
#df_final['Observed_value']=df_final["Observed_value"]/ 3.6
#df_final['Original_observed_value']=df_final["Original_observed_value"]/ 3.6
#change lat/long to numeric, then set decimal places
df_final["Latitude"] = | pd.to_numeric(df_final["Latitude"],errors='coerce') | pandas.to_numeric |
#!/usr/bin/env python
# coding: utf-8
# In[24]:
import numpy
import pandas as pd
import tensorflow as tf
from PyEMD import CEEMDAN
import warnings
warnings.filterwarnings("ignore")
### import the libraries
from tensorflow import keras
from tensorflow.keras import layers
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from math import sqrt
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
dataX, dataY = [], []
for i in range(len(dataset)-look_back-1):
a = dataset[i:(i+look_back), 0]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
return numpy.array(dataX), numpy.array(dataY)
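# element-wise percentage error with a guard for zero actuals: when actual[j] is 0,
# the prediction is divided by the mean of the actual series instead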
def percentage_error(actual, predicted):
res = numpy.empty(actual.shape)
for j in range(actual.shape[0]):
if actual[j] != 0:
res[j] = (actual[j] - predicted[j]) / actual[j]
else:
            res[j] = predicted[j] / numpy.mean(actual)
return res
def mean_absolute_percentage_error(y_true, y_pred):
return numpy.mean(numpy.abs(percentage_error(numpy.asarray(y_true), numpy.asarray(y_pred)))) * 100
# In[25]:
def lr_model(datass,look_back,data_partition):
datasets=datass.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
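    # flatten the scaled targets to 1-D arrays as expected by scikit-learn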
y=y.ravel()
y1=y1.ravel()
import tensorflow as tf
numpy.random.seed(1234)
tf.random.set_seed(1234)
from sklearn.linear_model import LinearRegression
grid = LinearRegression()
grid.fit(X,y)
y_pred_train_lr= grid.predict(X)
y_pred_test_lr= grid.predict(X1)
y_pred_train_lr=pd.DataFrame(y_pred_train_lr)
y_pred_test_lr=pd.DataFrame(y_pred_test_lr)
y1=pd.DataFrame(y1)
y=pd.DataFrame(y)
y_pred_test1_lr= sc_y.inverse_transform (y_pred_test_lr)
y_pred_train1_lr=sc_y.inverse_transform (y_pred_train_lr)
y_test= sc_y.inverse_transform (y1)
y_train= sc_y.inverse_transform (y)
y_pred_test1_rf=pd.DataFrame(y_pred_test1_lr)
y_pred_train1_rf=pd.DataFrame(y_pred_train1_lr)
y_test= pd.DataFrame(y_test)
#summarize the fit of the model
mape=mean_absolute_percentage_error(y_test,y_pred_test1_lr)
rmse= sqrt(mean_squared_error(y_test,y_pred_test1_lr))
mae=metrics.mean_absolute_error(y_test,y_pred_test1_lr)
return mape,rmse,mae
# In[26]:
def svr_model(datass,look_back,data_partition):
datasets=datass.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
numpy.random.seed(1234)
import tensorflow as tf
tf.random.set_seed(1234)
from sklearn.svm import SVR
grid = SVR()
grid.fit(X,y)
y_pred_train_svr= grid.predict(X)
y_pred_test_svr= grid.predict(X1)
y_pred_train_svr=pd.DataFrame(y_pred_train_svr)
y_pred_test_svr=pd.DataFrame(y_pred_test_svr)
y1=pd.DataFrame(y1)
y=pd.DataFrame(y)
y_pred_test1_svr= sc_y.inverse_transform (y_pred_test_svr)
y_pred_train1_svr=sc_y.inverse_transform (y_pred_train_svr)
y_test= sc_y.inverse_transform (y1)
y_train= sc_y.inverse_transform (y)
y_pred_test1_svr=pd.DataFrame(y_pred_test1_svr)
y_pred_train1_svr=pd.DataFrame(y_pred_train1_svr)
y_test= pd.DataFrame(y_test)
#summarize the fit of the model
mape=mean_absolute_percentage_error(y_test,y_pred_test1_svr)
rmse= sqrt(mean_squared_error(y_test,y_pred_test1_svr))
mae=metrics.mean_absolute_error(y_test,y_pred_test1_svr)
return mape,rmse,mae
# In[27]:
def ann_model(datass,look_back,data_partition):
datasets=datass.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
import numpy
trainX = numpy.reshape(X, (X.shape[0], 1, X.shape[1]))
testX = numpy.reshape(X1, (X1.shape[0], 1, X1.shape[1]))
numpy.random.seed(1234)
import tensorflow as tf
tf.random.set_seed(1234)
from sklearn.neural_network import MLPRegressor
model= MLPRegressor(random_state=1,activation='tanh').fit(X,y)
numpy.random.seed(1234)
# make predictions
y_pred_train = model.predict(X)
y_pred_test = model.predict(X1)
y_pred_test= numpy.array(y_pred_test).ravel()
y_pred_test=pd.DataFrame(y_pred_test)
y1=pd.DataFrame(y1)
y_pred_test1= sc_y.inverse_transform (y_pred_test)
y_test= sc_y.inverse_transform (y1)
#summarize the fit of the model
mape=mean_absolute_percentage_error(y_test,y_pred_test1)
rmse= sqrt(mean_squared_error(y_test,y_pred_test1))
mae=metrics.mean_absolute_error(y_test,y_pred_test1)
return mape,rmse,mae
# In[28]:
def rf_model(datass,look_back,data_partition,max_features):
datasets=datass.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
numpy.random.seed(1234)
import tensorflow as tf
tf.random.set_seed(1234)
from sklearn.ensemble import RandomForestRegressor
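    # random forest with the caller-supplied max_features; other hyperparameters stay at sklearn defaults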
grid = RandomForestRegressor(max_features=max_features)
grid.fit(X,y)
y_pred_train_rf= grid.predict(X)
y_pred_test_rf= grid.predict(X1)
y_pred_train_rf=pd.DataFrame(y_pred_train_rf)
y_pred_test_rf=pd.DataFrame(y_pred_test_rf)
y1= | pd.DataFrame(y1) | pandas.DataFrame |
from pythonlatex import Table
from pylatex import Document, NoEscape
import pandas as pd
import numpy as np
# from pylatex import Document, NoEscape
import unittest
import os
import shutil
try:
shutil.rmtree("Latex")
except FileNotFoundError:
pass
# testing DataFrame
df = | pd.DataFrame() | pandas.DataFrame |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta
import sys
import os
import unittest
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, DatetimeIndex,
Int64Index, to_datetime, bdate_range)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import(
range, long, StringIO, lrange, lmap, map, zip, cPickle as pickle, product
)
from pandas import read_pickle
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
from pandas.core.datetools import BDay
import pandas.core.common as com
from pandas import concat
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
# unfortunately, too much has changed to handle these legacy pickles
# class TestLegacySupport(unittest.TestCase):
class LegacySupport(object):
_multiprocess_can_split_ = True
@classmethod
def setUpClass(cls):
if compat.PY3:
raise nose.SkipTest("not compatible with Python >= 3")
pth, _ = os.path.split(os.path.abspath(__file__))
filepath = os.path.join(pth, 'data', 'frame.pickle')
with open(filepath, 'rb') as f:
cls.frame = pickle.load(f)
filepath = os.path.join(pth, 'data', 'series.pickle')
with open(filepath, 'rb') as f:
cls.series = pickle.load(f)
def test_pass_offset_warn(self):
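        # temporarily redirect stderr so anything printed by the legacy 'offset' keyword is captured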
buf = StringIO()
sys.stderr = buf
DatetimeIndex(start='1/1/2000', periods=10, offset='H')
sys.stderr = sys.__stderr__
def test_unpickle_legacy_frame(self):
dtindex = DatetimeIndex(start='1/3/2005', end='1/14/2005',
freq=BDay(1))
unpickled = self.frame
self.assertEquals(type(unpickled.index), DatetimeIndex)
self.assertEquals(len(unpickled), 10)
self.assert_((unpickled.columns == Int64Index(np.arange(5))).all())
self.assert_((unpickled.index == dtindex).all())
self.assertEquals(unpickled.index.offset, BDay(1, normalize=True))
def test_unpickle_legacy_series(self):
from pandas.core.datetools import BDay
unpickled = self.series
dtindex = DatetimeIndex(start='1/3/2005', end='1/14/2005',
freq=BDay(1))
self.assertEquals(type(unpickled.index), DatetimeIndex)
self.assertEquals(len(unpickled), 10)
self.assert_((unpickled.index == dtindex).all())
self.assertEquals(unpickled.index.offset, BDay(1, normalize=True))
def test_unpickle_legacy_len0_daterange(self):
pth, _ = os.path.split(os.path.abspath(__file__))
filepath = os.path.join(pth, 'data', 'series_daterange0.pickle')
result = pd.read_pickle(filepath)
ex_index = DatetimeIndex([], freq='B')
self.assert_(result.index.equals(ex_index))
tm.assert_isinstance(result.index.freq, offsets.BDay)
self.assertEqual(len(result), 0)
def test_arithmetic_interaction(self):
index = self.frame.index
obj_index = index.asobject
dseries = Series(rand(len(index)), index=index)
oseries = Series(dseries.values, index=obj_index)
result = dseries + oseries
expected = dseries * 2
| tm.assert_isinstance(result.index, DatetimeIndex) | pandas.util.testing.assert_isinstance |
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.core.dtypes.dtypes import PeriodDtype
import pandas as pd
from pandas import Index, Period, PeriodIndex, Series, date_range, offsets, period_range
import pandas.core.indexes.period as period
import pandas.util.testing as tm
class TestPeriodIndex:
def setup_method(self, method):
pass
def test_construction_base_constructor(self):
# GH 13664
arr = [pd.Period("2011-01", freq="M"), pd.NaT, pd.Period("2011-03", freq="M")]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)), pd.PeriodIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Period("2011-03", freq="M")]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)), pd.PeriodIndex(np.array(arr)))
arr = [pd.Period("2011-01", freq="M"), pd.NaT, pd.Period("2011-03", freq="D")]
tm.assert_index_equal(pd.Index(arr), pd.Index(arr, dtype=object))
tm.assert_index_equal(
pd.Index(np.array(arr)), pd.Index(np.array(arr), dtype=object)
)
def test_constructor_use_start_freq(self):
# GH #1118
p = Period("4/2/2012", freq="B")
with tm.assert_produces_warning(FutureWarning):
index = PeriodIndex(start=p, periods=10)
expected = period_range(start="4/2/2012", periods=10, freq="B")
tm.assert_index_equal(index, expected)
index = period_range(start=p, periods=10)
tm.assert_index_equal(index, expected)
def test_constructor_field_arrays(self):
# GH #1264
years = np.arange(1990, 2010).repeat(4)[2:-2]
quarters = np.tile(np.arange(1, 5), 20)[2:-2]
index = PeriodIndex(year=years, quarter=quarters, freq="Q-DEC")
expected = period_range("1990Q3", "2009Q2", freq="Q-DEC")
tm.assert_index_equal(index, expected)
index2 = PeriodIndex(year=years, quarter=quarters, freq="2Q-DEC")
tm.assert_numpy_array_equal(index.asi8, index2.asi8)
index = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(index, expected)
years = [2007, 2007, 2007]
months = [1, 2]
msg = "Mismatched Period array lengths"
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=years, month=months, freq="M")
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=years, month=months, freq="2M")
msg = "Can either instantiate from fields or endpoints, but not both"
with pytest.raises(ValueError, match=msg):
PeriodIndex(
year=years, month=months, freq="M", start=Period("2007-01", freq="M")
)
years = [2007, 2007, 2007]
months = [1, 2, 3]
idx = PeriodIndex(year=years, month=months, freq="M")
exp = period_range("2007-01", periods=3, freq="M")
tm.assert_index_equal(idx, exp)
def test_constructor_U(self):
# U was used as undefined period
with pytest.raises(ValueError, match="Invalid frequency: X"):
period_range("2007-1-1", periods=500, freq="X")
def test_constructor_nano(self):
idx = period_range(
start=Period(ordinal=1, freq="N"), end=Period(ordinal=4, freq="N"), freq="N"
)
exp = PeriodIndex(
[
Period(ordinal=1, freq="N"),
Period(ordinal=2, freq="N"),
Period(ordinal=3, freq="N"),
Period(ordinal=4, freq="N"),
],
freq="N",
)
tm.assert_index_equal(idx, exp)
def test_constructor_arrays_negative_year(self):
years = np.arange(1960, 2000, dtype=np.int64).repeat(4)
quarters = np.tile(np.array([1, 2, 3, 4], dtype=np.int64), 40)
pindex = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(pindex.year, pd.Index(years))
tm.assert_index_equal(pindex.quarter, pd.Index(quarters))
def test_constructor_invalid_quarters(self):
msg = "Quarter must be 1 <= q <= 4"
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=range(2000, 2004), quarter=list(range(4)), freq="Q-DEC")
def test_constructor_corner(self):
msg = "Not enough parameters to construct Period range"
with pytest.raises(ValueError, match=msg):
PeriodIndex(periods=10, freq="A")
start = Period("2007", freq="A-JUN")
end = Period("2010", freq="A-DEC")
msg = "start and end must have same freq"
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start, end=end)
msg = (
"Of the three parameters: start, end, and periods, exactly two"
" must be specified"
)
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start)
with pytest.raises(ValueError, match=msg):
PeriodIndex(end=end)
result = period_range("2007-01", periods=10.5, freq="M")
exp = period_range("2007-01", periods=10, freq="M")
tm.assert_index_equal(result, exp)
def test_constructor_fromarraylike(self):
idx = period_range("2007-01", periods=20, freq="M")
# values is an array of Period, thus can retrieve freq
tm.assert_index_equal(PeriodIndex(idx.values), idx)
tm.assert_index_equal(PeriodIndex(list(idx.values)), idx)
msg = "freq not specified and cannot be inferred"
with pytest.raises(ValueError, match=msg):
PeriodIndex(idx._ndarray_values)
with pytest.raises(ValueError, match=msg):
PeriodIndex(list(idx._ndarray_values))
msg = "'Period' object is not iterable"
with pytest.raises(TypeError, match=msg):
PeriodIndex(data=Period("2007", freq="A"))
result = PeriodIndex(iter(idx))
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx)
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq="M")
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq=offsets.MonthEnd())
tm.assert_index_equal(result, idx)
assert result.freq == "M"
result = PeriodIndex(idx, freq="2M")
tm.assert_index_equal(result, idx.asfreq("2M"))
assert result.freq == "2M"
result = PeriodIndex(idx, freq=offsets.MonthEnd(2))
tm.assert_index_equal(result, idx.asfreq("2M"))
assert result.freq == "2M"
result = PeriodIndex(idx, freq="D")
exp = idx.asfreq("D", "e")
tm.assert_index_equal(result, exp)
def test_constructor_datetime64arr(self):
vals = np.arange(100000, 100000 + 10000, 100, dtype=np.int64)
vals = vals.view(np.dtype("M8[us]"))
msg = r"Wrong dtype: datetime64\[us\]"
with pytest.raises(ValueError, match=msg):
PeriodIndex(vals, freq="D")
@pytest.mark.parametrize("box", [None, "series", "index"])
def test_constructor_datetime64arr_ok(self, box):
# https://github.com/pandas-dev/pandas/issues/23438
data = pd.date_range("2017", periods=4, freq="M")
if box is None:
data = data._values
elif box == "series":
data = pd.Series(data)
result = PeriodIndex(data, freq="D")
expected = PeriodIndex(
["2017-01-31", "2017-02-28", "2017-03-31", "2017-04-30"], freq="D"
)
tm.assert_index_equal(result, expected)
def test_constructor_dtype(self):
# passing a dtype with a tz should localize
idx = PeriodIndex(["2013-01", "2013-03"], dtype="period[M]")
exp = PeriodIndex(["2013-01", "2013-03"], freq="M")
tm.assert_index_equal(idx, exp)
assert idx.dtype == "period[M]"
idx = PeriodIndex(["2013-01-05", "2013-03-05"], dtype="period[3D]")
exp = PeriodIndex(["2013-01-05", "2013-03-05"], freq="3D")
tm.assert_index_equal(idx, exp)
assert idx.dtype == "period[3D]"
# if we already have a freq and its not the same, then asfreq
# (not changed)
idx = PeriodIndex(["2013-01-01", "2013-01-02"], freq="D")
res = PeriodIndex(idx, dtype="period[M]")
exp = PeriodIndex(["2013-01", "2013-01"], freq="M")
tm.assert_index_equal(res, exp)
assert res.dtype == "period[M]"
res = PeriodIndex(idx, freq="M")
tm.assert_index_equal(res, exp)
assert res.dtype == "period[M]"
msg = "specified freq and dtype are different"
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(["2011-01"], freq="M", dtype="period[D]")
def test_constructor_empty(self):
idx = pd.PeriodIndex([], freq="M")
assert isinstance(idx, PeriodIndex)
assert len(idx) == 0
assert idx.freq == "M"
with pytest.raises(ValueError, match="freq not specified"):
pd.PeriodIndex([])
def test_constructor_pi_nat(self):
idx = PeriodIndex(
[Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="M")]
)
exp = PeriodIndex(["2011-01", "NaT", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(
np.array([Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="M")])
)
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(
[pd.NaT, pd.NaT, Period("2011-01", freq="M"), Period("2011-01", freq="M")]
)
exp = PeriodIndex(["NaT", "NaT", "2011-01", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(
np.array(
[
pd.NaT,
pd.NaT,
Period("2011-01", freq="M"),
Period("2011-01", freq="M"),
]
)
)
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([pd.NaT, pd.NaT, "2011-01", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex([pd.NaT, pd.NaT])
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex(np.array([pd.NaT, pd.NaT]))
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex(["NaT", "NaT"])
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex(np.array(["NaT", "NaT"]))
def test_constructor_incompat_freq(self):
msg = "Input has different freq=D from PeriodIndex\\(freq=M\\)"
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(
[Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="D")]
)
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(
np.array(
[Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="D")]
)
)
# first element is pd.NaT
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(
[pd.NaT, Period("2011-01", freq="M"), Period("2011-01", freq="D")]
)
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(
np.array(
[pd.NaT, Period("2011-01", freq="M"), Period("2011-01", freq="D")]
)
)
def test_constructor_mixed(self):
idx = PeriodIndex(["2011-01", pd.NaT, Period("2011-01", freq="M")])
exp = PeriodIndex(["2011-01", "NaT", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(["NaT", pd.NaT, Period("2011-01", freq="M")])
exp = PeriodIndex(["NaT", "NaT", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([Period("2011-01-01", freq="D"), pd.NaT, "2012-01-01"])
exp = PeriodIndex(["2011-01-01", "NaT", "2012-01-01"], freq="D")
tm.assert_index_equal(idx, exp)
def test_constructor_simple_new(self):
idx = period_range("2007-01", name="p", periods=2, freq="M")
result = idx._simple_new(idx, name="p", freq=idx.freq)
tm.assert_index_equal(result, idx)
result = idx._simple_new(idx.astype("i8"), name="p", freq=idx.freq)
tm.assert_index_equal(result, idx)
def test_constructor_simple_new_empty(self):
# GH13079
idx = PeriodIndex([], freq="M", name="p")
result = idx._simple_new(idx, name="p", freq="M")
tm.assert_index_equal(result, idx)
@pytest.mark.parametrize("floats", [[1.1, 2.1], np.array([1.1, 2.1])])
def test_constructor_floats(self, floats):
msg = r"PeriodIndex\._simple_new does not accept floats"
with pytest.raises(TypeError, match=msg):
pd.PeriodIndex._simple_new(floats, freq="M")
msg = "PeriodIndex does not allow floating point in construction"
with pytest.raises(TypeError, match=msg):
pd.PeriodIndex(floats, freq="M")
def test_constructor_nat(self):
msg = "start and end must not be NaT"
with pytest.raises(ValueError, match=msg):
period_range(start="NaT", end="2011-01-01", freq="M")
with pytest.raises(ValueError, match=msg):
period_range(start="2011-01-01", end="NaT", freq="M")
def test_constructor_year_and_quarter(self):
year = pd.Series([2001, 2002, 2003])
quarter = year - 2000
idx = PeriodIndex(year=year, quarter=quarter)
strs = ["%dQ%d" % t for t in zip(quarter, year)]
lops = list(map(Period, strs))
p = PeriodIndex(lops)
tm.assert_index_equal(p, idx)
@pytest.mark.parametrize(
"func, warning", [(PeriodIndex, FutureWarning), (period_range, None)]
)
def test_constructor_freq_mult(self, func, warning):
# GH #7811
with tm.assert_produces_warning(warning):
# must be the same, but for sure...
pidx = func(start="2014-01", freq="2M", periods=4)
expected = PeriodIndex(["2014-01", "2014-03", "2014-05", "2014-07"], freq="2M")
tm.assert_index_equal(pidx, expected)
with tm.assert_produces_warning(warning):
pidx = func(start="2014-01-02", end="2014-01-15", freq="3D")
expected = PeriodIndex(
["2014-01-02", "2014-01-05", "2014-01-08", "2014-01-11", "2014-01-14"],
freq="3D",
)
tm.assert_index_equal(pidx, expected)
with tm.assert_produces_warning(warning):
pidx = func(end="2014-01-01 17:00", freq="4H", periods=3)
expected = PeriodIndex(
["2014-01-01 09:00", "2014-01-01 13:00", "2014-01-01 17:00"], freq="4H"
)
tm.assert_index_equal(pidx, expected)
msg = "Frequency must be positive, because it" " represents span: -1M"
with pytest.raises(ValueError, match=msg):
PeriodIndex(["2011-01"], freq="-1M")
msg = "Frequency must be positive, because it" " represents span: 0M"
with pytest.raises(ValueError, match=msg):
PeriodIndex(["2011-01"], freq="0M")
msg = "Frequency must be positive, because it" " represents span: 0M"
with pytest.raises(ValueError, match=msg):
period_range("2011-01", periods=3, freq="0M")
@pytest.mark.parametrize("freq", ["A", "M", "D", "T", "S"])
@pytest.mark.parametrize("mult", [1, 2, 3, 4, 5])
def test_constructor_freq_mult_dti_compat(self, mult, freq):
freqstr = str(mult) + freq
pidx = period_range(start="2014-04-01", freq=freqstr, periods=10)
expected = date_range(start="2014-04-01", freq=freqstr, periods=10).to_period(
freqstr
)
tm.assert_index_equal(pidx, expected)
def test_constructor_freq_combined(self):
for freq in ["1D1H", "1H1D"]:
pidx = PeriodIndex(["2016-01-01", "2016-01-02"], freq=freq)
expected = PeriodIndex(["2016-01-01 00:00", "2016-01-02 00:00"], freq="25H")
for freq in ["1D1H", "1H1D"]:
pidx = period_range(start="2016-01-01", periods=2, freq=freq)
expected = PeriodIndex(["2016-01-01 00:00", "2016-01-02 01:00"], freq="25H")
tm.assert_index_equal(pidx, expected)
def test_constructor_range_based_deprecated(self):
with tm.assert_produces_warning(FutureWarning):
pi = PeriodIndex(freq="A", start="1/1/2001", end="12/1/2009")
assert len(pi) == 9
def test_constructor_range_based_deprecated_different_freq(self):
with tm.assert_produces_warning(FutureWarning) as m:
PeriodIndex(start="2000", periods=2)
warning, = m
assert 'freq="A-DEC"' in str(warning.message)
def test_constructor(self):
pi = period_range(freq="A", start="1/1/2001", end="12/1/2009")
assert len(pi) == 9
pi = period_range(freq="Q", start="1/1/2001", end="12/1/2009")
assert len(pi) == 4 * 9
pi = period_range(freq="M", start="1/1/2001", end="12/1/2009")
assert len(pi) == 12 * 9
pi = period_range(freq="D", start="1/1/2001", end="12/31/2009")
assert len(pi) == 365 * 9 + 2
pi = period_range(freq="B", start="1/1/2001", end="12/31/2009")
assert len(pi) == 261 * 9
pi = period_range(freq="H", start="1/1/2001", end="12/31/2001 23:00")
assert len(pi) == 365 * 24
pi = period_range(freq="Min", start="1/1/2001", end="1/1/2001 23:59")
assert len(pi) == 24 * 60
pi = period_range(freq="S", start="1/1/2001", end="1/1/2001 23:59:59")
assert len(pi) == 24 * 60 * 60
start = Period("02-Apr-2005", "B")
i1 = period_range(start=start, periods=20)
assert len(i1) == 20
assert i1.freq == start.freq
assert i1[0] == start
end_intv = Period("2006-12-31", "W")
i1 = period_range(end=end_intv, periods=10)
assert len(i1) == 10
assert i1.freq == end_intv.freq
assert i1[-1] == end_intv
end_intv = Period("2006-12-31", "1w")
i2 = period_range(end=end_intv, periods=10)
assert len(i1) == len(i2)
assert (i1 == i2).all()
assert i1.freq == i2.freq
end_intv = Period("2006-12-31", ("w", 1))
i2 = period_range(end=end_intv, periods=10)
assert len(i1) == len(i2)
assert (i1 == i2).all()
assert i1.freq == i2.freq
end_intv = Period("2005-05-01", "B")
i1 = period_range(start=start, end=end_intv)
# infer freq from first element
i2 = PeriodIndex([end_intv, Period("2005-05-05", "B")])
assert len(i2) == 2
assert i2[0] == end_intv
i2 = PeriodIndex(np.array([end_intv, Period("2005-05-05", "B")]))
assert len(i2) == 2
assert i2[0] == end_intv
# Mixed freq should fail
vals = [end_intv, Period("2006-12-31", "w")]
msg = r"Input has different freq=W-SUN from PeriodIndex\(freq=B\)"
with pytest.raises(IncompatibleFrequency, match=msg):
PeriodIndex(vals)
vals = np.array(vals)
with pytest.raises(IncompatibleFrequency, match=msg):
PeriodIndex(vals)
def test_constructor_error(self):
start = Period("02-Apr-2005", "B")
end_intv = Period("2006-12-31", ("w", 1))
msg = "start and end must have same freq"
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start, end=end_intv)
msg = (
"Of the three parameters: start, end, and periods, "
"exactly two must be specified"
)
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start)
@pytest.mark.parametrize(
"freq", ["M", "Q", "A", "D", "B", "T", "S", "L", "U", "N", "H"]
)
def test_recreate_from_data(self, freq):
org = period_range(start="2001/04/01", freq=freq, periods=1)
idx = PeriodIndex(org.values, freq=freq)
tm.assert_index_equal(idx, org)
def test_map_with_string_constructor(self):
raw = [2005, 2007, 2009]
index = PeriodIndex(raw, freq="A")
expected = Index([str(num) for num in raw])
res = index.map(str)
# should return an Index
assert isinstance(res, Index)
# preserve element types
assert all(isinstance(resi, str) for resi in res)
# lastly, values should compare equal
| tm.assert_index_equal(res, expected) | pandas.util.testing.assert_index_equal |
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 13 12:31:33 2017
@author: Astrid
"""
import os
import pandas as pd
import numpy as np
from collections import Counter
import re
import multiprocessing
def getFileList(dir_name, ext=''):
file_dir_list = list()
file_list = list()
for file in os.listdir(dir_name):
# If no extension is specified, create list with all files
if not ext:
file_dir_list.append(os.path.join(dir_name, file))
file_list.append(file)
# If extension is specified, create list with only ext files
elif file.endswith(ext):
file_dir_list.append(os.path.join(dir_name, file))
file_list.append(file)
return file_list, file_dir_list
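# extract every whitespace-separated token that parses as a float from a string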
def string2vec(string):
vec = []
for t in string.split():
try:
vec.append(float(t))
except ValueError:
pass
return vec
def readDPJ(filename):
#Read .dpj file line by line
file_obj = open(filename, 'r', encoding='utf8')
file = file_obj.readlines()
file_obj.close()
del file_obj
# Search in file for lines that need to be changes, save those lines in a dataframe
# Create an array with the x-discretisation grid, an array with the y-discretisation grid and an array with the assignments
x_discretisation = list()
y_discretisation = list()
assignments = pd.DataFrame(columns=['line','type','range','name'])
parameters = pd.DataFrame(columns = ['line','parameter'])
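    # 'parameters' maps a descriptive parameter name to the line number in the .dpj file where it is set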
l=23 #start looking on 24th line
# INITIALISATION SETTINGS
# Find start year and start time
while l < len(file):
if 'START_YEAR' in file[l]:
parameters = parameters.append({'line': l,'parameter':'start year'},ignore_index=True)
parameters = parameters.append({'line': l+1,'parameter':'start time'},ignore_index=True)
l=l+4;
break
# If the parameter is not found at the end of the file, there is a problem in the code
elif l == len(file)-1:
print('Error: start year and start time not found')
l=l+1
# MATERIAL PARAMETERS
k=l
# Find air layer properties - included only when using an air layer to implement an interior climate dependent on V, n, HIR and exterior climate
while l < len(file):
if 'air room' in file[l].lower():
while file[l].strip() != '[MATERIAL]' and '; **' not in file[l]:
if 'CE' in file[l]:
parameters = parameters.append({'line': l,'parameter':'room air thermal capacity'},ignore_index=True)
l=l+1
continue
elif 'THETA_POR' in file[l]:
parameters = parameters.append({'line': l,'parameter':'room air theta_por'},ignore_index=True)
l=l+1
continue
elif 'THETA_EFF' in file[l]:
parameters = parameters.append({'line': l,'parameter':'room air theta_eff'},ignore_index=True)
l=l+1
continue
elif 'THETA_80' in file[l]:
parameters = parameters.append({'line': l,'parameter':'room air theta_80'},ignore_index=True)
l=l+1
continue
elif 'Theta_l(RH)' in file[l]:
parameters = parameters.append({'line': l,'parameter':'room air sorption curve'},ignore_index=True)
l=l+1
continue
l=l+1
l=l+5
break
# If the parameter is not found at the end of the file, there is no air layer. We must start looking for the next parameter from the same begin line, so we don't skip part of the file.
elif l == len(file)-2:
l=k
break
l=l+1
# WALLS
# Find wall conditions
while l < len(file):
if '[WALL_DATA]' in file[l]:
parameters = parameters.append({'line': l+2,'parameter':'wall orientation'},ignore_index=True)
parameters = parameters.append({'line': l+3,'parameter':'wall inclination'},ignore_index=True)
parameters = parameters.append({'line': l+4,'parameter':'latitude'},ignore_index=True)
l=l+9
break
# If the parameter is not found at the end of the file, there is a problem in the code
elif l == len(file)-1:
print('Error: wall orientation and inclination not found')
l=l+1
# CLIMATE CONDITIONS
while l < len(file):
if '[CLIMATE_CONDITIONS]' in file[l]:
k=l
break
elif l == len(file)-1:
print('Error: climate conditions section not found')
l=l+1
# Find climatic conditions
# Interior temperature
l=k # start at beginning of climate conditions
while l < len(file):
if 'TEMPER' in file[l] and 'inside' in file[l+1].lower():
parameters = parameters.append({'line': l+3,'parameter':'interior temperature'},ignore_index=True)
break
# If the parameter is not found at the end of the file, there is a problem in the code
elif l == len(file)-1:
print('Error: interior temperature not found')
l=l+1
# Exterior temperature
l=k # start at beginning of climate conditions
while l < len(file):
if 'TEMPER' in file[l] and 'outside' in file[l+1].lower():
parameters = parameters.append({'line': l+3,'parameter':'exterior temperature'},ignore_index=True)
break
# If the parameter is not found at the end of the file, there is a problem in the code
elif l == len(file)-1:
print('Error: interior temperature not found')
l=l+1
# Interior relative humidity
l=k # start at beginning of climate conditions
while l < len(file):
if 'RELHUM' in file[l] and 'inside' in file[l+1].lower():
parameters = parameters.append({'line': l+3,'parameter':'interior relative humidity'},ignore_index=True)
break
l=l+1
# Exterior relative humidity
l=k # start at beginning of climate conditions
while l < len(file):
if 'RELHUM' in file[l] and 'outside' in file[l+1].lower():
parameters = parameters.append({'line': l+3,'parameter':'exterior relative humidity'},ignore_index=True)
break
l=l+1
# Interior vapour pressure
l=k # start at beginning of climate conditions
while l < len(file):
if 'VAPPRES' in file[l] and 'inside' in file[l+1].lower():
parameters = parameters.append({'line': l+3,'parameter':'interior vapour pressure'},ignore_index=True)
break
l=l+1
# Rain load - imposed flux on vertical surface
l=k # start at beginning of climate conditions
while l < len(file):
if 'NORRAIN' in file[l]:
parameters = parameters.append({'line': l+3,'parameter':'rain vertical surface'},ignore_index=True)
break
l=l+1
# Rain load - flux on horizontal surface
l=k # start at beginning of climate conditions
while l < len(file):
if 'HORRAIN' in file[l]:
parameters = parameters.append({'line': l+3,'parameter':'rain horizontal surface'},ignore_index=True)
break
l=l+1
# Wind direction
l=k # start at beginning of climate conditions
while l < len(file):
if 'WINDDIR' in file[l]:
parameters = parameters.append({'line': l+3,'parameter':'wind direction'},ignore_index=True)
break
l=l+1
# Wind velocity
l=k # start at beginning of climate conditions
while l < len(file):
if 'WINDVEL' in file[l]:
parameters = parameters.append({'line': l+3,'parameter':'wind velocity'},ignore_index=True)
break
l=l+1
# Direct sun radiation
l=k # start at beginning of climate conditions
while l < len(file):
if 'DIRRAD' in file[l]:
parameters = parameters.append({'line': l+3,'parameter':'direct radiation'},ignore_index=True)
break
l=l+1
# Diffuse sun radiation
l=k # start at beginning of climate conditions
while l < len(file):
if 'DIFRAD' in file[l]:
parameters = parameters.append({'line': l+3,'parameter':'diffuse radiation'},ignore_index=True)
break
l=l+1
# Cloud covering
l=k # start at beginning of climate conditions
while l < len(file):
if 'CLOUDCOV' in file[l]:
parameters = parameters.append({'line': l+3,'parameter':'cloud cover'},ignore_index=True)
break
l=l+1
# Sky radiation
l=k # start at beginning of climate conditions
while l < len(file):
if 'SKYEMISS' in file[l]:
parameters = parameters.append({'line': l+3,'parameter':'sky radiation'},ignore_index=True)
break
l=l+1
# Sky temperature
l=k # start at beginning of climate conditions
while l < len(file):
if 'SKYTEMP' in file[l]:
parameters = parameters.append({'line': l+3,'parameter':'sky temperature'},ignore_index=True)
break
l=l+1
# BOUNDARY CONDITIONS
while l < len(file):
if '[BOUNDARY_CONDITIONS]' in file[l]:
k=l
break
elif l == len(file)-1:
print('Error: boundary conditions section not found')
l=l+1
# Find exterior heat transfer coefficient
l=k; # start at beginning of boundary conditions
while l < len(file):
if 'HEATCOND' in file[l] and 'outside' in file[l+1].lower():
while file[l].strip() != '[BOUND_COND]':
if 'EXCOEFF' in file[l]:
parameters = parameters.append({'line': l,'parameter':'exterior heat transfer coefficient'},ignore_index=True)
if 'EXCH_SLOPE' in file[l+1].strip():
l=l+1
parameters = parameters.append({'line': l,'parameter':'exterior heat transfer coefficient slope'},ignore_index=True)
break
l=l+1
break
l=l+1
# Find interior vapour surface resistance coefficient
l=k # start at beginning of boundary conditions
while l < len(file):
if 'VAPDIFF' in file[l] and 'inside' in file[l+1].lower():
while file[l].strip() != '[BOUND_COND]':
if 'EXCOEFF' in file[l]:
parameters = parameters.append({'line': l,'parameter':'interior vapour diffusion transfer coefficient'},ignore_index=True)
break
l=l+1
break
l=l+1
# Find exterior vapour surface resistance coefficient
l=k # start at beginning of boundary conditions
while l < len(file):
if 'VAPDIFF' in file[l] and 'outside' in file[l+1].lower():
while file[l].strip() != '[BOUND_COND]':
if 'EXCOEFF' in file[l]:
parameters = parameters.append({'line': l,'parameter':'exterior vapour diffusion transfer coefficient'},ignore_index=True)
if 'EXCH_SLOPE' in file[l+1].strip():
l=l+1
parameters = parameters.append({'line': l,'parameter':'exterior vapour diffusion transfer coefficient slope'},ignore_index=True)
break
l=l+1
break
l=l+1
# Find solar absorption
l=k #start at beginning of boundary conditions
while l < len(file):
if 'SURABSOR' in file[l]:
parameters = parameters.append({'line': l,'parameter':'solar absorption'},ignore_index=True)
break
l=l+1
# Find scale factor catch ratio
l=k #start at beginning of boundary conditions
while l < len(file):
if 'EXPCOEFF' in file[l]:
parameters = parameters.append({'line': l,'parameter':'scale factor catch ratio'},ignore_index=True)
break
l=l+1
# DISCRETISATION
while l < len(file):
if '[DISCRETISATION]' in file[l]:
k=l
break
elif l == len(file)-1:
print('Error: discretisation section not found')
l=l+1
# Find discretisation
l=k #start at beginning of discretisation
while l < len(file):
if '[DISCRETISATION]' in file[l]:
x_discr_str = file[l+3]
parameters = parameters.append({'line': l+3,'parameter':'x-discretisation'},ignore_index=True)
y_discr_str = file[l+4]
parameters = parameters.append({'line': l+4,'parameter':'y-discretisation'},ignore_index=True)
# remove characters and convert to vector
x_discretisation = string2vec(x_discr_str)
y_discretisation = string2vec(y_discr_str)
break
# If the discretisation is not found at the end of the file, there is a problem in the code
elif l == len(file)-1:
print('Error: discretisation not found')
l=l+1
    # OUTPUTS
while l < len(file):
if '[OUTPUTS]' in file[l]:
k=l
break
elif l == len(file)-1:
print('Error: outputs section not found')
l=l+1
# Find output folder
l=k # start at beginning of outputs
while l < len(file):
if 'OUTPUT_FOLDER' in file[l]:
parameters = parameters.append({'line': l,'parameter':'output folder'},ignore_index=True)
break
#If the output folder is not found at the end of the file, there is a problem in the code
elif l == len(file)-1:
print('Error: output folder not found')
l=l+1
# Find output files
while l < len(file):
if '[FILES]' in file[l]:
l=l+3
while '; **' not in file[l]:
if 'NAME' in file[l]:
output_file = file[l]
parameters = parameters.append({'line': l,'parameter':output_file[33:]},ignore_index=True)
l=l+5
continue
l=l+1
break
# If the output files are not found at the end of the file, there is a problem in the code
elif l == len(file)-1:
print('Error: output files not found')
l=l+1
# ASSIGNMENTS
while l < len(file):
if '[ASSIGNMENTS]' in file[l]:
k=l
break
        elif l == len(file)-1:
print('Error: assignments section not found')
l=l+1
# Find assignments
l=k # start at beginning of assignments
while l < len(file):
if 'RANGE' in file[l]:
assignments = assignments.append({'line': l, 'type': file[l-1][30:-1].strip(),'range': [int(i) for i in string2vec(file[l])],'name': file[l+1][30:-1].strip()},ignore_index=True)
l=l+4
continue
l=l+1
    # If the assignments are not found at the end of the file, there is a problem in the code
if assignments.empty:
print('Error: assignments not found')
return file, x_discretisation, y_discretisation, assignments, parameters
def readccd(ccdfile, date=False):
# Find header
with open(ccdfile, 'r') as f:
l = 0
for line in f:
if '0:00:00' in line:
header = l
break
l = l+1
# Read ccd
value = np.loadtxt(ccdfile,skiprows=header,usecols=2,dtype='f').tolist()
if date:
day = np.loadtxt(ccdfile,skiprows=header,usecols=0,dtype='i').tolist()
hour = np.loadtxt(ccdfile,skiprows=header,usecols=1,dtype='U').tolist()
return value, day, hour
else:
return value
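# Illustrative usage sketch for readccd (the .ccd path below is hypothetical):
# temperature = readccd('climate/temperature.ccd')                        # values only
# temperature, day, hour = readccd('climate/temperature.ccd', date=True)  # values plus day/hour columns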
def saveccd(path, value):
days = int(len(value)/24)
    df = pd.DataFrame()
# coding: utf-8
# In[11]:
# First of all, we import all the necessary libs
import nltk
import re
import unicodedata
import string
from nltk.corpus import stopwords
from nltk.stem import LancasterStemmer, WordNetLemmatizer
import pandas as pd
import inflect
import pickle
import math
from scipy.spatial import distance
import heapq
from geopy import geocoders
import numpy as np
from geopy import distance as geodis
from IPython.display import clear_output
from termcolor import colored
from IPython.display import Markdown
import matplotlib.pyplot as plt
import folium
# This strings open a connection to GeoPy Database in order to get cities and addresses coordinates knowing their name
gn = geocoders.GeoNames(username = "clabat9")
gl = geocoders.Nominatim( user_agent = "<PASSWORD>")
# ---------- SECTION 1 : DOCUMENTS PREPROCESSING ----------
# F1 : This function removes stop words from list of tokenized words
def remove_stopwords(wrd):
new_wrd = [] #List of updated words
for word in wrd:
if word not in stopwords.words('english'): # If the current word is not a stopword (ckeck using nltk)
new_wrd.append(word) #appends it to the list
return new_wrd
# F2 : This function removes punctuation from list of tokenized words
def remove_punctuation(wrd):
new_wrds = [] #List of updated words
for word in wrd:
new_wrd = re.sub(r'[^\w\s]', '', word) # Replaces all punctuation word with "" using RegEx
if new_wrd != '':
new_wrds.append(new_wrd) #And then appends all words different from "" to the list
return new_wrds
# F3 : This function stems words in a list of tokenized words
def stem_words(wrd):
stemmer = LancasterStemmer() # Selects the stemmmer from nltk
stems = [] # List of updated words
for word in wrd:
stem = stemmer.stem(word) # Stems the word
stems.append(stem) # and appends it to the list
return stems
# F4 : This functions removes non ascii chars from a list of tokenized words
def remove_non_ascii(wrd):
new_wrds = [] # List of updated words
for word in wrd:
new_wrd = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore') # Filters non ascii chars
new_wrds.append(new_wrd) # Appends the word to the list
return new_wrds
# F5 : This function converts all characters to lowercase from a list of tokenized words
def to_lowercase(wrd):
new_wrds = [] # List of updated words
for word in wrd:
new_wrd = word.lower() # Converts the current word to lower case
new_wrds.append(new_wrd) # And append it to the list
return new_wrds
# F6 : This function replaces all integer occurrences in a list of tokenized words with their textual representation
def replace_numbers(wrd):
d = inflect.engine() # Libs inflect contains what we need
new_wrds = [] # List of updated words
for word in wrd:
if word.isdigit(): # If the current word is a number
new_wrd = d.number_to_words(word) # Converts it to its textual representation
new_wrds.append(new_wrd) # And appends it to the list
else:
new_wrds.append(word) # If the current word is not a number appends it to the list
return new_wrds
# The following function takes a record of a dataFrame containg our docs and preprocesses it's title and description
# with all the previous functions
def preProcessing (x):
x.fillna("*", inplace = True) # fills NA with "*"
xt = x["title"] # Takes title and description
xd = x["description"]
if xt != "*":
xt = nltk.word_tokenize(xt) # Tokenizes title using nltk
if xd != "*":
xd = nltk.word_tokenize(xd) # Tokenizes description using nltk
# Uses previous functions
xt = replace_numbers(xt)
xd = replace_numbers(xd)
xt = remove_stopwords(xt)
xd = remove_stopwords(xd)
xt = remove_punctuation(xt)
xd = remove_punctuation(xd)
xt = stem_words(xt)
xd = stem_words(xd)
xt = remove_non_ascii(xt)
xd = remove_non_ascii(xd)
xt = to_lowercase(xt)
xd = to_lowercase(xd)
x["title"] = xt
x["description"] = xd
return x # Returns the preprocessed doc
# This function takes the query and preprocecesses it with all the previous methods
def query_preProcessing (x):
xt = nltk.word_tokenize(x) # Tokenizes query using nltk
# Uses previous functions
xt = replace_numbers(xt)
xt = remove_stopwords(xt)
xt = remove_punctuation(xt)
xt = stem_words(xt)
xt = remove_non_ascii(xt)
xt = to_lowercase(xt)
return xt
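# Illustrative example of the full pipeline on a raw query (the stems shown are indicative only,
# since the exact output depends on the Lancaster stemmer):
# query_preProcessing("2 bedrooms near the beach in Houston")
# -> ['two', 'bedroom', 'near', 'beach', 'houston']   (number spelled out, stopwords/punctuation removed, lowercased stems)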
# ---------- SECTION 2 : SPLITTING ----------
# F1 : This function takes a path of a DataFrame or the DataFrame it's self and exports each one of its rows as a .tsv file
# Important : if the function receives both the path and the DataFrame, it will use the DataFrame option.
# For our purposes it is not fundamental to guarantee that the file is in the specified path or that the df is consistent,
# but it's clear that in a more general context will be useful to insert some simple code to catch errors.
def csv_to_tsv(path_of_the_doc_to_convert = None ,pdata = pd.DataFrame()):
if not pdata.empty : # If it receives a DataFrame
pdata.to_csv("processed_data.tsv",encoding = "utf-8", sep = "\t") # Saves it as a .tsv
f = open("processed_data.tsv","r", encoding = "utf-8") # Loads it
leng = 0 # Counter of the number of documents
for line in f: # For each record (document)
with open(r"D:\Claudio\Uni\M 1° anno Sapienza\AMDS\Homeworks\Hw3\ptsv\doc_"+str(leng)+".tsv", "w", encoding = "utf-8" ) as ftmp:
ftmp.write(line) # Saves the record as .tsv
leng += 1 # Update counter
return leng # Returns the number of documents
else: # If it receives a path
data = open(path_of_the_doc_to_convert,"r", encoding = "utf-8") # Opens the data in the specified path
leng = 0 # And makes the same procedure above
for line in data:
with open(r"D:\Claudio\Uni\M 1° anno Sapienza\AMDS\Homeworks\Hw3\tsv\doc_"+str(leng)+".tsv", "w", encoding = "utf-8" ) as ftmp:
ftmp.write(re.sub(r",","\t",line))
leng += 1
return leng
# ----------SECTION 3 : CREATION OF VOCABULARY, INVERTED INDECES AND SCORE FUNCTIONS----------
# This function takes the path where (preprocessed) documents are saved and their total number
# and returns the vocabulary of the indicated corpus
def create_vocabulary(number_of_docs, path):
vocabulary = {} # The vocabulary is a dictionary of the form "Word : word_id"
wid = 0 # word_id
    for idx in range(1,number_of_docs+1): # for every document..
with open(path+"doc_"+str(idx)+".tsv", "r", encoding = "utf-8" ) as ftmp:
first_line = ftmp.readline() # It opens the doc and reads the first line (in our case docs are made by only one line)
desc = (first_line.split(sep = "\t"))[6] # Takes in account only title and description of the record
title = (first_line.split(sep = "\t"))[9]
# Following lines clean up some unuseful chars
desc = desc.split("'")
title = title.split("'")
foo = ["]","[",", "]
desc = list(filter(lambda x: not x in foo, desc))
title = list(filter(lambda x: not x in foo, title))
for word in title+desc: # For every word in title + description
if not word in list(vocabulary.keys()) : # if the word is not in the dic
vocabulary[word] = wid # adds it
wid += 1 # Update word_id
with open("vocabulary", "wb") as f :
pickle.dump(vocabulary, f) # Saves the vocabulary as a pickle
return vocabulary # Returns the vocabulary
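# Illustrative usage sketch (the folder and document count are hypothetical; the folder must contain
# doc_1.tsv ... doc_N.tsv produced by the splitting step above):
# vocabulary = create_vocabulary(18000, "ptsv/")
# word_id = vocabulary.get("hous")   # integer id assigned when the word was first encountered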
# This function creates the first inverted index we need in the form "word (key) : [list of docs that contain word] (value)".
# It takes the number of (preprocessed) docs and the path where they are saved and returns the inverted index as a dictionary.
def create_inverted_index(number_of_docs, path):
inverted_index = {} # Initializes the inverted index, in our case a dic
for idx in range(1,number_of_docs+1): # for every document
# Opens the doc, cleans it and extracts title and description as the previous function
with open(path+"doc_"+str(idx)+".tsv", "r", encoding = "utf-8" ) as ftmp:
first_line = ftmp.readline()
desc = (first_line.split(sep = "\t"))[6]
title = (first_line.split(sep = "\t"))[9]
desc = desc.split("'")
title = title.split("'")
foo = ["]","[",", "]
desc = list(filter(lambda x: not x in foo, desc))
title = list(filter(lambda x: not x in foo, title))
for word in title+desc: # for every word in title + description
if word in list(inverted_index.keys()) : # if the word is in the inverted index
inverted_index[word] = inverted_index[word] + ["doc_"+str(idx)] # adds the current doc to the list of docs that contain the word
else :
inverted_index[word] = ["doc_"+str(idx)] # else creates a record in the dic for the current word and doc
with open("inverted_index", "wb") as f :
pickle.dump(inverted_index, f) # Saves the inverted index as a pickle
return inverted_index # returns the inverted index
# This function takes a term, an inverted index and the total number of docs in the corpus to compute the IDF of the term
def IDFi(term, reverted_index, number_of_docs):
return math.log10(number_of_docs/len(reverted_index[term]))
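# Worked example with hypothetical numbers: for a corpus of 18000 documents and a term that
# appears in 180 of them, IDFi returns log10(18000 / 180) = log10(100) = 2.0.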
# This function creates the second inverted index we need in the form "word (key) : [(doc that contain the word, TFIDF of the term in the doc),....]"
# It takes the number of (preprocessed) docs, the path where they are saved, the vocabulary and a list containing all the idfs and returns the inverted index as a dictionary.
def create_inverted_index_with_TFIDF(number_of_docs, path, vocabulary, idfi):
inverted_index2 = {} # Initializes the inverted index, in our case a dic
for idx in range(1, number_of_docs+1): # for every document
# Opens the doc, cleans it and extracts title and description as the previous function
with open(path+"doc_"+str(idx)+".tsv", "r", encoding = "utf-8" ) as ftmp:
first_line = ftmp.readline()
desc = (first_line.split(sep = "\t"))[6]
title = (first_line.split(sep = "\t"))[9]
desc = desc.split("'")
title = title.split("'")
foo = ["]","[",", "]
desc = list(filter(lambda x: not x in foo, desc))
title = list(filter(lambda x: not x in foo, title))
for word in title+desc: # for every word in title + description
                if word in list(inverted_index2.keys()) : # if the word is in the inverted index
# adds to the index line of the current word a tuple that contains the current doc and its TFID for the current word. It uses the vocabulary to get the index of the word
# in the IDF list.
inverted_index2[word] = inverted_index2[word] + [("doc_"+str(idx),((title+desc).count(word)/len(title+desc))*idfi[vocabulary[word]])] # Just applying the def
else :
# Makes the same initializing the index line of the current word
inverted_index2[word] = [("doc_"+str(idx),((title+desc).count(word)/len(title+desc))*idfi[vocabulary[word]])]
with open("inverted_index2", "wb") as f : # Saves the inverted index as a pickle
pickle.dump(inverted_index2, f)
# This function takes the two inverted indices , the (processed) query, the document the query has to be compared to and the vocabulary
# and returns the cosine similarity between them
def score(pquery, document, inverted_index, inverted_index_with_TFIDF, vocabulary, idfi):
    # The first vector is made of all the tfidf values of the words in the query. To build it we use a simple list comprehension
    # that computes the tfidf for all the words in set(query) in order to not process the same word multiple times
v1 = [((pquery.count(word)/len(pquery))*idfi[vocabulary[word]]) if word in vocabulary.keys() else 0 for word in set(pquery)]
v2 = []
    # We don't need to work on vectors in R^(number of distinct words in query+document) because, in that case, all elements that
    # are not simultaneously non-zero will give a 0 contribution in the computation of the similarity,
# so we just need to work in R^(number of different words in query).
#(the optimal solution will be to work in R^(dim of intersection of different words in query+ different words in document)) .
# In the end, to build the vector associated to the doc:
for word in set(pquery) : # for every distinc word in the query
if word in vocabulary.keys(): # if the word is in the corpus vocabulary
if document in inverted_index[word]: # if the document contains the word
idx = inverted_index[word].index(document) # gets the index of the doc in the second inverted index using the first inverted index
# order will be the same
v2.append(inverted_index_with_TFIDF[word][idx][1]) # appends the tfid of the current word for the selected doc
                # getting it from the second inverted index
else: # if the doc doesnt contain the word the associated component is 0
v2.append(0)
else: # if the word is not in the vocabulary the associated component of the doc vectror is 0
v2.append(0)
if not all(v == 0 for v in v2): # if at least one word is in common
return (1 - distance.cosine(v1, v2)) # returns the cosine similarity
else: # if the query and the doc haven't nothing in common their similarity is 0
return 0
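# Illustrative behaviour (hedged example, not an exhaustive spec): if the document shares no terms
# with the query, v2 stays all zeros and score() returns 0; if the document's TF-IDF vector is
# proportional to the query vector, distance.cosine(v1, v2) is 0 and the similarity is 1.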
# This function implements our score function explained in the notebook. It takes the max rate the user prefers to spend, the number of
# bedrooms the user prefers to have in their holiday house, the city the user prefers to stay in and one of the docs that match their query, and returns its score.
def score_function(max_cash, pref_bed, pref_city, coords, result):
score = 0
    max_distance = 1298 # Normalization factor for the distances, computed on the two farthest points of Texas
cash = float(result["average_rate_per_night"].split("$")[1])
try :
bed = int(result["bedrooms_count"])
except :
bed = 0.5
if (cash < max_cash) & (cash > 0) :
score += (5)*math.exp(-cash/max_cash)
score += (4)*min(bed/pref_bed, pref_bed/bed)
coord = (result.loc["latitude"], result.loc["longitude"])
score += 3*(1 - geodis.distance(coords,coord).km/1298)
return (100/12)*score
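# Worked example with hypothetical inputs: for max_cash=100, pref_bed=2 and a listing priced at
# $50 with 2 bedrooms located exactly at the preferred coordinates,
# score = 5*exp(-0.5) + 4*min(1, 1) + 3*(1 - 0) ~= 10.03, rescaled by 100/12 to ~83.6.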
# ----------SECTION 4: SEARCH ENGINES----------
# This function implements a search engine that returns the docs containing ALL the words in the query.
# It takes the path where (preprocessed) docs are saved and the inverted index above
# and returns the list of the names of the docs containing all the world of the query, a df containing all features of this docs
# (useful later) and a df containing only the requested features.
# We tried to have some fun inserting code that allows the user to retry the search if it returns no results.
def first_search_engine(inverted_index, path):
check = "y" # This var controls the logic of the multiple attempts
while check == "y": # while it is "y"
print(colored("Insert your query:", "blue", attrs =["bold","underline"]))#(Markdown('<span style="color: #800080">Insert your query: </span>')) # Get users query (asking in a nice colored way :) )
query = input()
pq = query_preProcessing(query) #Preprocesses the query
l = set() # our results are in a set
not_first_word = 0 # Var to know if it's the first word of the query
for el in pq: # for every word in the query
if el in list(inverted_index.keys()): # if the word is in at least one document
if not_first_word == 0: # If it's the first word of the query
l = set(inverted_index[el]) # Adds all the docs that contain the word to results
not_first_word += 1 # The next word is not the first
else : # If it isn't the first word
l = l.intersection(set(inverted_index[el])) # Takes the docs that contain the word in a set and intersect it with
# the set of the results
else: # if a word is not in the corpus there will be no results for this kind of search.
l = [] # empty list
break #exit the loop
if len(l) == 0: # If there are no results
print(colored("Your search did not bring results. Do you wanna try again? [y/n]", "red", attrs = ["underline"])) # Get users query (asking in a nice colored way :) )
check = input() # asks the user if he wanna make another search
while (check != "y")&(check !="n") : # force the user to give an acceptable answer
print(colored("You can choose [y/n] !","red", attrs = ["underline"])) # Get users query (asking in a nice colored way :) )
check = input()
# If the user wants to retry, the while statement loops again
if check == "n": # If the user prefers to not retry
return [],l,pd.DataFrame(), pd.DataFrame() # returns empty data structures
else: # If there are results
res = []
for doc in l : # for every doc of the results creates a df ( a column for each feature, as in the original one)
res.append(pd.read_csv(path +doc+ ".tsv", sep = "\t", engine = "python", names = ["id","average_rate_per_night","bedrooms_count","city","date_of_listing","description","latitude","longitude","title","url"]))
complete_results = pd.concat(res).reset_index() # And then concatenates all of them
# Takes only requested features and makes some operations to have a better visualization and clean up some junks
results = complete_results.loc[:,["title","description","city","url","bedrooms_count","average_rate_per_night"]]
results.columns = map(str.upper, results.columns)
results["TITLE"] = results["TITLE"].apply(lambda x : re.sub(r"\\n"," ",x))
results["DESCRIPTION"] = results["DESCRIPTION"].apply(lambda x : re.sub(r"\\n"," ",x))
return pq,l,complete_results,results # returns results (and the query, useful later)
# This function implements a search engine that returns the first k documents with the highest cosine similiraty
# within respect the query. It takes the two inverted indices, the vocabulary, the number of (preprocessed) docs, the paths where
# they are saved and k and returns a df containig the results.
def second_search_engine(inverted_index, inverted_index_with_TFIDF, vocabulary, path, idfi, k = 10):
# Use the first search engine to get the results we need to compute the scores
pq1,docs,_,_ = first_search_engine(inverted_index, path)
scores = [] # Initializes the list containing the scores
for doct in docs: # for every documents that matches the query
# Appends to "scores" the cosine similarity between the doc and the query, and the name of the doc as a tuple
scores.append((score(pq1,doct,inverted_index, inverted_index_with_TFIDF, vocabulary,idfi),doct))
heapq._heapify_max(scores) # Creates a max heap based on the scores in "scores"
res = heapq.nlargest(k, scores, key=lambda x: x[0]) # Get the first k highest score elements of "scores"
# The following codes simply build up the presentation of the results, similiar to the first one but with a column "SCORES"
out = []
for doc in res :
out.append(pd.read_csv(path+str(doc[1])+ ".tsv", sep = "\t", engine = "python", names = ["id","average_rate_per_night","bedrooms_count","city","date_of_listing","description","latitude","longitude","title","url"]))
    results = pd.concat(out)
import argparse
import numpy as np
import pandas as pd
import torch
import transformers as ppb # pytorch transformers
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
'''
python3 createEmbeddings.py --cell_file ./word_tokenized/C1Train.csv --output_file C1TrainEmbeddings.csv
python3 createEmbeddings.py --cell_file ./word_tokenized/C2Train.csv --output_file C2TrainEmbeddings.csv
python3 createEmbeddings.py --cell_file ./word_tokenized/C1Valid.csv --output_file C1ValidEmbeddings.csv
python3 createEmbeddings.py --cell_file ./word_tokenized/C2Valid.csv --output_file C2ValidEmbeddings.csv
python3 createEmbeddings.py --cell_file ./word_tokenized/DiffTrain.csv --output_file ./embeddings/DiffTrainEmbeddings.csv
python3 createEmbeddings.py --cell_file ./word_tokenized/DiffValid.csv --output_file ./embeddings/DiffValidEmbeddings.csv
python3 createEmbeddings.py --cell_file ./word_tokenized/DiffTest.csv --output_file ./embeddings/DiffTestEmbeddings.csv
'''
def createEmbeddings(cell_file, output_file):
MODEL_DIR = "GeneBERT_1500"
model_class, tokenizer_class, pretrained_weights = (ppb.RobertaForMaskedLM, ppb.RobertaTokenizerFast, MODEL_DIR)
tokenizer = tokenizer_class.from_pretrained(pretrained_weights)
model = model_class.from_pretrained(pretrained_weights)
from transformers import pipeline
fill_mask = pipeline(
"fill-mask",
model=MODEL_DIR,
tokenizer=MODEL_DIR
)
print(fill_mask("hll hrr <mask>."))
df = | pd.DataFrame({"A": ["hhhh llhl <mask>", "hhlmmm mmmmm <mask>"]}) | pandas.DataFrame |
'''
Find out appliance energy for all months
'''
# Standard imports
import os
# Internal imports
from run_appliance_opt import RunAppModel
from model import generate_csvs
import pandas as pd
import numpy as np
NUM_OF_MONTE_CARLO_RUN = 100
appliance_with_constant_mc = ['lighting', 'water_pump','washing_machine', 'dishwashers', 'washer_dryer', \
'hair_dryer', 'toaster', 'coffe_maker', 'laptops', 'televisions', 'routers','clothing_iron', 'refrigerator_l300', 'refrigerator_g300' ]
# appliance_with_variable_mc = ['fans', 'coolers', 'air_heater', 'ac_unit', 'incandescent_bulb', \
# 'cfl_bulb', 'led_bulb', 'tubelights', \
# 'water_pump', 'geyser_electric' ]
appliance_with_variable_mc = ['fans', 'coolers', 'air_heater', 'ac_unit', 'geyser_electric' ]
constant_mc_result_mean = {'Appliances': appliance_with_constant_mc, 'mean_energy': []}
constant_mc_result_sd = {'Appliances': appliance_with_constant_mc, 'std_energy': []}
constant_mc_result = []
variable_mc_result = []
obj_values = []
for num in range(NUM_OF_MONTE_CARLO_RUN):
print(f'Running for {num} iteration --------------------------------------------------->')
# update csvs
customers = generate_csvs(num_of_customers=30)
# Run optimization
instance = RunAppModel(data_path = r'C:\\Users\\KDUWADI\\Desktop\\BRPL\\survey_data',
export_path = r'C:\Users\KDUWADI\Desktop\BRPL\survey_result',
solver = 'ipopt')
# Get result
constant_mc_result.append(instance.get_result_constant_mc())
variable_mc_result.append(instance.get_result_variable_mc())
obj_values.append(instance.get_objective_func())
energy_consumption = []
for subdict in constant_mc_result:
values = [el if el!=None else 0 for el in list(subdict.values())]
energy_consumption.append(values)
mean_energy_consumption = [sum(x)/len(x) for x in zip(*energy_consumption)]
sd_energy_consumption = [np.std(x) for x in zip(*energy_consumption)]
constant_mc_result_mean['mean_energy'] = mean_energy_consumption
constant_mc_result_sd['std_energy'] = sd_energy_consumption
months = ['Jan', 'Feb','Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
appliance_month_dict = {appliance: {month: [] for month in months} for appliance in appliance_with_variable_mc}
appliance_month_dict_mean = {appliance: {month: [] for month in months} for appliance in appliance_with_variable_mc}
appliance_month_dict_sd = {appliance: {month: [] for month in months} for appliance in appliance_with_variable_mc}
for df in variable_mc_result:
for appliance in df.columns:
for month in months:
appliance_month_dict[appliance][month].append(df[appliance][month])
for appliance, energydict in appliance_month_dict.items():
for month, values in energydict.items():
appliance_month_dict_mean[appliance][month] = sum(values)/len(values)
appliance_month_dict_sd[appliance][month] = np.std(values)
df = pd.DataFrame(constant_mc_result_mean)
df.to_csv(os.path.join(r'C:\Users\KDUWADI\Desktop\BRPL\survey_result', 'constant_mc_mean.csv'))
df = pd.DataFrame(constant_mc_result_sd)
"""
@FileName: calModel.py
@Description: calculate the total of parameters and the total of flops for a given model in pytorch
@Author : Lj
@CreateDate: 2019/11/6 10:22
@LastEditTime: 2019/12/23 15:34
@LastEditors: Lj
@Version: v0.1
"""
from collections import OrderedDict
import pandas as pd
from utils import *
def getALL(model, input, *args, **kwargs):
"""
calculate FLOPs and parameters
:param model: the given model
:param input: input
:param args: dynamic parameters
:param kwargs:
:return: flops and parameters
"""
params = calParameters(model, False)
flops, macs = calGuideline(model, input, False, *args, **kwargs)
print(params)
print(flops, macs)
return params, flops, macs
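# Illustrative usage sketch (torchvision is used here purely as an example dependency and is an
# assumption, not part of this module):
# import torch
# from torchvision.models import resnet18
# params, flops, macs = getALL(resnet18(), torch.randn(1, 3, 224, 224))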
def calParameters(model, detail=False, *args, **kwargs):
"""
calculate the amount of parameters for a given model
:param model: the given model
:param detail: if true return the precise result, else return scientific notation after three decimal places
:return: the total of all parameters, the total of trainable parameters, the total of non-trainable parameter
"""
total_params = np.sum(param.numel() for param in model.parameters())
trainable_params = np.sum(param.numel() for param in model.parameters() if param.requires_grad)
constant_params = total_params - trainable_params
if detail:
return {'Total Parameters': total_params, 'Trainable Parameters': trainable_params, 'Constant Parametes':constant_params}
else:
total_params = secientificNotation(total_params)
trainable_params = secientificNotation(trainable_params)
constant_params = secientificNotation(constant_params)
return {'Total Parameters': total_params, 'Trainable Parameters': trainable_params, 'Constant Parametes':constant_params}
def calGuideline(model, input, detail=True, *args, **kwargs):
"""
calculate the FLOPs and MAC of a given model
:param model: the given model
:param input_size: the input size of the given model
:param detail: if true return the precise result, else return scientific notation after three decimal places
:return: the total of flops of the given model
"""
def register_hook(module):
def flopsHook(module, input, outputs):
"""
calculate the flops of current layer
:param module: current layer module
:param input: input of current layer
:param outputs: output of current layer
:return:
"""
layer_type = str(module).split('(')[0] # layer type conv/bn/fc/pool and etc
if "group" in str(module):
group = int(str(module).split('group=')[1][0])
else:
group = 1
layer_index = len(ordered_layers) # current layer order in network
has_bias = True if 'True' in str(module) else False # whether current layer has bias
# find current layer in all layers and rename them as {order in netword}_{name}
for name, item in layers.items():
if item == module:
key = "{}_{}".format(layer_index, name)
info = OrderedDict()
info["id"] = id(module)
# if the output is a list or tuple, get the output size
if isinstance(outputs, (list, tuple)):
try:
info["out_size"] = list(outputs[0].size())
except AttributeError:
# pack_padded_seq and pad_packed_seq store feature into data attribute
info["out_size"] = list(outputs[0].data.size())
else:
# output is a tensor, get the output size
info["out_size"] = list(outputs.size())
info["kernel_size"] = "-"
info["inner"] = OrderedDict()
info["flops"] = 0
info["macs"] = 0
# calculate the flops
for name, param in module.named_parameters():
if name == "weight":
kernel_size = list(param.size())
# adjust into [I, O, k, k]
if len(kernel_size) > 1:
kernel_size[0], kernel_size[1] = kernel_size[1], kernel_size[0]
info["kernel_size"] = kernel_size
input_size = list(input[0].shape)
output_size = list(outputs.shape)
# for each kind of layer type, calculate the corresponding flops and MAC
if "Conv" in layer_type:
if has_bias:
info["flops"] += np.prod(output_size[1:]) * (input_size[1] * np.prod(kernel_size[2:]) + 1)
else:
info["flops"] += np.prod(output_size[1:]) * (input_size[1] * np.prod(kernel_size[2:]))
# calculate macs
info["macs"] += np.prod(kernel_size[2:]) * (input_size[1] + output_size[1]) + input_size[1] * output_size[1] / group
elif "Batch" in layer_type:
info["flops"] += np.prod(input_size) * 4
elif "Max" in layer_type:
info["flops"] += np.prod(kernel_size) * np.prod(output_size[1:])
elif "Avg" in layer_type:
info["flops"] += (np.prod(kernel_size) + 1) * np.prod(output_size[1:])
elif "Adaptive" in layer_type:
stride = np.ceil(input_size[0] / output_size[0])
kernel_size = input_size[0] - (output_size[0] - 1) * stride
info["flops"] += kernel_size ** 2 * np.prod(output_size[0])
elif "ReLU" in layer_type:
info["flops"] += np.prod(input_size[1:])
elif "Linear" in layer_type:
if has_bias:
info["flops"] += (2 * input_size[0]) * output_size[0]
else:
info["flops"] += (2 * input_size[0] - 1) * output_size[0]
# RNN modules have inner weights
elif "weight" in name:
info["inner"][name] = list(param.size())
info["flops"] += param.nelement()
# if the current module has been calculated, mark as "used"
# check if this module has params
if list(module.named_parameters()):
for v in ordered_layers.values():
if info["id"] == v["id"]:
info["params"] = "used"
ordered_layers[key] = info
# ignore Sequential and ModuleList
if not module._modules:
hooks.append(module.register_forward_hook(flopsHook))
# get the layers name in the model
layers = getLayers(model)
hooks = []
# store layers according their order in the model
ordered_layers = OrderedDict()
model.apply(register_hook)
try:
with torch.no_grad():
if not (kwargs or args):
model(input)
else:
model(input, *args, **kwargs)
finally:
for hook in hooks:
hook.remove()
# calculate the sum of flops
df = pd.DataFrame(ordered_layers).T
df["flops"] = pd.to_numeric(df["flops"], errors="coerce")
df["macs"] = | pd.to_numeric(df["macs"], errors="coerce") | pandas.to_numeric |
import multiprocessing
import argparse
import os
import shutil
import numpy as np
import pandas as pd
import torch
from allennlp.common.params import Params
from allennlp.training.learning_rate_schedulers import LearningRateScheduler
from allennlp.training.optimizers import Optimizer
from torch.nn import DataParallel
from torch.nn.modules import BatchNorm2d
from tqdm import tqdm
import config
import gc
gc.collect()
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# from dataloaders.vcr import VCR, VCRLoader
from dataloaders.vcr_crf import VCR, VCRLoader
from utils.pytorch_misc import time_batch, save_checkpoint, clip_grad_norm, \
restore_checkpoint, print_para, restore_best_checkpoint
#os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
# os.environ["CUDA_VISIBLE_DEVICES"]="2"
import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.DEBUG)
# This is needed to make the imports work
from allennlp.models import Model
import models
seed = 522
import torch
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
import numpy as np
np.random.seed(seed)
def _init_fn(worker_id):
np.random.seed(seed)
#################################
#################################
######## Data loading stuff
#################################
#################################
parser = argparse.ArgumentParser(description='train')
parser.add_argument(
'-params',
dest='params',
help='Params location',
type=str,
)
parser.add_argument(
'-rationale',
action="store_true",
help='use rationale',
)
parser.add_argument(
'-folder',
dest='folder',
help='folder location',
type=str,
)
parser.add_argument(
'-vcr_data',
dest='vcr_data',
help='vcr data location',
type=str,
)
parser.add_argument(
'-no_tqdm',
dest='no_tqdm',
action='store_true',
)
parser.add_argument(
'-aug_flag',
dest='aug_flag',
action='store_true',
)
parser.add_argument(
'-att_reg',
type=float,
dest='att_reg'
)
args = parser.parse_args()
config.VCR_ANNOTS_DIR = args.__dict__['vcr_data']
print('vcr annots dir', config.VCR_ANNOTS_DIR)
params = Params.from_file(args.params)
train, val, test = VCR.splits(embs_to_load=params['dataset_reader'].get('embs', 'bert_da'),
only_use_relevant_dets=params['dataset_reader'].get('only_use_relevant_dets', True), aug_flag=args.aug_flag)
#NUM_GPUS = torch.cuda.device_count()
#NUM_CPUS = multiprocessing.cpu_count()
NUM_GPUS = 2
NUM_CPUS = 8
print('number gpus: ', NUM_GPUS)
if NUM_GPUS == 0:
raise ValueError("you need gpus!")
def _to_gpu(td):
if NUM_GPUS > 1:
return td
for k in td:
if k != 'metadata':
td[k] = {k2: v.cuda(non_blocking=True) for k2, v in td[k].items()} if isinstance(td[k], dict) else td[
k].cuda(
non_blocking=True)
# td[k] = {k2: v.cuda(async=True) for k2, v in td[k].items()} if isinstance(td[k], dict) else td[k].cuda(
# async=True)
return td
# num_workers = (4 * NUM_GPUS if NUM_CPUS == 32 else 2*NUM_GPUS)-1
num_workers = 8
batch_size = 24
print(f"Using {num_workers} workers out of {NUM_CPUS} possible", flush=True)
# loader_params = {'batch_size': batch_size// NUM_GPUS, 'num_gpus':NUM_GPUS, 'num_workers':num_workers, 'worker_init_fn': _init_fn}
loader_params = {'batch_size': batch_size// NUM_GPUS, 'num_gpus':NUM_GPUS, 'num_workers':num_workers}
train_loader = VCRLoader.from_dataset(train, **loader_params)
val_loader = VCRLoader.from_dataset(val, **loader_params)
# test_loader = VCRLoader.from_dataset(test, **loader_params)
ARGS_RESET_EVERY = 100
print("Loading {} for {}".format(params['model'].get('type', 'WTF?'), 'rationales' if args.rationale else 'answer'), flush=True)
print(str(params['model']))
model = Model.from_params(vocab=train.vocab, params=params['model'])
if config.double_flag:
model.double()
print('*'*100)
for submodule in model.detector.backbone.modules():
if isinstance(submodule, BatchNorm2d):
submodule.track_running_stats = False
for p in submodule.parameters():
p.requires_grad = False
att_flag = model.att_flag
multi_flag = model.multi_flag
wo_qa_flag = model.wo_qa
wo_qr_flag = model.wo_qr
print('att flag: ', att_flag)
print('multi flag: ', multi_flag)
print('qa flag: ', wo_qa_flag)
print('qr flag: ', wo_qr_flag)
model = DataParallel(model).cuda() if NUM_GPUS > 1 else model.cuda()
# model = model.cuda()
optimizer = Optimizer.from_params([x for x in model.named_parameters() if x[1].requires_grad],
params['trainer']['optimizer'])
lr_scheduler_params = params['trainer'].pop("learning_rate_scheduler", None)
scheduler = LearningRateScheduler.from_params(optimizer, lr_scheduler_params) if lr_scheduler_params else None
if os.path.exists(args.folder):
print("Found folder! restoring "+args.folder, flush=True)
start_epoch, val_metric_per_epoch = restore_checkpoint(model, optimizer, serialization_dir=args.folder,
learning_rate_scheduler=scheduler)
else:
print("Making directories: ", args.folder)
os.makedirs(args.folder, exist_ok=True)
start_epoch, val_metric_per_epoch = 0, []
shutil.copy2(args.params, args.folder)
param_shapes = print_para(model)
num_batches = 0
multi_task_lambda = 1.0
for epoch_num in range(start_epoch, params['trainer']['num_epochs'] + start_epoch):
train_results = []
# norms = []
model.train()
for b, (time_per_batch, batch) in enumerate(time_batch(train_loader if args.no_tqdm else tqdm(train_loader), reset_every=ARGS_RESET_EVERY)):
batch = _to_gpu(batch)
optimizer.zero_grad()
output_dict = model(**batch)
loss = output_dict['loss'].mean() + output_dict['cnn_regularization_loss'].mean()
# loss = output_dict['loss'].mean() + output_dict['cnn_regularization_loss'].mean() + output_dict['answer_gd_loss'].mean() + output_dict['rationale_gd_loss'].mean()
if(multi_flag):
if not wo_qa_flag:
loss += multi_task_lambda*output_dict['answer_loss'].mean()
if not wo_qr_flag:
loss += multi_task_lambda*output_dict['rationale_loss'].mean()
# loss += output_dict['answer_loss'].mean() + output_dict['rationale_loss'].mean()
# if(att_flag):
# loss += kl_lambda*output_dict['kl_loss'].mean()
loss.backward()
num_batches += 1
if scheduler:
scheduler.step_batch(num_batches)
clip_grad_norm(model.named_parameters(), max_norm=params['trainer']['grad_norm'], clip=True, verbose=False)
        # norms.append(
# clip_grad_norm(model.named_parameters(), max_norm=params['trainer']['grad_norm'], clip=True, verbose=False)
# )
optimizer.step()
# torch.cuda.empty_cache()
temp_dict ={'loss': output_dict['loss'].detach().mean().item(),
'crl': output_dict['cnn_regularization_loss'].detach().mean().item(),
'accuracy': (model.module if NUM_GPUS > 1 else model).get_metrics(
reset=(b % ARGS_RESET_EVERY) == 0)[
'accuracy'],
'sec_per_batch': time_per_batch,
'hr_per_epoch': len(train_loader) * time_per_batch / 3600,
}
# if(att_flag):
# temp_dict['kl_loss'] = output_dict['kl_loss'].detach().mean().item()
if multi_flag:
if not wo_qa_flag:
temp_dict['answer_loss'] = output_dict['answer_loss'].detach().mean().item()
if not wo_qr_flag:
temp_dict['rationale_loss'] = output_dict['rationale_loss'].detach().mean().item()
train_results.append(pd.Series(temp_dict))
del batch, output_dict, loss
print("---\nTRAIN EPOCH {:2d}:\n{}\n----".format(epoch_num, | pd.DataFrame(train_results) | pandas.DataFrame |
from __future__ import print_function
from __future__ import division
import numpy as np
import scipy.sparse as spa
from builtins import range
import os
import pandas as pd
# Import subprocess to run matlab script
from subprocess import call
from platform import system
# For importing python modules from string
import importlib
class QPmatrices(object):
"""
QP problem matrices
q_vecs is the matrix containing different linear costs
"""
def __init__(self, P, q_vecs, A, l, u, n, m):
self.P = P
self.q_vecs = q_vecs
self.A = A
self.l = l
self.u = u
self.n = n
self.m = m
def gen_qp_matrices(m, n, gammas):
"""
Generate QP matrices for lasso problem
"""
# Reset random seed for repetibility
np.random.seed(1)
# Problem parameters
dens_lvl = 0.4
# Generate data
Ad = spa.random(m, n, density=dens_lvl, format='csc')
x_true = np.multiply((np.random.rand(n) > 0.5).astype(float),
np.random.randn(n)) / np.sqrt(n)
bd = Ad.dot(x_true) + .5*np.random.randn(m)
# minimize y.T * y + gamma * np.ones(n).T * t
# subject to y = Ax - b
# -t <= x <= t
P = spa.block_diag((spa.csc_matrix((n, n)), spa.eye(m),
spa.csc_matrix((n, n))), format='csc')
# q = np.append(np.zeros(m + n), gamma*np.ones(n))
In = spa.eye(n)
Onm = spa.csc_matrix((n, m))
A = spa.vstack([spa.hstack([Ad, -spa.eye(m), Onm.T]),
spa.hstack([In, Onm, In]),
spa.hstack([-In, Onm, In])]).tocsc()
l = np.hstack([bd, np.zeros(2*n)])
u = np.hstack([bd, np.inf * np.ones(2*n)])
# Create linear cost vectors
q_vecs = np.empty((2*n + m, 0))
for gamma in gammas:
q_vecs = np.column_stack(
(q_vecs, np.append(np.zeros(n+m), gamma*np.ones(n))))
qp_matrices = QPmatrices(P, q_vecs, A, l, u, n, m)
# Return QP matrices
return qp_matrices
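# Shape check with hypothetical sizes: for m = 100 measurements and n = 10 features the stacked
# decision variable [x; y; t] has length 2*n + m = 120, A has m + 2*n = 120 rows, and q_vecs holds
# one column per gamma value (n_gamma columns).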
def solve_loop(qp_matrices, solver='emosqp'):
"""
    Solve lasso problem loop for all gammas
"""
# Shorter name for qp_matrices
qp = qp_matrices
print('\nSolving lasso problem loop for n = %d and solver %s' %
(qp.n, solver))
# Get number of problems to solve
n_prob = qp.q_vecs.shape[1]
# Results list
results = []
if solver == 'emosqp':
# Pass the data to OSQP
m = osqp.OSQP()
m.setup(qp.P, qp.q_vecs[:, 0], qp.A, qp.l, qp.u,
rho=10., verbose=False)
# Get extension name
module_name = 'emosqpn%s' % str(qp.n)
# Generate the code
m.codegen("code", python_ext_name=module_name, force_rewrite=True)
# Import module
emosqp = importlib.import_module(module_name)
for i in range(n_prob):
q = qp.q_vecs[:, i]
# Update linear cost
emosqp.update_lin_cost(q)
# Solve
x, y, status, niter, time = emosqp.solve()
# Check if status correct
if status != 1:
print('OSQP did not solve the problem!')
import ipdb
ipdb.set_trace()
raise ValueError('OSQP did not solve the problem!')
# Solution statistics
solution_dict = {'solver': [solver],
'runtime': [time],
'iter': [niter],
'n': [qp.n]}
results.append(pd.DataFrame(solution_dict))
elif solver == 'qpoases':
n_dim = qp.P.shape[0]
m_dim = qp.A.shape[0]
# Initialize qpoases and set options
qpoases_m = qpoases.PyQProblem(n_dim, m_dim)
options = qpoases.PyOptions()
options.printLevel = qpoases.PyPrintLevel.NONE
qpoases_m.setOptions(options)
# Setup matrix P and A
P = np.ascontiguousarray(qp.P.todense())
A = np.ascontiguousarray(qp.A.todense())
for i in range(n_prob):
            # Get linear cost as contiguous array
q = np.ascontiguousarray(qp.q_vecs[:, i])
# Reset cpu time
qpoases_cpu_time = np.array([10.])
# Reset number of of working set recalculations
nWSR = np.array([10000])
if i == 0:
res_qpoases = qpoases_m.init(P, q, A, None, None, qp.l, qp.u,
nWSR, qpoases_cpu_time)
else:
# Solve new hot started problem
res_qpoases = qpoases_m.hotstart(q, None, None, qp.l, qp.u,
nWSR, qpoases_cpu_time)
# Solution statistics
solution_dict = {'solver': [solver],
'runtime': [qpoases_cpu_time[0]],
'iter': [nWSR[0]],
'n': [qp.n]}
results.append(pd.DataFrame(solution_dict))
elif solver == 'gurobi':
n_dim = qp.P.shape[0]
m_dim = qp.A.shape[0]
for i in range(n_prob):
            # Get linear cost as contiguous array
q = np.ascontiguousarray(qp.q_vecs[:, i])
# solve with gurobi
prob = mpbpy.QuadprogProblem(qp.P, q, qp.A, qp.l, qp.u)
res = prob.solve(solver=mpbpy.GUROBI, verbose=False)
# Solution statistics
solution_dict = {'solver': [solver],
'runtime': [res.cputime],
'iter': [res.total_iter],
'n': [qp.n]}
results.append(pd.DataFrame(solution_dict))
else:
raise ValueError('Solver not understood')
return pd.concat(results)
'''
Problem parameters
'''
# Generate gamma parameters and cost vectors
n_gamma = 21
gammas = np.logspace(2, -2, n_gamma)
# Number of parameters
n_vec = np.array([10, 20, 30, 50, 80, 100, 150, 200, 250, 300, 350, 400])
# Measurements
m_vec = (10 * n_vec).astype(int)
# Setup if solve with gurobi/qpoases or not
solve_osqp = True
solve_gurobi = True
solve_qpoases = True
# Define statistics for osqp, gurobi and qpoases
if solve_osqp:
import osqp
osqp_stats = []
problem_stats = []
if solve_gurobi:
import mathprogbasepy as mpbpy
gurobi_stats = []
if solve_qpoases:
import qpoases
qpoases_stats = []
# Size of the exe file generated by OSQP
if solve_osqp:
if system() == 'Windows':
cmdsep = '&'
makefile = '"MinGW Makefiles"'
example_fullname = 'example.exe'
else:
cmdsep = ';'
makefile = '"Unix Makefiles"'
example_fullname = 'example'
'''
Solve problems
'''
for i in range(len(n_vec)):
# Generate QP sparse matrices
qp_matrices = gen_qp_matrices(m_vec[i], n_vec[i], gammas)
if solve_osqp:
# Solving loop with emosqp
stats = solve_loop(qp_matrices, 'emosqp')
osqp_stats.append(stats)
# Get size of the generated exe file in KB
call('cd code %s ' % (cmdsep) +
'mkdir build %s ' % (cmdsep) +
'cd build %s ' % (cmdsep) +
'cmake -G %s .. %s ' % (makefile, cmdsep) +
' cmake --build .',
shell=True)
example_path = os.path.join('code', 'build', 'out', example_fullname)
example_size = int(round(os.path.getsize(example_path) / 1024.))
# Problem statistics
N = qp_matrices.P.nnz + qp_matrices.A.nnz
problem_dict = {'n': [qp_matrices.n],
'm': [qp_matrices.m],
'N': [N],
'filesize': example_size}
problem_stats.append(pd.DataFrame(problem_dict))
if solve_qpoases:
# Solving loop with qpoases
stats = solve_loop(qp_matrices, 'qpoases')
qpoases_stats.append(stats)
if solve_gurobi:
# Solve loop with gurobi
stats = solve_loop(qp_matrices, 'gurobi')
gurobi_stats.append(stats)
'''
Store results in CSV files
'''
if solve_osqp:
# Combine OSQP stats and store them in a CSV file
df = pd.concat(osqp_stats)
df.to_csv('osqp_stats.csv', index=False)
# Combine problem stats and store them in a CSV file
df = pd.concat(problem_stats)
df.to_csv('problem_stats.csv', index=False)
if solve_gurobi:
# Combine GUROBI stats and store them in a CSV file
df = pd.concat(gurobi_stats)
df.to_csv('gurobi_stats.csv', index=False)
if solve_qpoases:
# Combine QPOASES stats and store them in a CSV file
    df = pd.concat(qpoases_stats)
    df.to_csv('qpoases_stats.csv', index=False)
import pickle
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from skmultilearn.problem_transform import ClassifierChain
from utils.preprocessing import clean_text
def list2string(list):
return ','.join(map(str, list))
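# Example: list2string(['a', 'b', 'c']) returns 'a,b,c'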
file_tweets = "new_personality_combined.csv"
file_personalities = "personality-data.txt"
data_tweets = pd.read_csv(file_tweets, sep=",", encoding="utf8", index_col=0)
data_personalities = pd.read_csv(file_personalities, sep="\t", encoding="utf8", index_col=10)
print(data_tweets)
# Join the two dataframes together
merged_df = pd.merge(data_tweets, data_personalities, on='twitter_uid', how='inner')
merged_df.reset_index(drop=True, inplace=True)
# Drop the statues (the text)
personality_categories = list(merged_df.columns.values)[2:]
# Print dataset statistics
print("Final number of data in personality dataset =", merged_df.shape[0])
print("Number of personality categories =", len(personality_categories))
print("Personality categories =", ', '.join(personality_categories))
print(merged_df['statuses'])
merged_df['statuses'] = merged_df.statuses.apply(clean_text)
print(merged_df['statuses'])
merged_df['statuses'] = [list2string(list) for list in merged_df['statuses']]
# Bin each personality score into 3 equal-width intervals (pd.cut with labels 0/1/2) to convert the problem to classification
bins = 3
labels = [0, 1, 2]
merged_df['ADMIRATION'] = pd.cut(merged_df['ADMIRATION'], bins, labels=labels)
merged_df['AGRE'] = pd.cut(merged_df['AGRE'], bins, labels=labels)
merged_df['ANXIETY'] = pd.cut(merged_df['ANXIETY'], bins, labels=labels)
merged_df['AVOIDANCE'] = pd.cut(merged_df['AVOIDANCE'], bins, labels=labels)
merged_df['CONS'] = pd.cut(merged_df['CONS'], bins, labels=labels)
merged_df['EXTR'] = pd.cut(merged_df['EXTR'], bins, labels=labels)
merged_df['NARCISSISM'] = pd.cut(merged_df['NARCISSISM'], bins, labels=labels)
merged_df['NEUR'] = pd.cut(merged_df['NEUR'], bins, labels=labels)
merged_df['OPEN'] = pd.cut(merged_df['OPEN'], bins, labels=labels)
merged_df['RIVALRY'] = pd.cut(merged_df['RIVALRY'], bins, labels=labels)
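# Illustrative example of the binning above: pd.cut([1.0, 4.0, 6.9], 3, labels=[0, 1, 2]) splits the
# observed value range into three equal-width intervals and maps the three values to 0, 1 and 2.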
import time
import pandas as pd
import numpy as np
CITY_DATA = { 'chicago': 'chicago.csv',
'new york city': 'new_york_city.csv',
'washington': 'washington.csv' }
def get_filters():
"""
Asks user to specify a city, month, and day to analyze.
Returns:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
"""
print("Greetings Mr. Hoffman! It seems that our new biking service is doing great!")
# get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs
city_check = 0
while city_check == 0:
city = input("Please select a city between Chicago, New York City and Washington.")
if (str(city) != 'Chicago') and (str(city) != 'New York City') and (str(city) != 'Washington'):
print(str(city))
print('Please try again. I need you to use the exact words I typed before!')
else:
city_check = 1
# get user input for month (all, january, february, ... , june)
month_check = 0
L_month = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'all']
while month_check == 0:
month = input("Please select a month between Jan, Feb, Mar, Apr, May, Jun or all.")
if str(month) not in L_month:
print('Please try again. I need you to use the exact words I typed before!')
else:
month_check = 1
# get user input for day of week (all, monday, tuesday, ... sunday)
day_check = 0
L_day = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun', 'all']
while day_check == 0:
day = input("Please select a day between Mon, Tue, Wed, Thu, Fri, Sat, Sun or all.")
if str(day) not in L_day:
print('Please try again. I need you to use the exact words I typed before!')
else:
day_check = 1
print('-'*40)
return city, month, day
def load_data(city, month, day):
"""
Loads data for the specified city and filters by month and day if applicable.
Args:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
Returns:
df - Pandas DataFrame containing city data filtered by month and day
"""
if str(city) == 'Chicago':
df = pd.read_csv('chicago.csv', sep=',', na_values='-', encoding='utf-8')
elif str(city) == 'New York City':
df = pd.read_csv('new_york_city.csv', sep=',', na_values='-', encoding='utf-8')
else:
        df = pd.read_csv('washington.csv', sep=',', na_values='-', encoding='utf-8')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 21 13:46:35 2021
@author: dmattox
"""
import pickle
import umap
import random
import collections
import re
import itertools
import plotnine as p9
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import networkx as nx
import cbk.sugarTrees.bin.sugarTrees as SugarTrees
import cbk.sugarTrees.bin.getStats as SugarStats
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
import numpy as np
import pandas as pd
import math
import requests
data = pd.read_csv("CF-Insider-Trading-equities-29-09-2020-to-29-12-2020.csv")
final = pd.DataFrame()
x = pd.DataFrame()
import os
import pandas as pd
from tqdm import tqdm
def load_tag_info(path):
tag_info = open(path).read()
tag_info = tag_info.replace(' ', ' ')
tag_info = tag_info.replace(' ', ' ')
tag_info = tag_info.replace(' ', ' ')
tag_info = tag_info.replace(':', '')
tag_info = tag_info.replace(' \n', '\n')
tag_info_list = tag_info.split('\n')
tmp = [[i for i in line.split(' ')] for line in tag_info_list[2:] if ' ' in line]
tag_df = pd.DataFrame(tmp)
tag_df.columns = ['tag_id', 'x', 'y', 'z']
tag_df['tag_id'] = tag_df['tag_id'].astype('int')
for key in ['x', 'y', 'z']:
tag_df[key] = tag_df[key].astype('float')*10.0
return tag_df
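# Illustrative usage sketch (the path below is hypothetical): the file is expected to contain
# "<tag_id> <x> <y> <z>" lines after a two-line header; coordinates are multiplied by 10 on load.
# tag_df = load_tag_info('data/tag_coordinates.txt')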
def load_distince_data_origin(path):
d1 = open(path).read()
d2 = d1.split('\n')
d3 = pd.DataFrame([line.split(':') for line in d2 if len(line.split(':')) == 9])
d3.columns = ['c1', 'unixtime', 'c3', 'tag_id', 'anchor_id', 'distance', 'distance_check', 'c8', 'data_index']
d3['tag_id'] = d3['tag_id'].astype('float')
d3['distance'] = d3['distance'].astype('float')
d3['distance_check'] = d3['distance_check'].astype('float')
return d3
def load_distince_data(path):
d3 = load_distince_data_origin(path)
d41 = d3[['data_index', 'anchor_id', 'distance']].pivot(index='data_index', columns='anchor_id', values='distance')
d41.reset_index(inplace=True)
d41.columns = ['data_index', 'dis_0', 'dis_1', 'dis_2', 'dis_3']
d42 = d3[['data_index', 'anchor_id', 'distance_check']].pivot(
index='data_index', columns='anchor_id', values='distance_check')
d42.reset_index(inplace=True)
d42.columns = ['data_index', 'dis_c_0', 'dis_c_1', 'dis_c_2', 'dis_c_3']
d5 = d3[['c1', 'data_index', 'c3', 'tag_id', 'c8', 'unixtime']].groupby(['data_index']).max().reset_index()
d6 = pd.merge(d5, d41, on=['data_index'])
    d6 = pd.merge(d6, d42, on=['data_index'])
    return d6
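# Illustrative usage sketch (the log path is hypothetical): the returned frame has one row per
# data_index with the distance (dis_0..dis_3) and distance_check (dis_c_0..dis_c_3) readings of the
# four anchors as separate columns.
# wide_df = load_distince_data('logs/uwb_readings.txt')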
""" Module contains functions to retrieve
and process data from the database folder"""
import os
import numpy as np
import shutil
import csv
import pandas as pd
import pkg_resources
pd.options.mode.chained_assignment = None # default='warn'
ROOT = pkg_resources.resource_filename('optimol', '')
DATABASE = ROOT + '/database_chemspider'
def get_all_dataset(set1=None, set2=0):
"""
Get all dataset from the database and combine them to one dataframe
and the samples are randomly selected. When two return sets are requested,
the samples are randomly picked from the same list, matching values
between two sets can happen.
:param set1: amount of samples wanted for the first set
:param set2: amount of samples wanted for the second set
:type set1: int
:type set2: int
    :return: dataframe that contains all of the datasets
"""
if False in [isinstance(set1, int),
isinstance(set2, int)]:
raise TypeError()
id_list = get_id()
max_length = len(id_list)
if True in [set1 < 0, set2 < 0,
set1 == 0,
set1 > max_length,
set2 > max_length]:
raise ValueError()
set1_items = np.random.randint(0, max_length, set1)
set2_items = np.random.randint(0, max_length, set2)
train_set = pd.DataFrame()
# i = 0
for item in set1_items:
# print(str(i) + ': ' + str(item)) # for debugging
[coord_2d, _, coord_3d, _] = get_df_database(id_list[item])
# remove unwanted data
del coord_2d['atom']
del coord_2d['connect_to_2d']
del coord_2d['2d_z']
del coord_3d['atom']
del coord_3d['connect_to_3d']
# combine dataframes into one
coord = pd.concat([coord_2d, coord_3d], axis=1)
coord.insert(0, column='id', value=id_list[item]) # add id value
# pd.concat([all_dataset, coord])
train_set = train_set.append(coord, ignore_index=True)
test_set = pd.DataFrame()
if set2 >= 1:
for item in set2_items:
# print(str(i) + ': ' + str(item)) # for debugging
[coord_2d, _, coord_3d, _] = get_df_database(id_list[item])
# remove unwanted data
del coord_2d['atom']
del coord_2d['connect_to_2d']
del coord_2d['2d_z']
del coord_3d['atom']
del coord_3d['connect_to_3d']
# combine dataframes into one
coord = pd.concat([coord_2d, coord_3d], axis=1)
from re import template
from turtle import width
from samples import Samples
from samples import Experiment
from plotting import Plotting
from os.path import join
from os.path import exists
import pandas as pd
from Bio import SeqIO
from subprocess import call
import re
"""
################################################################################
Author: https://github.com/nahanoo
This script analyzes all outputs generated from PacBio sample processing.
If you are interested in the sample processing you can check out the Snakemake
workflow: https://github.com/nahanoo/black_queen_hypothesis/blob/main/scripts/workflows/pacbio/Snakefile.
All plots call this small plotting class in plotting.py
################################################################################
"""
s = Samples()
p = Plotting()
e = Experiment()
def plot_genome_length():
"""This function plots the assembly length and the number of contigs
produced by assembling the PacBio data of the evolved strains."""
# Storing all generated dfs per strain for potential future applications.
genome_length = {strain: None for strain in s.strains}
# Iterating over all strains
for strain in s.strains:
# Get all treatments of a strain
treatments = s.treatments[strain]
# df for assembly length
length = pd.DataFrame(columns=treatments, index=[sample['name']
for sample in s.strains[strain] if sample['platform'] == 'pacbio'])
# df for n contigs
n_contigs = pd.DataFrame(columns=treatments, index=[sample['name']
for sample in s.strains[strain] if sample['platform'] == 'pacbio'])
# Iterating over all samples of a strain
for sample in s.strains[strain]:
# Getting genome length and n contigs
if sample['platform'] == 'pacbio':
# Parsing contigs of assembly
contigs = [contig for contig in SeqIO.parse(
join(sample['dir_name'], 'assembly.fasta'), 'fasta')]
# Storing assembly length
length.at[sample['name'], sample['treatment']] = sum(
[len(contig) for contig in contigs])
# Storing n contigs
n_contigs.at[sample['name'],
sample['treatment']] = len(contigs)
# Writing to dictionary for potential future applications
genome_length[strain] = length
# Plotting genome length
fig = p.subplot_treatments(strain, length)
# Adding genome length of wild-type
reference_length = sum(
[len(contig) for contig in SeqIO.parse(s.references[strain], 'fasta')])
fig.add_hline(y=reference_length,
annotation_text='reference', line_dash="dash")
title = 'Assembly length in '+strain
# Updating labels and dumping to png
fig.update_layout(
xaxis_title='samples',
yaxis_title='assembly length in bp',
title=title)
fig.update_traces(showlegend=False)
fig.write_image(join('..', 'plots', 'genome_length',
title.replace(' ', '_')+'.png'))
# Plotting n contigs
fig = p.subplot_treatments(strain, n_contigs)
# Updating labels and dumping to png
title = 'N contigs in '+strain
fig.update_layout(
xaxis_title='samples',
yaxis_title='n contigs',
title=title)
fig.update_traces(showlegend=False)
fig.write_image(join('..', 'plots', 'contigs',
title.replace(' ', '_')+'.png'))
return genome_length
def get_indels():
"""This function plots the sum of deleted and inserted base pairs."""
# Storing dfs per strain for potential future applications.
d = {strain: None for strain in s.strains}
i = {strain: None for strain in s.strains}
i_filtered = {strain: None for strain in s.strains}
is_e = {strain: None for strain in s.strains}
# Iterating over every strain
for strain in s.strains:
# Getting all treatments of a strain
treatments = s.treatments[strain]
# df for storing sum of deleted bases
deleted_bases = pd.DataFrame(columns=treatments, index=[sample['name']
for sample in s.strains[strain] if sample['platform'] == 'pacbio'])
# df for storing sum of inserted bases
inserted_bases = pd.DataFrame(columns=treatments, index=[sample['name']
for sample in s.strains[strain] if sample['platform'] == 'pacbio'])
# df for storing sum of filtered inserted bases
filtered_inserted_bases = pd.DataFrame(columns=treatments, index=[sample['name']
for sample in s.strains[strain] if sample['platform'] == 'pacbio'])
inserted_elements = pd.DataFrame(columns=treatments, index=[sample['name']
for sample in s.strains[strain] if sample['platform'] == 'pacbio'])
# Iterating over every sample of a strain
for sample in s.strains[strain]:
if sample['platform'] == 'pacbio':
# The biggest impact comes from regions with no coverage, output by
# https://github.com/nahanoo/deletion_detection
no_coverage = join(
sample['dir_name'], 'no_coverage.tsv')
if exists(no_coverage):
# Writing sum of base pairs with no alignment to df
deleted_bases.at[sample['name'], sample['treatment']] = sum(pd.read_csv(no_coverage, sep='\t',
usecols=['chromosome', 'position', 'length']).drop_duplicates()['length'])
# Storing sum of inserted base pairs to df derived from
# https://github.com/nahanoo/deletion_detection
insertions = join(sample['dir_name'],
'insertions.tsv')
if exists(insertions):
# Writing sum of inserted base pairs to df
inserted_bases.at[sample['name'], sample['treatment']] = sum(pd.read_csv(insertions, sep='\t',
usecols=['chromosome', 'position', 'length']).drop_duplicates()['length'])
# Storing sum of filtered inserted base pairs
filtered_insertions = join(
sample['dir_name'], 'insertions.filtered.tsv')
if exists(insertions):
# Writing sum of inserted base pairs to df
filtered_inserted_bases.at[sample['name'], sample['treatment']] = sum(pd.read_csv(filtered_insertions, sep='\t',
usecols=['chromosome', 'position', 'length']).drop_duplicates()['length'])
is_elements = join(sample['dir_name'],'ise_scan',sample['name'],'assembly.fasta.tsv')
print(is_elements)
if exists(is_elements):
inserted_elements.at[sample['name'],sample['treatment']] = len(pd.read_csv(is_elements).drop_duplicates())
# Storing dfs in dictionary for future processing
d[strain] = deleted_bases
i[strain] = inserted_bases
i_filtered[strain] = filtered_inserted_bases
is_e[strain] = inserted_elements
return d, i, i_filtered,is_e
def plot_deletions(d):
# Plotting deleted base pairs
fig = p.subplot_strains(d)
title = 'Deletions'
fig.update_layout(
xaxis_title='Treatments',
yaxis_title='Deleted base-pairs',
margin=dict(
l=0,
r=10,
b=0,
t=45,
pad=4
),
width=180,
height=300
)
fig.update_traces(showlegend=False)
fig.update_yaxes(type='log')
fig.write_image(join('..', 'plots', 'deleted_bases',
title.replace(' ', '_')+'.png'), scale=2)
# Plotting deleted base pairs per sample
for strain in s.strains:
fig = p.subplot_treatments(strain, d[strain])
title = 'Deleted bases in '+strain
fig.update_layout(
xaxis_title='samples',
yaxis_title='deleted bp',
title=title)
fig.update_traces(showlegend=False)
fig.write_image(join('..', 'plots', 'deleted_bases',
title.replace(' ', '_')+'.png'), scale=2)
def plot_insertions(i_filtered):
fig = p.subplot_strains(i_filtered)
title = 'Insertions'
fig.update_layout(
xaxis_title='Treatments',
yaxis_title='Inserted base-pairs',
margin=dict(
l=0,
r=10,
b=0,
t=45,
pad=4
),
width=380,
height=300
)
fig.update_yaxes(type='log')
fig.write_image(join('..', 'plots', 'inserted_bases',
title.replace(' ', '_')+'.png'), scale=2)
for strain in s.strains:
fig = p.subplot_treatments(strain, i_filtered[strain])
title = "Filtered inserted bases in " + strain
fig.update_layout(xaxis_title="samples",
yaxis_title="inserted bp", title=title)
fig.update_traces(showlegend=False)
fig.write_image(
join(
"..",
"plots",
"corrected_inserted_bases",
title.replace(" ", "_") + ".png",
)
)
def get_transposons_insertions():
ts = {strain: None for strain in s.strains}
for strain, samples in s.strains.items():
treatments = s.treatments[strain]
t = pd.DataFrame(columns=treatments, index=[sample['name']
for sample in s.strains[strain] if sample['platform'] == 'pacbio'])
for sample in samples:
if sample['platform'] == 'illumina':
f = join(sample['dir_name'], 'snippy','snps.tab')
df = pd.read_csv(f, sep='\t').drop_duplicates().dropna(subset='PRODUCT')
mask = []
for product in df['PRODUCT']:
try:
if (re.search('transc', product, flags=re.IGNORECASE)):
mask.append(True)
else:
mask.append(False)
except TypeError:
pass
df = df[mask]
t.at[sample['name'], sample['treatment']] = len(df)
ts[strain] = t
fig = p.subplot_strains(ts)
title = 'Transpositions'
fig.update_layout(
xaxis_title='Treatments',
yaxis_title='Products linked to active transposition',
margin=dict(
l=0,
r=10,
b=0,
t=45,
pad=4
)
)
fig.write_image(join('..', 'plots', 'hgts',
title.replace(' ', '_')+'.png'), scale=2)
return ts
def get_transposons_gbk():
ts = {strain: None for strain in s.strains}
for strain, samples in s.strains.items():
treatments = s.treatments[strain]
t = pd.DataFrame(columns=treatments, index=[sample['name']
for sample in s.strains[strain] if sample['platform'] == 'pacbio'])
for sample in samples:
if sample['platform'] == 'pacbio':
f = join(sample['dir_name'], 'bakta','assembly.gbff')
products = []
for contig in SeqIO.parse(f,'genbank'):
for feature in contig.features:
try:
products.append(feature.qualifiers['product'][0])
except KeyError:
pass
for product in products:
transpos = re.search('ribosomal', product, flags=re.IGNORECASE)
integr = re.search('integr', product, flags=re.IGNORECASE)
IS = re.search('IS', product)
if (transpos):# or (integr) or (IS):
if pd.isna(t.at[sample['name'], sample['treatment']]):
import os
from subprocess import Popen, PIPE
from numpy import nan
import pandas as pd
from os import listdir
from os.path import isfile, join
import matplotlib.pyplot as plt
plt.style.use("classic")
ROOT_DIR = os.getcwd()
VERSIONS = ["Release",
"Release GCC-7.1", "Optimized GCC-7.1", "Graphite GCC-7.1",
"Release Clang-4.0", "Optimized Clang-4.0", "Polly Clang-4.0",
"Release Intel", "Optimized Intel"]
def run_profiler(n_iterations, n_divs, smoothing=True, acceleration=True, method="SingleThreaded"):
cmd = "./FiniteDifferencePricing -profile"
cmd += " -iter " + str(n_iterations)
cmd += " -divs " + str(n_divs)
if smoothing:
cmd += " -smooth"
if acceleration:
cmd += " -acc"
if method == "SingleThreaded":
cmd += " -method single"
elif method == "MultiThreaded":
cmd += " -method multi"
else:
raise NotImplementedError()
p = Popen(cmd, shell=True, stdout=PIPE)
out, _ = p.communicate()
lines = out.decode().split("\n")
avg_time_line = lines[3].split(":")[-1].strip()
avg_opt_sec_line = lines[5].split(":")[-1].strip()
return avg_time_line, avg_opt_sec_line
def run_callgrind(n_divs, smoothing=True, acceleration=True, version=None):
# clean out previous runs
os.popen("rm -f callgrind.out*")
os.popen("rm -f *.dot")
cmd = "valgrind --tool=callgrind --instr-atstart=no ./FiniteDifferencePricing -profile -iter 100"
cmd += " -divs " + str(n_divs)
if smoothing:
cmd += " -smooth"
if acceleration:
cmd += " -acc"
cmd += " -method single"
p = Popen(cmd, shell=True, stderr=PIPE, stdout=PIPE)
_, out = p.communicate()
lines = out.decode().split("\n")
# Generate call graph
if version is not None:
generate_png_cmd = "$HOME/anaconda3/bin/gprof2dot --format=callgrind --output=out.dot "
callgrind_file = [f for f in listdir(os.getcwd()) if isfile(join(os.getcwd(), f)) and f.endswith(".1")]
if len(callgrind_file) != 1:
raise ValueError("Callgrind generated " + str(len(callgrind_file)) + " files!")
generate_png_cmd += callgrind_file[0]
Popen(generate_png_cmd, shell=True).communicate()
version = str(version).replace(" ", "\\")
os.popen("dot -Tpng out.dot -o " + ROOT_DIR + "/Results/" + version + ".png")
# clean out
os.popen("rm -f callgrind.out*")
os.popen("rm -f *.dot")
return lines[8].split(":")[-1].strip()
def run_version(version, n_iterations, n_divs, smoothing=True, acceleration=True, method="SingleThreaded", profile=False):
wd = ROOT_DIR + "/" + version + "/"
os.chdir(wd)
if method == "SingleThreaded":
instruction_reads = run_callgrind(n_divs, smoothing, acceleration, version)
else:
instruction_reads = nan
ret = [instruction_reads]
if profile:
avg_time_line, avg_opt_sec_line = run_profiler(n_iterations, n_divs, smoothing, acceleration, method)
ret.append(avg_time_line)
ret.append(avg_opt_sec_line)
return ret
def run_all(n_iterations, n_divs, smoothing=True, acceleration=True, method="SingleThreaded", profile=False):
columns = ["Instructions"]
if profile:
columns.append("Average Time Per Option (ms)")
columns.append("Option Per Second")
columns = ["Version"] + columns
df = pd.DataFrame(columns=columns)
for version in VERSIONS:
df.loc[len(df)] = [version] + run_version(version, n_iterations, n_divs, smoothing, acceleration, method, profile)
df["Dividends"] = n_divs
df["Smoothing"] = smoothing
df["Acceleration"] = acceleration
df["Threads"] = method
df = df.set_index("Version")
return df
def recompile_all():
for version in VERSIONS:
wd = ROOT_DIR + "/" + version + "/"
os.chdir(wd)
p = Popen("make clean && make all -j8", shell=True)
p.communicate()
def summary(n_iterations, method="SingleThreaded", profile=False, recompile=False):
if recompile:
recompile_all()
smoothing = False
acceleration = False
def worker():
_df = run_all(n_iterations, 0, smoothing, acceleration, method=method, profile=profile)
for n_divs in range(1, 9):
_df = _df.append(run_all(n_iterations, n_divs, smoothing, acceleration, method=method, profile=profile))
return _df
df = worker()
smoothing = True
acceleration = False
df = df.append(worker())
smoothing = False
acceleration = True
df = df.append(worker())
smoothing = True
acceleration = True
df = df.append(worker())
os.chdir(ROOT_DIR + "/Results")
df.to_csv("summary" + method + ".csv")
print(df)
def plot_version(group_df, version, y):
version_df = group_df.get_group(version)
version_df_no_improvements = version_df[(version_df["Smoothing"] == False) & (version_df["Acceleration"] == False)]
version_df_smoothing = version_df[(version_df["Smoothing"] == True) & (version_df["Acceleration"] == False)]
version_df_acceleration = version_df[(version_df["Smoothing"] == False) & (version_df["Acceleration"] == True)]
version_df_all = version_df[(version_df["Smoothing"] == True) & (version_df["Acceleration"] == True)]
try:
is_subplot = len(y) > 1
except AttributeError:
is_subplot = False
ax = version_df_no_improvements.plot(y=y, x="Dividends", color='b', label="No Improvements", subplots=is_subplot, layout=(1, 2))
version_df_smoothing.plot(y=y, x="Dividends", ax=ax, color='g', label="Smoothing", subplots=is_subplot)
version_df_acceleration.plot(y=y, x="Dividends", ax=ax, color='k', label="Acceleration", subplots=is_subplot)
version_df_all.plot(y=y, x="Dividends", ax=ax, color='r', label="All", subplots=is_subplot)
fig = plt.figure(1)
fig.suptitle(version)
plt.show()
def plot_version_comparisons(n_divs, smoothing, acceleration, method="SingleThreaded"):
os.chdir(ROOT_DIR + "/Results/")
df = pd.read_csv("summary" + method + ".csv", index_col=0)
group_df = df.groupby("Version")
points = pd.DataFrame()
import sys
sys.path.append("/home/wyshi/simulator")
from simulator.loose_user import LooseUser
# from simulator.user import Goal
from simulator.user import User
from simulator.system import System
from simulator.loose_system import LooseSystem
from sequicity_user.seq_user import Seq_User
from sequicity_user.seq_user_act import Seq_User_Act
from simulator.env import Enviroment
import simulator.dialog_config as dialog_config
import numpy as np
from tqdm import tqdm
from simulator.agent.core import SystemAct
from config import Config
config = Config()
config.use_new_reward = False
def accum_slots(usr_act_turns):
inform_hist = {}
book_inform_hist = {}
output_str = []
for usr_act in usr_act_turns:
if usr_act.act in ['inform_type', 'inform_type_change']:
inform_hist.update(usr_act.parameters)
elif usr_act.act in ['make_reservation', 'make_reservation_change_time']:
book_inform_hist.update(usr_act.parameters)
for slot_name in inform_hist.keys():
output_str.append(inform_hist[slot_name])
output_str.append('EOS_Z1')
for slot_name in book_inform_hist.keys():
output_str.append(book_inform_hist[slot_name])
output_str.append('EOS_Z3')
if usr_act_turns[-1].act in ['request']:
for slot in usr_act_turns[-1].parameters:
output_str.append(slot)
output_str.append('EOS_Z2')
return ' '.join(output_str)
TEST_SEQ_USER = False
# if False:
# user = LooseUser(nlg_sample=False)
# system = LooseSystem(config=config)
# else:
# user = User(nlg_sample=False, nlg_template=False)
# system = System(config=config)
user = User(nlg_sample=False, nlg_template=False)
system = System(config=config)
if TEST_SEQ_USER:
if config.use_sl_generative:
user = Seq_User(nlg_sample=False, nlg_template=False)
else:
user = Seq_User_Act(nlg_sample=True, nlg_template=False)
env = Enviroment(user=user, system=system, verbose=True, config=config)
sys_act = None
status = []
MODE = dialog_config.RL_WARM_START  # alternatives: RANDOM_ACT, INTERACTIVE, RL_TRAINING
mean_reward_test = []
mean_len_test = []
mean_success_test = []
for _ in tqdm(range(200)):
print("-"*20)
usr_act_seq = []
next_state = env.reset(mode=MODE)
usr_act_seq.append(env.last_usr_act_true)
# print("*"*20)
# print(accum_slots(usr_act_seq))
# print("*"*20)
sys_act = None # initial sys act
total_rewards = 0
while True:
if MODE == dialog_config.RANDOM_ACT:
provided_sys_act = np.random.choice(range(6))
index_to_action_dict = {0: SystemAct.ASK_TYPE,
1: [SystemAct.PRESENT_RESULT, SystemAct.NOMATCH_RESULT, SystemAct.NO_OTHER],
2: SystemAct.PROVIDE_INFO,
3: [SystemAct.BOOKING_SUCCESS, SystemAct.BOOKING_FAIL],
4: SystemAct.GOODBYE,
5: SystemAct.ASK_RESERVATION_INFO}
print(index_to_action_dict[provided_sys_act])
else:
provided_sys_act = None
next_state, reward, done = env.step(provided_sys_act=provided_sys_act, mode=MODE)
print(env.last_usr_act_true)
usr_act_seq.append(env.last_usr_act_true)
# print("*" * 20)
# print(accum_slots(usr_act_seq))
# print("per turn reward", reward)
# print("*" * 20)
total_rewards += reward
# usr_act, usr_sent = user.respond(sys_act=sys_act)
# sys_act, sys_sent = system.respond(usr_sent=usr_sent, warm_start=True, usr_act=usr_act)
# sys_act = next_sys_act
print("user turn status: ", env.user.dialog_status)
if done:
status.append(user.dialog_status)
# assert env.success
print('dialog_status: {}'.format(env.success))
print('reward: {}'.format(total_rewards))
mean_reward_test.append(total_rewards)
mean_success_test.append(env.success)
mean_len_test.append(env.step_i)
print("-"*20)
print("\n\n\n")
break
import pandas as pd
import time
cnt = 'rule-base-system'
cur_time = "-".join([str(t) for t in list(time.localtime())])
full_result = zip(mean_reward_test, mean_len_test, mean_success_test)
pd.DataFrame(full_result, columns=["reward", "len", "success"])
"""""" #
"""
Copyright (c) 2020-2022, <NAME>
All rights reserved.
This work is licensed under BSD 3-Clause "New" or "Revised" License.
License available at https://github.com/dcajasn/Riskfolio-Lib/blob/master/LICENSE.txt
"""
import numpy as np
import pandas as pd
import statsmodels.api as sm
import sklearn.covariance as skcov
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from numpy.linalg import inv
import riskfolio.AuxFunctions as af
import arch.bootstrap as bs
import riskfolio.DBHT as db
def mean_vector(X, method="hist", d=0.94):
r"""
Calculate the expected returns vector using the selected method.
Parameters
----------
X : DataFrame of shape (n_samples, n_features)
Features matrix, where n_samples is the number of samples and
n_features is the number of features.
method : str, optional
The method used to estimate the expected returns.
The default value is 'hist'. Possible values are:
- 'hist': use historical estimates.
- 'ewma1': use ewma with adjust=True, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.
- 'ewma2': use ewma with adjust=False, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.
d : scalar
The smoothing factor of ewma methods.
The default is 0.94.
Returns
-------
mu : 1d-array
The estimation of expected returns.
Raises
------
ValueError
When the value cannot be calculated.
"""
if not isinstance(X, pd.DataFrame):
raise ValueError("X must be a DataFrame")
assets = X.columns.tolist()
if method == "hist":
mu = np.array(X.mean(), ndmin=2)
elif method == "ewma1":
mu = np.array(X.ewm(alpha=1 - d).mean().iloc[-1, :], ndmin=2)
elif method == "ewma2":
mu = np.array(X.ewm(alpha=1 - d, adjust=False).mean().iloc[-1, :], ndmin=2)
mu = pd.DataFrame(np.array(mu, ndmin=2), columns=assets)
return mu
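# --- Editorial usage sketch (illustrative, not part of the original module) ---
# A minimal, hedged example of mean_vector on synthetic daily returns; the
# asset names, sample size and seed below are assumptions for illustration.
def _example_mean_vector():
    rng = np.random.default_rng(0)
    X = pd.DataFrame(rng.normal(0.0005, 0.01, size=(250, 3)),
                     columns=["Asset_A", "Asset_B", "Asset_C"])
    mu_hist = mean_vector(X, method="hist")           # arithmetic mean
    mu_ewma = mean_vector(X, method="ewma1", d=0.94)  # exponentially weighted
    return mu_hist, mu_ewma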
def covar_matrix(X, method="hist", d=0.94, **kwargs):
r"""
Calculate the covariance matrix using the selected method.
Parameters
----------
X : DataFrame of shape (n_samples, n_features)
Features matrix, where n_samples is the number of samples and
n_features is the number of features.
method : str, optional
The method used to estimate the covariance matrix:
The default is 'hist'. Possible values are:
- 'hist': use historical estimates.
- 'ewma1': use ewma with adjust=True, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.
- 'ewma2': use ewma with adjust=False, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.
- 'ledoit': use the Ledoit and Wolf Shrinkage method.
- 'oas': use the Oracle Approximation Shrinkage method.
- 'shrunk': use the basic Shrunk Covariance method.
- 'gl': use the basic Graphical Lasso Covariance method.
- 'jlogo': use the j-LoGo Covariance method. For more information see: :cite:`b-jLogo`.
- 'fixed': denoise using fixed method. For more information see chapter 2 of :cite:`b-MLforAM`.
- 'spectral': denoise using spectral method. For more information see chapter 2 of :cite:`b-MLforAM`.
- 'shrink': denoise using shrink method. For more information see chapter 2 of :cite:`b-MLforAM`.
d : scalar
The smoothing factor of ewma methods.
The default is 0.94.
**kwargs:
Other variables related to covariance estimation. See
`Scikit Learn <https://scikit-learn.org/stable/modules/covariance.html>`_
and chapter 2 of :cite:`b-MLforAM` for more details.
Returns
-------
cov : nd-array
The estimation of covariance matrix.
Raises
------
ValueError
When the value cannot be calculated.
"""
if not isinstance(X, pd.DataFrame):
raise ValueError("X must be a DataFrame")
assets = X.columns.tolist()
if method == "hist":
cov = np.cov(X.T)
elif method == "ewma1":
cov = X.ewm(alpha=1 - d).cov()
item = cov.iloc[-1, :].name[0]
cov = cov.loc[(item, slice(None)), :]
elif method == "ewma2":
cov = X.ewm(alpha=1 - d, adjust=False).cov()
item = cov.iloc[-1, :].name[0]
cov = cov.loc[(item, slice(None)), :]
elif method == "ledoit":
lw = skcov.LedoitWolf(**kwargs)
lw.fit(X)
cov = lw.covariance_
elif method == "oas":
oas = skcov.OAS(**kwargs)
oas.fit(X)
cov = oas.covariance_
elif method == "shrunk":
sc = skcov.ShrunkCovariance(**kwargs)
sc.fit(X)
cov = sc.covariance_
elif method == "gl":
gl = skcov.GraphicalLassoCV(**kwargs)
gl.fit(X)
cov = gl.covariance_
elif method == "jlogo":
S = np.cov(X.T)
R = np.corrcoef(X.T)
D = np.sqrt(np.clip((1 - R) / 2, a_min=0.0, a_max=1.0))
(_, _, separators, cliques, _) = db.PMFG_T2s(1 - D ** 2, nargout=4)
cov = db.j_LoGo(S, separators, cliques)
cov = np.linalg.inv(cov)
elif method in ["fixed", "spectral", "shrink"]:
cov = np.cov(X.T)
T, N = X.shape
q = T / N
cov = af.denoiseCov(cov, q, kind=method, **kwargs)
cov = pd.DataFrame(np.array(cov, ndmin=2), columns=assets, index=assets)
return cov
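# --- Editorial usage sketch (illustrative, not part of the original module) ---
# Compares the plain historical covariance with a Ledoit-Wolf shrinkage
# estimate on synthetic returns; column names and seed are assumptions.
def _example_covar_matrix():
    rng = np.random.default_rng(1)
    X = pd.DataFrame(rng.normal(0.0, 0.01, size=(250, 4)), columns=list("ABCD"))
    cov_hist = covar_matrix(X, method="hist")
    cov_lw = covar_matrix(X, method="ledoit")  # shrunk towards a structured target
    return cov_hist, cov_lw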
def forward_regression(X, y, criterion="pvalue", threshold=0.05, verbose=False):
r"""
Select the variables that estimate the best model using stepwise
forward regression. In case none of the variables has a p-value lower
than threshold, the algorithm will select the variable with lowest p-value.
Parameters
----------
X : DataFrame of shape (n_samples, n_features)
Features matrix, where n_samples is the number of samples and
n_features is the number of features.
y : Series of shape (n_samples, 1)
Target vector, where n_samples is the number of samples.
criterion : str, optional
The default is 'pvalue'. Possible values of the criterion used to select
the best features are:
- 'pvalue': select the features based on p-values.
- 'AIC': select the features based on lowest Akaike Information Criterion.
- 'SIC': select the features based on lowest Schwarz Information Criterion.
- 'R2': select the features based on highest R Squared.
- 'R2_A': select the features based on highest Adjusted R Squared.
threshold : scalar, optional
Is the maximum p-value for each variable that will be
accepted in the model. The default is 0.05.
verbose : bool, optional
Enable verbose output. The default is False.
Returns
-------
value : list
A list of the variables that produce the best model.
Raises
------
ValueError
When the value cannot be calculated.
"""
if not isinstance(X, pd.DataFrame):
raise ValueError("X must be a DataFrame")
if not isinstance(y, pd.DataFrame) and not isinstance(y, pd.Series):
raise ValueError("y must be a column DataFrame")
if isinstance(y, pd.DataFrame):
if y.shape[0] > 1 and y.shape[1] > 1:
raise ValueError("y must be a column DataFrame")
included = []
aic = 1e10
sic = 1e10
r2 = -1e10
r2_a = -1e10
pvalues = None
if criterion == "pvalue":
value = 0
while value <= threshold:
excluded = list(set(X.columns) - set(included))
best_pvalue = 999999
new_feature = None
for i in excluded:
factors = included + [i]
X1 = X[factors]
X1 = sm.add_constant(X1)
results = sm.OLS(y, X1).fit()
new_pvalues = results.pvalues
new_pvalues = new_pvalues[new_pvalues.index != "const"]
cond_1 = new_pvalues.max()
if best_pvalue > new_pvalues[i] and cond_1 <= threshold:
best_pvalue = results.pvalues[i]
new_feature = i
pvalues = new_pvalues.copy()
if pvalues is not None:
value = pvalues[pvalues.index != "const"].max()
if new_feature is None:
break
else:
included.append(new_feature)
if verbose:
print("Add {} with p-value {:.6}".format(new_feature, best_pvalue))
# Handles the case where no variable has a p-value lower than the threshold
if len(included) == 0:
excluded = list(set(X.columns) - set(included))
best_pvalue = 999999
new_feature = None
for i in excluded:
factors = included + [i]
X1 = X[factors]
X1 = sm.add_constant(X1)
results = sm.OLS(y, X1).fit()
new_pvalues = results.pvalues
new_pvalues = new_pvalues[new_pvalues.index != "const"]
if best_pvalue > new_pvalues[i]:
best_pvalue = results.pvalues[i]
new_feature = i
pvalues = new_pvalues.copy()
value = pvalues[pvalues.index != "const"].max()
included.append(new_feature)
if verbose:
print(
"Add {} with p-value {:.6}".format(pvalues.idxmax(), pvalues.max())
)
else:
excluded = X.columns.tolist()
for i in range(X.shape[1]):
j = 0
value = None
for i in excluded:
factors = included.copy()
factors.append(i)
X1 = X[factors]
X1 = sm.add_constant(X1)
results = sm.OLS(y, X1).fit()
if criterion == "AIC":
if results.aic < aic:
value = i
aic = results.aic
if criterion == "SIC":
if results.bic < sic:
value = i
sic = results.bic
if criterion == "R2":
if results.rsquared > r2:
value = i
r2 = results.rsquared
if criterion == "R2_A":
if results.rsquared_adj > r2_a:
value = i
r2_a = results.rsquared_adj
j += 1
if j == len(excluded):
if value is None:
break
else:
excluded.remove(value)
included.append(value)
if verbose:
if criterion == "AIC":
print(
"Add {} with AIC {:.6}".format(value, results.aic)
)
elif criterion == "SIC":
print(
"Add {} with SIC {:.6}".format(value, results.bic)
)
elif criterion == "R2":
print(
"Add {} with R2 {:.6}".format(
value, results.rsquared
)
)
elif criterion == "R2_A":
print(
"Add {} with Adjusted R2 {:.6}".format(
value, results.rsquared_adj
)
)
return included
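# --- Editorial usage sketch (illustrative, not part of the original module) ---
# Stepwise forward selection for a single asset whose returns are, by
# construction, driven mainly by factor F1; data and names are synthetic.
def _example_forward_regression():
    rng = np.random.default_rng(2)
    X = pd.DataFrame(rng.normal(size=(250, 3)), columns=["F1", "F2", "F3"])
    y = 0.8 * X["F1"] + rng.normal(scale=0.1, size=250)
    included = forward_regression(X, y, criterion="pvalue", threshold=0.05)
    return included  # typically ['F1']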
def backward_regression(X, y, criterion="pvalue", threshold=0.05, verbose=False):
r"""
Select the variables that estimate the best model using stepwise
backward regression. In case none of the variables has a p-value lower
than threshold, the algorithm will select the variable with lowest p-value.
Parameters
----------
X : DataFrame of shape (n_samples, n_features)
Features matrix, where n_samples is the number of samples and
n_features is the number of features.
y : Series of shape (n_samples, 1)
Target vector, where n_samples in the number of samples.
criterion : str, optional
The default is 'pvalue'. Possible values of the criterion used to select
the best features are:
- 'pvalue': select the features based on p-values.
- 'AIC': select the features based on lowest Akaike Information Criterion.
- 'SIC': select the features based on lowest Schwarz Information Criterion.
- 'R2': select the features based on highest R Squared.
- 'R2_A': select the features based on highest Adjusted R Squared.
threshold : scalar, optional
Is the maximum p-value for each variable that will be
accepted in the model. The default is 0.05.
verbose : bool, optional
Enable verbose output. The default is False.
Returns
-------
value : list
A list of the variables that produce the best model.
Raises
------
ValueError
When the value cannot be calculated.
"""
if not isinstance(X, pd.DataFrame):
raise ValueError("X must be a DataFrame")
if not isinstance(y, pd.DataFrame) and not isinstance(y, pd.Series):
raise ValueError("y must be a column DataFrame")
if isinstance(y, pd.DataFrame):
if y.shape[0] > 1 and y.shape[1] > 1:
raise ValueError("y must be a column DataFrame")
X1 = sm.add_constant(X)
results = sm.OLS(y, X1).fit()
pvalues = results.pvalues
aic = results.aic
sic = results.bic
r2 = results.rsquared
r2_a = results.rsquared_adj
included = pvalues.index.tolist()
excluded = ["const"]
if criterion == "pvalue":
while pvalues[pvalues.index != "const"].max() > threshold:
factors = pvalues[~pvalues.index.isin(excluded)].index.tolist()
X1 = X[factors]
X1 = sm.add_constant(X1)
results = sm.OLS(y, X1).fit()
pvalues = results.pvalues
pvalues = pvalues[pvalues.index != "const"]
if pvalues.shape[0] == 0:
break
excluded = ["const", pvalues.idxmax()]
if verbose and pvalues.max() > threshold:
print(
"Drop {} with p-value {:.6}".format(pvalues.idxmax(), pvalues.max())
)
included = pvalues.index.tolist()
# Handles the case where no variable has a p-value lower than the threshold
if len(included) == 0:
excluded = list(set(X.columns) - set(included))
best_pvalue = 999999
new_feature = None
for i in excluded:
factors = included + [i]
X1 = X[factors]
X1 = sm.add_constant(X1)
results = sm.OLS(y, X1).fit()
new_pvalues = results.pvalues
new_pvalues = results.pvalues
new_pvalues = new_pvalues[new_pvalues.index != "const"]
if best_pvalue > new_pvalues[i]:
best_pvalue = results.pvalues[i]
new_feature = i
pvalues = new_pvalues.copy()
value = pvalues[pvalues.index != "const"].max()
included.append(new_feature)
if verbose:
print(
"Add {} with p-value {:.6}".format(pvalues.idxmax(), pvalues.max())
)
else:
included.remove("const")
for i in range(X.shape[1]):
j = 0
value = None
for i in included:
factors = included.copy()
factors.remove(i)
X1 = X[factors]
X1 = sm.add_constant(X1)
results = sm.OLS(y, X1).fit()
if criterion == "AIC":
if results.aic < aic:
value = i
aic = results.aic
elif criterion == "SIC":
if results.bic < sic:
value = i
sic = results.bic
elif criterion == "R2":
if results.rsquared > r2:
value = i
r2 = results.rsquared
elif criterion == "R2_A":
if results.rsquared_adj > r2_a:
value = i
r2_a = results.rsquared_adj
j += 1
if j == len(included):
if value is None:
break
else:
included.remove(value)
if verbose:
if criterion == "AIC":
print(
"Drop {} with AIC {:.6}".format(value, results.aic)
)
elif criterion == "SIC":
print(
"Drop {} with SIC {:.6}".format(value, results.bic)
)
elif criterion == "R2":
print(
"Drop {} with R2 {:.6}".format(
value, results.rsquared
)
)
elif criterion == "R2_A":
print(
"Drop {} with Adjusted R2 {:.6}".format(
value, results.rsquared_adj
)
)
return included
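# --- Editorial usage sketch (illustrative, not part of the original module) ---
# The backward variant on synthetic factor data: starting from the full model,
# factors whose p-values exceed the threshold are dropped one by one.
def _example_backward_regression():
    rng = np.random.default_rng(3)
    X = pd.DataFrame(rng.normal(size=(250, 3)), columns=["F1", "F2", "F3"])
    y = 0.5 * X["F1"] - 0.3 * X["F2"] + rng.normal(scale=0.1, size=250)
    included = backward_regression(X, y, criterion="pvalue", threshold=0.05)
    return included  # typically ['F1', 'F2']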
def PCR(X, y, n_components=0.95):
r"""
Estimate the coefficients using Principal Components Regression (PCR).
Parameters
----------
X : DataFrame of shape (n_samples, n_features)
Features matrix, where n_samples is the number of samples and
n_features is the number of features.
y : Series of shape (n_samples, 1)
Target vector, where n_samples in the number of samples.
n_components : int, float, None or str, optional
if 1 < n_components (int), it represents the number of components that
will be kept. If 0 < n_components < 1 (float), it represents the
percentage of variance that is explained by the components kept.
See `PCA <https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html>`_
for more details. The default is 0.95.
Returns
-------
value : nd-array
An array with the coefficients of the model calculated using PCR.
Raises
------
ValueError
When the value cannot be calculated.
"""
if not isinstance(X, pd.DataFrame):
raise ValueError("X must be a DataFrame")
if not isinstance(y, pd.DataFrame) and not isinstance(y, pd.Series):
raise ValueError("y must be a column DataFrame")
if isinstance(y, pd.DataFrame):
if y.shape[0] > 1 and y.shape[1] > 1:
raise ValueError("y must be a column DataFrame")
scaler = StandardScaler()
scaler.fit(X)
X_std = scaler.transform(X)
pca = PCA(n_components=n_components)
pca.fit(X_std)
Z_p = pca.transform(X_std)
V_p = pca.components_.T
results = sm.OLS(y, sm.add_constant(Z_p)).fit()
beta_pc = results.params[1:]
beta_pc = np.array(beta_pc, ndmin=2)
std = np.array(np.std(X, axis=0, ddof=1), ndmin=2)
mean = np.array(np.mean(X, axis=0), ndmin=2)
beta = V_p @ beta_pc.T / std.T
beta_0 = np.array(y.mean(), ndmin=2) - np.sum(beta * mean.T)
beta = np.insert(beta, 0, beta_0)
beta = np.array(beta, ndmin=2)
return beta
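# --- Editorial usage sketch (illustrative, not part of the original module) ---
# PCR on synthetic factors, keeping enough principal components to explain
# 95% of the variance; coefficients and names are assumptions.
def _example_PCR():
    rng = np.random.default_rng(4)
    X = pd.DataFrame(rng.normal(size=(250, 5)),
                     columns=["F1", "F2", "F3", "F4", "F5"])
    y = X @ np.array([0.4, 0.0, -0.2, 0.1, 0.0]) + rng.normal(scale=0.1, size=250)
    beta = PCR(X, y, n_components=0.95)
    return beta  # shape (1, 6): intercept followed by one coefficient per factor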
def loadings_matrix(
X,
Y,
feature_selection="stepwise",
stepwise="Forward",
criterion="pvalue",
threshold=0.05,
n_components=0.95,
verbose=False,
):
r"""
Estimate the loadings matrix using stepwise regression.
Parameters
----------
X : DataFrame of shape (n_samples, n_features)
Features matrix, where n_samples is the number of samples and
n_features is the number of features.
Y : DataFrame of shape (n_samples, n_assets)
Target matrix, where n_samples is the number of samples and
n_assets is the number of assets.
feature_selection: str 'stepwise' or 'PCR', optional
Indicate the method used to estimate the loadings matrix.
The default is 'stepwise'.
stepwise: str 'Forward' or 'Backward', optional
Indicate the method used for stepwise regression.
The default is 'Forward'.
criterion : str, optional
The default is 'pvalue'. Possible values of the criterion used to select
the best features are:
- 'pvalue': select the features based on p-values.
- 'AIC': select the features based on lowest Akaike Information Criterion.
- 'SIC': select the features based on lowest Schwarz Information Criterion.
- 'R2': select the features based on highest R Squared.
- 'R2_A': select the features based on highest Adjusted R Squared.
threshold : scalar, optional
Is the maximum p-value for each variable that will be
accepted in the model. The default is 0.05.
n_components : int, float, None or str, optional
if 1 < n_components (int), it represents the number of components that
will be kept. If 0 < n_components < 1 (float), it represents the
percentage of variance that is explained by the components kept.
See `PCA <https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html>`_
for more details. The default is 0.95.
verbose : bool, optional
Enable verbose output. The default is False.
Returns
-------
loadings : DataFrame
A DataFrame with the loadings matrix.
Raises
------
ValueError
When the value cannot be calculated.
"""
if not isinstance(X, pd.DataFrame):
raise ValueError("X must be a DataFrame")
if not isinstance(Y, pd.DataFrame):
raise ValueError("Y must be a DataFrame")
rows = Y.columns.tolist()
cols = X.columns.tolist()
cols.insert(0, "const")
loadings = np.zeros((len(rows), len(cols)))
loadings = pd.DataFrame(loadings, index=rows, columns=cols)
for i in rows:
if feature_selection == "stepwise":
if stepwise == "Forward":
included = forward_regression(
X, Y[i], criterion=criterion, threshold=threshold, verbose=verbose
)
elif stepwise == "Backward":
included = backward_regression(
X, Y[i], criterion=criterion, threshold=threshold, verbose=verbose
)
else:
raise ValueError("Choose and adecuate stepwise method")
results = sm.OLS(Y[i], sm.add_constant(X[included])).fit()
params = results.params
loadings.loc[i, params.index.tolist()] = params.T
elif feature_selection == "PCR":
beta = PCR(X, Y[i], n_components=n_components)
beta = pd.Series(np.ravel(beta), index=cols)
loadings.loc[i, cols] = beta.T
return loadings
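# --- Editorial usage sketch (illustrative, not part of the original module) ---
# Builds a loadings matrix for two synthetic assets against three synthetic
# factors using stepwise forward selection; all names are assumptions.
def _example_loadings_matrix():
    rng = np.random.default_rng(5)
    X = pd.DataFrame(rng.normal(size=(250, 3)), columns=["MKT", "SMB", "HML"])
    Y = pd.DataFrame({
        "Asset_1": 0.9 * X["MKT"] + rng.normal(scale=0.05, size=250),
        "Asset_2": 0.4 * X["SMB"] + rng.normal(scale=0.05, size=250),
    })
    B = loadings_matrix(X, Y, feature_selection="stepwise", stepwise="Forward")
    return B  # rows: assets, columns: 'const' plus the factors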
def risk_factors(
X,
Y,
B=None,
const=False,
method_mu="hist",
method_cov="hist",
feature_selection="stepwise",
stepwise="Forward",
criterion="pvalue",
threshold=0.05,
n_components=0.95,
error=True,
**kwargs
):
r"""
Estimate the expected returns vector and covariance matrix based on risk
factors models :cite:`b-Ross` :cite:`b-Fan`.
.. math::
\begin{aligned}
R & = \alpha + B F + \epsilon \\
\mu_{f} & = \alpha +BE(F) \\
\Sigma_{f} & = B \Sigma_{F} B^{T} + \Sigma_{\epsilon} \\
\end{aligned}
where:
:math:`R` is the series returns.
:math:`\alpha` is the intercept.
:math:`B` is the loadings matrix.
:math:`F` is the expected returns vector of the risk factors.
:math:`\Sigma_{F}` is the covariance matrix of the risk factors.
:math:`\Sigma_{\epsilon}` is the covariance matrix of error terms.
:math:`\mu_{f}` is the expected returns vector obtained with the
risk factor model.
:math:`\Sigma_{f}` is the covariance matrix obtained with the risk
factor model.
Parameters
----------
X : DataFrame of shape (n_samples, n_features)
Features matrix, where n_samples is the number of samples and
n_features is the number of features.
Y : DataFrame of shape (n_samples, n_assets)
Target matrix, where n_samples is the number of samples and
n_assets is the number of assets.
B : DataFrame of shape (n_assets, n_features), optional
Loadings matrix. If is not specified, is estimated using
stepwise regression. The default is None.
const : bool, optional
Indicate if the loadings matrix has a constant.
The default is False.
feature_selection : str, 'stepwise' or 'PCR', optional
Indicate the method used to estimate the loadings matrix.
The default is 'stepwise'.
stepwise: str, 'Forward' or 'Backward'
Indicate the method used for stepwise regression.
The default is 'Forward'.
criterion : str, optional
The default is 'pvalue'. Possible values of the criterion used to select
the best features are:
- 'pvalue': select the features based on p-values.
- 'AIC': select the features based on lowest Akaike Information Criterion.
- 'SIC': select the features based on lowest Schwarz Information Criterion.
- 'R2': select the features based on highest R Squared.
- 'R2_A': select the features based on highest Adjusted R Squared.
threshold : scalar, optional
Is the maximum p-value for each variable that will be
accepted in the model. The default is 0.05.
n_components : int, float, None or str, optional
if 1 < n_components (int), it represents the number of components that
will be kept. If 0 < n_components < 1 (float), it represents the
percentage of variance that is explained by the components kept.
See `PCA <https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html>`_
for more details. The default is 0.95.
error : bool
Indicate if diagonal covariance matrix of errors is included (only
when B is estimated through a regression).
**kwargs : dict
Other variables related to the expected returns and covariance estimation.
Returns
-------
mu : DataFrame
The mean vector of risk factors model.
cov : DataFrame
The covariance matrix of risk factors model.
returns : DataFrame
The returns based on a risk factor model.
nav : DataFrame
The cumulated uncompound returns based on a risk factor model.
Raises
------
ValueError
When the value cannot be calculated.
"""
if not isinstance(X, pd.DataFrame) and not isinstance(Y, pd.DataFrame):
raise ValueError("X and Y must be DataFrames")
if B is None:
B = loadings_matrix(
X,
Y,
feature_selection=feature_selection,
stepwise=stepwise,
criterion=criterion,
threshold=threshold,
n_components=n_components,
verbose=False,
)
elif not isinstance(B, pd.DataFrame):
raise ValueError("B must be a DataFrame")
X1 = X.copy()
if const == True or "const" in B.columns.tolist():
X1 = sm.add_constant(X)
assets = Y.columns.tolist()
dates = X.index.tolist()
mu_f = np.array(mean_vector(X1, method=method_mu, **kwargs), ndmin=2)
S_f = np.array(covar_matrix(X1, method=method_cov, **kwargs), ndmin=2)
B = np.array(B, ndmin=2)
returns = np.array(X1, ndmin=2) @ B.T
mu = B @ mu_f.T
if error == True:
e = np.array(Y, ndmin=2) - returns
S_e = np.diag(np.var(np.array(e), ddof=1, axis=0))
S = B @ S_f @ B.T + S_e
elif error == False:
S = B @ S_f @ B.T
mu = pd.DataFrame(mu.T, columns=assets)
cov = pd.DataFrame(S, index=assets, columns=assets)
returns = pd.DataFrame(returns, index=dates, columns=assets)
nav = returns.cumsum()
return mu, cov, returns, nav
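# --- Editorial usage sketch (illustrative, not part of the original module) ---
# Estimates the factor-model mean vector and covariance matrix from synthetic
# factor and asset returns; names, betas and noise levels are assumptions.
def _example_risk_factors():
    rng = np.random.default_rng(6)
    X = pd.DataFrame(rng.normal(0.0, 0.01, size=(250, 2)), columns=["F1", "F2"])
    Y = pd.DataFrame({
        "Asset_1": 1.0 * X["F1"] + rng.normal(scale=0.002, size=250),
        "Asset_2": 0.5 * X["F1"] - 0.5 * X["F2"] + rng.normal(scale=0.002, size=250),
    })
    mu, cov, returns, nav = risk_factors(X, Y, feature_selection="stepwise")
    return mu, cov, returns, nav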
def black_litterman(
X, w, P, Q, delta=1, rf=0, eq=True, method_mu="hist", method_cov="hist", **kwargs
):
r"""
Estimate the expected returns vector and covariance matrix based
on the Black Litterman model :cite:`b-BlackLitterman` :cite:`b-Black1`.
.. math::
\begin{aligned}
\Pi & = \delta \Sigma w \\
\Pi_{BL} & = \left [ (\tau\Sigma)^{-1}+ P^{T} \Omega^{-1}P \right]^{-1}
\left[(\tau\Sigma)^{-1} \Pi + P^{T} \Omega^{-1} Q \right] \\
M & = \left((\tau\Sigma)^{-1} + P^{T}\Omega^{-1} P \right)^{-1} \\
\mu_{BL} & = \Pi_{BL} + r_{f} \\
\Sigma_{BL} & = \Sigma + M \\
\end{aligned}
where:
:math:`r_{f}` is the risk free rate.
:math:`\delta` is the risk aversion factor.
:math:`\Pi` is the equilibrium excess returns.
:math:`\Sigma` is the covariance matrix.
:math:`P` is the views matrix.
:math:`Q` is the views returns matrix.
:math:`\Omega` is the covariance matrix of the error views.
:math:`\mu_{BL}` is the mean vector obtained with the black
litterman model.
:math:`\Sigma_{BL}` is the covariance matrix obtained with the black
litterman model.
Parameters
----------
X : DataFrame of shape (n_samples, n_assets)
Assets matrix, where n_samples is the number of samples and
n_assets is the number of assets.
w : DataFrame of shape (n_assets, 1)
Weights matrix, where n_assets is the number of assets.
P : DataFrame of shape (n_views, n_assets)
Analyst's views matrix, can be relative or absolute.
Q : DataFrame of shape (n_views, 1)
Expected returns of analyst's views.
delta : float, optional
Risk aversion factor. The default value is 1.
rf : scalar, optional
Risk free rate. The default is 0.
eq : bool, optional
Indicate whether to use equilibrium or historical excess returns.
The default is True.
method_mu : str, can be {'hist', 'ewma1' or 'ewma2'}
The method used to estimate the expected returns.
The default value is 'hist'.
- 'hist': use historical estimates.
- 'ewma1': use ewma with adjust=True, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.
- 'ewma2': use ewma with adjust=False, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.
method_cov : str, optional
The method used to estimate the covariance matrix:
The default is 'hist'. Possible values are:
- 'hist': use historical estimates.
- 'ewma1': use ewma with adjust=True, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.
- 'ewma2': use ewma with adjust=False, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.
- 'ledoit': use the Ledoit and Wolf Shrinkage method.
- 'oas': use the Oracle Approximation Shrinkage method.
- 'shrunk': use the basic Shrunk Covariance method.
- 'gl': use the basic Graphical Lasso Covariance method.
- 'jlogo': use the j-LoGo Covariance method. For more information see: :cite:`b-jLogo`.
- 'fixed': denoise using fixed method. For more information see chapter 2 of :cite:`b-MLforAM`.
- 'spectral': denoise using spectral method. For more information see chapter 2 of :cite:`b-MLforAM`.
- 'shrink': denoise using shrink method. For more information see chapter 2 of :cite:`b-MLforAM`.
**kwargs : dict
Other variables related to the expected returns and covariance estimation.
Returns
-------
mu : DataFrame
The mean vector of Black Litterman model.
cov : DataFrame
The covariance matrix of Black Litterman model.
w : DataFrame
The equilibrium weights of Black Litterman model, without constraints.
Raises
------
ValueError
When the value cannot be calculated.
"""
if not isinstance(X, pd.DataFrame) and not isinstance(w, pd.DataFrame):
raise ValueError("X and w must be DataFrames")
if w.shape[0] > 1 and w.shape[1] > 1:
raise ValueError("w must be a column DataFrame")
assets = X.columns.tolist()
w = np.array(w, ndmin=2)
if w.shape[0] == 1:
w = w.T
mu = np.array(mean_vector(X, method=method_mu, **kwargs), ndmin=2)
S = np.array(covar_matrix(X, method=method_cov, **kwargs), ndmin=2)
P = np.array(P, ndmin=2)
Q = np.array(Q, ndmin=2)
tau = 1 / X.shape[0]
Omega = np.array(np.diag(np.diag(P @ (tau * S) @ P.T)), ndmin=2)
if eq == True:
PI = delta * (S @ w)
elif eq == False:
PI = mu.T - rf
PI_ = inv(inv(tau * S) + P.T @ inv(Omega) @ P) @ (
inv(tau * S) @ PI + P.T @ inv(Omega) @ Q
)
M = inv(inv(tau * S) + P.T @ inv(Omega) @ P)
# PI_1 = PI + (tau * S* P.T) * inv(P * tau * S * P.T + Omega) * (Q - P * PI)
# M = tau * S - (tau * S * P.T) * inv(P * tau * S * P.T + Omega) * P * tau * S
mu = PI_ + rf
mu = mu.T
cov = S + M
w = inv(delta * cov) @ PI_
mu = pd.DataFrame(mu, columns=assets)
cov = pd.DataFrame(cov, index=assets, columns=assets)
w = pd.DataFrame(w, index=assets)
return mu, cov, w
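# --- Editorial usage sketch (illustrative, not part of the original module) ---
# A two-view Black Litterman update on synthetic returns with an equally
# weighted benchmark; the views in P and Q are made up for illustration.
def _example_black_litterman():
    rng = np.random.default_rng(7)
    assets = ["A", "B", "C", "D"]
    X = pd.DataFrame(rng.normal(0.0005, 0.01, size=(250, 4)), columns=assets)
    w = pd.DataFrame([[0.25]] * 4, index=assets)        # benchmark weights
    P = pd.DataFrame([[1, 0, 0, 0],                     # absolute view on A
                      [0, 1, -1, 0]], columns=assets)   # B outperforms C
    Q = pd.DataFrame([[0.001], [0.0005]])               # views' expected returns
    mu_bl, cov_bl, w_bl = black_litterman(X, w, P, Q, delta=2, rf=0, eq=True)
    return mu_bl, cov_bl, w_bl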
def augmented_black_litterman(
X,
w,
F=None,
B=None,
P=None,
Q=None,
P_f=None,
Q_f=None,
delta=1,
rf=0,
eq=True,
const=True,
method_mu="hist",
method_cov="hist",
**kwargs
):
r"""
Estimate the expected returns vector and covariance matrix based
on the Augmented Black Litterman model :cite:`b-WCheung`.
.. math::
\begin{aligned}
\Pi^{a} & = \delta \left [ \begin{array}{c} \Sigma \\ \Sigma_{F} B^{T} \\ \end{array} \right ] w \\
P^{a} & = \left [ \begin{array}{cc} P & 0 \\ 0 & P_{F} \\ \end{array} \right ] \\
Q^{a} & = \left [ \begin{array}{c} Q \\ Q_{F} \\ \end{array} \right ] \\
\Sigma^{a} & = \left [ \begin{array}{cc} \Sigma & B \Sigma_{F}\\ \Sigma_{F} B^{T} & \Sigma_{F} \\ \end{array} \right ] \\
\Omega^{a} & = \left [ \begin{array}{cc} \Omega & 0 \\ 0 & \Omega_{F} \\ \end{array} \right ] \\
\Pi^{a}_{BL} & = \left [ (\tau \Sigma^{a})^{-1} + (P^{a})^{T} (\Omega^{a})^{-1} P^{a} \right ]^{-1}
\left [ (\tau\Sigma^{a})^{-1} \Pi^{a} + (P^{a})^{T} (\Omega^{a})^{-1} Q^{a} \right ] \\
M^{a} & = \left ( (\tau\Sigma^{a})^{-1} + (P^{a})^{T} (\Omega^{a})^{-1} P^{a} \right )^{-1} \\
\mu^{a}_{BL} & = \Pi^{a}_{BL} + r_{f} \\
\Sigma^{a}_{BL} & = \Sigma^{a} + M^{a} \\
\end{aligned}
where:
:math:`r_{f}` is the risk free rate.
:math:`\delta` is the risk aversion factor.
:math:`B` is the loadings matrix.
:math:`\Sigma` is the covariance matrix of assets.
:math:`\Sigma_{F}` is the covariance matrix of factors.
:math:`\Sigma^{a}` is the augmented covariance matrix.
:math:`P` is the assets views matrix.
:math:`Q` is the assets views returns matrix.
:math:`P_{F}` is the factors views matrix.
:math:`Q_{F}` is the factors views returns matrix.
:math:`P^{a}` is the augmented views matrix.
:math:`Q^{a}` is the augmented views returns matrix.
:math:`\Pi^{a}` is the augmented equilibrium excess returns.
:math:`\Omega` is the covariance matrix of errors of assets views.
:math:`\Omega_{F}` is the covariance matrix of errors of factors views.
:math:`\Omega^{a}` is the covariance matrix of errors of augmented views.
:math:`\mu^{a}_{BL}` is the mean vector obtained with the Augmented Black
Litterman model.
:math:`\Sigma^{a}_{BL}` is the covariance matrix obtained with the Augmented
Black Litterman model.
Parameters
----------
X : DataFrame of shape (n_samples, n_assets)
Assets matrix, where n_samples is the number of samples and
n_assets is the number of assets.
w : DataFrame of shape (n_assets, 1)
Weights matrix, where n_assets is the number of assets.
F : DataFrame of shape (n_samples, n_features)
Features matrix, where n_samples is the number of samples and
n_features is the number of features.
B : DataFrame of shape (n_assets, n_features), optional
Loadings matrix. The default is None.
P : DataFrame of shape (n_views, n_assets)
Analyst's views matrix, can be relative or absolute.
Q : DataFrame of shape (n_views, 1)
Expected returns of analyst's views.
P_f : DataFrame of shape (n_views, n_features)
Analyst's factors views matrix, can be relative or absolute.
Q_f : DataFrame of shape (n_views, 1)
Expected returns of analyst's factors views.
delta : float, optional
Risk aversion factor. The default value is 1.
rf : scalar, optional
Risk free rate. The default is 0.
eq : bool, optional
Indicate whether to use equilibrium or historical excess returns.
The default is True.
const : bool, optional
Indicate if the loadings matrix has a constant.
The default is True.
method_mu : str, can be {'hist', 'ewma1' or 'ewma2'}
The method used to estimate the expected returns.
The default value is 'hist'.
- 'hist': use historical estimates.
- 'ewma1': use ewma with adjust=True, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.
- 'ewma2': use ewma with adjust=False, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.
method_cov : str, optional
The method used to estimate the covariance matrix:
The default is 'hist'. Possible values are:
- 'hist': use historical estimates.
- 'ewma1': use ewma with adjust=True, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.
- 'ewma2': use ewma with adjust=False, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.
- 'ledoit': use the Ledoit and Wolf Shrinkage method.
- 'oas': use the Oracle Approximation Shrinkage method.
- 'shrunk': use the basic Shrunk Covariance method.
- 'gl': use the basic Graphical Lasso Covariance method.
- 'jlogo': use the j-LoGo Covariance method. For more information see: :cite:`b-jLogo`.
- 'fixed': denoise using fixed method. For more information see chapter 2 of :cite:`b-MLforAM`.
- 'spectral': denoise using spectral method. For more information see chapter 2 of :cite:`b-MLforAM`.
- 'shrink': denoise using shrink method. For more information see chapter 2 of :cite:`b-MLforAM`.
**kwargs : dict
Other variables related to the expected returns and covariance estimation.
Returns
-------
mu : DataFrame
The mean vector of Augmented Black Litterman model.
cov : DataFrame
The covariance matrix of Augmented Black Litterman model.
w : DataFrame
The equilibrium weights of Augmented Black Litterman model, without constraints.
Raises
------
ValueError
When the value cannot be calculated.
"""
if not isinstance(X, pd.DataFrame) and not isinstance(w, pd.DataFrame):
raise ValueError("X and w must be DataFrames")
if not isinstance(F, pd.DataFrame) and not isinstance(B, pd.DataFrame):
raise ValueError("F and B must be DataFrames")
if w.shape[0] > 1 and w.shape[1] > 1:
raise ValueError("w must be a column DataFrame")
assets = X.columns.tolist()
N = len(assets)
w = np.array(w, ndmin=2)
if w.shape[0] == 1:
w = w.T
if B is not None:
B = np.array(B, ndmin=2)
if const == True:
alpha = B[:, :1]
B = B[:, 1:]
mu = np.array(mean_vector(X, method=method_mu, **kwargs), ndmin=2)
S = np.array(covar_matrix(X, method=method_cov, **kwargs), ndmin=2)
tau = 1 / X.shape[0]
if F is not None:
mu_f = np.array(mean_vector(F, method=method_mu, **kwargs), ndmin=2)
S_f = np.array(covar_matrix(F, method=method_cov, **kwargs), ndmin=2)
if P is not None and Q is not None and P_f is None and Q_f is None:
S_a = S
P_a = P
Q_a = Q
Omega = np.array(np.diag(np.diag(P @ (tau * S) @ P.T)), ndmin=2)
Omega_a = Omega
if eq == True:
PI_a_ = delta * S_a @ w
elif eq == False:
PI_a_ = mu.T - rf
elif P is None and Q is None and P_f is not None and Q_f is not None:
S_a = S_f
P_a = P_f
Q_a = Q_f
Omega_f = np.array(np.diag(np.diag(P_f @ (tau * S_f) @ P_f.T)), ndmin=2)
Omega_a = Omega_f
if eq == True:
PI_a_ = delta * (S_f @ B.T) @ w
elif eq == False:
PI_a_ = mu_f.T - rf
elif P is not None and Q is not None and P_f is not None and Q_f is not None:
S_a = np.hstack((np.vstack((S, S_f @ B.T)), np.vstack((B @ S_f, S_f))))
P = np.array(P, ndmin=2)
Q = np.array(Q, ndmin=2)
P_f = np.array(P_f, ndmin=2)
Q_f = np.array(Q_f, ndmin=2)
zeros_1 = np.zeros((P_f.shape[0], P.shape[1]))
zeros_2 = np.zeros((P.shape[0], P_f.shape[1]))
P_a = np.hstack((np.vstack((P, zeros_1)), np.vstack((zeros_2, P_f))))
Q_a = np.vstack((Q, Q_f))
Omega = np.array(np.diag(np.diag(P @ (tau * S) @ P.T)), ndmin=2)
Omega_f = np.array(np.diag(np.diag(P_f @ (tau * S_f) @ P_f.T)), ndmin=2)
zeros = np.zeros((Omega.shape[0], Omega_f.shape[0]))
Omega_a = np.hstack((np.vstack((Omega, zeros.T)), np.vstack((zeros, Omega_f))))
if eq == True:
PI_a_ = delta * (np.vstack((S, S_f @ B.T)) @ w)
elif eq == False:
PI_a_ = np.vstack((mu.T, mu_f.T)) - rf
PI_a = inv(inv(tau * S_a) + P_a.T @ inv(Omega_a) @ P_a) @ (
inv(tau * S_a) @ PI_a_ + P_a.T @ inv(Omega_a) @ Q_a
)
M_a = inv(inv(tau * S_a) + P_a.T @ inv(Omega_a) @ P_a)
# PI_a = PI_a_ + (tau * S_a @ P_a.T) * inv(P_a @ tau * S_a @ P_a.T + Omega) * (Q_a - P_a @ PI_a_)
# M = tau * S_a - (tau * S_a @ P_a.T) * inv(P_a @ tau * S_a @ P_a.T + Omega_a) @ P_a @ tau * S_a
mu_a = PI_a + rf
mu_a = mu_a.T
cov_a = S_a + M_a
w_a = inv(delta * cov_a) @ PI_a
if P is None and Q is None and P_f is not None and Q_f is not None:
mu_a = mu_a @ B.T
cov_a = B @ cov_a @ B.T
w_a = inv(delta * cov_a) @ B @ PI_a
if const == True:
mu_a = mu_a[:, :N] + alpha.T
mu_a = pd.DataFrame(mu_a[:, :N], columns=assets)
cov_a = pd.DataFrame(cov_a[:N, :N], index=assets, columns=assets)
w_a = pd.DataFrame(w_a[:N, 0], index=assets)
return mu_a, cov_a, w_a
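# --- Editorial usage sketch (illustrative, not part of the original module) ---
# Combines one asset view and one factor view in the augmented model; the
# loadings matrix comes from loadings_matrix above, and every name, beta and
# view value below is an assumption made purely for illustration.
def _example_augmented_black_litterman():
    rng = np.random.default_rng(8)
    assets, factors = ["A", "B", "C"], ["F1", "F2"]
    F = pd.DataFrame(rng.normal(0.0, 0.01, size=(250, 2)), columns=factors)
    X = pd.DataFrame({
        "A": 1.0 * F["F1"] + rng.normal(scale=0.002, size=250),
        "B": 0.6 * F["F1"] + 0.4 * F["F2"] + rng.normal(scale=0.002, size=250),
        "C": 0.8 * F["F2"] + rng.normal(scale=0.002, size=250),
    })
    w = pd.DataFrame([[1 / 3]] * 3, index=assets)
    B = loadings_matrix(F, X)                       # includes a 'const' column
    P = pd.DataFrame([[1, -1, 0]], columns=assets)  # relative view: A over B
    Q = pd.DataFrame([[0.0005]])
    P_f = pd.DataFrame([[1, 0]], columns=factors)   # absolute view on F1
    Q_f = pd.DataFrame([[0.001]])
    mu_a, cov_a, w_a = augmented_black_litterman(
        X, w, F=F, B=B, P=P, Q=Q, P_f=P_f, Q_f=Q_f, delta=2, rf=0, const=True)
    return mu_a, cov_a, w_a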
def black_litterman_bayesian(
X,
F,
B,
P_f,
Q_f,
delta=1,
rf=0,
eq=True,
const=True,
diag=True,
method_mu="hist",
method_cov="hist",
**kwargs
):
r"""
Estimate the expected returns vector and covariance matrix based
on the black litterman model :cite:`b-BLB`.
.. math::
\begin{aligned}
\Sigma_{F} & = B \Sigma_{F} B^{T} + D \\
\overline{\Pi}_{F} & = \left ( \Sigma_{F}^{-1} + P_{F}^{T}\Omega_{F}^{-1}P_{F} \right )^{-1} \left ( \Sigma_{F}^{-1}\Pi_{F} + P_{F}^{T}\Omega_{F}^{-1}Q_{F} \right) \\
\overline{\Sigma}_{F} & = \left ( \Sigma_{F}^{-1} + P_{F}^{T}\Omega_{F}^{-1}P_{F} \right )^{-1} \\
\Sigma_{BLB} & = \left( \Sigma^{-1} - \Sigma^{-1} B \left( \overline{\Sigma}_{F}^{-1} + B^{T}\Sigma^{-1}B \right)^{-1} B^{T}\Sigma^{-1} \right )^{-1} \\
\mu_{BLB} & = \Sigma_{BLB} \left ( \Sigma^{-1} B \left( \overline{\Sigma}_{F}^{-1} +B^{T}\Sigma^{-1}B \right)^{-1} \overline{\Sigma}_{F}^{-1} \overline{\Pi}_{F} \right ) + r_{f} \\
\end{aligned}
where:
:math:`r_{f}` is the risk free rate.
:math:`B` is the loadings matrix.
:math:`D` is a diagonal matrix of variance of errors of a factor model.
:math:`\Sigma` is the covariance matrix obtained with a factor model.
:math:`\Pi_{F}` is the equilibrium excess returns of factors.
:math:`\overline{\Pi}_{F}` is the posterior excess returns of factors.
:math:`\Sigma_{F}` is the covariance matrix of factors.
:math:`\overline{\Sigma}_{F}` is the posterior covariance matrix of factors.
:math:`P_{F}` is the factors views matrix.
:math:`Q_{F}` is the factors views returns matrix.
:math:`\Omega_{F}` is the covariance matrix of errors of factors views.
:math:`\mu_{BLB}` is the mean vector obtained with the Black
Litterman Bayesian model or posterior predictive mean.
:math:`\Sigma_{BLB}` is the covariance matrix obtained with the Black
Litterman Bayesian model or posterior predictive covariance.
Parameters
----------
X : DataFrame of shape (n_samples, n_assets)
Assets matrix, where n_samples is the number of samples and
n_assets is the number of assets.
F : DataFrame of shape (n_samples, n_features)
Features matrix, where n_samples is the number of samples and
n_features is the number of features.
B : DataFrame of shape (n_assets, n_features), optional
Loadings matrix. The default is None.
P_f : DataFrame of shape (n_views, n_features)
Analyst's factors views matrix, can be relative or absolute.
Q_f : DataFrame of shape (n_views, 1)
Expected returns of analyst's factors views.
delta : float, optional
Risk aversion factor. The default value is 1.
rf : scalar, optional
Risk free rate. The default is 0.
eq : bool, optional
        Indicate if the model uses equilibrium or historical excess returns.
The default is True.
const : bool, optional
Indicate if the loadings matrix has a constant.
The default is True.
diag : bool, optional
        Indicate if we use the diagonal matrix to calculate the covariance matrix
        of the factor model; only useful when we work with a factor model based on
        a regression model (only equity portfolios).
The default is True.
method_mu : str, can be {'hist', 'ewma1' or 'ewma2'}
The method used to estimate the expected returns.
The default value is 'hist'.
- 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.
- 'ewma2': use ewma with adjust=False, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.
method_cov : str, optional
The method used to estimate the covariance matrix:
        The default is 'hist'. Possible values are:
- 'hist': use historical estimates.
        - 'ewma1': use ewma with adjust=True, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.
- 'ewma2': use ewma with adjust=False, see `EWM <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_ for more details.
- 'ledoit': use the Ledoit and Wolf Shrinkage method.
- 'oas': use the Oracle Approximation Shrinkage method.
- 'shrunk': use the basic Shrunk Covariance method.
- 'gl': use the basic Graphical Lasso Covariance method.
- 'jlogo': use the j-LoGo Covariance method. For more information see: :cite:`b-jLogo`.
- 'fixed': denoise using fixed method. For more information see chapter 2 of :cite:`b-MLforAM`.
- 'spectral': denoise using spectral method. For more information see chapter 2 of :cite:`b-MLforAM`.
- 'shrink': denoise using shrink method. For more information see chapter 2 of :cite:`b-MLforAM`.
**kwargs : dict
Other variables related to the expected returns and covariance estimation.
Returns
-------
mu : DataFrame
The mean vector of Black Litterman model.
cov : DataFrame
The covariance matrix of Black Litterman model.
w : DataFrame
The equilibrium weights of Black Litterman model, without constraints.
Raises
------
ValueError
When the value cannot be calculated.
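
    Examples
    --------
    A minimal usage sketch (``X``, ``F``, ``B``, ``P_f`` and ``Q_f`` are assumed
    to be user-provided DataFrames with compatible shapes; they are not defined
    here)::

        mu, cov, w = black_litterman_bayesian(X=X, F=F, B=B, P_f=P_f, Q_f=Q_f,
                                              delta=2, rf=0, eq=True, const=True,
                                              diag=True, method_mu='hist',
                                              method_cov='hist')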
"""
if not isinstance(X, pd.DataFrame):
raise ValueError("X must be DataFrames")
if not isinstance(F, pd.DataFrame) and not isinstance(B, pd.DataFrame):
raise ValueError("F and B must be DataFrames")
assets = X.columns.tolist()
if B is not None:
B = np.array(B, ndmin=2)
if const == True:
alpha = B[:, :1]
B = B[:, 1:]
mu_f = np.array(mean_vector(F, method=method_mu, **kwargs), ndmin=2)
mu_f = (mu_f - rf).T
tau = 1 / X.shape[0]
S_f = np.array(covar_matrix(F, method=method_cov, **kwargs), ndmin=2)
S = B @ S_f @ B.T
if diag == True:
D = X.to_numpy() - F @ B.T
D = np.diag(D.var())
S = S + D
Omega_f = np.array(np.diag(np.diag(P_f @ (tau * S_f) @ P_f.T)), ndmin=2)
S_hat = inv(inv(S_f) + P_f.T @ inv(Omega_f) @ P_f)
Pi_hat = S_hat @ (inv(S_f) @ mu_f + P_f.T @ inv(Omega_f) @ Q_f)
S_blb = inv(inv(S) - inv(S) @ B @ inv(inv(S_hat) + B.T @ inv(S) @ B) @ B.T @ inv(S))
Pi_blb = (
S_blb @ inv(S) @ B @ inv(inv(S_hat) + B.T @ inv(S) @ B) @ inv(S_hat) @ Pi_hat
)
mu = Pi_blb + rf
if const == True:
mu = mu + alpha
mu = mu.T
cov = S_blb
w = inv(delta * cov) @ mu.T
    mu = pd.DataFrame(mu, columns=assets)
    cov = pd.DataFrame(cov, index=assets, columns=assets)
    w = pd.DataFrame(w, index=assets)

    return mu, cov, w
# -*- coding: utf-8 -*-
"""
Tests for abagen.correct module
"""
import itertools
import numpy as np
import pandas as pd
import pytest
import scipy.stats as sstats
from abagen import allen, correct, io
from abagen.utils import flatten_dict
@pytest.fixture(scope='module')
def donor_expression(testfiles, atlas):
return allen.get_expression_data(atlas['image'], atlas['info'],
exact=False, return_donors=True,
donors=['12876', '15496'])
def test__unpack_tuple():
assert correct._unpack_tuple((3,)) == 3
assert correct._unpack_tuple((3, 3)) == (3, 3)
assert correct._unpack_tuple([2]) == 2
assert correct._unpack_tuple([2, 4]) == [2, 4]
assert correct._unpack_tuple(np.array([3])) == 3
assert np.all(correct._unpack_tuple(np.array([3, 3])) == [3, 3])
def test__batch():
rs = np.random.RandomState(1234)
# p-values for ANOVA should all be ~0 (large group differences) before
# batch correction
y = [rs.normal(size=(100, 1000)) + f for f in [5, 0, 0]]
assert np.allclose(sstats.f_oneway(*y)[1], 0)
# F-values for ANOVA should all be ~0 (no group differences) after batch
# correction; p-values returned here are sometimes NaN so not a good test
out = correct._batch_correct(y)
assert np.allclose(sstats.f_oneway(*out)[0], 0)
# mean expressions after correction should be ~equal
assert np.allclose([o.mean() for o in out], 1.24871965683026)
with pytest.raises(ValueError):
correct._batch_correct([y[0]])
def test__rescale():
rs = np.random.RandomState(1234)
y = rs.normal(size=(100, 1000)) + 10
out = correct._rescale(y)
    # default max = 1, min = 0
assert np.allclose(out.max(axis=0), 1) and np.allclose(out.min(axis=0), 0)
# can specify alternative min/max
out = correct._rescale(y, low=5, high=6)
assert np.allclose(out.max(axis=0), 6) and np.allclose(out.min(axis=0), 5)
# different axis works, too!
out = correct._rescale(y, axis=1)
assert np.allclose(out.max(axis=1), 1) and np.allclose(out.min(axis=1), 0)
@pytest.mark.parametrize('a', [0, 1])
def test__rs(a):
rs = np.random.RandomState(1234)
# create an array with a pretty ridiculous outlier effect to try and fix
y = rs.normal(size=(100, 1000))
y[0] += 1000
y[:, 0] += 1000
out = correct._rs(y, axis=a)
# max will always be less than one, min will always be greater than zero
assert np.all(out.max(axis=a) <= 1) and np.all(out.min(axis=a) >= 0)
# we should have reduced skewness / kurtosis compared to the original
assert np.all(sstats.skew(out, axis=a) < sstats.skew(y, axis=a))
assert np.all(sstats.kurtosis(out, axis=a) < sstats.kurtosis(y, axis=a))
# this is a weird test; we're gonna bin the data at 0.2 intervals and make
# sure no bins are empty. if one is something probably went wrong, right?
for low in np.arange(0, 1, 0.2):
hi = low + 0.2 + np.spacing(1) # include 1
assert np.all(np.sum(np.logical_and(out >= low, out < hi), axis=a) > 0)
@pytest.mark.parametrize('a', [0, 1])
def test__srs(a):
rs = np.random.RandomState(1234)
# create an array with a pretty ridiculous outlier effect to try and fix
y = rs.normal(size=(100, 1000))
y[0] += 1000
y[:, 0] += 1000
out = correct._srs(y, axis=a)
# max will always be one, min will always be zero
assert np.allclose(out.max(axis=a), 1) and np.allclose(out.min(axis=a), 0)
# we should have reduced skewness / kurtosis compared to the original
assert np.all(sstats.skew(out, axis=a) < sstats.skew(y, axis=a))
assert np.all(sstats.kurtosis(out, axis=a) < sstats.kurtosis(y, axis=a))
# this is a weird test; we're gonna bin the data at 0.2 intervals and make
# sure no bins are empty. if one is something probably went wrong, right?
for low in np.arange(0, 1, 0.2):
hi = low + 0.2 + np.spacing(1) # include 1
assert np.all(np.sum(np.logical_and(out >= low, out < hi), axis=a) > 0)
@pytest.mark.parametrize('method', [
'center', 'zscore', 'minmax', 'sigmoid', 'scaled_sigmoid',
'scaled_sigmoid_quantiles', 'robust_sigmoid', 'scaled_robust_sigmoid',
'mixed_sigmoid'
])
def test_normalize_expression_real(testfiles, method):
# load in data and add some NaN values for "realness"
micro = [
io.read_microarray(f).T
for f in flatten_dict(testfiles, 'microarray').values()
]
inds = [[5, 15, 25], [0, 10, 20]]
for n, idx in enumerate(inds):
micro[n].iloc[idx] = np.nan
minmax = [
'minmax', 'scaled_sigmoid', 'scaled_sigmoid_quantiles',
'scaled_robust_sigmoid', 'mixed_sigmoid'
]
out = correct.normalize_expression(micro, norm=method)
for exp, idx in zip(out, inds):
assert np.all(np.isnan(exp.iloc[idx]))
exp = exp.dropna(axis=1, how='all')
if method in minmax:
assert np.allclose(exp.max(axis=0), 1)
assert np.allclose(exp.min(axis=0), 0)
elif method == 'robust_sigmoid':
assert np.all(exp.max(axis=0) <= 1)
assert np.all(exp.min(axis=0) >= 0)
elif method in ['center', 'zscore']:
assert np.allclose(exp.mean(axis=0), 0)
if method == 'zscore':
assert np.allclose(exp.std(axis=0, ddof=1), 1)
# # batch correct: force means identical
# out = correct.normalize_expression(micro, norm='batch')
# assert np.allclose(*[e.mean(axis=0, skipna=True) for e in out])
# # the NaN values should still be there, though
# for exp, idx in zip(out, inds):
# assert np.all(np.isnan(exp.iloc[idx]))
# invalid norm parameter
with pytest.raises(ValueError):
correct.normalize_expression(micro, norm='notanorm')
# # can't do batch correction with only one donor
# with pytest.raises(ValueError):
# correct.normalize_expression(micro[0], norm='batch')
def test_remove_distance(donor_expression, atlas):
    expr = pd.concat(donor_expression)
import pytest
from pandas import (
DataFrame,
Index,
Series,
)
import pandas._testing as tm
@pytest.mark.parametrize("n, frac", [(2, None), (None, 0.2)])
def test_groupby_sample_balanced_groups_shape(n, frac):
values = [1] * 10 + [2] * 10
df = DataFrame({"a": values, "b": values})
result = df.groupby("a").sample(n=n, frac=frac)
values = [1] * 2 + [2] * 2
expected = DataFrame({"a": values, "b": values}, index=result.index)
tm.assert_frame_equal(result, expected)
result = df.groupby("a")["b"].sample(n=n, frac=frac)
expected = Series(values, name="b", index=result.index)
tm.assert_series_equal(result, expected)
def test_groupby_sample_unbalanced_groups_shape():
values = [1] * 10 + [2] * 20
df = DataFrame({"a": values, "b": values})
result = df.groupby("a").sample(n=5)
values = [1] * 5 + [2] * 5
expected = DataFrame({"a": values, "b": values}, index=result.index)
tm.assert_frame_equal(result, expected)
result = df.groupby("a")["b"].sample(n=5)
expected = Series(values, name="b", index=result.index)
tm.assert_series_equal(result, expected)
def test_groupby_sample_index_value_spans_groups():
values = [1] * 3 + [2] * 3
df = DataFrame({"a": values, "b": values}, index=[1, 2, 2, 2, 2, 2])
result = df.groupby("a").sample(n=2)
values = [1] * 2 + [2] * 2
expected = DataFrame({"a": values, "b": values}, index=result.index)
tm.assert_frame_equal(result, expected)
result = df.groupby("a")["b"].sample(n=2)
expected = Series(values, name="b", index=result.index)
tm.assert_series_equal(result, expected)
def test_groupby_sample_n_and_frac_raises():
df = DataFrame({"a": [1, 2], "b": [1, 2]})
msg = "Please enter a value for `frac` OR `n`, not both"
with pytest.raises(ValueError, match=msg):
df.groupby("a").sample(n=1, frac=1.0)
with pytest.raises(ValueError, match=msg):
df.groupby("a")["b"].sample(n=1, frac=1.0)
def test_groupby_sample_frac_gt_one_without_replacement_raises():
df = DataFrame({"a": [1, 2], "b": [1, 2]})
msg = "Replace has to be set to `True` when upsampling the population `frac` > 1."
with pytest.raises(ValueError, match=msg):
df.groupby("a").sample(frac=1.5, replace=False)
with pytest.raises(ValueError, match=msg):
df.groupby("a")["b"].sample(frac=1.5, replace=False)
@pytest.mark.parametrize("n", [-1, 1.5])
def test_groupby_sample_invalid_n_raises(n):
df = DataFrame({"a": [1, 2], "b": [1, 2]})
if n < 0:
msg = "Please provide positive value"
else:
msg = "Only integers accepted as `n` values"
with pytest.raises(ValueError, match=msg):
df.groupby("a").sample(n=n)
with pytest.raises(ValueError, match=msg):
df.groupby("a")["b"].sample(n=n)
def test_groupby_sample_oversample():
values = [1] * 10 + [2] * 10
df = DataFrame({"a": values, "b": values})
result = df.groupby("a").sample(frac=2.0, replace=True)
values = [1] * 20 + [2] * 20
expected = DataFrame({"a": values, "b": values}, index=result.index)
tm.assert_frame_equal(result, expected)
result = df.groupby("a")["b"].sample(frac=2.0, replace=True)
expected = Series(values, name="b", index=result.index)
tm.assert_series_equal(result, expected)
def test_groupby_sample_without_n_or_frac():
values = [1] * 10 + [2] * 10
df = DataFrame({"a": values, "b": values})
result = df.groupby("a").sample(n=None, frac=None)
expected = DataFrame({"a": [1, 2], "b": [1, 2]}, index=result.index)
tm.assert_frame_equal(result, expected)
result = df.groupby("a")["b"].sample(n=None, frac=None)
    expected = Series([1, 2], name="b", index=result.index)
    tm.assert_series_equal(result, expected)
from pandas.core.frame import DataFrame
import streamlit as st
from re import U, split, sub
import numpy as np
import pandas as pd
from functools import reduce
from nested_dict import nested_dict
from pprint import pprint
import json
import os
import pandas.io.json as pdjson
import seaborn as sns
from apps import benchstruct
def app():
st.title("Sequential Benchmarks")
# Problem : right now the structure is a nested dict of
# `(hostname * (timestamp * (variants list)) dict ) dict`
    # and this nested structure, although it works, is a bit difficult to work with,
    # so we need to create a class object which is a record type and add functions to manipulate it.
# <host 1>
# |--- <timestamp 1>
# |--- <commit 1>
# |--- <variant 1>
# |--- <variant 2>
# ....
# ....
# ....
# |--- <variant n>
# |--- <commit 2>
# ....
# ....
# ....
# ....
# ....
# ....
# |--- <commit n>
# ....
# ....
# ....
# ....
# ....
# ....
# ....
# |--- <timestamp n>
# ....
# ....
# <host 2>
# ....
# ....
# ....
# <host n>
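    # For illustration, the parsed structure is assumed to look roughly like
    # (hostnames, timestamps and commit ids below are hypothetical placeholders):
    #
    #     {"host-1": {"2022-01-01_12-00": {"abc1234": ["<variant file>", ...]}}}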
current = os.getcwd().split('/')
current.pop()
artifacts_dir = '/'.join(current) + '/sandmark-nightly'
# print(artifacts_dir)
benches = benchstruct.BenchStruct("sequential", artifacts_dir, "_1.orun.summary.bench")
benches.add_files(benches.get_bench_files())
benches.sort()
st.header("Select variants")
n = int(st.text_input('Number of variants','2', key=benches.config["bench_type"]))
containers = [st.columns(3) for i in range(n)]
    # [[a11, a12 ... a1n], [a21, a22 ... a2n], ... [am1, am2 ... amn]] => [a11, a12, ..., a1n, a21, ..., amn]
def flatten(lst):
return reduce(lambda a, b: a + b, lst)
# [(a1, b1), (a2, b2) ... (an, bn)] => ([a1, a2, ... an], [b1, b2, ... bn])
def unzip(lst):
return (list(zip(*lst)))
def unzip_dict(d):
a = unzip(list(d))
# print(a)
(x, y) = a[0], flatten(a[1])
return (x, y)
def fmt_variant(commit, variant):
return (variant.split('_')[0] + '+' + str(commit) + '_' + variant.split('_')[1])
def unfmt_variant(variant):
commit = variant.split('_')[0].split('+')[-1]
variant_root = variant.split('_')[1]
variant_stem = variant.split('_')[0].split('+')
variant_stem.pop()
variant_stem = reduce(lambda a, b: b if a == "" else a + "+" + b, variant_stem, "")
new_variant = variant_stem + '_' + variant_root
# st.write(new_variant)
return (commit , new_variant)
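    # Illustrative round trip (values are hypothetical):
    #   fmt_variant('abc1234', '5.1.0+stock_1')   -> '5.1.0+stock+abc1234_1'
    #   unfmt_variant('5.1.0+stock+abc1234_1')    -> ('abc1234', '5.1.0+stock_1')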
def get_selected_values(n):
lst = []
for i in range(n):
# create the selectbox in columns
host_val = containers[i][0].selectbox('hostname', benches.structure.keys(), key = str(i) + '0_' + benches.config["bench_type"])
timestamp_val = containers[i][1].selectbox('timestamp', benches.structure[host_val].keys(), key = str(i) + '1_' + benches.config["bench_type"])
# st.write(benches.structure)
commits, variants = unzip_dict((benches.structure[host_val][timestamp_val]).items())
# st.write(variants)
fmtted_variants = [fmt_variant(c, v) for c,v in zip(commits, variants)]
# st.write(fmtted_variant)
variant_val = containers[i][2].selectbox('variant', fmtted_variants, key = str(i) + '2_' + benches.config["bench_type"])
selected_commit, selected_variant = unfmt_variant(variant_val)
lst.append({"host" : host_val, "timestamp" : timestamp_val, "commit" : selected_commit, "variant" : selected_variant})
return lst
selected_benches = benchstruct.BenchStruct("sequential", artifacts_dir, "_1.orun.summary.bench")
_ = [selected_benches.add(f["host"], f["timestamp"], f["commit"], f["variant"]) for f in get_selected_values(n)]
selected_benches.sort()
# Expander for showing bench files
with st.expander("Show metadata of selected benchmarks"):
st.write(selected_benches.structure)
selected_files = flatten(selected_benches.to_filepath())
def dataframe_intersection(data_frames):
intersection_set_list = [set(df['name']) for df in data_frames]
list_diff = list(reduce(lambda x, y: x.intersection(y), intersection_set_list))
new_data_frames = []
for elem in list_diff:
for df in data_frames:
new_data_frames.append(df[(df.name == elem)])
list_diff.sort()
# st.write(list_diff)
return new_data_frames
def get_dataframe(file):
# json to dataframe
# print(file)
with open(file) as f:
data = []
for l in f:
temp = json.loads(l)
data.append(temp)
df = pd.json_normalize(data)
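        # Assumed path layout (derived from how BenchStruct composes file paths):
        #   <artifacts_dir>/<bench_type>/<hostname>/<timestamp>/<commit>/<variant file>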
value = file.split('/' + benches.config["bench_type"] + '/')[1]
date = value.split('/')[1].split('_')[0]
commit_id = value.split('/')[2][:7]
variant = value.split('/')[3].split('_')[0]
df["variant"] = variant + '_' + date + '_' + commit_id
return df
def get_dataframes_from_files(files):
data_frames = [get_dataframe(file) for file in files]
new_data_frames = dataframe_intersection(data_frames=data_frames)
        df = pd.concat(new_data_frames, sort=False)
import json
import logging
import re
import sys
from genericpath import exists
from itertools import chain
from os import makedirs, listdir
from os.path import join
from pprint import pformat
import warnings
import gensim
import pandas as pd
from gensim.corpora import Dictionary, MmCorpus
from gensim.models import Doc2Vec, Word2Vec, FastText, LdaModel, LsiModel
from pandas.errors import DtypeWarning
from constants import (
ETL_PATH, NLP_PATH, SMPL_PATH, LDA_PATH, DSETS, PARAMS, NBTOPICS, METRICS, VERSIONS,
EMB_PATH, CORPUS_TYPE, NOUN_PATTERN, BAD_TOKENS, PLACEHOLDER, LSI_PATH,
TPX_PATH)
try:
from tabulate import tabulate
except ImportError as ie:
print(ie)
warnings.simplefilter(action='ignore', category=DtypeWarning)
def tprint(df, head=0, floatfmt=None, to_latex=False):
if df is None:
return
shape = df.shape
if head > 0:
df = df.head(head)
elif head < 0:
df = df.tail(-head)
kwargs = dict()
if floatfmt is not None:
kwargs['floatfmt'] = floatfmt
try:
print(tabulate(df, headers="keys", tablefmt="pipe", showindex="always", **kwargs))
except:
print(df)
print('shape:', shape, '\n')
if to_latex:
print(df.to_latex(bold_rows=True))
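# Illustrative call (assumes `df` is any DataFrame): tprint(df, head=10, floatfmt='.3f')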
def index_level_dtypes(df):
return [
f"{df.index.names[i]}: {df.index.get_level_values(n).dtype}"
for i, n in enumerate(df.index.names)
]
def hms_string(sec_elapsed):
h = int(sec_elapsed / (60 * 60))
m = int((sec_elapsed % (60 * 60)) / 60)
s = sec_elapsed % 60
return f"{h}:{m:>02}:{s:>05.2f}"
def init_logging(name='', basic=True, to_stdout=False, to_file=True, log_file=None, log_dir='../logs'):
if log_file is None:
log_file = name+'.log' if name else 'train.log'
if basic:
if to_file:
if not exists(log_dir):
makedirs(log_dir)
file_path = join(log_dir, log_file)
logging.basicConfig(
filename=file_path,
format='%(asctime)s - %(name)s - %(levelname)s | %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO
)
else:
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s | %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO
)
logger = logging.getLogger()
else:
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s | %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
)
if to_file:
# create path if necessary
if not exists(log_dir):
makedirs(log_dir)
file_path = join(log_dir, log_file)
fh = logging.FileHandler(file_path)
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
if to_stdout:
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.info('')
logger.info('#' * 50)
logger.info('----- %s -----' % name.upper())
logger.info('----- start -----')
logger.info('python: ' + sys.version.replace('\n', ' '))
logger.info('pandas: ' + pd.__version__)
logger.info('gensim: ' + gensim.__version__)
return logger
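# A typical call (a sketch; the exact flags depend on the run configuration):
#
#     logger = init_logging(name='train_lda', to_stdout=True, to_file=True)
#     logger.info('training started')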
def log_args(logger, args):
logger.info('\n' + pformat(vars(args)))
def multiload(dataset, purpose='etl', deprecated=False):
if dataset.lower().startswith('dewa'):
dewac = True
elif dataset.lower().startswith('dewi'):
dewac = False
else:
        print('unknown dataset')
return
if purpose is not None and purpose.lower() in ['simple', 'smpl', 'phrase']:
if dewac:
dpath = join(SMPL_PATH, 'wiki_phrases')
pattern = re.compile(r'^dewac_[0-9]{2}_simple_wiki_phrases\.pickle')
files = sorted([join(dpath, f) for f in listdir(dpath) if pattern.match(f)])
else:
dpath = join(SMPL_PATH, 'dewiki')
pattern = re.compile(r'^dewiki_[0-9]+_[0-9]+__[0-9]+_simple\.pickle')
files = sorted([join(dpath, f) for f in listdir(dpath) if pattern.match(f)])
elif purpose is not None and purpose.lower() == 'nlp':
dpath = NLP_PATH
if dewac:
pattern = re.compile(r'^dewac_[0-9]{2}_nlp\.pickle')
files = sorted([join(dpath, f) for f in listdir(dpath) if pattern.match(f)])
else:
pattern = re.compile(r'^dewiki_[0-9]+_[0-9]+_nlp\.pickle')
files = sorted([join(dpath, f) for f in listdir(dpath) if pattern.match(f)])
else:
dpath = ETL_PATH
if dewac:
pattern = re.compile(r'^dewac_[0-9]{2}\.pickle')
files = sorted([join(dpath, f) for f in listdir(dpath) if pattern.match(f)])
else:
if deprecated:
dpath = join(dpath, 'deprecated')
pattern = re.compile(r'^dewiki_[0-9]{2}.*\.pickle\.gz')
files = sorted([join(dpath, f) for f in listdir(dpath) if pattern.match(f)])
else:
files = [join(dpath, 'dewiki.pickle')]
length = len(files)
for i, file in enumerate(files, 1):
print(f'Reading {i:02d}/{length}: {file}')
yield pd.read_pickle(file)
def reduce_df(df, metrics, params, nbtopics):
if len(metrics) > 0:
try:
df = df.query('metric in @metrics')
except Exception as e:
print(e)
if len(params) > 0:
try:
df = df.query('param_id in @params')
except Exception as e:
print(e)
if len(nbtopics) > 0:
try:
df = df.query('nb_topics in @nbtopics')
except Exception as e:
print(e)
return df
def flatten_columns(df):
df = pd.DataFrame(df.to_records())
def rename_column(col):
if col.startswith('('):
col = eval(col)
if col[0] == 'score':
col = col[1]
else:
col = '_'.join(col)
return col
df = df.rename(columns=rename_column)
df = set_index(df)
return df
def set_index(df):
keys = [
key for key in ['dataset', 'param_id', 'nb_topics', 'topic_idx', 'label_method', 'metric']
if key in df.columns
]
df = df.set_index(keys)
return df
def load_scores(
dataset, version, corpus_type, metrics, params, nbtopics, logg=print, rerank=False, lsi=False
):
dfs = []
tpx_path = join(LDA_PATH, version, corpus_type, 'topics')
if rerank:
file_prefix = join(tpx_path, f'{dataset}_reranker-eval')
elif lsi:
file_prefix = join(tpx_path, f'{dataset}_lsi_{version}_{corpus_type}_topic-scores')
else:
file_prefix = join(tpx_path, f'{dataset}_{version}_{corpus_type}_topic-scores')
try:
file = file_prefix + '.csv'
logg(f'Reading {file}')
df = pd.read_csv(file, header=[0, 1], skipinitialspace=True)
cols = list(df.columns)
for column in cols:
if column[0].startswith('Unnamed'):
col_name = df.loc[0, column]
df[col_name] = df[column]
df = df.drop(column, axis=1)
df = df.drop(0)
if 'nb_topics' in df.columns:
df.nb_topics = df.nb_topics.astype(int)
if 'topic_idx' in df.columns:
df.topic_idx = df.topic_idx.astype(int)
df = df.drop(['stdev', 'support'], level=0, axis=1)
df = set_index(df)
df = flatten_columns(df)
df = reduce_df(df, metrics, params, nbtopics)
dfs.append(df)
except Exception as e:
logg(e)
try:
file = file_prefix + '_germanet.csv'
logg(f'Reading {file}')
df = pd.read_csv(file, header=0)
df = set_index(df)
df = reduce_df(df, metrics, params, nbtopics)
dfs.append(df)
except Exception as e:
logg(e)
return pd.concat(dfs, axis=1)
def load(*args, logger=None, logg=print):
"""
work in progress: may not work for all cases, especially not yet for reading distributed
    datasets like dewiki and dewac.
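
    Example (a sketch -- the accepted argument values depend on the constants
    DSETS, PARAMS, NBTOPICS, METRICS and VERSIONS in ``constants.py``)::

        topics = load('dewac', 'topics', 'e42', 100)
        scores = load('dewac', 'scores', 'default')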
"""
logg = logger.info if logger else logg
if not args:
logg('no arguments, no load')
return
single = {
'hashmap': join(ETL_PATH, 'dewiki_hashmap.pickle'),
'meta': join(ETL_PATH, 'dewiki_metadata.pickle'),
'phrases': join(ETL_PATH, 'dewiki_phrases_lemmatized.pickle'),
'links': join(ETL_PATH, 'dewiki_links.pickle'),
'categories': join(ETL_PATH, 'dewiki_categories.pickle'),
'disamb': join(ETL_PATH, 'dewiki_disambiguation.pickle'),
'wikt': join(ETL_PATH, 'wiktionary_lemmatization_map.pickle'),
}
dataset = None
purposes = {
'goodids', 'etl', 'nlp', 'simple', 'smpl', 'wiki_phrases', 'embedding',
'topic', 'topics', 'label', 'labels', 'lda', 'ldamodel', 'score', 'scores',
'lemmap', 'disamb', 'dict', 'corpus', 'texts', 'wiki_scores', 'x2v_scores',
'rerank', 'rerank_score', 'rerank_scores', 'rerank_eval'
}
purpose = None
version = None
corpus_type = None
params = []
nbtopics = []
metrics = []
deprecated = False
dsets = (
list(DSETS.keys())
+ list(DSETS.values())
+ ['gurevych', 'gur', 'simlex', 'ws', 'rel', 'similarity', 'survey']
)
if isinstance(args, str):
args = [args]
args = [arg.replace('-', '_') if isinstance(arg, str) else arg for arg in args]
# --- parse args ---
for arg in args:
arg = arg.lower() if isinstance(arg, str) else arg
if arg in single:
if arg == 'phrases' and 'lemmap' in args:
dataset = 'dewiki_phrases'
purpose = 'lemmap'
else:
purpose = 'single'
dataset = arg
break
elif not dataset and arg in dsets:
dataset = DSETS.get(arg, arg)
elif not purpose and arg in purposes:
purpose = arg
elif not purpose and any([s in arg for s in ['d2v', 'w2v', 'ftx'] if isinstance(arg, str)]):
purpose = 'embedding'
dataset = arg
elif arg in PARAMS:
params.append(arg)
elif arg in NBTOPICS:
nbtopics.append(arg)
elif arg in METRICS:
metrics.append(arg)
elif not version and arg in VERSIONS:
version = arg
elif not corpus_type and arg in CORPUS_TYPE:
corpus_type = arg
elif arg == 'deprecated':
deprecated = True
# --- setting default values ---
if version is None:
version = 'noun'
if corpus_type is None:
corpus_type = 'bow'
if 'default' in args:
params.append('e42')
nbtopics.append('100')
metrics.append('ref')
# --- single ---
if purpose == 'single':
df = pd.read_pickle(single[dataset])
if 'phrases' in args and 'minimal' in args:
df = df.set_index('token').text
df = df[df.str.match(NOUN_PATTERN)]
return df
    # --- good_ids ---
elif purpose == 'goodids' and dataset in ['dewac', 'dewiki']:
file = join(ETL_PATH, f'{dataset}_good_ids.pickle')
logg(f'Loading {file}')
return pd.read_pickle(file)
# --- lemmap ---
elif purpose == 'lemmap':
file = join(ETL_PATH, f'{dataset}_lemmatization_map.pickle')
logg(f'Loading {file}')
return pd.read_pickle(file)
# --- embeddings ---
elif purpose == 'embedding':
file = join(EMB_PATH, dataset, dataset)
try:
logg(f'Reading {file}')
if 'd2v' in dataset:
return Doc2Vec.load(file)
if 'w2v' in dataset:
return Word2Vec.load(file)
if 'ftx' in dataset:
return FastText.load(file)
except Exception as e:
logg(e)
# --- gensim dict ---
elif purpose == 'dict':
if dataset == 'dewiki' and 'unfiltered' in args:
dict_path = join(
LDA_PATH, version, corpus_type, f'dewiki_noun_{corpus_type}_unfiltered.dict'
)
else:
dict_path = join(LDA_PATH, version, corpus_type, f'{dataset}_{version}_{corpus_type}.dict')
try:
logg(f'Loading dict from {dict_path}')
dict_from_corpus = Dictionary.load(dict_path)
_ = dict_from_corpus[0] # init dictionary
return dict_from_corpus
except Exception as e:
logg(e)
# --- MM corpus ---
elif purpose == 'corpus':
corpus_path = join(LDA_PATH, version, corpus_type, f'{dataset}_{version}_{corpus_type}.mm')
try:
logg(f'Loading corpus from {corpus_path}')
corpus = MmCorpus(corpus_path)
corpus = list(corpus)
return corpus
except Exception as e:
logg(e)
# --- json texts ---
elif purpose == 'texts':
doc_path = join(LDA_PATH, version, f'{dataset}_{version}_texts.json')
try:
with open(doc_path, 'r') as fp:
logg(f'Loading texts from {doc_path}')
texts = json.load(fp)
return texts
except Exception as e:
logg(e)
# --- rerank topics / scores / eval_scores ---
elif isinstance(purpose, str) and purpose.startswith('rerank'):
tpx_path = join(LDA_PATH, version, corpus_type, 'topics')
if purpose.startswith('rerank_score'):
file = join(tpx_path, f'{dataset}_reranker-scores.csv')
elif purpose.startswith('rerank_eval'):
return load_scores(
dataset, version, corpus_type, metrics, params, nbtopics, logg=logg, rerank=True
)
else:
file = join(tpx_path, f'{dataset}_reranker-candidates.csv')
logg(f'Reading {file}')
try:
df = pd.read_csv(file, header=0, index_col=[0, 1, 2, 3, 4])
df = reduce_df(df, metrics, params, nbtopics)
return df
except Exception as e:
logg(e)
# --- topics ---
elif purpose in {'topic', 'topics'}:
cols = ['Lemma1', 'Lemma2']
if dataset in ['gur', 'gurevych']:
file = join(ETL_PATH, 'gurevych_datasets.csv')
logg(f'Reading {file}')
df = pd.read_csv(file, header=0, index_col=[0, 1])
return df[cols]
elif dataset in ['simlex']:
file = join(ETL_PATH, 'simlex999.csv')
logg(f'Reading {file}')
df = pd.read_csv(file, header=0, index_col=[0, 1])
return df[cols]
elif dataset in ['ws']:
file = join(ETL_PATH, 'ws353.csv')
logg(f'Reading {file}')
df = pd.read_csv(file, header=0, index_col=[0, 1])
return df[cols]
elif dataset in ['rel', 'similarity']:
file = join(ETL_PATH, 'similarity_datasets.csv')
logg(f'Reading {file}')
df = pd.read_csv(file, header=0, index_col=[0, 1])
return df[cols]
elif dataset in ['survey']:
file = join(TPX_PATH, 'survey_topics.csv')
logg(f'Reading {file}')
df = pd.read_csv(file, header=0, index_col=[0, 1, 2, 3])
survey_cols = [f'term{i}' for i in range(20)]
return df[survey_cols]
file = join(
LDA_PATH, version, corpus_type, 'topics',
f'{dataset}_{version}_{corpus_type}_topic-candidates.csv'
)
try:
df = pd.read_csv(file, header=0)
logg(f'Reading {file}')
df = set_index(df)
return reduce_df(df, metrics, params, nbtopics)
except Exception as e:
# logg(e)
# logg('Loading topics via TopicsLoader')
lsi = 'lsi' in args
kwargs = dict(dataset=dataset, version=version, corpus_type=corpus_type, topn=10, lsi=lsi)
if params:
kwargs['param_ids'] = params
if nbtopics:
kwargs['nbs_topics'] = nbtopics
return TopicsLoader(**kwargs).topics
# --- labels ---
elif purpose in {'label', 'labels'}:
def _load_label_file(file_):
logg(f'Reading {file_}')
df_ = pd.read_csv(file_, header=0)
df_ = set_index(df_)
df_ = df_.applymap(eval)
if 'minimal' in args:
df_ = df_.query('label_method in ["comb", "comb_ftx"]').applymap(lambda x: x[0])
return reduce_df(df_, metrics, params, nbtopics)
df = None
if 'rerank' in args:
fpath = join(LDA_PATH, version, corpus_type, 'topics', dataset)
try:
file = fpath + '_reranker-candidates.csv'
df = _load_label_file(file)
except Exception as e:
logg(e)
else:
fpath = join(LDA_PATH, version, corpus_type, 'topics', f'{dataset}_{version}_{corpus_type}')
df = w2v = None
if 'w2v' in args or 'ftx' not in args:
try:
file = fpath + '_label-candidates.csv'
df = w2v = _load_label_file(file)
except Exception as e:
logg(e)
if 'ftx' in args or 'w2v' not in args:
try:
file = fpath + '_label-candidates_ftx.csv'
df = ftx = _load_label_file(file)
if w2v is not None:
ftx = ftx.query('label_method != "d2v"')
df = w2v.append(ftx).sort_index()
except Exception as e:
logg(e)
return df
# --- scores ---
elif purpose in {'score', 'scores'}:
if 'lsi' in args:
return load_scores(
dataset, version, corpus_type, metrics, params, nbtopics, lsi=True, logg=logg
)
elif 'rerank' in args:
return load_scores(
dataset, version, corpus_type, metrics, params, nbtopics, rerank=True, logg=logg
)
else:
return load_scores(dataset, version, corpus_type, metrics, params, nbtopics, logg=logg)
# --- pipelines ---
elif purpose in {'nlp', 'simple', 'smpl', 'wiki', 'wiki_phrases', 'phrases', 'etl', None}:
if dataset in ['gur', 'gurevych']:
file = join(ETL_PATH, 'gurevych_datasets.csv')
logg(f'Reading {file}')
df = pd.read_csv(file, header=0, index_col=[0, 1])
return df
elif dataset in ['simlex']:
file = join(ETL_PATH, 'simlex999.csv')
logg(f'Reading {file}')
df = pd.read_csv(file, header=0, index_col=[0, 1])
return df
elif dataset in ['ws']:
file = join(ETL_PATH, 'ws353.csv')
logg(f'Reading {file}')
df = pd.read_csv(file, header=0, index_col=[0, 1])
return df
elif dataset in ['rel', 'similarity']:
file = join(ETL_PATH, 'similarity_datasets.csv')
logg(f'Reading {file}')
df = pd.read_csv(file, header=0, index_col=[0, 1])
return df
elif dataset in ['survey']:
file = join(TPX_PATH, 'survey_topics.csv')
logg(f'Reading {file}')
            df = pd.read_csv(file, header=0, index_col=[0, 1, 2, 3])
            return df
from distutils.version import LooseVersion
from warnings import catch_warnings
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
import pandas as pd
from pandas import (
DataFrame,
HDFStore,
Index,
MultiIndex,
Series,
_testing as tm,
bdate_range,
concat,
date_range,
isna,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
tables,
)
from pandas.io.pytables import Term
pytestmark = pytest.mark.single
def test_select_columns_in_where(setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_select_with_dups(setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_select(setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# all a data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
{
"ts": bdate_range("2012-01-01", periods=300),
"A": np.random.randn(300),
}
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame({"A": np.random.rand(20), "B": np.random.rand(20)})
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
{
"A": np.random.rand(20),
"B": np.random.rand(20),
"index": np.arange(20, dtype="f8"),
}
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# not in first position float with NaN ok too
df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"ts": bdate_range("2012-01-01", periods=300),
"A": np.random.randn(300),
"B": range(300),
"users": ["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ [f"a{i:03d}" for i in range(100)],
}
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select("df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']")
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + [f"a{i:03d}" for i in range(60)]
result = store.select("df", "ts>=Timestamp('2012-02-01') and users=selector")
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
    with ensure_clean_path(setup_path) as path:
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A wrapper for GroupedData to behave similar to pandas GroupBy.
"""
from abc import ABCMeta, abstractmethod
import sys
import inspect
from collections import OrderedDict, namedtuple
from distutils.version import LooseVersion
from functools import partial
from itertools import product
from typing import (
Any,
Callable,
Dict,
Generic,
Iterator,
Mapping,
List,
Optional,
Sequence,
Set,
Tuple,
Union,
cast,
TYPE_CHECKING,
)
import warnings
import pandas as pd
from pandas.api.types import is_hashable, is_list_like
if LooseVersion(pd.__version__) >= LooseVersion("1.3.0"):
from pandas.core.common import _builtin_table
else:
from pandas.core.base import SelectionMixin
_builtin_table = SelectionMixin._builtin_table
from pyspark.sql import Column, DataFrame as SparkDataFrame, Window, functions as F
from pyspark.sql.types import (
NumericType,
StructField,
StructType,
StringType,
)
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, FrameLike, Label, Name
from pyspark.pandas.typedef import infer_return_type, DataFrameType, ScalarType, SeriesType
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.internal import (
InternalField,
InternalFrame,
HIDDEN_COLUMNS,
NATURAL_ORDER_COLUMN_NAME,
SPARK_INDEX_NAME_FORMAT,
SPARK_DEFAULT_SERIES_NAME,
SPARK_INDEX_NAME_PATTERN,
)
from pyspark.pandas.missing.groupby import (
MissingPandasLikeDataFrameGroupBy,
MissingPandasLikeSeriesGroupBy,
)
from pyspark.pandas.series import Series, first_series
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.config import get_option
from pyspark.pandas.utils import (
align_diff_frames,
is_name_like_tuple,
is_name_like_value,
name_like_string,
same_anchor,
scol_for,
verify_temp_column_name,
log_advice,
)
from pyspark.pandas.spark.utils import as_nullable_spark_type, force_decimal_precision_scale
from pyspark.pandas.exceptions import DataError
if TYPE_CHECKING:
from pyspark.pandas.window import RollingGroupby, ExpandingGroupby
# to keep it the same as pandas
NamedAgg = namedtuple("NamedAgg", ["column", "aggfunc"])
class GroupBy(Generic[FrameLike], metaclass=ABCMeta):
"""
:ivar _psdf: The parent dataframe that is used to perform the groupby
:type _psdf: DataFrame
:ivar _groupkeys: The list of keys that will be used to perform the grouping
:type _groupkeys: List[Series]
"""
def __init__(
self,
psdf: DataFrame,
groupkeys: List[Series],
as_index: bool,
dropna: bool,
column_labels_to_exclude: Set[Label],
agg_columns_selected: bool,
agg_columns: List[Series],
):
self._psdf = psdf
self._groupkeys = groupkeys
self._as_index = as_index
self._dropna = dropna
self._column_labels_to_exclude = column_labels_to_exclude
self._agg_columns_selected = agg_columns_selected
self._agg_columns = agg_columns
@property
def _groupkeys_scols(self) -> List[Column]:
return [s.spark.column for s in self._groupkeys]
@property
def _agg_columns_scols(self) -> List[Column]:
return [s.spark.column for s in self._agg_columns]
@abstractmethod
def _apply_series_op(
self,
op: Callable[["SeriesGroupBy"], Series],
should_resolve: bool = False,
numeric_only: bool = False,
) -> FrameLike:
pass
@abstractmethod
def _cleanup_and_return(self, psdf: DataFrame) -> FrameLike:
pass
# TODO: Series support is not implemented yet.
# TODO: not all arguments are implemented comparing to pandas' for now.
def aggregate(
self,
func_or_funcs: Optional[Union[str, List[str], Dict[Name, Union[str, List[str]]]]] = None,
*args: Any,
**kwargs: Any,
) -> DataFrame:
"""Aggregate using one or more operations over the specified axis.
Parameters
----------
func_or_funcs : dict, str or list
a dict mapping from column name (string) to
aggregate functions (string or list of strings).
Returns
-------
Series or DataFrame
The return can be:
* Series : when DataFrame.agg is called with a single function
* DataFrame : when DataFrame.agg is called with several functions
Return Series or DataFrame.
Notes
-----
`agg` is an alias for `aggregate`. Use the alias.
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
Examples
--------
>>> df = ps.DataFrame({'A': [1, 1, 2, 2],
... 'B': [1, 2, 3, 4],
... 'C': [0.362, 0.227, 1.267, -0.562]},
... columns=['A', 'B', 'C'])
>>> df
A B C
0 1 1 0.362
1 1 2 0.227
2 2 3 1.267
3 2 4 -0.562
Different aggregations per column
>>> aggregated = df.groupby('A').agg({'B': 'min', 'C': 'sum'})
>>> aggregated[['B', 'C']].sort_index() # doctest: +NORMALIZE_WHITESPACE
B C
A
1 1 0.589
2 3 0.705
>>> aggregated = df.groupby('A').agg({'B': ['min', 'max']})
>>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE
B
min max
A
1 1 2
2 3 4
>>> aggregated = df.groupby('A').agg('min')
>>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE
B C
A
1 1 0.227
2 3 -0.562
>>> aggregated = df.groupby('A').agg(['min', 'max'])
>>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE
B C
min max min max
A
1 1 2 0.227 0.362
2 3 4 -0.562 1.267
To control the output names with different aggregations per column, pandas-on-Spark
also supports 'named aggregation' or nested renaming in .agg. It can also be
used when applying multiple aggregation functions to specific columns.
>>> aggregated = df.groupby('A').agg(b_max=ps.NamedAgg(column='B', aggfunc='max'))
>>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE
b_max
A
1 2
2 4
>>> aggregated = df.groupby('A').agg(b_max=('B', 'max'), b_min=('B', 'min'))
>>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE
b_max b_min
A
1 2 1
2 4 3
>>> aggregated = df.groupby('A').agg(b_max=('B', 'max'), c_min=('C', 'min'))
>>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE
b_max c_min
A
1 2 0.227
2 4 -0.562
"""
# I think current implementation of func and arguments in pandas-on-Spark for aggregate
# is different than pandas, later once arguments are added, this could be removed.
if func_or_funcs is None and kwargs is None:
raise ValueError("No aggregation argument or function specified.")
relabeling = func_or_funcs is None and is_multi_agg_with_relabel(**kwargs)
if relabeling:
(
func_or_funcs,
columns,
order,
) = normalize_keyword_aggregation( # type: ignore[assignment]
kwargs
)
if not isinstance(func_or_funcs, (str, list)):
if not isinstance(func_or_funcs, dict) or not all(
is_name_like_value(key)
and (
isinstance(value, str)
or isinstance(value, list)
and all(isinstance(v, str) for v in value)
)
for key, value in func_or_funcs.items()
):
raise ValueError(
"aggs must be a dict mapping from column name "
"to aggregate functions (string or list of strings)."
)
else:
agg_cols = [col.name for col in self._agg_columns]
func_or_funcs = OrderedDict([(col, func_or_funcs) for col in agg_cols])
psdf: DataFrame = DataFrame(
GroupBy._spark_groupby(self._psdf, func_or_funcs, self._groupkeys)
)
if self._dropna:
psdf = DataFrame(
psdf._internal.with_new_sdf(
psdf._internal.spark_frame.dropna(
subset=psdf._internal.index_spark_column_names
)
)
)
if not self._as_index:
should_drop_index = set(
i for i, gkey in enumerate(self._groupkeys) if gkey._psdf is not self._psdf
)
if len(should_drop_index) > 0:
psdf = psdf.reset_index(level=should_drop_index, drop=True)
if len(should_drop_index) < len(self._groupkeys):
psdf = psdf.reset_index()
if relabeling:
psdf = psdf[order]
psdf.columns = columns
return psdf
agg = aggregate
@staticmethod
def _spark_groupby(
psdf: DataFrame,
func: Mapping[Name, Union[str, List[str]]],
groupkeys: Sequence[Series] = (),
) -> InternalFrame:
groupkey_names = [SPARK_INDEX_NAME_FORMAT(i) for i in range(len(groupkeys))]
groupkey_scols = [s.spark.column.alias(name) for s, name in zip(groupkeys, groupkey_names)]
multi_aggs = any(isinstance(v, list) for v in func.values())
reordered = []
data_columns = []
column_labels = []
for key, value in func.items():
label = key if is_name_like_tuple(key) else (key,)
if len(label) != psdf._internal.column_labels_level:
raise TypeError("The length of the key must be the same as the column label level.")
for aggfunc in [value] if isinstance(value, str) else value:
column_label = tuple(list(label) + [aggfunc]) if multi_aggs else label
column_labels.append(column_label)
data_col = name_like_string(column_label)
data_columns.append(data_col)
col_name = psdf._internal.spark_column_name_for(label)
if aggfunc == "nunique":
reordered.append(
F.expr("count(DISTINCT `{0}`) as `{1}`".format(col_name, data_col))
)
# Implement "quartiles" aggregate function for ``describe``.
elif aggfunc == "quartiles":
reordered.append(
F.expr(
"percentile_approx(`{0}`, array(0.25, 0.5, 0.75)) as `{1}`".format(
col_name, data_col
)
)
)
else:
reordered.append(
F.expr("{1}(`{0}`) as `{2}`".format(col_name, aggfunc, data_col))
)
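        # At this point `reordered` holds one Spark SQL aggregation expression per
        # output column, each an expression like "<aggfunc>(`<input col>`) as `<output col>`"
        # (with the special 'nunique' and 'quartiles' forms built above).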
sdf = psdf._internal.spark_frame.select(groupkey_scols + psdf._internal.data_spark_columns)
sdf = sdf.groupby(*groupkey_names).agg(*reordered)
return InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in groupkey_names],
index_names=[psser._column_label for psser in groupkeys],
index_fields=[
psser._internal.data_fields[0].copy(name=name)
for psser, name in zip(groupkeys, groupkey_names)
],
column_labels=column_labels,
data_spark_columns=[scol_for(sdf, col) for col in data_columns],
)
def count(self) -> FrameLike:
"""
Compute count of group, excluding missing values.
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
Examples
--------
>>> df = ps.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5],
... 'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C'])
>>> df.groupby('A').count().sort_index() # doctest: +NORMALIZE_WHITESPACE
B C
A
1 2 3
2 2 2
"""
return self._reduce_for_stat_function(F.count, only_numeric=False)
# TODO: We should fix See Also when Series implementation is finished.
def first(self) -> FrameLike:
"""
Compute first of group values.
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
"""
return self._reduce_for_stat_function(F.first, only_numeric=False)
def last(self) -> FrameLike:
"""
Compute last of group values.
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
"""
return self._reduce_for_stat_function(
lambda col: F.last(col, ignorenulls=True), only_numeric=False
)
def max(self) -> FrameLike:
"""
Compute max of group values.
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
"""
return self._reduce_for_stat_function(F.max, only_numeric=False)
# TODO: examples should be updated.
def mean(self) -> FrameLike:
"""
Compute mean of groups, excluding missing values.
Returns
-------
pyspark.pandas.Series or pyspark.pandas.DataFrame
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
Examples
--------
>>> df = ps.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5],
... 'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C'])
Groupby one column and return the mean of the remaining columns in
each group.
>>> df.groupby('A').mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
B C
A
1 3.0 1.333333
2 4.0 1.500000
"""
return self._reduce_for_stat_function(F.mean, only_numeric=True)
def min(self) -> FrameLike:
"""
Compute min of group values.
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
"""
return self._reduce_for_stat_function(F.min, only_numeric=False)
# TODO: sync the doc.
def std(self, ddof: int = 1) -> FrameLike:
"""
Compute standard deviation of groups, excluding missing values.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
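
        Examples
        --------
        A usage sketch (output omitted since it depends on the grouped data)::

            df.groupby('A').std(ddof=1)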
"""
assert ddof in (0, 1)
return self._reduce_for_stat_function(
F.stddev_pop if ddof == 0 else F.stddev_samp, only_numeric=True
)
def sum(self) -> FrameLike:
"""
Compute sum of group values
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
"""
return self._reduce_for_stat_function(F.sum, only_numeric=True)
# TODO: sync the doc.
def var(self, ddof: int = 1) -> FrameLike:
"""
Compute variance of groups, excluding missing values.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
"""
assert ddof in (0, 1)
return self._reduce_for_stat_function(
F.var_pop if ddof == 0 else F.var_samp, only_numeric=True
)
# TODO: skipna should be implemented.
def all(self) -> FrameLike:
"""
Returns True if all values in the group are truthful, else False.
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
Examples
--------
>>> df = ps.DataFrame({'A': [1, 1, 2, 2, 3, 3, 4, 4, 5, 5],
... 'B': [True, True, True, False, False,
... False, None, True, None, False]},
... columns=['A', 'B'])
>>> df
A B
0 1 True
1 1 True
2 2 True
3 2 False
4 3 False
5 3 False
6 4 None
7 4 True
8 5 None
9 5 False
>>> df.groupby('A').all().sort_index() # doctest: +NORMALIZE_WHITESPACE
B
A
1 True
2 False
3 False
4 True
5 False
"""
return self._reduce_for_stat_function(
lambda col: F.min(F.coalesce(col.cast("boolean"), SF.lit(True))), only_numeric=False
)
# TODO: skipna should be implemented.
def any(self) -> FrameLike:
"""
Returns True if any value in the group is truthful, else False.
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
Examples
--------
>>> df = ps.DataFrame({'A': [1, 1, 2, 2, 3, 3, 4, 4, 5, 5],
... 'B': [True, True, True, False, False,
... False, None, True, None, False]},
... columns=['A', 'B'])
>>> df
A B
0 1 True
1 1 True
2 2 True
3 2 False
4 3 False
5 3 False
6 4 None
7 4 True
8 5 None
9 5 False
>>> df.groupby('A').any().sort_index() # doctest: +NORMALIZE_WHITESPACE
B
A
1 True
2 True
3 False
4 True
5 False
"""
return self._reduce_for_stat_function(
lambda col: F.max(F.coalesce(col.cast("boolean"), SF.lit(False))), only_numeric=False
)
# TODO: groupby multiply columns should be implemented.
def size(self) -> Series:
"""
Compute group sizes.
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
Examples
--------
>>> df = ps.DataFrame({'A': [1, 2, 2, 3, 3, 3],
... 'B': [1, 1, 2, 3, 3, 3]},
... columns=['A', 'B'])
>>> df
A B
0 1 1
1 2 1
2 2 2
3 3 3
4 3 3
5 3 3
>>> df.groupby('A').size().sort_index()
A
1 1
2 2
3 3
dtype: int64
>>> df.groupby(['A', 'B']).size().sort_index()
A B
1 1 1
2 1 1
2 1
3 3 3
dtype: int64
For Series,
>>> df.B.groupby(df.A).size().sort_index()
A
1 1
2 2
3 3
Name: B, dtype: int64
>>> df.groupby(df.A).B.size().sort_index()
A
1 1
2 2
3 3
Name: B, dtype: int64
"""
groupkeys = self._groupkeys
groupkey_names = [SPARK_INDEX_NAME_FORMAT(i) for i in range(len(groupkeys))]
groupkey_scols = [s.spark.column.alias(name) for s, name in zip(groupkeys, groupkey_names)]
sdf = self._psdf._internal.spark_frame.select(
groupkey_scols + self._psdf._internal.data_spark_columns
)
sdf = sdf.groupby(*groupkey_names).count()
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in groupkey_names],
index_names=[psser._column_label for psser in groupkeys],
index_fields=[
psser._internal.data_fields[0].copy(name=name)
for psser, name in zip(groupkeys, groupkey_names)
],
column_labels=[None],
data_spark_columns=[scol_for(sdf, "count")],
)
return first_series(DataFrame(internal))
def diff(self, periods: int = 1) -> FrameLike:
"""
First discrete difference of element.
Calculates the difference of a DataFrame element compared with another element in the
DataFrame group (default is the element in the same column of the previous row).
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative values.
Returns
-------
diffed : DataFrame or Series
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c'])
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.groupby(['b']).diff().sort_index()
a c
0 NaN NaN
1 1.0 3.0
2 NaN NaN
3 NaN NaN
4 NaN NaN
5 NaN NaN
Difference with previous column in a group.
>>> df.groupby(['b'])['a'].diff().sort_index()
0 NaN
1 1.0
2 NaN
3 NaN
4 NaN
5 NaN
Name: a, dtype: float64
"""
return self._apply_series_op(
lambda sg: sg._psser._diff(periods, part_cols=sg._groupkeys_scols), should_resolve=True
)
def cumcount(self, ascending: bool = True) -> Series:
"""
Number each item in each group from 0 to the length of that group - 1.
Essentially this is equivalent to
.. code-block:: python
self.apply(lambda x: pd.Series(np.arange(len(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Returns
-------
Series
Sequence number of each element within each group.
Examples
--------
>>> df = ps.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').cumcount().sort_index()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> df.groupby('A').cumcount(ascending=False).sort_index()
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
"""
ret = (
self._groupkeys[0]
.rename()
.spark.transform(lambda _: SF.lit(0))
._cum(F.count, True, part_cols=self._groupkeys_scols, ascending=ascending)
- 1
)
internal = ret._internal.resolved_copy
return first_series(DataFrame(internal))
def cummax(self) -> FrameLike:
"""
Cumulative max for each group.
Returns
-------
Series or DataFrame
See Also
--------
Series.cummax
DataFrame.cummax
Examples
--------
>>> df = ps.DataFrame(
... [[1, None, 4], [1, 0.1, 3], [1, 20.0, 2], [4, 10.0, 1]],
... columns=list('ABC'))
>>> df
A B C
0 1 NaN 4
1 1 0.1 3
2 1 20.0 2
3 4 10.0 1
By default, iterates over rows and finds the maximum in each column.
>>> df.groupby("A").cummax().sort_index()
B C
0 NaN 4
1 0.1 4
2 20.0 4
3 10.0 1
It works as below in Series.
>>> df.C.groupby(df.A).cummax().sort_index()
0 4
1 4
2 4
3 1
Name: C, dtype: int64
"""
return self._apply_series_op(
lambda sg: sg._psser._cum(F.max, True, part_cols=sg._groupkeys_scols),
should_resolve=True,
numeric_only=True,
)
def cummin(self) -> FrameLike:
"""
Cumulative min for each group.
Returns
-------
Series or DataFrame
See Also
--------
Series.cummin
DataFrame.cummin
Examples
--------
>>> df = ps.DataFrame(
... [[1, None, 4], [1, 0.1, 3], [1, 20.0, 2], [4, 10.0, 1]],
... columns=list('ABC'))
>>> df
A B C
0 1 NaN 4
1 1 0.1 3
2 1 20.0 2
3 4 10.0 1
By default, iterates over rows and finds the minimum in each column.
>>> df.groupby("A").cummin().sort_index()
B C
0 NaN 4
1 0.1 3
2 0.1 2
3 10.0 1
It works as below in Series.
>>> df.B.groupby(df.A).cummin().sort_index()
0 NaN
1 0.1
2 0.1
3 10.0
Name: B, dtype: float64
"""
return self._apply_series_op(
lambda sg: sg._psser._cum(F.min, True, part_cols=sg._groupkeys_scols),
should_resolve=True,
numeric_only=True,
)
def cumprod(self) -> FrameLike:
"""
Cumulative product for each group.
Returns
-------
Series or DataFrame
See Also
--------
Series.cumprod
DataFrame.cumprod
Examples
--------
>>> df = ps.DataFrame(
... [[1, None, 4], [1, 0.1, 3], [1, 20.0, 2], [4, 10.0, 1]],
... columns=list('ABC'))
>>> df
A B C
0 1 NaN 4
1 1 0.1 3
2 1 20.0 2
3 4 10.0 1
By default, iterates over rows and finds the product in each column.
>>> df.groupby("A").cumprod().sort_index()
B C
0 NaN 4
1 0.1 12
2 2.0 24
3 10.0 1
It works as below in Series.
>>> df.B.groupby(df.A).cumprod().sort_index()
0 NaN
1 0.1
2 2.0
3 10.0
Name: B, dtype: float64
"""
return self._apply_series_op(
lambda sg: sg._psser._cumprod(True, part_cols=sg._groupkeys_scols),
should_resolve=True,
numeric_only=True,
)
def cumsum(self) -> FrameLike:
"""
Cumulative sum for each group.
Returns
-------
Series or DataFrame
See Also
--------
Series.cumsum
DataFrame.cumsum
Examples
--------
>>> df = ps.DataFrame(
... [[1, None, 4], [1, 0.1, 3], [1, 20.0, 2], [4, 10.0, 1]],
... columns=list('ABC'))
>>> df
A B C
0 1 NaN 4
1 1 0.1 3
2 1 20.0 2
3 4 10.0 1
By default, iterates over rows and finds the sum in each column.
>>> df.groupby("A").cumsum().sort_index()
B C
0 NaN 4
1 0.1 7
2 20.1 9
3 10.0 1
It works as below in Series.
>>> df.B.groupby(df.A).cumsum().sort_index()
0 NaN
1 0.1
2 20.1
3 10.0
Name: B, dtype: float64
"""
return self._apply_series_op(
lambda sg: sg._psser._cumsum(True, part_cols=sg._groupkeys_scols),
should_resolve=True,
numeric_only=True,
)
def apply(self, func: Callable, *args: Any, **kwargs: Any) -> Union[DataFrame, Series]:
"""
Apply function `func` group-wise and combine the results together.
The function passed to `apply` must take a DataFrame as its first
argument and return a DataFrame. `apply` will
then take care of combining the results back together into a single
dataframe. `apply` is therefore a highly flexible
grouping method.
While `apply` is a very flexible method, its downside is that
using it can be quite a bit slower than using more specific methods
like `agg` or `transform`. pandas-on-Spark offers a wide range of methods that will
be much faster than using `apply` for their specific purposes, so try to
use them before reaching for `apply`.
.. note:: this API executes the function once to infer the type which is
potentially expensive, for instance, when the dataset is created after
aggregations or sorting.
To avoid this, specify return type in ``func``, for instance, as below:
>>> def pandas_div(x) -> ps.DataFrame[int, [float, float]]:
... return x[['B', 'C']] / x[['B', 'C']]
If the return type is specified, the output column names become
`c0, c1, c2 ... cn`. These names are positionally mapped to the returned
DataFrame in ``func``.
To specify the column names, you can assign them in a NumPy compound type style
as below:
>>> def pandas_div(x) -> ps.DataFrame[("index", int), [("a", float), ("b", float)]]:
... return x[['B', 'C']] / x[['B', 'C']]
>>> pdf = pd.DataFrame({'B': [1.], 'C': [3.]})
>>> def plus_one(x) -> ps.DataFrame[
... (pdf.index.name, pdf.index.dtype), zip(pdf.columns, pdf.dtypes)]:
... return x[['B', 'C']] / x[['B', 'C']]
.. note:: the dataframe within ``func`` is actually a pandas dataframe. Therefore,
any pandas API within this function is allowed.
Parameters
----------
func : callable
A callable that takes a DataFrame as its first argument, and
returns a dataframe.
*args
Positional arguments to pass to func.
**kwargs
Keyword arguments to pass to func.
Returns
-------
applied : DataFrame or Series
See Also
--------
aggregate : Apply aggregate function to the GroupBy object.
DataFrame.apply : Apply a function to a DataFrame.
Series.apply : Apply a function to a Series.
Examples
--------
>>> df = ps.DataFrame({'A': 'a a b'.split(),
... 'B': [1, 2, 3],
... 'C': [4, 6, 5]}, columns=['A', 'B', 'C'])
>>> g = df.groupby('A')
Notice that ``g`` has two groups, ``a`` and ``b``.
Calling `apply` in various ways, we can get different grouping results:
Below, the function passed to `apply` takes a DataFrame as
its argument and returns a DataFrame. `apply` combines the result for
each group together into a new DataFrame:
>>> def plus_min(x):
... return x + x.min()
>>> g.apply(plus_min).sort_index() # doctest: +NORMALIZE_WHITESPACE
A B C
0 aa 2 8
1 aa 3 10
2 bb 6 10
>>> g.apply(sum).sort_index() # doctest: +NORMALIZE_WHITESPACE
A B C
A
a aa 3 10
b b 3 5
>>> g.apply(len).sort_index() # doctest: +NORMALIZE_WHITESPACE
A
a 2
b 1
dtype: int64
You can specify the type hint and prevent schema inference for better performance.
>>> def pandas_div(x) -> ps.DataFrame[int, [float, float]]:
... return x[['B', 'C']] / x[['B', 'C']]
>>> g.apply(pandas_div).sort_index() # doctest: +NORMALIZE_WHITESPACE
c0 c1
0 1.0 1.0
1 1.0 1.0
2 1.0 1.0
>>> def pandas_div(x) -> ps.DataFrame[("index", int), [("f1", float), ("f2", float)]]:
... return x[['B', 'C']] / x[['B', 'C']]
>>> g.apply(pandas_div).sort_index() # doctest: +NORMALIZE_WHITESPACE
f1 f2
index
0 1.0 1.0
1 1.0 1.0
2 1.0 1.0
In case of Series, it works as below.
>>> def plus_max(x) -> ps.Series[np.int]:
... return x + x.max()
>>> df.B.groupby(df.A).apply(plus_max).sort_index() # doctest: +SKIP
0 6
1 3
2 4
Name: B, dtype: int64
>>> def plus_min(x):
... return x + x.min()
>>> df.B.groupby(df.A).apply(plus_min).sort_index()
0 2
1 3
2 6
Name: B, dtype: int64
You can also return a scalar value as an aggregated value of the group:
>>> def plus_length(x) -> np.int:
... return len(x)
>>> df.B.groupby(df.A).apply(plus_length).sort_index() # doctest: +SKIP
0 1
1 2
Name: B, dtype: int64
The extra arguments to the function can be passed as below.
>>> def calculation(x, y, z) -> np.int:
... return len(x) + y * z
>>> df.B.groupby(df.A).apply(calculation, 5, z=10).sort_index() # doctest: +SKIP
0 51
1 52
Name: B, dtype: int64
"""
if not callable(func):
raise TypeError("%s object is not callable" % type(func).__name__)
spec = inspect.getfullargspec(func)
return_sig = spec.annotations.get("return", None)
should_infer_schema = return_sig is None
should_retain_index = should_infer_schema
is_series_groupby = isinstance(self, SeriesGroupBy)
psdf = self._psdf
if self._agg_columns_selected:
agg_columns = self._agg_columns
else:
agg_columns = [
psdf._psser_for(label)
for label in psdf._internal.column_labels
if label not in self._column_labels_to_exclude
]
psdf, groupkey_labels, groupkey_names = GroupBy._prepare_group_map_apply(
psdf, self._groupkeys, agg_columns
)
if is_series_groupby:
name = psdf.columns[-1]
pandas_apply = _builtin_table.get(func, func)
else:
f = _builtin_table.get(func, func)
def pandas_apply(pdf: pd.DataFrame, *a: Any, **k: Any) -> Any:
return f(pdf.drop(groupkey_names, axis=1), *a, **k)
should_return_series = False
if should_infer_schema:
# Here we execute with the first `compute.shortcut_limit` rows (1000 by default) to get the return type.
log_advice(
"If the type hints is not specified for `grouby.apply`, "
"it is expensive to infer the data type internally."
)
limit = get_option("compute.shortcut_limit")
pdf = psdf.head(limit + 1)._to_internal_pandas()
groupkeys = [
pdf[groupkey_name].rename(psser.name)
for groupkey_name, psser in zip(groupkey_names, self._groupkeys)
]
grouped = pdf.groupby(groupkeys)
if is_series_groupby:
pser_or_pdf = grouped[name].apply(pandas_apply, *args, **kwargs)
else:
pser_or_pdf = grouped.apply(pandas_apply, *args, **kwargs)
psser_or_psdf = ps.from_pandas(pser_or_pdf)
if len(pdf) <= limit:
if isinstance(psser_or_psdf, ps.Series) and is_series_groupby:
psser_or_psdf = psser_or_psdf.rename(cast(SeriesGroupBy, self)._psser.name)
return cast(Union[Series, DataFrame], psser_or_psdf)
if len(grouped) <= 1:
with warnings.catch_warnings():
warnings.simplefilter("always")
warnings.warn(
"The amount of data for return type inference might not be large enough. "
"Consider increasing an option `compute.shortcut_limit`."
)
if isinstance(psser_or_psdf, Series):
should_return_series = True
psdf_from_pandas = psser_or_psdf._psdf
else:
psdf_from_pandas = cast(DataFrame, psser_or_psdf)
index_fields = [
field.normalize_spark_type() for field in psdf_from_pandas._internal.index_fields
]
data_fields = [
field.normalize_spark_type() for field in psdf_from_pandas._internal.data_fields
]
return_schema = StructType([field.struct_field for field in index_fields + data_fields])
else:
return_type = infer_return_type(func)
if not is_series_groupby and isinstance(return_type, SeriesType):
raise TypeError(
"Series as a return type hint at frame groupby is not supported "
"currently; however got [%s]. Use DataFrame type hint instead." % return_sig
)
if isinstance(return_type, DataFrameType):
data_fields = cast(DataFrameType, return_type).data_fields
return_schema = cast(DataFrameType, return_type).spark_type
index_fields = cast(DataFrameType, return_type).index_fields
should_retain_index = len(index_fields) > 0
psdf_from_pandas = None
else:
should_return_series = True
dtype = cast(Union[SeriesType, ScalarType], return_type).dtype
spark_type = cast(Union[SeriesType, ScalarType], return_type).spark_type
if is_series_groupby:
data_fields = [
InternalField(
dtype=dtype, struct_field=StructField(name=name, dataType=spark_type)
)
]
else:
data_fields = [
InternalField(
dtype=dtype,
struct_field=StructField(
name=SPARK_DEFAULT_SERIES_NAME, dataType=spark_type
),
)
]
return_schema = StructType([field.struct_field for field in data_fields])
def pandas_groupby_apply(pdf: pd.DataFrame) -> pd.DataFrame:
if is_series_groupby:
pdf_or_ser = pdf.groupby(groupkey_names)[name].apply(pandas_apply, *args, **kwargs)
else:
pdf_or_ser = pdf.groupby(groupkey_names).apply(pandas_apply, *args, **kwargs)
if should_return_series and isinstance(pdf_or_ser, pd.DataFrame):
pdf_or_ser = pdf_or_ser.stack()
if not isinstance(pdf_or_ser, pd.DataFrame):
return pd.DataFrame(pdf_or_ser)
# -*- coding:utf-8 -*- #
import numpy as np
import pandas as pd
import heapq
def get_max_min_2d(min_list_all, max_list_all, n=100):
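# Added note: when one of the two value_counts bins holds more than 70% of the values,
# the min list is clipped to values at or below that bin's right edge and the max list to
# values at or above its left edge (dropping outliers); the n most extreme remaining values
# are then averaged. A hypothetical call (the lists below are illustrative only):
# get_max_min_2d([0.10, 0.12, 0.11, 5.0], [0.90, 0.88, 0.91, 0.20], n=2)
# averages the two smallest minima and the two largest maxima.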
min_list = pd.Series(min_list_all)
data_count = min_list.value_counts(bins=2, normalize=True, sort=False)
print(data_count)
for k, v in data_count.items():
if v > 0.7:
num_thresh = str(k).split(',')[-1].split(']')[0]  # k.right would do the same; take the right edge of the interval
min_list = np.array(min_list[min_list.astype(np.float32) <= float(num_thresh)])
if len(min_list) > n:
min_list = np.array(min_list)
min_list_index = heapq.nsmallest(n, range(len(min_list)), min_list.take)
min_list = min_list[min_list_index]
elif len(min_list) == 0:
min_list = min_list_all
min_input_all = np.mean(min_list)
max_list = pd.Series(max_list_all)
data_count = max_list.value_counts(bins=2, normalize=True, sort=False)
print(data_count)
for k, v in data_count.items():
if v > 0.7:
num_thresh = str(k).split(',')[0].split('(')[1]  # k.left would do the same
max_list = np.array(max_list[max_list.astype(np.float32) >= float(num_thresh)])  # in theory this should be strictly greater than
if len(max_list) > n:
max_list = np.array(max_list)
max_list_index = heapq.nlargest(n, range(len(max_list)), max_list.take)
max_list = max_list[max_list_index]
elif len(max_list) == 0:
max_list = max_list_all
max_input_all = np.mean(max_list)
return min_input_all, max_input_all
def get_max_min_3d(min_list_all, max_list_all, n=100):
min_list = pd.Series(min_list_all)
data_count = min_list.value_counts(bins=2, normalize=True, sort=False)
print(data_count)
for k, v in data_count.items():
if v > 0.7:
num_thresh = str(k).split(',')[-1].split(']')[0]  # k.right would do the same; take the right edge of the interval
min_list = np.array(min_list[min_list.astype(np.float32) <= float(num_thresh)])
if len(min_list) > n:
min_list = np.array(min_list)
min_list_index = heapq.nsmallest(n, range(len(min_list)), min_list.take)
min_list = min_list[min_list_index]
min_input_all = np.min(min_list)
max_list = pd.Series(max_list_all)
import glob
import json
import os
import numpy as np
import pandas as pd
import datetime
import matplotlib.pyplot as plt
from vxbt_calc import vxbt_calc
#from datetime import datetime
capi_data_path = '/path/to/coinapi_csvs'
start_c = pd.to_datetime('2019-05-01 00:00:00')
end_c = pd.to_datetime('2020-05-01 00:00:00')
instrument_start_end = dict()
capi_orderbook_data = dict()
capi_indices_df = pd.DataFrame(columns=['timestamp', 'vxbt', 'gvxbt', 'avxbt'])
results = {}
def read_orderbook_data(csv_paths, expiry, coinapi=False, data_dict=dict()):
if expiry not in data_dict:
data_dict[expiry] = dict()
else:
# Already read
return data_dict
near_next_csv = list()
for path in csv_paths:
near_next_csv += glob.glob(path + f'BTC-{expiry}-*-*.csv')
#if len(near_next_csv) == 0:
# raise ValueError(f'{expiry} data unavailable!')
print(f'Reading {expiry} data from disk...')
for file_path in near_next_csv:
instrument = os.path.basename(file_path).split('-')
exp, strike, cp = instrument[1], int(instrument[2]), instrument[3].split('.')[0]
if strike not in data_dict[exp]:
data_dict[exp][strike] = dict()
try:
df = pd.read_csv(file_path).filter(['timestamp', 'best_bid_price', 'best_ask_price'])
df['timestamp'] = pd.to_datetime(df['timestamp'], unit='ms', errors='coerce')
import sys
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns
import pandas as pd
from datetime import datetime
import time
import logging
from html_telegraph_poster.upload_images import upload_image
import config
from addons import file_manager
def graph_maker():
while True:
minutes = datetime.now().minute
seconds = datetime.now().second
microseconds = datetime.now().microsecond
if minutes not in {0, 10, 20, 30, 40, 50}:
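# Sleep until the next 10-minute boundary; e.g. at 12:37:15.5 the wait is
# (10 - 7) * 60 - 15.5 = 164.5 s, waking up at 12:40:00.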
snooze = ((10 - minutes % 10) * 60) - \
(seconds + microseconds/1000000.0)
time.sleep(snooze)
else:
try:
cacheFile = file_manager.readJson(config.CACHE_FILE_PATH)
cache_key_list = []
for keys, values in cacheFile.items():
cache_key_list.append(keys)
player_count = cacheFile['online_player_count']
old_player_data = pd.read_csv(
config.PLAYER_CHART_FILE_PATH, parse_dates=['DateTime'])
old_player_data.drop(0, axis=0, inplace=True)
temp_player_data = pd.DataFrame([[datetime.utcnow().strftime(
'%Y-%m-%d %H:%M:%S'), player_count]], columns=['DateTime', 'Players'])
new_player_data = pd.concat(
[old_player_data, temp_player_data])
new_player_data.to_csv(
config.PLAYER_CHART_FILE_PATH, index=False)
player_data = pd.read_csv(
config.PLAYER_CHART_FILE_PATH, parse_dates=['DateTime'])
sns.set_style('whitegrid')
fig, ax = plt.subplots(figsize=(10, 2.5))
ax.plot('DateTime', 'Players', data=player_data,
color='red', linewidth=.7, marker='o', markevery=[-1])
ax.fill_between(
player_data['DateTime'], player_data['Players'], 0, facecolor='red', color='red', alpha=.4)
ax.margins(x=0)
ax.grid(b=True, axis='y', linestyle='--', alpha=.3)
ax.grid(b=False, axis='x')
ax.spines['bottom'].set_position('zero')
ax.spines['bottom'].set_color('black')
ax.set_ylabel('')
ax.set_xlabel('')
ax.xaxis.set_ticks_position('bottom')
ax.xaxis.set_major_locator(mdates.DayLocator())
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d %b'))
ax.legend(loc='upper left')
ax.axhline(y=0, color='none')
ax.axhline(y=1400000, color='none')
plt.yticks(ticks=[0, 250000, 500000, 750000, 1000000, 1250000])
plt.subplots_adjust(top=1, bottom=0.077, left=0, right=1)
plt.text(0.989, 0.058, '0', transform=ax.transAxes, alpha=.3)
plt.text(0.965, 0.215, '250k',
transform=ax.transAxes, alpha=.3)
plt.text(0.965, 0.377, '500k',
transform=ax.transAxes, alpha=.3)
plt.text(0.965, 0.54, '750k', transform=ax.transAxes, alpha=.3)
plt.text(0.951, 0.705, '1 000k',
transform=ax.transAxes, alpha=.3)
plt.text(0.951, 0.865, '1 250k',
transform=ax.transAxes, alpha=.3)
plt.text(0.156, 0.874, 'Made by @csgobeta\nupd every 10 min',
ha='center', transform=ax.transAxes, color='black', size='6')
plt.close()
fig.savefig(config.GRAPH_IMG_FILE_PATH)
trigger1 = True
while trigger1:
try:
url1 = upload_image(config.GRAPH_IMG_FILE_PATH)
if url1.startswith('http'):
trigger1 = False
except:
pass
cacheFile = file_manager.readJson(config.CACHE_FILE_PATH)
if cacheFile['graph_url'] != url1:
file_manager.updateJson(
config.CACHE_FILE_PATH, url1, cache_key_list[22])
except Exception as e:
print(f' - Error:\n{e}\n')
time.sleep(70)
try:
cacheFile = file_manager.readJson(config.CACHE_FILE_PATH)
cache_key_list = []
for keys, values in cacheFile.items():
cache_key_list.append(keys)
dev_count = cacheFile['dev_player_count']
old_dev_data = pd.read_csv(
config.DEV_CHART_FILE_PATH, parse_dates=['DateTime'])
old_dev_data.drop(0, axis=0, inplace=True)
temp_dev_data = pd.DataFrame([[datetime.utcnow().strftime(
'%Y-%m-%d %H:%M:%S'), dev_count]], columns=['DateTime', 'Devs'])
new_dev_data = pd.concat([old_dev_data, temp_dev_data])
"""
Script to create word2vec models, given a set of mapped POIs.
"""
# Authors: <NAME> <<EMAIL>> <NAME> <<EMAIL>>
import argparse
import os
import math
import errno
import pandas as pd
import geopandas as gpd
from geopandas import GeoDataFrame
from shapely.geometry import Point
import sys
sys.path.append("../GeoL")
import getopt
import pathlib
import re
import gensim
import numpy as np
from sklearn import preprocessing
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
from scipy import stats
import seaborn as sns
sns.set_style("ticks")
sns.set_context("paper")
import xgboost as xgb
from xgboost.sklearn import XGBClassifier
import sklearn
from sklearn.model_selection import train_test_split
from sklearn import metrics # Additional scklearn functions
from sklearn.model_selection import GridSearchCV # Perforing grid search
from sklearn.svm import SVR
from sklearn.model_selection import learning_curve
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.svm import LinearSVR
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
import sklearn.metrics as metrics
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] = 15, 4
import csv
def mergeUA_CellVector(BASE_DIR_CITY, CITY_NAME, SIZE1, METRIC, SIZE2, S, WS, C):
"""
Given a city name and a grid size,
returns a DataFrame joining Cell Vectors and Urban Atlas data
"""
if METRIC == 'distance':
# input Cell Vectors data mapped to grid
CELLVECTOR_COUNT = os.path.join(BASE_DIR_CITY, 'embeddings', CITY_NAME + "_gs" + str(
SIZE1) + "_skip_" + METRIC + "_s" + str(S) + "_ws" + str(WS) + "_c" + str(C) + ".txt")
else:
# input Cell Vectors data mapped to grid
CELLVECTOR_COUNT = os.path.join(BASE_DIR_CITY, 'embeddings', CITY_NAME + "_gs" + str(
SIZE1) + "_skip_" + METRIC+"_" + str(SIZE2) + "_s" + str(S) + "_ws" + str(WS) + "_c" + str(C) + ".txt")
# input UA data mapped to grid
UA_COUNT = os.path.join(BASE_DIR_CITY, 'count',
CITY_NAME + "_ua_count_" + str(SIZE1) + ".csv")
print("------------------------------------------------------")
print("Analising: \n", CELLVECTOR_COUNT, '\n', UA_COUNT)
# load cellvector Count
cellvector_tessellation = pd.read_csv(
CELLVECTOR_COUNT, sep='\t', header=None)
cols = [int(i) for i in cellvector_tessellation.columns]
cols[0] = 'cellID'
cellvector_tessellation.columns = cols
cellvector_tessellation.columns = list(map(
lambda x: 'f_fs_' + str(x) if x != "cellID" else x, cellvector_tessellation.columns))
cellvector_tessellation.head(2)
# load UA mapped onto grid
ua_tessellation = pd.read_csv(UA_COUNT)
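# (Added note) The snippet is truncated here; presumably the two frames are then joined on a
# shared cell id before modelling. A sketch, not the original code, assuming the UA count CSV
# also carries a 'cellID' column:
# merged = cellvector_tessellation.merge(ua_tessellation, on='cellID', how='inner')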
from __future__ import division #brings in Python 3.0 mixed type calculations
import numpy as np
import os
import pandas as pd
import sys
#find parent directory and import model
parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path.append(parentddir)
from base.uber_model import UberModel, ModelSharedInputs
class BeerexInputs(ModelSharedInputs):
"""
Input class for Beerex
"""
def __init__(self):
"""Class representing the inputs for Beerex"""
super(BeerexInputs, self).__init__()
#self.incorporation_depth = pd.Series([], dtype="float")
self.application_rate = pd.Series([], dtype="float")
self.application_method = pd.Series([], dtype="object")
self.crop_type = pd.Series([], dtype="object")
# self.application_units = pd.Series([], dtype="object")
self.empirical_residue = pd.Series([], dtype="object")
self.empirical_pollen = pd.Series([], dtype="float")
self.empirical_nectar = pd.Series([], dtype="float")
self.empirical_jelly = pd.Series([], dtype="float")
self.adult_contact_ld50 = pd.Series([], dtype="float")
self.adult_oral_ld50 = pd.Series([], dtype="float")
self.adult_oral_noael = pd.Series([], dtype="float")
self.larval_ld50 = pd.Series([], dtype="float")
self.larval_noael = pd.Series([], dtype="float")
self.log_kow = pd.Series([], dtype="float")
self.koc = pd.Series([], dtype="float")
self.mass_tree_vegetation = pd.Series([], dtype="float")
self.lw1_jelly = pd.Series([], dtype="float")
self.lw2_jelly = pd.Series([], dtype="float")
self.lw3_jelly = pd.Series([], dtype="float")
self.lw4_nectar = pd.Series([], dtype="float")
self.lw4_pollen = pd.Series([], dtype="float")
self.lw5_nectar = pd.Series([], dtype="float")
self.lw5_pollen = pd.Series([], dtype="float")
self.ld6_nectar = pd.Series([], dtype="float")
self.ld6_pollen = pd.Series([], dtype="float")
self.lq1_jelly = pd.Series([], dtype="float")
self.lq2_jelly = pd.Series([], dtype="float")
self.lq3_jelly = pd.Series([], dtype="float")
self.lq4_jelly = pd.Series([], dtype="float")
self.aw_cell_nectar = pd.Series([], dtype="float")
self.aw_cell_pollen = pd.Series([], dtype="float")
self.aw_brood_nectar = pd.Series([], dtype="float")
self.aw_brood_pollen = pd.Series([], dtype="float")
self.aw_comb_nectar = pd.Series([], dtype="float")
self.aw_comb_pollen = pd.Series([], dtype="float")
self.aw_fpollen_nectar = pd.Series([], dtype="float")
self.aw_fpollen_pollen = pd.Series([], dtype="float")
self.aw_fnectar_nectar = pd.Series([], dtype="float")
self.aw_fnectar_pollen = pd.Series([], dtype="float")
self.aw_winter_nectar = pd.Series([], dtype="float")
self.aw_winter_pollen = pd.Series([], dtype="float")
self.ad_nectar = pd.Series([], dtype="float")
self.ad_pollen = pd.Series([], dtype="float")
self.aq_jelly = pd.Series([], dtype="float")
class BeerexOutputs(object):
"""
Output class for Beerex
"""
def __init__(self):
"""Class representing the outputs for Beerex"""
super(BeerexOutputs, self).__init__()
self.out_eec_spray = pd.Series(name="out_eec_spray", dtype="float")
self.out_eec_soil = pd.Series(name="out_eec_soil", dtype="float")
self.out_eec_seed = pd.Series(name="out_eec_seed", dtype="float")
self.out_eec_tree = pd.Series(name="out_eec_tree", dtype="float")
self.out_eec = pd.Series(name="out_eec", dtype="float")
self.out_lw1_total_dose = pd.Series(name="out_lw1_total_dose", dtype="float")
self.out_lw2_total_dose = pd.Series(name="out_lw2_total_dose", dtype="float")
self.out_lw3_total_dose = pd.Series(name="out_lw3_total_dose", dtype="float")
self.out_lw4_total_dose = pd.Series(name="out_lw4_total_dose", dtype="float")
self.out_lw5_total_dose = pd.Series(name="out_lw5_total_dose", dtype="float")
self.out_ld6_total_dose = pd.Series(name="out_ld6_total_dose", dtype="float")
self.out_lq1_total_dose = pd.Series(name="out_lq1_total_dose", dtype="float")
self.out_lq2_total_dose = pd.Series(name="out_lq2_total_dose", dtype="float")
self.out_lq3_total_dose = pd.Series(name="out_lq3_total_dose", dtype="float")
self.out_lq4_total_dose = pd.Series(name="out_lq4_total_dose", dtype="float")
self.out_aw_cell_total_dose = pd.Series(name="out_aw_cell_total_dose", dtype="float")
self.out_aw_brood_total_dose = pd.Series(name="out_aw_brood_total_dose", dtype="float")
self.out_aw_comb_total_dose = pd.Series(name="out_aw_comb_total_dose", dtype="float")
self.out_aw_pollen_total_dose = pd.Series(name="out_aw_pollen_total_dose", dtype="float")
self.out_aw_nectar_total_dose = pd.Series(name="out_aw_nectar_total_dose", dtype="float")
self.out_aw_winter_total_dose = pd.Series(name="out_aw_winter_total_dose", dtype="float")
self.out_ad_total_dose = pd.Series(name="out_ad_total_dose", dtype="float")
self.out_aq_total_dose = pd.Series(name="out_aq_total_dose", dtype="float")
self.out_lw1_acute_rq = pd.Series(name="out_lw1_acute_rq", dtype="float")
self.out_lw2_acute_rq = pd.Series(name="out_lw2_acute_rq", dtype="float")
self.out_lw3_acute_rq = pd.Series(name="out_lw3_acute_rq", dtype="float")
self.out_lw4_acute_rq = pd.Series(name="out_lw4_acute_rq", dtype="float")
self.out_lw5_acute_rq = pd.Series(name="out_lw5_acute_rq", dtype="float")
self.out_ld6_acute_rq = pd.Series(name="out_ld6_acute_rq", dtype="float")
self.out_lq1_acute_rq = pd.Series(name="out_lq1_acute_rq", dtype="float")
self.out_lq2_acute_rq = pd.Series(name="out_lq2_acute_rq", dtype="float")
self.out_lq3_acute_rq = pd.Series(name="out_lq3_acute_rq", dtype="float")
self.out_lq4_acute_rq = pd.Series(name="out_lq4_acute_rq", dtype="float")
self.out_aw_cell_acute_rq = pd.Series(name="out_aw_cell_acute_rq", dtype="float")
self.out_aw_brood_acute_rq = pd.Series(name="out_aw_brood_acute_rq", dtype="float")
self.out_aw_comb_acute_rq = pd.Series(name="out_aw_comb_acute_rq", dtype="float")
self.out_aw_pollen_acute_rq = pd.Series(name="out_aw_pollen_acute_rq", dtype="float")
self.out_aw_nectar_acute_rq = pd.Series(name="out_aw_nectar_acute_rq", dtype="float")
self.out_aw_winter_acute_rq = pd.Series(name="out_aw_winter_acute_rq", dtype="float")
self.out_ad_acute_rq = pd.Series(name="out_ad_acute_rq", dtype="float")
self.out_aq_acute_rq = pd.Series(name="out_aq_acute_rq", dtype="float")
# -*- coding: utf-8 -*-
"""Datareader for cell testers and potentiostats.
This module is used for loading data and databases created by different cell
testers. Currently it only accepts arbin-type res-files (access) data as
raw data files, but we intend to implement more types soon. It also creates
processed files in the hdf5-format.
Example:
>>> d = CellpyData()
>>> d.loadcell(raw_files=["file1.res", "file2.res"])  # loads and merges the runs
>>> voltage_curves = d.get_cap()
>>> d.save("mytest.hdf")
"""
import os
from pathlib import Path
import logging
import sys
import collections
import warnings
import csv
import itertools
import time
import copy
import numpy as np
import pandas as pd
from pandas.errors import PerformanceWarning
from scipy import interpolate
from cellpy.parameters import prms
from cellpy.parameters.legacy import internal_settings as old_settings
from cellpy.exceptions import WrongFileVersion, DeprecatedFeature, NullData
from cellpy.parameters.internal_settings import (
get_headers_summary,
get_cellpy_units,
get_headers_normal,
get_headers_step_table,
ATTRS_CELLPYFILE,
ATTRS_DATASET,
ATTRS_DATASET_DEEP,
ATTRS_CELLPYDATA,
)
from cellpy.readers.core import (
FileID,
Cell,
CELLPY_FILE_VERSION,
MINIMUM_CELLPY_FILE_VERSION,
xldate_as_datetime,
interpolate_y_on_x,
identify_last_data_point,
)
HEADERS_NORMAL = get_headers_normal()
HEADERS_SUMMARY = get_headers_summary()
HEADERS_STEP_TABLE = get_headers_step_table()
# TODO: @jepe - performance warnings - mixed types within cols (pytables)
performance_warning_level = "ignore" # "ignore", "error"
warnings.filterwarnings(
performance_warning_level, category=pd.io.pytables.PerformanceWarning
)
pd.set_option("mode.chained_assignment", None) # "raise", "warn", None
module_logger = logging.getLogger(__name__)
class CellpyData(object):
"""Main class for working and storing data.
This class is the main work-horse for cellpy where all the functions for
reading, selecting, and tweaking your data are located. It also contains the
header definitions, both for the cellpy hdf5 format, and for the various
cell-tester file-formats that can be read. The class can contain
several cell-tests and each test is stored in a list. If you see what I mean...
Attributes:
cells (list): list of DataSet objects.
"""
def __str__(self):
txt = "<CellpyData>\n"
if self.name:
txt += f"name: {self.name}\n"
if self.table_names:
txt += f"table_names: {self.table_names}\n"
if self.tester:
txt += f"tester: {self.tester}\n"
if self.cells:
txt += "datasets: [ ->\n"
for i, d in enumerate(self.cells):
txt += f" ({i})\n"
for t in str(d).split("\n"):
txt += " "
txt += t
txt += "\n"
txt += "\n"
txt += "]"
else:
txt += "datasets: []"
txt += "\n"
return txt
def __bool__(self):
if self.cells:
return True
else:
return False
def __init__(
self,
filenames=None,
selected_scans=None,
profile=False,
filestatuschecker=None, # "modified"
fetch_one_liners=False,
tester=None,
initialize=False,
):
"""CellpyData object
Args:
filenames: list of files to load.
selected_scans:
profile: experimental feature.
filestatuschecker: property to compare cellpy and raw-files;
default read from prms-file.
fetch_one_liners: experimental feature.
tester: instrument used (e.g. "arbin") (checks prms-file as
default).
initialize: create a dummy (empty) dataset; defaults to False.
"""
if tester is None:
self.tester = prms.Instruments.tester
else:
self.tester = tester
self.loader = None # this will be set in the function set_instrument
self.logger = logging.getLogger(__name__)
self.logger.debug("created CellpyData instance")
self.name = None
self.profile = profile
self.minimum_selection = {}
if filestatuschecker is None:
self.filestatuschecker = prms.Reader.filestatuschecker
else:
self.filestatuschecker = filestatuschecker
self.forced_errors = 0
self.summary_exists = False
if not filenames:
self.file_names = []
else:
self.file_names = filenames
if not self._is_listtype(self.file_names):
self.file_names = [self.file_names]
if not selected_scans:
self.selected_scans = []
else:
self.selected_scans = selected_scans
if not self._is_listtype(self.selected_scans):
self.selected_scans = [self.selected_scans]
self.cells = []
self.status_datasets = []
self.selected_cell_number = 0
self.number_of_datasets = 0
self.capacity_modifiers = ["reset"]
self.list_of_step_types = [
"charge",
"discharge",
"cv_charge",
"cv_discharge",
"taper_charge",
"taper_discharge",
"charge_cv",
"discharge_cv",
"ocvrlx_up",
"ocvrlx_down",
"ir",
"rest",
"not_known",
]
# - options
self.force_step_table_creation = prms.Reader.force_step_table_creation
self.force_all = prms.Reader.force_all
self.sep = prms.Reader.sep
self._cycle_mode = prms.Reader.cycle_mode
# self.max_res_filesize = prms.Reader.max_res_filesize
self.load_only_summary = prms.Reader.load_only_summary
self.select_minimal = prms.Reader.select_minimal
# self.chunk_size = prms.Reader.chunk_size # 100000
# self.max_chunks = prms.Reader.max_chunks
# self.last_chunk = prms.Reader.last_chunk
self.limit_loaded_cycles = prms.Reader.limit_loaded_cycles
# self.load_until_error = prms.Reader.load_until_error
self.ensure_step_table = prms.Reader.ensure_step_table
self.daniel_number = prms.Reader.daniel_number
# self.raw_datadir = prms.Reader.raw_datadir
self.raw_datadir = prms.Paths.rawdatadir
# self.cellpy_datadir = prms.Reader.cellpy_datadir
self.cellpy_datadir = prms.Paths.cellpydatadir
# search in prm-file for res and hdf5 dirs in loadcell:
self.auto_dirs = prms.Reader.auto_dirs
# - headers and instruments
self.headers_normal = get_headers_normal()
self.headers_summary = get_headers_summary()
self.headers_step_table = get_headers_step_table()
self.table_names = None # dictionary defined in set_instruments
self.set_instrument()
# - units used by cellpy
self.cellpy_units = get_cellpy_units()
if initialize:
self.initialize()
def initialize(self):
self.logger.debug("Initializing...")
self.cells.append(Cell())
@property
def cell(self):
"""returns the DataSet instance"""
# could insert a try-except thingy here...
cell = self.cells[self.selected_cell_number]
return cell
@cell.setter
def cell(self, new_cell):
self.cells[self.selected_cell_number] = new_cell
@property
def dataset(self):
"""returns the DataSet instance"""
# could insert a try-except thingy here...
warnings.warn(
"The .dataset property is deprecated, please use .cell instead.",
DeprecationWarning,
)
cell = self.cells[self.selected_cell_number]
return cell
@property
def empty(self):
"""gives False if the CellpyData object is empty (or un-functional)"""
return not self.check()
@classmethod
def vacant(cls, cell=None):
"""Create a CellpyData instance.
Args:
cell (CellpyData instance): the attributes from the cell will be copied
to the new Cellpydata instance.
Returns:
CellpyData instance.
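Example:
    A hedged sketch (added; ``old_cell`` stands for an existing CellpyData instance):
    >>> fresh = CellpyData.vacant(cell=old_cell)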
"""
new_cell = cls(initialize=True)
if cell is not None:
for attr in ATTRS_DATASET:
value = getattr(cell.cell, attr)
setattr(new_cell.cell, attr, value)
for attr in ATTRS_DATASET_DEEP:
value = getattr(cell.cell, attr)
setattr(new_cell.cell, attr, copy.deepcopy(value))
for attr in ATTRS_CELLPYDATA:
value = getattr(cell, attr)
setattr(new_cell, attr, value)
return new_cell
def split(self, cycle=None):
"""Split experiment (CellpyData object) into two sub-experiments. if cycle
is not give, it will split on the median cycle number"""
if isinstance(cycle, int) or cycle is None:
return self.split_many(base_cycles=cycle)
def drop_from(self, cycle=None):
"""Select first part of experiment (CellpyData object) up to cycle number
'cycle'"""
if isinstance(cycle, int):
c1, c2 = self.split_many(base_cycles=cycle)
return c1
def drop_to(self, cycle=None):
"""Select last part of experiment (CellpyData object) from cycle number
'cycle'"""
if isinstance(cycle, int):
c1, c2 = self.split_many(base_cycles=cycle)
return c2
def drop_edges(self, start, end):
"""Select middle part of experiment (CellpyData object) from cycle
number 'start' to 'end"""
if end < start:
raise ValueError("end cannot be larger than start")
if end == start:
raise ValueError("end cannot be the same as start")
return self.split_many([start, end])[1]
def split_many(self, base_cycles=None):
"""Split experiment (CellpyData object) into several sub-experiments.
Args:
base_cycles (int or list of ints): cycle(s) to do the split on.
Returns:
List of CellpyData objects
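Example:
    A hedged sketch (added; ``cell_data`` stands for a loaded CellpyData object):
    >>> first_part, last_part = cell_data.split_many(base_cycles=10)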
"""
h_summary_index = HEADERS_SUMMARY.cycle_index
h_raw_index = HEADERS_NORMAL.cycle_index_txt
h_step_cycle = HEADERS_STEP_TABLE.cycle
if base_cycles is None:
all_cycles = self.get_cycle_numbers()
base_cycles = int(np.median(all_cycles))
cells = list()
if not isinstance(base_cycles, (list, tuple)):
base_cycles = [base_cycles]
dataset = self.cell
steptable = dataset.steps
data = dataset.raw
summary = dataset.summary
# In case Cycle_Index has been promoted to index [#index]
if h_summary_index not in summary.columns:
summary = summary.reset_index(drop=False)
for b_cycle in base_cycles:
steptable0, steptable = [
steptable[steptable[h_step_cycle] < b_cycle],
steptable[steptable[h_step_cycle] >= b_cycle],
]
data0, data = [
data[data[h_raw_index] < b_cycle],
data[data[h_raw_index] >= b_cycle],
]
summary0, summary = [
summary[summary[h_summary_index] < b_cycle],
summary[summary[h_summary_index] >= b_cycle],
]
new_cell = CellpyData.vacant(cell=self)
old_cell = CellpyData.vacant(cell=self)
new_cell.cell.steps = steptable0
new_cell.cell.raw = data0
new_cell.cell.summary = summary0
new_cell.cell = identify_last_data_point(new_cell.cell)
old_cell.cell.steps = steptable
old_cell.cell.raw = data
old_cell.cell.summary = summary
old_cell.cell = identify_last_data_point(old_cell.cell)
cells.append(new_cell)
cells.append(old_cell)
return cells
# TODO: @jepe - merge the _set_xxinstrument methods into one method
def set_instrument(self, instrument=None):
"""Set the instrument (i.e. tell cellpy the file-type you use).
Args:
instrument: (str) in ["arbin", "bio-logic-csv", "bio-logic-bin",...]
Sets the instrument used for obtaining the data (i.e. sets fileformat)
"""
if instrument is None:
instrument = self.tester
self.logger.debug(f"Setting instrument: {instrument}")
if instrument in ["arbin", "arbin_res"]:
from cellpy.readers.instruments.arbin import ArbinLoader as RawLoader
self._set_instrument(RawLoader)
self.tester = "arbin"
elif instrument == "arbin_sql":
warnings.warn(f"{instrument} not implemented yet")
self.tester = "arbin"
elif instrument in ["pec", "pec_csv"]:
warnings.warn("Experimental! Not ready for production!")
from cellpy.readers.instruments.pec import PECLoader as RawLoader
self._set_instrument(RawLoader)
self.tester = "pec"
elif instrument in ["biologics", "biologics_mpr"]:
from cellpy.readers.instruments.biologics_mpr import MprLoader as RawLoader
warnings.warn("Experimental! Not ready for production!")
self._set_instrument(RawLoader)
self.tester = "biologic"
elif instrument == "custom":
from cellpy.readers.instruments.custom import CustomLoader as RawLoader
self._set_instrument(RawLoader)
self.tester = "custom"
else:
raise Exception(f"option does not exist: '{instrument}'")
def _set_instrument(self, loader_class):
self.loader_class = loader_class()
# ----- get information --------------------------
self.raw_units = self.loader_class.get_raw_units()
self.raw_limits = self.loader_class.get_raw_limits()
# ----- create the loader ------------------------
self.loader = self.loader_class.loader
def _create_logger(self):
from cellpy import log
self.logger = logging.getLogger(__name__)
log.setup_logging(default_level="DEBUG")
def set_cycle_mode(self, cycle_mode):
"""set the cycle mode"""
# TODO: remove this
warnings.warn(
"deprecated - use it as a property instead, e.g.: cycle_mode = 'anode'",
DeprecationWarning,
)
self._cycle_mode = cycle_mode
@property
def cycle_mode(self):
return self._cycle_mode
@cycle_mode.setter
def cycle_mode(self, cycle_mode):
self.logger.debug(f"-> cycle_mode: {cycle_mode}")
self._cycle_mode = cycle_mode
def set_raw_datadir(self, directory=None):
"""Set the directory containing .res-files.
Used for setting directory for looking for res-files.@
A valid directory name is required.
Args:
directory (str): path to res-directory
Example:
>>> d = CellpyData()
>>> directory = "MyData/Arbindata"
>>> d.set_raw_datadir(directory)
"""
if directory is None:
self.logger.info("No directory name given")
return
if not os.path.isdir(directory):
self.logger.info(directory)
self.logger.info("Directory does not exist")
return
self.raw_datadir = directory
def set_cellpy_datadir(self, directory=None):
"""Set the directory containing .hdf5-files.
Used for setting directory for looking for hdf5-files.
A valid directory name is required.
Args:
directory (str): path to hdf5-directory
Example:
>>> d = CellpyData()
>>> directory = "MyData/HDF5"
>>> d.set_raw_datadir(directory)
"""
if directory is None:
self.logger.info("No directory name given")
return
if not os.path.isdir(directory):
self.logger.info("Directory does not exist")
return
self.cellpy_datadir = directory
def check_file_ids(self, rawfiles, cellpyfile, detailed=False):
"""Check the stats for the files (raw-data and cellpy hdf5).
This function checks if the hdf5 file and the res-files have the same
timestamps etc to find out if we need to bother to load .res -files.
Args:
cellpyfile (str): filename of the cellpy hdf5-file.
rawfiles (list of str): name(s) of raw-data file(s).
detailed (bool): return a dict containing True or False for each
individual raw-file
Returns:
If detailed is False:
False if the raw files are newer than the cellpy hdf5-file
(update needed).
True if update is not needed.
If detailed is True it returns a dict containing True or False for each
individual raw-file.
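Example:
    A hedged sketch (added; the file names are placeholders):
    >>> d = CellpyData()
    >>> up_to_date = d.check_file_ids(["run_01.res"], "run_01.h5")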
"""
txt = "Checking file ids - using '%s'" % self.filestatuschecker
self.logger.info(txt)
ids_cellpy_file = self._check_cellpy_file(cellpyfile)
self.logger.debug(f"cellpyfile ids: {ids_cellpy_file}")
if not ids_cellpy_file:
# self.logger.debug("hdf5 file does not exist - needs updating")
return False
ids_raw = self._check_raw(rawfiles)
if detailed:
similar = self._parse_ids(ids_raw, ids_cellpy_file)
return similar
else:
similar = self._compare_ids(ids_raw, ids_cellpy_file)
if not similar:
# self.logger.debug("hdf5 file needs updating")
return False
else:
# self.logger.debug("hdf5 file is updated")
return True
def _check_raw(self, file_names, abort_on_missing=False):
"""Get the file-ids for the res_files."""
strip_file_names = True
check_on = self.filestatuschecker
if not self._is_listtype(file_names):
file_names = [file_names]
ids = dict()
for f in file_names:
self.logger.debug(f"checking res file {f}")
fid = FileID(f)
# self.logger.debug(fid)
if fid.name is None:
warnings.warn(f"file does not exist: {f}")
if abort_on_missing:
sys.exit(-1)
else:
if strip_file_names:
name = os.path.basename(f)
else:
name = f
if check_on == "size":
ids[name] = int(fid.size)
elif check_on == "modified":
ids[name] = int(fid.last_modified)
else:
ids[name] = int(fid.last_accessed)
return ids
def _check_cellpy_file(self, filename):
"""Get the file-ids for the cellpy_file."""
strip_filenames = True
parent_level = prms._cellpyfile_root
fid_dir = prms._cellpyfile_fid
check_on = self.filestatuschecker
self.logger.debug("checking cellpy-file")
self.logger.debug(filename)
if not os.path.isfile(filename):
self.logger.debug("cellpy-file does not exist")
return None
try:
store = pd.HDFStore(filename)
except Exception as e:
self.logger.debug(f"could not open cellpy-file ({e})")
return None
try:
fidtable = store.select(parent_level + fid_dir)
except KeyError:
self.logger.warning("no fidtable -" " you should update your hdf5-file")
fidtable = None
finally:
store.close()
if fidtable is not None:
raw_data_files, raw_data_files_length = self._convert2fid_list(fidtable)
txt = "contains %i res-files" % (len(raw_data_files))
self.logger.debug(txt)
ids = dict()
for fid in raw_data_files:
full_name = fid.full_name
size = fid.size
mod = fid.last_modified
self.logger.debug(f"fileID information for: {full_name}")
self.logger.debug(f" modified: {mod}")
self.logger.debug(f" size: {size}")
if strip_filenames:
name = os.path.basename(full_name)
else:
name = full_name
if check_on == "size":
ids[name] = int(fid.size)
elif check_on == "modified":
ids[name] = int(fid.last_modified)
else:
ids[name] = int(fid.last_accessed)
return ids
else:
return None
@staticmethod
def _compare_ids(ids_res, ids_cellpy_file):
similar = True
l_res = len(ids_res)
l_cellpy = len(ids_cellpy_file)
if l_res == l_cellpy and l_cellpy > 0:
for name, value in list(ids_res.items()):
if ids_cellpy_file[name] != value:
similar = False
else:
similar = False
return similar
@staticmethod
def _parse_ids(ids_raw, ids_cellpy_file):
similar = dict()
for name in ids_raw:
v_cellpy = ids_cellpy_file.get(name, None)
v_raw = ids_raw[name]
similar[name] = False
if v_raw is not None:
if v_raw == v_cellpy:
similar[name] = True
return similar
def loadcell(
self,
raw_files,
cellpy_file=None,
mass=None,
summary_on_raw=False,
summary_ir=True,
summary_ocv=False,
summary_end_v=True,
only_summary=False,
force_raw=False,
use_cellpy_stat_file=None,
**kwargs,
):
"""Loads data for given cells.
Args:
raw_files (list): name of res-files
cellpy_file (path): name of cellpy-file
mass (float): mass of electrode or active material
summary_on_raw (bool): use raw-file for summary
summary_ir (bool): summarize ir
summary_ocv (bool): summarize ocv steps
summary_end_v (bool): summarize end voltage
only_summary (bool): get only the summary of the runs
force_raw (bool): only use raw-files
use_cellpy_stat_file (bool): use stat file if creating summary
from raw
**kwargs: passed to from_raw
Example:
>>> srnos = my_dbreader.select_batch("testing_new_solvent")
>>> cell_datas = []
>>> for srno in srnos:
>>> ... my_run_name = my_dbreader.get_cell_name(srno)
>>> ... mass = my_dbreader.get_mass(srno)
>>> ... rawfiles, cellpyfiles = \
>>> ... filefinder.search_for_files(my_run_name)
>>> ... cell_data = cellreader.CellpyData()
>>> ... cell_data.loadcell(raw_files=rawfiles,
>>> ... cellpy_file=cellpyfiles)
>>> ... cell_data.set_mass(mass)
>>> ... if not cell_data.summary_exists:
>>> ... cell_data.make_summary() # etc. etc.
>>> ... cell_datas.append(cell_data)
>>>
"""
# This is a part of a dramatic API change. It will not be possible to
# load more than one set of datasets (i.e. one single cellpy-file or
# several raw-files that will be automatically merged)
# TODO @jepe Make setting or prm so that it is possible to update only new data
self.logger.info("Started cellpy.cellreader.loadcell")
if cellpy_file is None:
similar = False
elif force_raw:
similar = False
else:
similar = self.check_file_ids(raw_files, cellpy_file)
self.logger.debug("checked if the files were similar")
if only_summary:
self.load_only_summary = True
else:
self.load_only_summary = False
if not similar:
self.logger.debug("cellpy file(s) needs updating - loading raw")
self.logger.info("Loading raw-file")
self.logger.debug(raw_files)
self.from_raw(raw_files, **kwargs)
self.logger.debug("loaded files")
# Check if the run was loaded ([] if empty)
if self.status_datasets:
if mass:
self.set_mass(mass)
if summary_on_raw:
nom_cap = kwargs.pop("nom_cap", None)
if nom_cap is not None:
self.set_nom_cap(nom_cap)
self.make_summary(
all_tests=False,
find_ocv=summary_ocv,
find_ir=summary_ir,
find_end_voltage=summary_end_v,
use_cellpy_stat_file=use_cellpy_stat_file,
# nom_cap=nom_cap,
)
else:
self.logger.warning("Empty run!")
else:
self.load(cellpy_file)
if mass:
self.set_mass(mass)
return self
def dev_update_loadcell(
self,
raw_files,
cellpy_file=None,
mass=None,
summary_on_raw=False,
summary_ir=True,
summary_ocv=False,
summary_end_v=True,
force_raw=False,
use_cellpy_stat_file=None,
nom_cap=None,
):
self.logger.info("Started cellpy.cellreader.loadcell")
if cellpy_file is None or force_raw:
similar = None
else:
similar = self.check_file_ids(raw_files, cellpy_file, detailed=True)
self.logger.debug("checked if the files were similar")
if similar is None:
# forcing to load only raw_files
self.from_raw(raw_files)
if self.status_datasets:
if mass:
self.set_mass(mass)
if summary_on_raw:
self.make_summary(
all_tests=False,
find_ocv=summary_ocv,
find_ir=summary_ir,
find_end_voltage=summary_end_v,
use_cellpy_stat_file=use_cellpy_stat_file,
nom_cap=nom_cap,
)
else:
self.logger.warning("Empty run!")
return self
self.load(cellpy_file)
if mass:
self.set_mass(mass)
if all(similar.values()):
self.logger.info("Everything is up to date")
return
start_file = True
for i, f in enumerate(raw_files):
f = Path(f)
if not similar[f.name] and start_file:
try:
last_data_point = self.cell.raw_data_files[i].last_data_point
except IndexError:
last_data_point = 0
self.dev_update_from_raw(
file_names=f, data_points=[last_data_point, None]
)
self.cell = self.dev_update_merge()
elif not similar[f.name]:
try:
last_data_point = self.cell.raw_data_files[i].last_data_point
except IndexError:
last_data_point = 0
self.dev_update_from_raw(
file_names=f, data_points=[last_data_point, None]
)
self.merge()
start_file = False
self.dev_update_make_steps()
self.dev_update_make_summary(
all_tests=False,
find_ocv=summary_ocv,
find_ir=summary_ir,
find_end_voltage=summary_end_v,
use_cellpy_stat_file=use_cellpy_stat_file,
)
return self
def dev_update(self, file_names=None, **kwargs):
print("NOT FINISHED YET - but close")
if len(self.cell.raw_data_files) != 1:
self.logger.warning(
"Merged cell. But can only update based on the last file"
)
print(self.cell.raw_data_files)
for fid in self.cell.raw_data_files:
print(fid)
last = self.cell.raw_data_files[0].last_data_point
self.dev_update_from_raw(
file_names=file_names, data_points=[last, None], **kwargs
)
print("lets try to merge")
self.cell = self.dev_update_merge()
print("now it is time to update the step table")
self.dev_update_make_steps()
print("and finally, lets update the summary")
self.dev_update_make_summary()
def dev_update_merge(self):
print("NOT FINISHED YET - but very close")
number_of_tests = len(self.cells)
if number_of_tests != 2:
self.logger.warning(
"Cannot merge if you do not have exactly two cell-objects"
)
return
t1, t2 = self.cells
if t1.raw.empty:
self.logger.debug("OBS! the first dataset is empty")
if t2.raw.empty:
t1.merged = True
self.logger.debug("the second dataset was empty")
self.logger.debug(" -> merged contains only first")
return t1
test = t1
cycle_index_header = self.headers_normal.cycle_index_txt
if not t1.raw.empty:
t1.raw = t1.raw.iloc[:-1]
raw2 = pd.concat([t1.raw, t2.raw], ignore_index=True)
test.no_cycles = max(raw2[cycle_index_header])
test.raw = raw2
else:
test.no_cycles = max(t2.raw[cycle_index_header])
test = t2
self.logger.debug(" -> merged with new dataset")
return test
def dev_update_make_steps(self, **kwargs):
old_steps = self.cell.steps.iloc[:-1]
# Note! hard-coding header name (might fail if changing default headers)
from_data_point = self.cell.steps.iloc[-1].point_first
new_steps = self.make_step_table(from_data_point=from_data_point, **kwargs)
merged_steps = pd.concat([old_steps, new_steps]).reset_index(drop=True)
self.cell.steps = merged_steps
def dev_update_make_summary(self, **kwargs):
print("NOT FINISHED YET - but not critical")
# Update not implemented yet, running full summary calculations for now.
# For later:
# old_summary = self.cell.summary.iloc[:-1]
cycle_index_header = self.headers_summary.cycle_index
from_cycle = self.cell.summary.iloc[-1][cycle_index_header]
self.make_summary(from_cycle=from_cycle, **kwargs)
# For later:
# (Remark! need to solve how to merge culumated columns)
# new_summary = self.make_summary(from_cycle=from_cycle)
# merged_summary = pd.concat([old_summary, new_summary]).reset_index(drop=True)
# self.cell.summary = merged_summary
def dev_update_from_raw(self, file_names=None, data_points=None, **kwargs):
"""This method is under development. Using this to develop updating files
with only new data.
"""
print("NOT FINISHED YET - but very close")
if file_names:
self.file_names = file_names
if file_names is None:
self.logger.info(
"No filename given and no stored in the file_names "
"attribute. Returning None"
)
return None
if not isinstance(self.file_names, (list, tuple)):
self.file_names = [file_names]
raw_file_loader = self.loader
set_number = 0
test = None
self.logger.debug("start iterating through file(s)")
print(self.file_names)
for f in self.file_names:
self.logger.debug("loading raw file:")
self.logger.debug(f"{f}")
# get a list of cellpy.readers.core.Cell objects
test = raw_file_loader(f, data_points=data_points, **kwargs)
# remark that the bounds are inclusive (e.g. with data_points=[5000, None],
# the first data point loaded is number 5000).
self.logger.debug("added the data set - merging file info")
# raw_data_file = copy.deepcopy(test[set_number].raw_data_files[0])
# file_size = test[set_number].raw_data_files_length[0]
# test[set_number].raw_data_files.append(raw_data_file)
# test[set_number].raw_data_files_length.append(file_size)
# return test
self.cells.append(test[set_number])
self.number_of_datasets = len(self.cells)
self.status_datasets = self._validate_datasets()
self._invent_a_name()
return self
def from_raw(self, file_names=None, **kwargs):
"""Load a raw data-file.
Args:
file_names (list of raw-file names): uses CellpyData.file_names if
None. If the list contains more than one file name, then the
runs will be merged together.
Other keywords depending on loader:
[ArbinLoader]:
bad_steps (list of tuples): (c, s) tuples of steps s (in cycle c)
to skip loading.
dataset_number (int): the data set number to select if you are dealing
with arbin files with more than one data-set.
data_points (tuple of ints): load only data from data_point[0] to
data_point[1] (use None for infinite). NOT IMPLEMENTED YET.
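Example:
    A minimal sketch (added; the file names are placeholders):
    >>> d = CellpyData()
    >>> d.from_raw(["run_01.res", "run_02.res"])  # the two runs are merged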
"""
# This function only loads one test at a time (but could contain several
# files). The function from_res() used to implement loading several
# datasets (using list of lists as input), however it is now deprecated.
if file_names:
self.file_names = file_names
if not isinstance(self.file_names, (list, tuple)):
self.file_names = [file_names]
# file_type = self.tester
raw_file_loader = self.loader
# test is currently a list of tests - this option will be removed in the future
# so set_number is hard-coded to 0, i.e. actual-test is always test[0]
set_number = 0
test = None
counter = 0
self.logger.debug("start iterating through file(s)")
for f in self.file_names:
self.logger.debug("loading raw file:")
self.logger.debug(f"{f}")
new_tests = raw_file_loader(f, **kwargs)
if new_tests:
# retrieving the first cell data (e.g. first file)
if test is None:
self.logger.debug("getting data from first file")
if new_tests[set_number].no_data:
self.logger.debug("NO DATA")
else:
test = new_tests
# appending cell data file to existing
else:
self.logger.debug("continuing reading files...")
_test = self._append(test[set_number], new_tests[set_number])
if not _test:
self.logger.warning(f"EMPTY TEST: {f}")
continue
test[set_number] = _test
# retrieving file info in a for-loop in case of multiple files
# Remark!
# - the raw_data_files attribute is a list
# - the raw_data_files_length attribute is a list
# The reason for this choice is not clear anymore, but
# let us keep it like this for now
self.logger.debug("added the data set - merging file info")
for j in range(len(new_tests[set_number].raw_data_files)):
raw_data_file = new_tests[set_number].raw_data_files[j]
file_size = new_tests[set_number].raw_data_files_length[j]
test[set_number].raw_data_files.append(raw_data_file)
test[set_number].raw_data_files_length.append(file_size)
counter += 1
if counter > 10:
self.logger.debug("ERROR? Too many files to merge")
raise ValueError(
"Too many files to merge - "
"could be a p2-p3 zip thing"
)
else:
self.logger.debug("NOTHING LOADED")
self.logger.debug("finished loading the raw-files")
test_exists = False
if test:
if test[0].no_data:
self.logger.debug(
"the first dataset (or only dataset) loaded from the raw data file is empty"
)
else:
test_exists = True
if test_exists:
if not prms.Reader.sorted_data:
self.logger.debug("sorting data")
test[set_number] = self._sort_data(test[set_number])
self.cells.append(test[set_number])
else:
self.logger.warning("No new datasets added!")
self.number_of_datasets = len(self.cells)
self.status_datasets = self._validate_datasets()
self._invent_a_name()
return self
def from_res(self, filenames=None, check_file_type=True):
"""Convenience function for loading arbin-type data into the
datastructure.
Args:
filenames: ((lists of) list of raw-file names): uses
cellpy.file_names if None.
If list-of-list, it loads each list into separate datasets.
The files in the inner list will be merged.
check_file_type (bool): check file type if True
(res-, or cellpy-format)
"""
raise DeprecatedFeature
def _validate_datasets(self, level=0):
self.logger.debug("validating test")
level = 0
# simple validation for finding empty datasets - should be expanded to
# find not-complete datasets, datasets with missing prms etc
v = []
if level == 0:
for test in self.cells:
# check that it contains all the necessary headers
# (and add missing ones)
# test = self._clean_up_normal_table(test)
# check that the test is not empty
v.append(self._is_not_empty_dataset(test))
self.logger.debug(f"validation array: {v}")
return v
def check(self):
"""Returns False if no datasets exists or if one or more of the datasets
are empty"""
if len(self.status_datasets) == 0:
return False
if all(self.status_datasets):
return True
return False
# TODO: maybe consider being a bit more concise (re-implement)
def _is_not_empty_dataset(self, dataset):
if dataset is self._empty_dataset():
return False
else:
return True
# TODO: check if this is useful and if it is rename, if not delete
def _clean_up_normal_table(self, test=None, dataset_number=None):
# check that test contains all the necessary headers
# (and add missing ones)
raise NotImplementedError
# TODO: this is used for the check-datasetnr-thing. Will soon be obsolete?
def _report_empty_dataset(self):
self.logger.info("Empty set")
@staticmethod
def _empty_dataset():
return None
def _invent_a_name(self, filename=None, override=False):
if filename is None:
self.name = "nameless"
return
if self.name and not override:
return
path = Path(filename)
self.name = path.with_suffix("").name
def partial_load(self, **kwargs):
"""Load only a selected part of the cellpy file."""
raise NotImplementedError
def link(self, **kwargs):
"""Create a link to a cellpy file.
If the file is very big, it is sometimes better to work with the data
out of memory (i.e. on disk). A CellpyData object with a linked file
will in most cases work as a normal object. However, some of the methods
might be disabled. And it will be slower.
Notes:
2020.02.08 - maybe this functionality is not needed and can be replaced
by using dask or similar?
"""
raise NotImplementedError
def dev_load(self, cellpy_file, parent_level=None, return_cls=True, accept_old=False):
"""Loads a cellpy file.
Args:
cellpy_file (path, str): Full path to the cellpy file.
parent_level (str, optional): Parent level. Warning! Deprecating this soon!
return_cls (bool): Return the class.
accept_old (bool): Accept loading old cellpy-file versions.
Instead of raising WrongFileVersion it only issues a warning.
Returns:
cellpy.CellPyData class if return_cls is True
"""
try:
self.logger.debug("loading cellpy-file (hdf5):")
self.logger.debug(cellpy_file)
new_datasets = self._dev_load_hdf5(cellpy_file, parent_level, accept_old)
self.logger.debug("cellpy-file loaded")
except AttributeError:
new_datasets = []
self.logger.warning(
"This cellpy-file version is not supported by"
"current reader (try to update cellpy)."
)
if new_datasets:
for dataset in new_datasets:
self.cells.append(dataset)
else:
# raise LoadError
self.logger.warning("Could not load")
self.logger.warning(str(cellpy_file))
self.number_of_datasets = len(self.cells)
self.status_datasets = self._validate_datasets()
self._invent_a_name(cellpy_file)
if return_cls:
return self
def load(self, cellpy_file, parent_level=None, return_cls=True, accept_old=False):
"""Loads a cellpy file.
Args:
cellpy_file (path, str): Full path to the cellpy file.
parent_level (str, optional): Parent level. Warning! Deprecating this soon!
return_cls (bool): Return the class.
accept_old (bool): Accept loading old cellpy-file versions.
Instead of raising WrongFileVersion it only issues a warning.
Returns:
cellpy.CellPyData class if return_cls is True
"""
try:
self.logger.debug("loading cellpy-file (hdf5):")
self.logger.debug(cellpy_file)
new_datasets = self._load_hdf5(cellpy_file, parent_level, accept_old)
self.logger.debug("cellpy-file loaded")
except AttributeError:
new_datasets = []
self.logger.warning(
"This cellpy-file version is not supported by"
"current reader (try to update cellpy)."
)
if new_datasets:
for dataset in new_datasets:
self.cells.append(dataset)
else:
# raise LoadError
self.logger.warning("Could not load")
self.logger.warning(str(cellpy_file))
self.number_of_datasets = len(self.cells)
self.status_datasets = self._validate_datasets()
self._invent_a_name(cellpy_file)
if return_cls:
return self
def _get_cellpy_file_version(self, filename, meta_dir="/info", parent_level=None):
if parent_level is None:
parent_level = prms._cellpyfile_root
with pd.HDFStore(filename) as store:
try:
meta_table = store.select(parent_level + meta_dir)
except KeyError:
raise WrongFileVersion(
"This file is VERY old - cannot read file version number"
)
try:
cellpy_file_version = self._extract_from_dict(
meta_table, "cellpy_file_version"
)
except Exception as e:
warnings.warn(f"Unhandled exception raised: {e}")
return 0
return cellpy_file_version
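# Hedged sketch of what the lookup above amounts to (assumes the default
# parent level "CellpyData", the "/info" meta path used elsewhere in this class,
# and that the meta table carries a cellpy_file_version entry; file name is hypothetical):
#   with pd.HDFStore("20200101_cell01_cc_01.h5") as store:
#       meta = store.select("CellpyData/info")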
def _dev_load_hdf5(self, filename, parent_level=None, accept_old=False):
"""Load a cellpy-file.
Args:
filename (str): Name of the cellpy file.
parent_level (str) (optional): name of the parent level
(defaults to "CellpyData"). DeprecationWarning!
accept_old (bool): accept old file versions.
Returns:
loaded datasets (DataSet-object)
"""
CELLPY_FILE_VERSION = 6
HEADERS_SUMMARY["cycle_index"] = "cycle_index"
HEADERS_SUMMARY["discharge_capacity"] = "discharge_capacity_mAh_g"
if parent_level is None:
parent_level = prms._cellpyfile_root
if parent_level != prms._cellpyfile_root:
self.logger.debug(
f"Using non-default parent label for the " f"hdf-store: {parent_level}"
)
if not os.path.isfile(filename):
self.logger.info(f"File does not exist: {filename}")
raise IOError(f"File does not exist: {filename}")
cellpy_file_version = self._get_cellpy_file_version(filename)
if cellpy_file_version > CELLPY_FILE_VERSION:
raise WrongFileVersion(
f"File format too new: {filename} :: version: {cellpy_file_version}"
f"Reload from raw or upgrade your cellpy!"
)
elif cellpy_file_version < MINIMUM_CELLPY_FILE_VERSION:
raise WrongFileVersion(
f"File format too old: {filename} :: version: {cellpy_file_version}"
f"Reload from raw or downgrade your cellpy!"
)
elif cellpy_file_version < CELLPY_FILE_VERSION:
if accept_old:
self.logger.debug(f"old cellpy file version {cellpy_file_version}")
self.logger.debug(f"filename: {filename}")
self.logger.warning(f"Loading old file-type. It is recommended that you remake the step table and the "
f"summary table.")
new_data = self._load_old_hdf5(filename, cellpy_file_version)
else:
raise WrongFileVersion(
f"File format too old: {filename} :: version: {cellpy_file_version}"
f"Try loading setting accept_old=True"
)
else:
self.logger.debug(f"Loading {filename} :: v{cellpy_file_version}")
new_data = self._load_hdf5_current_version(filename)
return new_data
def _load_hdf5(self, filename, parent_level=None, accept_old=False):
"""Load a cellpy-file.
Args:
filename (str): Name of the cellpy file.
parent_level (str) (optional): name of the parent level
(defaults to "CellpyData"). DeprecationWarning!
accept_old (bool): accept old file versions.
Returns:
loaded datasets (DataSet-object)
"""
if parent_level is None:
parent_level = prms._cellpyfile_root
if parent_level != prms._cellpyfile_root:
self.logger.debug(
f"Using non-default parent label for the " f"hdf-store: {parent_level}"
)
if not os.path.isfile(filename):
self.logger.info(f"File does not exist: {filename}")
raise IOError(f"File does not exist: {filename}")
cellpy_file_version = self._get_cellpy_file_version(filename)
if cellpy_file_version > CELLPY_FILE_VERSION:
raise WrongFileVersion(
f"File format too new: {filename} :: version: {cellpy_file_version}"
f"Reload from raw or upgrade your cellpy!"
)
elif cellpy_file_version < MINIMUM_CELLPY_FILE_VERSION:
raise WrongFileVersion(
f"File format too old: {filename} :: version: {cellpy_file_version}"
f"Reload from raw or downgrade your cellpy!"
)
elif cellpy_file_version < CELLPY_FILE_VERSION:
if accept_old:
self.logger.debug(f"old cellpy file version {cellpy_file_version}")
self.logger.debug(f"filename: {filename}")
new_data = self._load_old_hdf5(filename, cellpy_file_version)
else:
raise WrongFileVersion(
f"File format too old: {filename} :: version: {cellpy_file_version}"
f"Try loading setting accept_old=True"
)
else:
self.logger.debug(f"Loading {filename} :: v{cellpy_file_version}")
new_data = self._load_hdf5_current_version(filename)
return new_data
def _load_hdf5_current_version(self, filename, meta_dir="/info", parent_level=None):
if parent_level is None:
parent_level = prms._cellpyfile_root
raw_dir = prms._cellpyfile_raw
step_dir = prms._cellpyfile_step
summary_dir = prms._cellpyfile_summary
fid_dir = prms._cellpyfile_fid
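# Expected layout of the cellpy hdf5 store (cf. _load_hdf5_v5 below): the raw data,
# step table, summary, file-id table and meta info live under <parent_level>/raw,
# /steps, /summary, /fid and /info respectively (parent_level defaults to
# prms._cellpyfile_root, "CellpyData" in v5 files).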
with pd.HDFStore(filename) as store:
data, meta_table = self._create_initial_data_set_from_cellpy_file(
meta_dir, parent_level, store
)
self._check_keys_in_cellpy_file(
meta_dir, parent_level, raw_dir, store, summary_dir
)
self._extract_summary_from_cellpy_file(
data, parent_level, store, summary_dir
)
self._extract_raw_from_cellpy_file(data, parent_level, raw_dir, store)
self._extract_steps_from_cellpy_file(data, parent_level, step_dir, store)
fid_table, fid_table_selected = self._extract_fids_from_cellpy_file(
fid_dir, parent_level, store
)
self._extract_meta_from_cellpy_file(data, meta_table, filename)
if fid_table_selected:
(data.raw_data_files, data.raw_data_files_length,) = self._convert2fid_list(
fid_table
)
else:
data.raw_data_files = None
data.raw_data_files_length = None
# this does not yet allow multiple sets
new_tests = [
data
] # but cellpy is ready when that time comes (if it ever happens)
return new_tests
def _load_hdf5_v5(self, filename):
parent_level = "CellpyData"
raw_dir = "/raw"
step_dir = "/steps"
summary_dir = "/summary"
fid_dir = "/fid"
meta_dir = "/info"
with pd.HDFStore(filename) as store:
from __future__ import print_function
import pandas as pd
import numpy as np
import os
from collections import OrderedDict
from pria_lifechem.function import *
from prospective_screening_model_names import *
from prospective_screening_metric_names import *
dataframe = pd.read_csv('../../dataset/fixed_dataset/pria_prospective.csv.gz')
molecule_ids = dataframe['Molecule'].tolist()
actual_labels = dataframe['Keck_Pria_AS_Retest'].tolist()
inhibits = dataframe['Keck_Pria_Continuous'].tolist()
complete_df = pd.DataFrame({'molecule': molecule_ids, 'label': actual_labels, 'inhibition': inhibits})
column_names = ['molecule', 'label', 'inhibition']
complete_df = complete_df[column_names]
dir_ = '../../output/stage_2_predictions/Keck_Pria_AS_Retest'
model_names = []
for model_name in model_name_mapping.keys():
file_path = '{}/{}.npz'.format(dir_, model_name)
if not os.path.exists(file_path):
print('model: {} doesn\'t exist'.format(model_name))
continue
data = np.load(file_path)
# print(file_path, '\t', data.keys(), '\t', data['y_pred_on_test'].shape)
y_pred = data['y_pred_on_test'][:, 2]
if y_pred.ndim == 2:
y_pred = y_pred[:, 0]
print(y_pred.shape, y_pred[:5])
model_name = model_name_mapping[model_name]
model_names.append(model_name)
complete_df[model_name] = y_pred
model_names = sorted(model_names)
column_names.extend(model_names)
complete_df = complete_df[column_names]
print()
### Generate Metric DF
true_label = complete_df['label'].values
true_label = reshape_data_into_2_dim(true_label)
model_names.remove('Baseline')
roc_auc_list = []
metric_df = pd.DataFrame({'Model': model_names})
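# Hedged sketch (not part of the original script): one way roc_auc_list could be
# populated, scoring each model's prediction column against the true labels with
# scikit-learn. The pria_lifechem.function helpers imported above may provide their
# own metric implementations; this is only an illustration.
from sklearn.metrics import roc_auc_score

for model_name in model_names:
    roc_auc_list.append(roc_auc_score(true_label.ravel(), complete_df[model_name].values))
print(pd.DataFrame({'Model': model_names, 'ROC AUC': roc_auc_list}))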
"""
This script will create the movement multipliers using:
- A populations estimate for each local authority in Scotland from 2018 (mid year) https://www2.gov.scot/Resource/0046/00462936.csv
- Google mobility data giving the number of movements between geographic regions in 2020 https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv
- A table mapping the iso_3166_2 name of each local authority to the local authority name and code
compiled by hand, <NAME>, 30 June 2020
Python requirements:
- pathlib
- pandas
- zipfile
- urllib.request
- data_pipeline_api
How to run this module:
This script uses the data_pipeline_api.data_processing_api, and therefore makes use of both
- data_processing_config.yaml, and
- metadata.yaml
metadata.yaml should include something like:
```
- doi_or_unique_name: mid-year-pop-est-18-tabs_Table 2.csv
filename: human/external/mid-year-pop-est-18-tabs_Table 2.csv
- doi_or_unique_name: Global_Mobility_Report.csv
filename: human/external/Global_Mobility_Report.csv
```
This script can be run with
```
python aggregate_flows_from_google_data.py
```
The script generates .h5 files in generated_sns_products/movement_multiplier
"""
from pathlib import Path
import pandas as pd
from data_pipeline_api.data_processing_api import DataProcessingAPI
config_filename = Path(__file__).parent / "data_processing_config.yaml"
uri = "data_processing_uri"
git_sha = "data_processing_git_sha"
def download_pop_table():
"""
Download the population data from an external source if it doesn't exist in data_path, using only the Area1 data and the first 3 columns
(the LA code, name and population), removing the commas in the population numbers.
:return: A dataframe containing the local authority code, name and population,
and the total population
"""
population_table = "mid-year-pop-est-18-tabs_Table 2.csv"
# The downloading below isn't currently in use - we want to have these scripts not directly download anything
# but instead handle that as part of the database. However, I've left it here as a record, in case we need to include it again
# If the population table doesn't exist download it.
# if not Path(data_path / population_table).exists():
# print(f"Could not find {data_path}/{population_table}, downloading it")
# url = "https://www.nrscotland.gov.uk/files//statistics/population-estimates/mid-18/mid-year-pop-est-18-tabs.zip"
# zip_filename = "mid-year-pop-est-18-tabs.zip"
# urllib.request.urlretrieve(
# url, zip_filename
# )
#
# with zipfile.ZipFile(zip_filename, 'r') as zip_ref:
# zip_ref.extractall(data_path, members=[population_table])
#
# # clean up (i.e. remove) the downloaded datafile(s)
# Path(zip_filename).unlink(missing_ok=False)
with DataProcessingAPI.from_config(
config_filename, uri=uri, git_sha=git_sha
) as api:
with api.read_external_object(population_table, "only") as file:
dfPop = pd.read_csv(file, skiprows=5, nrows=32, usecols=[0, 1, 2])
dfPop.columns = ["la_code", "la_name", "population"]
dfPop["population"] = dfPop["population"].str.replace(",", "").astype(int)
total_population = dfPop["population"].sum()
return dfPop, total_population
def download_lookup_table():
"""
Downloads the mapping of iso_3166_2 codes to local authorities from the SCRC database;
if it doesn't exist there, upload it.
:return: A dataframe containing the full ISO code (GB-iso_3166_2) and the corresponding local authority code.
"""
# ISO region to LA best-attempt lookup table: compiled by hand, <NAME>, 30 June 2020
lookup_table = "iso-3166-2_to_scottishLA.csv"
with DataProcessingAPI.from_config(
config_filename, uri=uri, git_sha=git_sha
) as api:
with api.read_external_object(lookup_table, "only") as file:
dfLookup = pd.read_csv(file, low_memory=False)
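# Hedged usage sketch (assumes the two external csv objects are registered in
# metadata.yaml as described in the module docstring above):
#   dfPop, total_population = download_pop_table()
#   dfLookup = download_lookup_table()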
"""
Copyright (c) 2020, <NAME> <NAME>
All rights reserved.
This is an information tool to retrieve official business financials (income statements, balance sheets, and cashflow statements) for a specified range of time. The code aims to be as vanilla as possible by minimizing the dependencies and packages used to construct functions. This code can be used immediately off the shelf and assumes no more than the following packages to be installed. As a reminder, please ensure that your directory has enough space, ideally at least 100 MB, for newly serialized reports to reside on the disk until you decide to clear them.
"""
# Import libraries
import re
import requests
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
from datetime import datetime
from selenium import webdriver
import os
import pickle
class Business:
# Define a default constructor for the Business object
def __init__(self, foreign, symbol, report_type, start_period, end_period ):
self.foreign=foreign
self.symbol=symbol
self.report_type=report_type
self.start_period=start_period
self.end_period=end_period
#-------------Retrieving Annual/Quarter Reports----------
# Define a function to store the url(s) to a company's annual or quarter report(s)
def ghost_report_url(self):
############## Check validity of inputs #############
## Error Message if the foreign argument is not logical
if (type(self.foreign)!=bool):
raise TypeError("Invalid foreign type: foreign argument must be logical- True or False")
## Error message if the inputted ticker symbol is not a string
if(type(self.symbol)!=str):
raise TypeError("Invalid ticker symbol type: symbol argument must be a string")
## Error message if the inputted report type is neither 'annual' or 'quarter'
if(self.report_type!='annual' and self.report_type!='quarter'):
raise TypeError("Invalid report type: only 'annual' or 'quarter' report type is allowed")
## Error message if the specified start period or(and) end period is(are) not valid
if (len(str(self.start_period)) != 8) or (len(str(self.end_period)) != 8):
raise ValueError("Invalid start period or(and) end period(s): start_period and end_period arguments must be in the form yyyymmdd")
## Error message to warn that foreign quarterly reports are not available on the SEC Edgar database
if(self.foreign==True and self.report_type=='quarter'):
raise ValueError("Foreign quarterly report(s) not available: try 'annual' report instead")
# Convert start_period and end_period inputs to a datetime object
start_period=datetime.strptime(str(self.start_period),"%Y%m%d").date()
end_period=datetime.strptime(str(self.end_period),"%Y%m%d").date()
################# Retrieving Annual Report(s) (10-K or 20-F) ################
if(self.report_type=='annual'):
# Get the url to the company's historic 10-K (including 10-K/A) or 20-F (including 20-F/A) filings(s)
historical_filings_url=r"http://www.sec.gov/cgi-bin/browse-edgar?action=getcompany&CIK="+self.symbol+"&type=10-k&dateb=&owner=exclude&count=100" if self.foreign==False else r"http://www.sec.gov/cgi-bin/browse-edgar?action=getcompany&CIK="+self.symbol+"&type=20-f&dateb=&owner=exclude&count=100"
# Get table containing descriptions of the company's 10-K(include 10-K/A and others) or 20-F(include 20F/A and others) filings(s)
filings_description_table=pd.read_html(str(BeautifulSoup(requests.get(historical_filings_url).content,"html.parser").find("table",{"class":"tableFile2"})))[0]
## Stop and return an error message if the company has no filing type of 10-K or 20-F, given the company symbol and foreign logic
if len(filings_description_table[(filings_description_table["Filings"]=="10-K")|(filings_description_table["Filings"]=="20-F")])==0:
raise NameError("Invalid company symbol or(and) foreign logical")
# Get the company's CIK (Central Index Key) number
cik_number=re.search(r"(\d{10})",BeautifulSoup(requests.get(historical_filings_url).content,"html.parser").find("span",{"class":"companyName"}).text)[0]
# Get a list of accession numbers of the historic 10-K or 20-F filing(s). raw_accession_numbers because accession numbers are separated by dashes
raw_accession_numbers=filings_description_table[(filings_description_table["Filings"]=="10-K")| (filings_description_table["Filings"]=="20-F")].Description.str.extract(r"(\d{10}\-\d{2}\-\d{6})",expand=False)
# Get a list of url(s) to a company's historic 10-K or 20-F report(s) details
filing_details_url=r"https://www.sec.gov/Archives/edgar/data/"+cik_number+r"/"+raw_accession_numbers+r"-index.html"
filing_details_url=filing_details_url.to_list()
# Get a list of url(s) to a company's 10-K or 20-F report(s) documentations
document_details_url=r"https://www.sec.gov/cgi-bin/viewer?action=view&cik="+cik_number+"&accession_number="+raw_accession_numbers+"&xbrl_type=v"
document_details_url=document_details_url.to_list()
# Get report period(s), that is the 10-K or 20-F report(s) as of this(these) date(s)
report_periods=[datetime.strptime(BeautifulSoup(requests.get(url).content,"html.parser").find("div",text=re.compile("Period of Report")).find_next("div").text,"%Y-%m-%d").date() for url in filing_details_url]
# Get specified filing details url(s)
filing_details_url=[filing_details_url[url] for url in range(len(report_periods)) if report_periods[url]>start_period and report_periods[url]<=end_period]
# Get specified document details url(s)
document_details_url=[document_details_url[url] for url in range(len(report_periods)) if report_periods[url]>start_period and report_periods[url]<=end_period]
# Get download url(s) to the company's 10-K or 20F extracts
annual_download_url=[]
for url in document_details_url:
soup=BeautifulSoup(requests.get(url).content,"html.parser").find('a', text = re.compile('View Excel Document'), attrs = {'class' : 'xbrlviewer'})
if soup is not None:
annual_download_url.append(r"https://www.sec.gov"+soup['href'])
else:
annual_download_url.append(None)
# Get specified report period(s)
report_periods=[report_periods[rp] for rp in range(len(report_periods)) if report_periods[rp]>start_period and report_periods[rp]<=end_period]
# Get html table(s) of the document format files
tableFile=[BeautifulSoup(requests.get(url).content,"html.parser").find("table", { "summary" : "Document Format Files"}) for url in filing_details_url]
# Get url(s) to the annual report html
annual_report_url=[]
for tab in range(len(tableFile)):
if tableFile[tab].findAll("tr")[1].findAll("td")[2].find("a").text.strip()!='':
if ".htm" in tableFile[tab].findAll("tr")[1].findAll("td")[2].find("a").text.strip():
annual_report_url.append("https://www.sec.gov"+tableFile[tab].findAll("tr")[1].findAll("td")[2].find("a")["href"].replace("/ix?doc=",""))
else:
annual_report_url.append("annual report is not in HTML format")
else:
annual_report_url.append("annual report not available")
# Combine the company's report period(s), and annual report url(s) into a data frame
annual_report_df=pd.DataFrame({'report_periods':report_periods,'annual_report_url':annual_report_url,'annual_download_url':annual_download_url},index=[self.symbol]*len(report_periods))
# Return the data frame constructed above if it is not empty
if not annual_report_df.empty:
return annual_report_df
else:
return "No annual report filing(s) for "+ self.symbol + " between "+ start_period.strftime("%Y-%m-%d")+" and "+end_period.strftime("%Y-%m-%d")
################# Retrieving Quarter Report(s) (10-Q) #########################
if(self.report_type=='quarter'):
# Get the url to the company's historic 10-Q
historical_filings_url=r"http://www.sec.gov/cgi-bin/browse-edgar?action=getcompany&CIK="+self.symbol+"&type=10-q&dateb=&owner=exclude&count=100"
# Get table containing descriptions of the company's 10-Q(include 10-Q/A and others) filings(s)
filings_description_table=pd.read_html(str(BeautifulSoup(requests.get(historical_filings_url).content,"html.parser").find("table",{"class":"tableFile2"})))[0]
## Stop and return an error message if the company has no filing type of 10-Q, given the company symbol and foreign logic
if len(filings_description_table[filings_description_table["Filings"]=="10-Q"])==0:
raise NameError("Invalid company symbol or(and) foreign logical")
# Get the company's CIK (Central Index Key) number
cik_number=re.search(r"(\d{10})",BeautifulSoup(requests.get(historical_filings_url).content,"html.parser").find("span",{"class":"companyName"}).text)[0]
# Get a list of accession numbers of the historic 10-Q. raw_accession_numbers because accession numbers are separated by dashes
raw_accession_numbers=filings_description_table[filings_description_table["Filings"]=="10-Q"].Description.str.extract(r"(\d{10}\-\d{2}\-\d{6})",expand=False)
# Get a list of url(s) to a company's historic 10-Q report(s) details
filing_details_url=r"https://www.sec.gov/Archives/edgar/data/"+cik_number+r"/"+raw_accession_numbers+r"-index.html"
filing_details_url=filing_details_url.to_list()
# Get a list of url(s) to a company's 10-Q report(s) documentations
document_details_url=r"https://www.sec.gov/cgi-bin/viewer?action=view&cik="+cik_number+"&accession_number="+raw_accession_numbers+"&xbrl_type=v"
document_details_url=document_details_url.to_list()
## At this moment, documents before 2009 are not available. Documents of this type are not normally needed anyway
# Get report period(s), that is the 10-Q report(s) as of this(these) date(s)
report_periods=[datetime.strptime(BeautifulSoup(requests.get(url).content,"html.parser").find("div",text=re.compile("Period of Report")).find_next("div").text,"%Y-%m-%d").date() for url in filing_details_url]
# Get specified filing details url(s)
filing_details_url=[filing_details_url[url] for url in range(len(report_periods)) if report_periods[url]>start_period and report_periods[url]<=end_period]
# Get specified document details url(s)
document_details_url=[document_details_url[url] for url in range(len(report_periods)) if report_periods[url]>start_period and report_periods[url]<=end_period]
# Get download url(s) to the company's 10-Q extracts
quarter_download_url=[]
for url in document_details_url:
soup=BeautifulSoup(requests.get(url).content,"html.parser").find('a', text = re.compile('View Excel Document'), attrs = {'class' : 'xbrlviewer'})
if soup is not None:
quarter_download_url.append(r"https://www.sec.gov"+soup['href'])
else:
quarter_download_url.append(None)
# Get specified report period(s)
report_periods=[report_periods[rp] for rp in range(len(report_periods)) if report_periods[rp]>start_period and report_periods[rp]<=end_period]
# Get html table(s) of the document format files
tableFile=[BeautifulSoup(requests.get(url).content,"html.parser").find("table", { "summary" : "Document Format Files"}) for url in filing_details_url]
# Get url(s) to the quarterly report html
quarter_report_url=[]
for tab in range(len(tableFile)):
if tableFile[tab].findAll("tr")[1].findAll("td")[2].find("a").text.strip()!='':
if ".htm" in tableFile[tab].findAll("tr")[1].findAll("td")[2].find("a").text.strip():
quarter_report_url.append("https://www.sec.gov"+tableFile[tab].findAll("tr")[1].findAll("td")[2].find("a")["href"].replace("/ix?doc=",""))
else:
quarter_report_url.append("quarterly report is not in HTML format")
else:
quarter_report_url.append("quarterly report not available")
# Combine the company's report period(s), and quarterly report url(s) into a data frame
quarter_report_df=pd.DataFrame({'report_periods':report_periods,'quarter_report_url':quarter_report_url,'quarter_download_url':quarter_download_url},index=[self.symbol]*len(report_periods))
# Return the data frame constructed above if it is not empty
if not quarter_report_df.empty:
return quarter_report_df
else:
return "No quarter report filing(s) for "+ self.symbol + " between "+ start_period.strftime("%Y-%m-%d")+" and "+end_period.strftime("%Y-%m-%d")
#------------------------ Best-scrolled to the most relevant financial exhibit------------------------
# A function to exhibit financial statements
def financial_statements_exhibit(self):
## Errors checked in the ghost_report_url()
# Target annual financial statements of U.S. businesses
# Prioritize in the order of 'Consolidated Statements of Cash Flows', 'Consolidated Income Statements', 'Consolidated Statements of Operations', 'Consolidated Balance Sheets', 'Consolidated Statements of Financial Position', 'Financial Statements and Supplementary Data', 'Selected Financial Data'
if (self.foreign==False and self.report_type=='annual'):
# Import annual_report_url dataframe
annual_report_url=self.ghost_report_url().annual_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
# Locate the 'chromedriver.exe' file to launch the Chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up the best_scrolled financial exhibits
for url_index in range(len(annual_report_url)):
driver.get(annual_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Cash Flows').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Income Statements').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Operations').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Balance Sheets').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Financial Position').click()
except:
try:
driver.find_element_by_partial_link_text('Financial Statements and Supplementary Data').click()
except:
try:
driver.find_element_by_partial_link_text('Selected Financial Data').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' annual financial statements require manual browsing.')
pass
# Open new tab after pulling up the best-scrolled financial exhibit
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
# Target annual financial statements of foreign businesses
# Prioritize in the order of 'Consolidated Statements of Cash Flows', 'Consolidated Income Statements', 'Consolidated Statements of Operations', 'Consolidated Balance Sheets', 'Consolidated Statements of Financial Position', 'FINANCIAL STATEMENTS', 'Financial Statements', 'Selected Financial Data'
if (self.foreign==True and self.report_type=='annual'):
# Import annual_report_url dataframe
annual_report_url=self.ghost_report_url().annual_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
# Locate the 'chromedriver.exe' file to launch the Chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up the most relevant financial exhibits
for url_index in range(len(annual_report_url)):
driver.get(annual_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Cash Flows').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Income Statements').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Operations').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Balance Sheets').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Financial Position').click()
except:
try:
driver.find_element_by_partial_link_text('FINANCIAL STATEMENTS').click()
except:
try:
# Since the query is case insensitive, search in other cases
driver.find_element_by_partial_link_text('Financial Statements').click()
except:
try:
driver.find_element_by_partial_link_text('Selected Financial Data').click()
except:
try:
driver.find_element_by_partial_link_text('KEY INFORMATION').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' annual financial statements require manual browsing.')
pass
# Open new tab after pulling up the best-scrolled financial exhibit
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
# Target quarter financial statements of U.S. businesses
# Prioritize in the order of 'Consolidated Balance Sheets', 'Consolidated Statements of Financial Position','Consolidated Statements of Cash Flows','Consolidated Income Statements' 'Consolidated Statements of Operations', 'FINANCIAL STATEMENTS', 'Financial Statements'
if(self.foreign==False and self.report_type=='quarter'):
# Import quarter_report_url dataframe
quarter_report_url=self.ghost_report_url().quarter_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
# Locate the 'chromedriver.exe' file to launch the Chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up best-scrolled financial exhibits
for url_index in range(len(quarter_report_url)):
driver.get(quarter_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Consolidated Balance Sheets').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Financial Position').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Cash Flows').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Income Statements').click()
except:
try:
driver.find_element_by_partial_link_text('Consolidated Statements of Operations').click()
except:
try:
driver.find_element_by_partial_link_text('FINANCIAL STATEMENTS').click()
except:
try:
driver.find_element_by_partial_link_text('Financial Statements').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' quarter financial statements require manual browsing.' )
pass
# Open new tab after pulling up the best-scrolled balance sheet section
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
#------------ Best-scrolled to the most relevant risk factor exhibit------------
# A function to exhibit risk factors
def risk_factors_exhibit(self, risk_type):
## Previous errors checked in the ghost_report_url()
## Error message if the inputted risk type is neither 'enterprise' or 'market'
if(risk_type!='enterprise' and risk_type!='market'):
raise TypeError("Invalid risk type: only 'enterprise' or 'market' risk type is allowed")
########################### Enterprise Risk Exhibit ##################################
if(risk_type=='enterprise'):
# Target annual and quarter enterprise risk factors of U.S. businesses
# Prioritize in the order of 'Risk Factors','RISK FACTORS'
if (self.foreign==False and self.report_type=='annual'):
# Import annual_report_url dataframe
annual_report_url=self.ghost_report_url().annual_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
# Locate the 'chromedriver.exe' file to launch the Chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up the best_scrolled enterprise risk factor exhibits
for url_index in range(len(annual_report_url)):
driver.get(annual_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Risk Factors').click()
except:
try:
driver.find_element_by_partial_link_text('RISK FACTORS').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' annual enterprise risk factors require manual browsing.' )
pass
# Open new tab after pulling up the best-scrolled enterprise risk factor exhibits
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
elif (self.foreign==False and self.report_type=='quarter'):
# Import annual_report_url dataframe
quarter_report_url=self.ghost_report_url().quarter_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
# Locate the 'chromedriver.exe' file to launch the Chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up the best_scrolled enterprise risk factor exhibits
for url_index in range(len(quarter_report_url)):
driver.get(quarter_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Risk Factors').click()
except:
try:
driver.find_element_by_partial_link_text('RISK FACTORS').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' quarter enterprise risk factors require manual browsing.')
pass
# Open new tab after pulling up the best-scrolled enterprise risk factor exhibits
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
# Target annual enterprise risk factors of foreign businesses
# Prioritize in the order of 'Risk Factors', 'RISK FACTORS', 'KEY INFORMATION', 'Key Information'
if (self.foreign==True and self.report_type=='annual'):
# Import annual_report_url dataframe
annual_report_url=self.ghost_report_url().annual_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
# Locate the 'chromedriver.exe' file to launch the Chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up the best_scrolled enterprise risk factor exhibits
for url_index in range(len(annual_report_url)):
driver.get(annual_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Risk Factors').click()
except:
try:
driver.find_element_by_partial_link_text('RISK FACTORS').click()
except:
try:
driver.find_element_by_partial_link_text('KEY INFORMATION').click()
except:
try:
driver.find_element_by_partial_link_text('Key Information').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' annual enterprise risk factors require manual browsing.')
pass
# Open new tab after pulling up the best-scrolled enterprise risk factor exhibits
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
########################### Market Risk Exhibit #############################
elif(risk_type=='market'):
# Target annual and quarter market risk factors of U.S. businesses
# Prioritize in the order of 'Quantitative and Qualitative Disclosures About Market Risk', 'QUANTITATIVE AND QUALITATIVE DISCLOSURES ABOUT MARKET RISK'
if (self.foreign==False and self.report_type=='annual'):
# Import annual_report_url dataframe
annual_report_url=self.ghost_report_url().annual_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
# Locate the 'chromedriver.exe' file to launch the Chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up the best_scrolled market risk factor exhibits
for url_index in range(len(annual_report_url)):
driver.get(annual_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Quantitative and Qualitative Disclosures about Market Risk').click()
except:
try:
driver.find_element_by_partial_link_text('Quantitative and Qualitative Disclosures About Market Risk').click()
except:
try:
driver.find_element_by_partial_link_text('QUANTITATIVE AND QUALITATIVE DISCLOSURES ABOUT MARKET RISK').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' annual market risk factors require manual browsing.')
pass
# Open new tab after pulling up the best-scrolled market risk factor exhibits
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
elif (self.foreign==False and self.report_type=='quarter'):
# Import annual_report_url dataframe
quarter_report_url=self.ghost_report_url().quarter_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
# Locate the 'chromedriver.exe' file to launch the Chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up the best_scrolled market risk factor exhibits
for url_index in range(len(quarter_report_url)):
driver.get(quarter_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Quantitative and Qualitative Disclosures about Market Risk').click()
except:
try:
driver.find_element_by_partial_link_text('Quantitative and Qualitative Disclosures About Market Risk').click()
except:
try:
driver.find_element_by_partial_link_text('QUANTITATIVE AND QUALITATIVE DISCLOSURES ABOUT MARKET RISK').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' quarter market risk factors require manual browsing.')
pass
# Open new tab after pulling up the best-scrolled market risk factor exhibits
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
# Target annual market risk factors of foreign businesses
# Prioritize in the order of 'Quantitative and Qualitative Disclosures About Market Risk','QUANTITATIVE AND QUALITATIVE DISCLOSURES ABOUT MARKET RISK'
if (self.foreign==True and self.report_type=='annual'):
# Import annual_report_url dataframe
annual_report_url=self.ghost_report_url().annual_report_url
# Import report_periods dataframe
report_periods=self.ghost_report_url().report_periods
# Locate the 'chromedriver.exe' file to launch the Chrome browser
driver=webdriver.Chrome(os.getcwd()+'\\chromedriver.exe')
# Recurrently pull up the best_scrolled market risk factor exhibits
for url_index in range(len(annual_report_url)):
driver.get(annual_report_url[url_index])
try:
driver.find_element_by_partial_link_text('Quantitative and Qualitative Disclosures about Market Risk').click()
except:
try:
driver.find_element_by_partial_link_text('Quantitative and Qualitative Disclosures About Market Risk').click()
except:
try:
driver.find_element_by_partial_link_text('QUANTITATIVE AND QUALITATIVE DISCLOSURES ABOUT MARKET RISK').click()
except:
print(self.symbol+' '+report_periods[url_index].strftime('%Y-%m-%d')+' annual market risk factors require manual browsing.')
pass
# Open new tab after pulling up the best-scrolled market risk factor exhibits
driver.execute_script("window.open('');")
# Focus on the new tab for the next loop
driver.switch_to.window(driver.window_handles[-1])
#----------------------------- Curate Financial Statements -----------------------------------------
# A function to curate income statements, balance sheets, and cah flow statements for U.S. and foreign businesses
def curate_financial_statements(self,statement_type):
## Error message if inputted statement type is not available
if(statement_type!='income' and statement_type!='balance' and statement_type!='cashflow'):
raise TypeError("Statement type not available: only 'income', 'balance', or 'cashflow' statement type is allowed")
# Probable names for statement selection - may have to update identifiers as different companies use different statement names
income_terms=['Consolidated Income Statement', 'Consolidated Statements of Income', 'Consolidated Statements of Earnings', 'Consolidated Statements of Operations','Consolidated Statements of Profit or Loss','Profit and Loss Statement','P&L Statement','P/L Statement','Consolidated Income Statement','Consolidated Statement of Income', 'Consolidated Statement of Earnings','Consolidated Statement of Operations','Consolidated Statement of Profit or Loss','Consolidated Profit and Loss Statement','Consolidated P&L Statement','Consolidated P/L Statement','Statement of Consolidated Operations','Statements of Consolidated Operations','Statement of Combined Operation','Statements of Combined Operation']
balance_terms=['Consolidated Balance Sheets', 'Consolidated Balance Sheet','Consolidated Statements of Financial Position', 'Consolidated Statements of Financial Condition','Consolidated Statement of Financial Positions','Consolidated Statement of Financial Conditions', 'Statement of Consolidated Financial Position','Statements of Consolidated Financial Position', 'Statement of Consolidated Financial Condition', 'Statements of Consolidated Financial Condition','Combined Balance Sheet']
cashflow_terms=['Consolidated Statements of Cash Flows','Consolidated Statement of Cash Flows','Cash Flow Statement','Consolidated Cash Flow Statement', 'Statement of Consolidated Cash Flows','Statements of Consolidated Cash Flows','Statement of Combined Cash Flow','Statements of Combined Cash Flow']
# Set root diectory for file access
root_path=os.getcwd()
########### Extract Annual and Quarter Financial Statements (U.S. and foreign businesses)#################
# Retrieve periods and url(s) from the url table called by ghost_report_url()
report_table=self.ghost_report_url()
report_periods=report_table.report_periods.to_list()
if(self.report_type=='annual'):
download_url_container=report_table.annual_download_url.to_list() # container to store the download urls of annual statements
elif(self.report_type=='quarter'):
download_url_container=report_table.quarter_download_url.to_list() # container to store the download urls of quarter statements
# Designate a directory to store downloaded statements (begin statement piling)
statement_pile_path=os.path.join(root_path,'statement_pile')
company_pile_path=os.path.join(statement_pile_path,self.symbol)
try:
os.mkdir(statement_pile_path) # Create the statement_pile_path path
os.mkdir(company_pile_path) # Create the company_pile_path path
os.chdir(company_pile_path) # Tab into the company_pile_path path
except:
try:
os.mkdir(company_pile_path) # Create the company_pile_path path
os.chdir(company_pile_path) # Tab into the company_pile_path path
except:
os.chdir(company_pile_path)
# Download accessible statements into the statement_pile path
# Construct a data frame to store the specified statement type
period_container=[] # container to store statement periods
statement_container=[] # container to store statement table
for url_index in range(len(download_url_container)):
statement_period=report_periods[url_index].strftime("%Y-%m-%d")
if(download_url_container[url_index] is not None and download_url_container[url_index][download_url_container[url_index].rfind('.')+1:len(download_url_container[url_index])]!='xls'):
statement_file=requests.get(download_url_container[url_index])
file_name=self.symbol+statement_period+self.report_type+'.xlsx'
with open(file_name, 'wb+') as fs:
fs.write(statement_file.content) # populating statement contents
dfs=pd.ExcelFile(fs)
sheet_headers=list(map(lambda x: x.lower().replace(' ','').replace('_','').replace('-','').replace(',','').replace("'","").replace('&','').replace('/',''), [dfs.parse(sn).columns[0] for sn in dfs.sheet_names]))
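# The first cell of every sheet is normalised (lower-cased, spaces and punctuation stripped) so it can be matched against the probable statement names defined above.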
############################ Income Statements ###################################
if (statement_type=='income'):
income_term_header=list(map(lambda x: x.lower().replace(' ','').replace('&','').replace('/',''),income_terms))
select_sheet_bool=[any(x in sheet_headers[i] for x in income_term_header) for i in range(len(sheet_headers))]
if(any(select_sheet_bool)):
# Identify income statement and store in dataframe form
income_statement=dfs.parse(dfs.sheet_names[select_sheet_bool.index(True)])
# Store income statement into the statement container
statement_container.append(income_statement)
# Store income statement period into the period container
period_container.append(statement_period)
# Serialize the income statement dataframe into '.pickle'- to be accessed faster next time
income_statement.to_pickle(self.symbol+statement_period+self.report_type.capitalize()+statement_type.capitalize()+'.pickle')
else:
# Store income statement as None in the statement container
## Because not identified or does not exist
statement_container.append(None)
# Store income statement period into the period container
period_container.append(statement_period)
# Message to warn that income statement may be non-identified or simply not available
print(self.symbol+' '+statement_period+ ' '+self.report_type+' income statement not identified or not available: update income statement identifiers or pass')
############################ Balance Sheets ###################################
if (statement_type=='balance'):
balance_term_header=list(map(lambda x: x.lower().replace(' ','').replace('&','').replace('/',''), balance_terms))
select_sheet_bool=[any(x in sheet_headers[i] for x in balance_term_header) for i in range(len(sheet_headers))]
if(any(select_sheet_bool)):
# Identify balance sheet and store in dataframe form
balance_sheet=dfs.parse(dfs.sheet_names[select_sheet_bool.index(True)])
# Store balance sheet into the statement container
statement_container.append(balance_sheet)
# Store balance sheet period into the period container
period_container.append(statement_period)
# Serialize the balance sheet dataframe into '.pickle'- to be accessed faster next time
balance_sheet.to_pickle(self.symbol+statement_period+self.report_type.capitalize()+statement_type.capitalize()+'.pickle')
else:
# Store balance sheet as None in the statement container
## Because not identified or does not exist
statement_container.append(None)
# Store balance sheet period into the period container
period_container.append(statement_period)
# Message to warn that balance sheet may be non-identified or simply not available
print(self.symbol+' '+statement_period+ ' '+self.report_type+' balance sheet not identified or not available: update balance sheet identifiers or pass')
############################ Cash Flow Statements ###################################
if (statement_type=='cashflow'):
cashflow_term_header=list(map(lambda x: x.lower().replace(' ','').replace('&','').replace('/',''), cashflow_terms))
select_sheet_bool=[any(x in sheet_headers[i] for x in cashflow_term_header) for i in range(len(sheet_headers))]
if(any(select_sheet_bool)):
# Identify cash flow statement and store in dataframe form
cashflow_statement=dfs.parse(dfs.sheet_names[select_sheet_bool.index(True)])
# Store cash flow statement into the statement container
statement_container.append(cashflow_statement)
# Store cash flow statement period into the period container
period_container.append(statement_period)
# Serialize the cash flow statement dataframe into '.pickle'- to be accessed faster next time
cashflow_statement.to_pickle(self.symbol+statement_period+self.report_type.capitalize()+statement_type.capitalize()+'.pickle')
else:
# Store cash flow statement as None in the statement container
## Because not identified or does not exist
statement_container.append(None)
# Store cash flow statement period into the period container
period_container.append(statement_period)
# Message to warn that cash flow statement may be non-identified or simply not available
print(self.symbol+' '+statement_period+ ' '+self.report_type+' cashflow statement not identified or not available: update cash flow statement identifiers or pass')
fs.close() # close the downloaded '.xlsx' file
os.remove(file_name) # remove the downloaded '.xlsx' file after extracting financial statements
else:
print(self.symbol+' '+statement_period+' '+self.report_type+' '+statement_type+' statement not available')
# Combine the company's income statement(s) or balance sheet(s) or cash flow statement(s), and statement periods into a dataframe
statement_df=pd.DataFrame({'statement_periods':period_container,statement_type+'_statement':statement_container},index=[self.symbol]*len(period_container))
# Return back to root_path (end statement piling)
os.chdir(root_path)
# Return the data frame constructed above if it is not empty
if not statement_df.empty:
return statement_df
else:
return 'No '+self.report_type+' '+statement_type+' statement for '+self.symbol+' between '+self.start_period.strftime("%Y-%m-%d")+' and '+self.end_period.strftime("%Y-%m-%d")
#------------------------Extract Most Recent Income Statements--------------------------------
def ghost_income(self):
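# Ghosts (returns) the most recent income statements: reuses the pickled statements cached
# under statement_pile when they cover the requested period, otherwise falls back to
# curate_financial_statements('income'); if the newest cached annual statement is more than
# a year old it prints a reminder to update.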
bin_path=r'.\\statement_pile\\'+self.symbol
if (os.path.isdir(bin_path)):
bin_files=os.listdir(bin_path)
pass
else:
os.makedirs(bin_path)
bin_files=os.listdir(bin_path)
# Convert start_period and end_period inputs to a datetime object
start_period=datetime.strptime(str(self.start_period),"%Y%m%d").date()
end_period=datetime.strptime(str(self.end_period),"%Y%m%d").date()
if(self.report_type=='annual'):
if any(["AnnualIncome" in s for s in bin_files]):
annual_income_file=[s for s in bin_files if "AnnualIncome" in s]
annual_income_periods=list(map(lambda x: datetime.strptime(re.search('\d{4}-\d{2}-\d{2}',x).group(),"%Y-%m-%d").date(),annual_income_file))
annual_income_file=[annual_income_file[i] for i in range(len(annual_income_file)) if annual_income_periods[i]>start_period and annual_income_periods[i]<=end_period]
annual_income_periods=[annual_income_periods[i] for i in range(len(annual_income_periods)) if annual_income_periods[i]>start_period and annual_income_periods[i]<=end_period]
annual_income_file.reverse()
annual_income_periods.reverse()
try:
binded_income=pd.concat([pd.read_pickle(bin_path+'\\'+annual_income_file[0]),pd.read_pickle(bin_path+'\\'+annual_income_file[3]), pd.read_pickle(bin_path+'\\'+annual_income_file[6])], axis = 1)
binded_message='Ghosted '+self.report_type+' income statements for '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[3]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[6]).group()
except:
try:
binded_income=pd.concat([pd.read_pickle(bin_path+'\\'+annual_income_file[0]),pd.read_pickle(bin_path+'\\'+annual_income_file[3]), pd.read_pickle(bin_path+'\\'+annual_income_file[5])], axis = 1)
binded_message='Ghosted '+self.report_type+' income statements for '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[3]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[5]).group()
except:
try:
binded_income=pd.concat([pd.read_pickle(bin_path+'\\'+annual_income_file[0]),pd.read_pickle(bin_path+'\\'+annual_income_file[3]), pd.read_pickle(bin_path+'\\'+annual_income_file[4])], axis = 1)
binded_message='Ghosted '+self.report_type+' income statements for '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[3]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[4]).group()
except:
try:
binded_income=pd.concat([pd.read_pickle(bin_path+'\\'+annual_income_file[0]),pd.read_pickle(bin_path+'\\'+annual_income_file[3])], axis = 1)
binded_message='Ghosted '+self.report_type+' income statements for '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[3]).group()
except:
try:
binded_income=pd.concat([pd.read_pickle(bin_path+'\\'+annual_income_file[0]),pd.read_pickle(bin_path+'\\'+annual_income_file[2])], axis = 1)
binded_message='Ghosted '+self.report_type+' income statements for '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[2]).group()
except:
try:
binded_income=pd.concat([pd.read_pickle(bin_path+'\\'+annual_income_file[0]),pd.read_pickle(bin_path+'\\'+annual_income_file[1])], axis = 1)
binded_message='Ghosted '+self.report_type+' income statements for '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[1]).group()
except:
try:
binded_income=pd.read_pickle(bin_path+'\\'+annual_income_file[0])
binded_message='Ghosted '+self.report_type+' income statments for '+re.search('\d{4}-\d{2}-\d{2}',annual_income_file[0]).group()
except:
binded_income=None
binded_message='The specified time range is not available, try including a larger time range'
if(len(annual_income_periods)>0):
if(end_period-annual_income_periods[0]).days>365:
print('Recommend updating to the latest annual income statements: update via .update_financial_statements("income"), then call this function again')
else:
business_income=self.curate_financial_statements('income')
# Try progressively smaller combinations of the freshly curated statements.
candidate_sets=[[0,3,6],[0,3,5],[0,3,4],[0,3],[0,2],[0,1],[0]]
binded_income=None
binded_message='No '+self.report_type+' income statements for '+self.symbol+' between '+start_period.strftime("%Y-%m-%d")+' and '+end_period.strftime("%Y-%m-%d")
for idx_set in candidate_sets:
    try:
        binded_income=pd.concat([business_income.income_statement[i] for i in idx_set], axis=1)
        binded_message='Ghosted '+self.report_type+' income statements for '+', '.join(business_income.statement_periods[i] for i in idx_set)
        break
    except Exception:
        continue
elif(self.report_type=='quarter'):
if any(["QuarterIncome" in s for s in bin_files]):
quarter_income_file=[s for s in bin_files if "QuarterIncome" in s]
quarter_income_periods=list(map(lambda x: datetime.strptime(re.search('\d{4}-\d{2}-\d{2}',x).group(),"%Y-%m-%d").date(),quarter_income_file))
quarter_income_file=[quarter_income_file[i] for i in range(len(quarter_income_file)) if quarter_income_periods[i]>start_period and quarter_income_periods[i]<=end_period]
quarter_income_periods=[quarter_income_periods[i] for i in range(len(quarter_income_periods)) if quarter_income_periods[i]>start_period and quarter_income_periods[i]<=end_period]
quarter_income_file.reverse()
quarter_income_periods.reverse()
try:
binded_income=pd.concat([pd.read_pickle(bin_path+'\\'+f) for f in quarter_income_file], axis = 1)
binded_message='Ghosted '+self.report_type+' income statements for '+', '.join([re.search('\d{4}-\d{2}-\d{2}',f).group() for f in quarter_income_file])
except:
binded_income=None
binded_message='The specified time range is not available, try including a larger time range'
if(len(quarter_income_periods)>0):
if(end_period-quarter_income_periods[0]).days>180:
print('Recommend updating to the latest quarter income statements: update via .update_financial_statements("income") function, then call this function again')
else:
business_income=self.curate_financial_statements('income')
try:
binded_income=pd.concat(business_income.income_statement.to_list(), axis = 1)
binded_message='Ghosted '+self.report_type+' income statements for '+', '.join([business_income.statement_periods[i] for i in range(len(business_income.statement_periods))])
except:
binded_income=None
binded_message='No '+self.report_type+' income statements for '+self.symbol+' between '+datetime.strptime(str(self.start_period),"%Y%m%d").strftime("%Y-%m-%d")+' and '+datetime.strptime(str(self.end_period),"%Y%m%d").strftime("%Y-%m-%d")
print(binded_message)
return binded_income
#------------------------Extract Most Recent Balance Sheets--------------------------------
def ghost_balance(self):
bin_path=r'.\statement_pile\\'+self.symbol
if (os.path.isdir(bin_path)):
bin_files=os.listdir(bin_path)
pass
else:
os.makedirs(bin_path)
bin_files=os.listdir(bin_path)
# Convert start_period and end_period inputs to a datetime object
start_period=datetime.strptime(str(self.start_period),"%Y%m%d").date()
end_period=datetime.strptime(str(self.end_period),"%Y%m%d").date()
if(self.report_type=='annual'):
if any(["AnnualBalance" in s for s in bin_files]):
annual_balance_file=[s for s in bin_files if "AnnualBalance" in s]
annual_balance_periods=list(map(lambda x: datetime.strptime(re.search('\d{4}-\d{2}-\d{2}',x).group(),"%Y-%m-%d").date(),annual_balance_file))
annual_balance_file=[annual_balance_file[i] for i in range(len(annual_balance_file)) if annual_balance_periods[i]>start_period and annual_balance_periods[i]<=end_period]
annual_balance_periods=[annual_balance_periods[i] for i in range(len(annual_balance_periods)) if annual_balance_periods[i]>start_period and annual_balance_periods[i]<=end_period]
annual_balance_file.reverse()
annual_balance_periods.reverse()
try:
binded_balance=pd.concat([pd.read_pickle(bin_path+'\\'+annual_balance_file[0]),pd.read_pickle(bin_path+'\\'+annual_balance_file[2]), pd.read_pickle(bin_path+'\\'+annual_balance_file[4]), pd.read_pickle(bin_path+'\\'+annual_balance_file[6]), pd.read_pickle(bin_path+'\\'+annual_balance_file[8])], axis = 1)
binded_message='Ghosted '+self.report_type+' balance sheets for '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[0]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[2]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[4]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[6]).group()+', '+re.search('\d{4}-\d{2}-\d{2}',annual_balance_file[8]).group()
except:
try:
binded_balance=pd.concat([pd.read_pickle(bin_path+'\\'+annual_balance_file[0])
# coding: utf-8
"""
@brief test log(time=1s)
"""
import unittest
import pandas
import numpy
from scipy.sparse.linalg import lsqr as sparse_lsqr
from pyquickhelper.pycode import ExtTestCase, ignore_warnings
from pandas_streaming.df import pandas_groupby_nan, numpy_types
class TestPandasHelper(ExtTestCase):
def test_pandas_groupbynan(self):
self.assertTrue(sparse_lsqr is not None)
types = [(int, -10), (float, -20.2), (str, "e"),
(bytes, bytes("a", "ascii"))]
skip = (numpy.bool_, numpy.complex64, numpy.complex128)
types += [(_, _(5)) for _ in numpy_types() if _ not in skip]
for ty in types:
data = [{"this": "cst", "type": "tt1=" + str(ty[0]), "value": ty[1]},
{"this": "cst", "type": "tt2=" +
str(ty[0]), "value": ty[1]},
{"this": "cst", "type": "row_for_nan"}]
df = pandas.DataFrame(data)
gr = pandas_groupby_nan(df, "value")
co = gr.sum()
li = list(co["value"])
try:
self.assertIsInstance(li[-1], float)
except AssertionError as e:
raise AssertionError("Issue with {0}".format(ty)) from e
try:
self.assertTrue(numpy.isnan(li[-1]))
except AssertionError as e:
raise AssertionError(
"Issue with value {}\n--df--\n{}\n--gr--\n{}\n--co--\n{}".format(
li, df, gr.count(), co)) from e
for ty in types:
data = [{"this": "cst", "type": "tt1=" + str(ty[0]), "value": ty[1]},
{"this": "cst", "type": "tt2=" +
str(ty[0]), "value": ty[1]},
{"this": "cst", "type": "row_for_nan"}]
df = pandas.DataFrame(data)
try:
gr = pandas_groupby_nan(df, ("value", "this"))
t = True
raise Exception("---")
except TypeError:
t = False
if t:
co = gr.sum()
li = list(co["value"])
self.assertIsInstance(li[-1], float)
self.assertTrue(numpy.isnan(li[-1]))
try:
gr = pandas_groupby_nan(df, ["value", "this"])
t = True
except (TypeError, NotImplementedError):
t = False
if t:
co = gr.sum()
li = list(co["value"])
self.assertEqual(len(li), 2)
def test_pandas_groupbynan_tuple(self):
data = [dict(a="a", b="b", c="c", n=1), dict(
b="b", n=2), dict(a="a", n=3), dict(c="c", n=4)]
df = pandas.DataFrame(data)
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import pandas as pd
import numpy as np
from statsmodels.tools import categorical
# https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier
from sklearn.feature_selection import RFE, RFECV
from sklearn import preprocessing
def _time_analyze_(func):
from time import perf_counter as clock  # time.clock was removed in Python 3.8
exec_times = 1
def callf(*args, **kwargs):
start = clock()
for _ in range(exec_times):
r = func(*args, **kwargs)
finish = clock()
print("{:<20}{:10.6} s".format(func.__name__ + ":", finish - start))
return r
return callf
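# Illustrative only (not part of the original script): a minimal sketch of how the
# _time_analyze_ decorator above could be applied; `_toy_sum` is a made-up function.
@_time_analyze_
def _toy_sum(n=100000):
    return sum(range(n))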
batch_size = 100000
train_data_dir = '~/Downloads/new_ele_power_cur_vol_weather.load.Three.bias.csv'
reader = pd.read_csv(train_data_dir, chunksize=batch_size,
dtype={'STAT_DATE': str})
print(type(reader))
def SimpleLabelEncoder():
le = preprocessing.LabelEncoder()
print(le.fit(["paris", "paris", "tokyo", "amsterdam"]))
# transform() raises ValueError on labels unseen during fit, so only fitted labels are used here
print(le.transform(["tokyo", "tokyo", "paris", "amsterdam"]))
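# Hedged aside (not from the original): LabelEncoder.transform raises on unseen labels, so a
# small guard like this hypothetical helper can map unknown values to a fallback code instead.
def _safe_label_transform(le, values, fallback=-1):
    known = set(le.classes_)
    return [int(le.transform([v])[0]) if v in known else fallback for v in values]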
def CustomLabelEncoder():
from sklearn.preprocessing import LabelEncoder
data_batch = next(reader)
print(data_batch.head(2))
X_feature = data_batch['STAT_DATE']
# bad
# X_feature = data_batch[['STAT_DATE', 'avg.IRRADIANCE']]
le = LabelEncoder()
f_b = le.fit_transform(X_feature)
print(f_b)
def OneHotEncoder():
pass
def Vectorizer():
from sklearn.feature_extraction import DictVectorizer
data_batch = next(reader)
X_feature = data_batch[['STAT_DATE']]  # keep a DataFrame so .T.to_dict().values() yields one dict per row for DictVectorizer
l_batch = data_batch['is_bias']
X_dict = X_feature.T.to_dict().values()
vect = DictVectorizer(sparse=False)
X_vector = vect.fit_transform(X_dict)
clf_p = ExtraTreesClassifier()
clf_p.fit(X_vector, l_batch)
print(clf_p.feature_importances_)
print(np.sum(clf_p.feature_importances_))
pass
def Dummy():
data_batch = next(reader)
# print(data_batch.head(5))
X_feature = data_batch[['STAT_DATE', 'energy_mean']]
l_batch = data_batch['is_bias']
DummyData = pd.get_dummies(X_feature)
from pathlib import Path
import pandas as pd
from openpyxl.styles import Font, Alignment
from openpyxl.formatting.rule import CellIsRule
from openpyxl.chart import BarChart, Reference
from openpyxl.chart.shapes import GraphicalProperties
from openpyxl.drawing.line import LineProperties
# Directory of this file
this_dir = Path(__file__).resolve().parent
# Read in all files
parts = []
for path in (this_dir / "sales_data").rglob("*.xls*"):
print(f'Reading {path.name}')
part = pd.read_excel(path)
parts.append(part)
# Combine the DataFrames from each file into a single DataFrame
df = pd.concat(parts)
import numpy as np
from scipy.interpolate import Rbf
import matplotlib.pyplot as plt
from matplotlib import cm
from math import sin, cos, atan, atan2, asin, radians, degrees, sqrt, pow, acos, fabs, tan, isnan
import numpy as np
import geopandas as gpd
from geopandas import GeoDataFrame
import pandas as pd
import os
from shapely.geometry import Polygon, LineString, Point
from map2loop import m2l_utils
import rasterio
######################################
# inspired by https://stackoverflow.com/questions/3104781/inverse-distance-weighted-idw-interpolation-with-python
#
# Simple Inverse Distance Weighting interpolation of observations z at x,y locations returned at locations defined by xi,yi arrays. From...
# scipy_idw(x, y, z, xi, yi)
# Args:
# x,y coordinates of points to be interpolated
# z value to be interpolated
# xi,yi grid of points where interpolation of z will be calculated - sci_py version of Simple Inverse Distance Weighting interpolation of observations z at x,y locations returned at locations defined by xi,yi arrays
#
# simple Inverse Distance Weighting calculation
######################################
def simple_idw(x, y, z, xi, yi):
dist = distance_matrix(x,y, xi,yi)
# In IDW, weights are 1 / distance
weights = 1.0 / (dist)
# Make weights sum to one
weights /= weights.sum(axis=0)
# Multiply the weights for each interpolated point by all observed Z-values
zi = np.dot(weights.T, z)
return zi
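# Illustrative usage only (not part of the map2loop workflow): interpolate a toy scalar field
# from four synthetic observations onto a small grid; every value below is made up.
def _demo_simple_idw():
    x = np.array([0.0, 1.0, 0.0, 1.0])
    y = np.array([0.0, 0.0, 1.0, 1.0])
    z = np.array([1.0, 2.0, 3.0, 4.0])
    xi, yi = np.meshgrid(np.linspace(0.1, 0.9, 5), np.linspace(0.1, 0.9, 5))
    return simple_idw(x, y, z, xi.flatten(), yi.flatten())  # 25 interpolated values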
######################################
# call scipy inverse distance weighting
#
# Simple Inverse Distance Weighting interpolation of observations z at x,y locations returned at locations defined by xi,yi arrays. From...
# scipy_idw(x, y, z, xi, yi)
# Args:
# x,y coordinates of points to be interpolated
# z value to be interpolated
# xi,yi grid of points where interpolation of z will be calculated - sci_py version of Simple Inverse Distance Weighting interpolation of observations z at x,y locations returned at locations defined by xi,yi arrays
#
######################################
def scipy_idw(x, y, z, xi, yi):
interp = Rbf(x, y, z, function='linear')
return interp(xi, yi)
######################################
# call scipy Radial basis function interpolation
#
# scipy_rbf(x, y, z, xi, yi)
# Args:
# x,y coordinates of points to be interpolated
# z value to be interpolated
# xi,yi grid of points where interpolation of z will be calculated
#
# sci_py version of Radial Basis Function interpolation of observations z at x,y locations returned at locations defined by xi,yi arraysplot(x,y,z,grid)
######################################
def scipy_rbf(x, y, z, xi, yi):
interp = Rbf(x, y, z,smooth=.5)
return interp(xi, yi)
######################################
# calculate all distances between to arrays of points
# Make a distance matrix between pairwise observations
# Note: from <http://stackoverflow.com/questions/1871536>
# (Yay for ufuncs!)
# distance_matrix(x0, y0, x1, y1)
# Args:
# x0,y0 array of point locations
# x1,y1 second array of point locations
#
# Returns array of distances between all points defined by arrays by x0,y0 and all points defined by arrays x1,y1 from http://stackoverflow.com/questions/1871536
######################################
def distance_matrix(x0, y0, x1, y1):
obs = np.vstack((x0, y0)).T
interp = np.vstack((x1, y1)).T
d0 = np.subtract.outer(obs[:,0], interp[:,0])
d1 = np.subtract.outer(obs[:,1], interp[:,1])
return np.hypot(d0, d1)
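# Shape note (added for clarity): with len(x0) == len(y0) == N observations and
# len(x1) == len(y1) == M query points, the result is an N x M array of pairwise distances.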
######################################
# plot an array of data
######################################
def plot(x,y,z,grid):
plt.figure()
plt.imshow(grid, extent=(0,100,0,100),origin='lower')
#plt.hold(True)
#plt.scatter(x,100-y,c=z)
plt.colorbar()
######################################
# interpolate three data arrays using various schemes
#
# call_interpolator(calc,x,y,l,m,n,xi,yi,nx,ny,fault_flag)
# Args:
# calc calculation mode, one of 'simple_idw', 'scipy_idw', 'scipy_rbf'
# l,m,n arrays of direction cosines of pole to plane
# xi,yi arrays of locations of interpolated locations (assumes a grid for plotting, otherwise doesn't matter)
# nx,ny number of x,y elemnts in grid
# fault_flag toggle whether calc for near-fault orientations or not
#
# Call interpolator defined by calc for arrays of arbitrary location x,y located observations as triple or double arrays of 3D or 2D direction cosine arrays (l,m,n) and returns grid of nx ,ny interpolated values for points defined by xi,yi locations. Inspired by https://stackoverflow.com/questions/3104781/inverse-distance-weighted-idw-interpolation-with-python
######################################
def call_interpolator(calc,x,y,l,m,n,xi,yi,nx,ny,fault_flag):
# Calculate IDW or other interpolators
if(calc=='simple_idw'):
ZIl = simple_idw(x,y,l,xi,yi)
if(calc=='scipy_rbf'):
ZIl = scipy_rbf(x,y,l,xi,yi)
if(calc=='scipy_idw'):
ZIl = scipy_idw(x,y,l,xi,yi)
if(not fault_flag):
ZIl = ZIl.reshape((ny, nx))
if(calc=='simple_idw'):
ZIm = simple_idw(x,y,m,xi,yi)
if(calc=='scipy_rbf'):
ZIm = scipy_rbf(x,y,m,xi,yi)
if(calc=='scipy_idw'):
ZIm = scipy_idw(x,y,m,xi,yi)
if(not fault_flag):
ZIm = ZIm.reshape((ny, nx))
if(type(n) is not int):
if(calc=='simple_idw'):
ZIn = simple_idw(x,y,n,xi,yi)
if(calc=='scipy_rbf'):
ZIn = scipy_rbf(x,y,n,xi,yi)
if(calc=='scipy_idw'):
ZIn = scipy_idw(x,y,n,xi,yi)
if(not fault_flag):
ZIn = ZIn.reshape((ny, nx))
else:
ZIn=0
return(ZIl,ZIm,ZIn)
######################################
# Interpolate dipd,dipdirection data from shapefile
#
# interpolate_orientations(structure_file,tmp_path,bbox,c_l,use_gcode,scheme,gridx,gridy,fault_flag)
# Args:
# structure_file path to orientation layer
# tmp_path directory of temporary outputs from m2l
# bbox bounding box of region of interest
# c_l dictionary of codes and labels specific to input geo information layers
# use_gcode list of groups whose orientation data will be interpolated
# scheme interpolation scheme one of 'simple_idw', 'scipy_idw', 'scipy_rbf'
# gridx,gridy number of cols & rows in interpolation grid
# fault_flag toggle whether calc for near-fault orientations or not
#
# Interpolate orientation layer to produce regular grid of l,m,n direction cosines
# Can choose between various RBF and IDW options
# The purpose of these interpolations and associated code is to help in three cases:
# -- Providing estimated dips and contacts in fault-bounded domains where no structural data are available
# -- Needed to estimate true thickness of formations
# -- Useful for poulating parts of maps where little structural data is available
######################################
def interpolate_orientations(structure_file,output_path,bbox,c_l,this_gcode,calc,gridx,gridy,fault_flag):
structure = gpd.read_file(structure_file,bbox=bbox)
if(len(this_gcode)==1):
is_gp=structure[c_l['g']] == this_gcode[0] # subset orientations to just those with this group
gp_structure = structure[is_gp]
gp_structure_all = gp_structure.copy() # same variable name as the multi-group branch, used below
#print('single group')
#display(gp_structure)
else:
#print('first code',this_gcode[0])
is_gp=structure[c_l['g']] == this_gcode[0] # subset orientations to just those with this group
gp_structure = structure[is_gp]
gp_structure_all = gp_structure.copy()
#print('first group')
#display(gp_structure)
for i in range (1,len(this_gcode)):
#print('next code',this_gcode[i])
is_gp=structure[c_l['g']] == this_gcode[i] # subset orientations to just those with this group
temp_gp_structure = structure[is_gp]
gp_structure_all = pd.concat([gp_structure_all, temp_gp_structure], ignore_index=True)
#print('next group')
#display(gp_structure)
npts = len(gp_structure_all)
if(fault_flag):
nx, ny = len(gridx),len(gridy)
else:
nx, ny = gridx,gridy
xi = np.linspace(bbox[0],bbox[2], nx)
yi = np.linspace(bbox[1],bbox[3], ny)
xi, yi = np.meshgrid(xi, yi)
xi, yi = xi.flatten(), yi.flatten()
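# xi, yi now hold the flattened coordinates of every node of the nx-by-ny interpolation grid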
x = np.zeros(npts)
y = np.zeros(npts)
dip = np.zeros(npts)
dipdir = np.zeros(npts)
i=0
for a_pt in gp_structure_all.iterrows():
x[i]=a_pt[1]['geometry'].x+(np.random.ranf()*0.01)
y[i]=a_pt[1]['geometry'].y+(np.random.ranf()*0.01)
dip[i] = a_pt[1][c_l['d']]
if(c_l['otype']=='strike'):
dipdir[i] = a_pt[1][c_l['dd']]+90
else:
dipdir[i] = a_pt[1][c_l['dd']]
i=i+1
l=np.zeros(npts)
m=np.zeros(npts)
n=np.zeros(npts)
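# convert each dip / dip-direction pair into direction cosines (l, m, n) of the pole to the plane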
for i in range(0,npts):
l[i],m[i],n[i]=m2l_utils.ddd2dircos(dip[i],dipdir[i])
if(fault_flag):
ZIl,ZIm,ZIn=call_interpolator(calc,x,y,l,m,n,gridx,gridy,nx,ny,fault_flag)
else:
ZIl,ZIm,ZIn=call_interpolator(calc,x,y,l,m,n,xi,yi,nx,ny,fault_flag)
# Comparisons...
if(not fault_flag):
plot(x,-y,l,ZIl)
plt.title('l')
plot(x,-y,m,ZIm)
plt.title('m')
plot(x,-y,n,ZIn)
plt.title('n')
plt.show()
if(fault_flag):
f=open(output_path+'f_input.csv','w')
fi=open(output_path+'f_interpolation_'+calc+'.csv','w')
fl=open(output_path+'f_interpolation_l.csv','w')
fm=open(output_path+'f_interpolation_m.csv','w')
fn=open(output_path+'f_interpolation_n.csv','w')
else:
f=open(output_path+'input.csv','w')
fi=open(output_path+'interpolation_'+calc+'.csv','w')
fl=open(output_path+'interpolation_l.csv','w')
fm=open(output_path+'interpolation_m.csv','w')
fn=open(output_path+'interpolation_n.csv','w')
f.write("x,y,dip,dipdirection\n")
fi.write("x,y,dip,dipdirection\n")
fl.write("x,y,l\n")
fm.write("x,y,m\n")
fn.write("x,y,n\n")
for i in range (0,npts):
ostr="{},{},{},{}\n"\
.format(x[i],y[i],int(dip[i]),int(dipdir[i]))
#ostr=str(x[i])+","+str(y[i])+","+str(int(dip[i]))+","+str(int(dipdir[i]))+'\n'
f.write(ostr)
if(fault_flag):
for i in range (0,len(gridx)):
L=ZIl[i]/(sqrt((pow(ZIl[i],2.0))+(pow(ZIm[i],2.0))+(pow(ZIn[i],2.0))))
M=ZIm[i]/(sqrt((pow(ZIl[i],2.0))+(pow(ZIm[i],2.0))+(pow(ZIn[i],2.0))))
N=ZIn[i]/(sqrt((pow(ZIl[i],2.0))+(pow(ZIm[i],2.0))+(pow(ZIn[i],2.0))))
dip,dipdir=m2l_utils.dircos2ddd(L,M,N)
if(isnan(dip) or isnan(dipdir)):
dip=dipdir=L=M=N=0
print("Warning, no interpolated value for element No. ",i)
ostr="{},{},{},{}\n"\
.format(gridx[i],gridy[i],int(dip),int(dipdir))
#ostr=str(gridx[i])+","+str(gridy[i])+","+str(int(dip))+","+str(int(dipdir))+'\n'
fi.write(ostr)
ostr="{},{},{}\n"\
.format(gridx[i],gridy[i],L)
#ostr=str(gridx[i])+","+str(gridy[i])+","+str(L)+'\n'
fl.write(ostr)
ostr="{},{},{}\n"\
.format(gridx[i],gridy[i],M)
#ostr=str(gridx[i])+","+str(gridy[i])+","+str(M)+'\n'
fm.write(ostr)
ostr="{},{},{}\n"\
.format(gridx[i],gridy[i],N)
#ostr=str(gridx[i])+","+str(gridy[i])+","+str(N)+'\n'
fn.write(ostr)
else:
for xx in range (0,gridx):
for yy in range (0,gridy):
yyy=xx
xxx=gridy-2-yy
L=ZIl[xxx,yyy]/(sqrt((pow(ZIl[xxx,yyy],2.0))+(pow(ZIm[xxx,yyy],2.0))+(pow(ZIn[xxx,yyy],2.0))))
M=ZIm[xxx,yyy]/(sqrt((pow(ZIl[xxx,yyy],2.0))+(pow(ZIm[xxx,yyy],2.0))+(pow(ZIn[xxx,yyy],2.0))))
N=ZIn[xxx,yyy]/(sqrt((pow(ZIl[xxx,yyy],2.0))+(pow(ZIm[xxx,yyy],2.0))+(pow(ZIn[xxx,yyy],2.0))))
dip,dipdir=m2l_utils.dircos2ddd(L,M,N)
if(isnan(dip) or isnan(dipdir)):
dip=dipdir=L=M=N=0
print("Warning, no interpolated value for grid point No. ",xx,',',yy)
ostr="{},{},{},{}\n"\
.format(bbox[0]+(xx*((bbox[2]-bbox[0])/gridx)),bbox[1]+((gridy-1-yy)*((bbox[3]-bbox[1])/gridy)),int(dip),int(dipdir))
#ostr=str(bbox[0]+(xx*((bbox[2]-bbox[0])/gridx)))+","+str(bbox[1]+((gridy-1-yy)*((bbox[3]-bbox[1])/gridy)))+","+str(int(dip))+","+str(int(dipdir))+'\n'
fi.write(ostr)
ostr="{},{},{}\n"\
.format(xx,yy,L)
#ostr=str(xx)+","+str(yy)+","+str(L)+'\n'
fl.write(ostr)
ostr="{},{},{}\n"\
.format(xx,yy,M)
#ostr=str(xx)+","+str(yy)+","+str(M)+'\n'
fm.write(ostr)
ostr="{},{},{}\n"\
.format(xx,yy,N)
#ostr=str(xx)+","+str(yy)+","+str(N)+'\n'
fn.write(ostr)
f.close()
fi.close()
fl.close()
fm.close()
fn.close()
if(fault_flag):
print("orientations interpolated as dip dip direction",output_path+'f_interpolation_'+calc+'.csv')
print("orientations interpolated as l,m,n dir cos",output_path+'f_interpolation_l.csv etc.')
else:
fig, ax = plt.subplots(figsize=(10, 10),)
q = ax.quiver(xi, yi, -ZIm, ZIl,headwidth=0)
plt.show()
print("orientations interpolated as dip dip direction",output_path+'interpolation_'+calc+'.csv')
print("orientations interpolated as l,m,n dir cos",output_path+'interpolation_l.csv etc.')
######################################
# Interpolate 2D contact data from shapefile
#
# interpolate_contacts(geology_file,tmp_path,dtm,bbox,c_l,use_gcode,scheme,gridx,gridy,fault_flag)
# Args:
# geology_file path to basal contacts layer
# tmp_path directory of temporary outputs from m2l
# dtm rasterio format elevation grid
# bbox bounding box of region of interest
# c_l dictionary of codes and labels specific to input geo information layers
# use_gcode list of groups whose contact data will be interpolated
# scheme interpolation scheme one of 'simple_idw', 'scipy_idw', 'scipy_rbf'
# gridx,gridy number of cols & rows in interpolation grid
# fault_flag toggle whether calc for near-fault orientations or not
#
# Interpolate basal contacts layer to produce regular grid of l,m direction cosines
######################################
def interpolate_contacts(geology_file,output_path,dtm,dtb,dtb_null,cover_map,bbox,c_l,use_gcode,calc,gridx,gridy,fault_flag):
geol_file = gpd.read_file(geology_file,bbox=bbox)
#print(len(geol_file))
#geol_file.plot( color='black',edgecolor='black')
# Setup: Generate data...
npts = 0
decimate=1
if(fault_flag):
nx, ny = len(gridx),len(gridy)
else:
nx, ny= gridx,gridy
xi = np.linspace(bbox[0],bbox[2], nx)
yi = np.linspace(bbox[1],bbox[3], ny)
xi, yi = np.meshgrid(xi, yi)
xi, yi = xi.flatten(), yi.flatten()
x = np.zeros(20000) ############## FUDGE ################
y = np.zeros(20000) # should go through geology file to see how many contact
l = np.zeros(20000) # segments will be made then define arrays?
m = np.zeros(20000) #####################################
if(fault_flag):
f=open(output_path+'f_raw_contacts.csv','w')
else:
f=open(output_path+'raw_contacts.csv','w')
f.write("X,Y,Z,angle,lsx,lsy,formation,group\n")
j=0
i=0
for indx,acontact in geol_file.iterrows(): #loop through distinct linestrings in MultiLineString
if(acontact.geometry.type=='MultiLineString'):
#print(i)
for line in acontact.geometry: # loop through line segments
#print(i,len(acontact.geometry))
if(m2l_utils.mod_safe(i,decimate) ==0 and acontact[c_l['g']] in use_gcode):
#if(acontact['id']==170):
#display(npts,line.coords[0][0],line.coords[1][0])
dlsx=line.coords[0][0]-line.coords[1][0]
dlsy=line.coords[0][1]-line.coords[1][1]
if(not line.coords[0][0]==line.coords[1][0] or not line.coords[0][1]==line.coords[1][1]):
lsx=dlsx/sqrt((dlsx*dlsx)+(dlsy*dlsy))
lsy=dlsy/sqrt((dlsx*dlsx)+(dlsy*dlsy))
x[i]=line.coords[1][0]+(dlsx/2)
y[i]=line.coords[1][1]+(dlsy/2)
angle=degrees(atan2(lsx,lsy))
l[i]=lsx
m[i]=lsy
locations=[(x[i],y[i])] #doesn't like point right on edge?
height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations)
if(str(acontact[c_l['g']])=='None'):
ostr=str(x[i])+","+str(y[i])+","+str(height)+","+str(angle%180)+","+str(lsx)+","+str(lsy)+","+acontact[c_l['c']].replace(" ","_").replace("-","_")+","+acontact[c_l['c']].replace(" ","_").replace("-","_")+"\n"
else:
ostr=str(x[i])+","+str(y[i])+","+str(height)+","+str(angle%180)+","+str(lsx)+","+str(lsy)+","+acontact[c_l['c']].replace(" ","_").replace("-","_")+","+acontact[c_l['g']].replace(" ","_").replace("-","_")+"\n"
f.write(ostr)
npts=npts+1
i=i+1
else:
#display(acontact.geometry,acontact.geometry.coords)
#for line in acontact: # loop through line segments in LineString
if( m2l_utils.mod_safe(i,decimate) ==0 and acontact[c_l['g']] in use_gcode):
dlsx=acontact.geometry.coords[0][0]-acontact.geometry.coords[1][0]
dlsy=acontact.geometry.coords[0][1]-acontact.geometry.coords[1][1]
if(not acontact.geometry.coords[0][0]==acontact.geometry.coords[1][0]
or not acontact.geometry.coords[0][1]==acontact.geometry.coords[1][1]):
lsx=dlsx/sqrt((dlsx*dlsx)+(dlsy*dlsy))
lsy=dlsy/sqrt((dlsx*dlsx)+(dlsy*dlsy))
x[i]=acontact.geometry.coords[1][0]+(dlsx/2)
y[i]=acontact.geometry.coords[1][1]+(dlsy/2)
angle=degrees(atan2(lsx,lsy))
l[i]=lsx
m[i]=lsy
locations=[(x[i],y[i])] #doesn't like point right on edge?
height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations)
if(str(acontact[c_l['g']])=='None'):
ostr=str(x[i])+","+str(y[i])+","+str(height)+","+str(angle%180)+","+str(lsx)+","+str(lsy)+","+acontact[c_l['c']].replace(" ","_").replace("-","_")+","+acontact[c_l['c']].replace(" ","_").replace("-","_")+"\n"
else:
ostr=str(x[i])+","+str(y[i])+","+str(height)+","+str(angle%180)+","+str(lsx)+","+str(lsy)+","+acontact[c_l['c']].replace(" ","_").replace("-","_")+","+acontact[c_l['g']].replace(" ","_").replace("-","_")+"\n"
#print(ostr)
f.write(ostr)
#print(npts,dlsx,dlsy)
npts=npts+1
i=i+1
j=j+1
f.close()
#print("i",i,"npts",npts)
for i in range(0,npts):
x[i]=x[i]+(np.random.ranf()*0.01)
y[i]=y[i]+(np.random.ranf()*0.01)
if(fault_flag):
ZIl,ZIm,ZIn=call_interpolator(calc,x[:npts],y[:npts],l[:npts],m[:npts],0,gridx,gridy,nx,ny,fault_flag)
else:
ZIl,ZIm,ZIn=call_interpolator(calc,x[:npts],y[:npts],l[:npts],m[:npts],0,xi,yi,nx,ny,fault_flag)
# Comparisons...
if(not fault_flag):
plot(x,-y,l,ZIl)
plt.title('l')
plot(x,-y,m,ZIm)
plt.title('m')
if(fault_flag):
fi=open(output_path+'f_interpolation_contacts_'+calc+'.csv','w')
fl=open(output_path+'f_interpolation_contacts_l.csv','w')
fm=open(output_path+'f_interpolation_contacts_m.csv','w')
else:
fi=open(output_path+'interpolation_contacts_'+calc+'.csv','w')
fl=open(output_path+'interpolation_contacts_l.csv','w')
fm=open(output_path+'interpolation_contacts_m.csv','w')
fi.write("x,y,angle\n")
fl.write("x,y,l\n")
fm.write("x,y,m\n")
if(fault_flag):
for i in range (0,len(gridx)):
L=ZIl[i]/(sqrt((pow(ZIl[i],2.0))+(pow(ZIm[i],2.0))))
M=ZIm[i]/(sqrt((pow(ZIl[i],2.0))+(pow(ZIm[i],2.0))))
S=degrees(atan2(L,M))
if(isnan(S)):
S=0
print("Warning, no interpolated value for element No. ",i)
ostr=str(gridx[i])+","+str(gridy[i])+","+str(int(S))+'\n'
fi.write(ostr)
ostr=str(gridx[i])+","+str(gridy[i])+","+str(L)+'\n'
fl.write(ostr)
ostr=str(gridx[i])+","+str(gridy[i])+","+str(M)+'\n'
fm.write(ostr)
else:
for xx in range (0,gridx):
for yy in range (0,gridy):
yyy=xx
xxx=gridy-2-yy
L=ZIl[xxx,yyy]/(sqrt((pow(ZIl[xxx,yyy],2.0))+(pow(ZIm[xxx,yyy],2.0))))
M=ZIm[xxx,yyy]/(sqrt((pow(ZIl[xxx,yyy],2.0))+(pow(ZIm[xxx,yyy],2.0))))
S=degrees(atan2(L,M))
ostr=str(bbox[0]+(xx*((bbox[2]-bbox[0])/(gridx))))+","+str(bbox[1]+((gridy-2-yy)*((bbox[3]-bbox[1])/(gridy))))+","+str(int(S))+'\n'
fi.write(ostr)
ostr=str(xx)+","+str(yy)+","+str(L)+'\n'
fl.write(ostr)
ostr=str(xx)+","+str(yy)+","+str(M)+'\n'
fm.write(ostr)
fi.close()
fl.close()
fm.close()
if(fault_flag):
print("contacts interpolated as strike",output_path+'f_interpolation_contacts_'+calc+'.csv')
print("contacts interpolated as l,m dir cos",output_path+'f_interpolation_contacts_l.csv etc.')
else:
fig, ax = plt.subplots(figsize=(10, 10))
q = ax.quiver(xi, yi, ZIl, ZIm,headwidth=0)
plt.show()
print("contacts interpolated as strike",output_path+'interpolation_contacts_'+calc+'.csv')
print("contacts interpolated as l,m dir cos",output_path+'interpolation_contacts_l.csv etc.')
######################################
# save all contacts as vectors (used for debugging)
#
# save_contact_vectors(geology_file,tmp_path,dtm,bbox,c_l,calc,decimate)
# Args:
# geology_file file path to geology polygons
# tmp_path directory of temporary outputs from m2l
# dtm rasterio format dtm raster
# bbox bounding box of model
# c_l dictionary of codes and labels specific to input geo information layers
# calc NOT USED
# decimate simple decimation factor for saving vectors
######################################
def save_contact_vectors(geology_file,tmp_path,dtm,dtb,dtb_null,cover_map,bbox,c_l,calc,decimate):
geol_file = gpd.read_file(geology_file,bbox=bbox)
print(len(geol_file))
#geol_file.plot( color='black',edgecolor='black')
npts = 0
i=0
for indx,acontact in geol_file.iterrows(): #loop through distinct linestrings in MultiLineString
if(acontact.geometry.type=='MultiLineString'):
for line in acontact.geometry: # loop through line segments
if(m2l_utils.mod_safe(i,decimate) ==0):
npoint=1
i=i+1
else:
if( m2l_utils.mod_safe(i,decimate) ==0):
npoint=1
i=i+1
x = np.zeros(i+1)
y = np.zeros(i+1)
l = np.zeros(i+1)
m = np.zeros(i+1)
f=open(tmp_path+'raw_contacts.csv','w')
f.write("X,Y,Z,angle,lsx,lsy,formation,group\n")
j=0
i=0
for indx,acontact in geol_file.iterrows(): #loop through distinct linestrings in MultiLineString
if(acontact.geometry.type=='MultiLineString'):
#print(i)
for line in acontact.geometry: # loop through line segments
#print(i,len(acontact.geometry))
if(m2l_utils.mod_safe(i,decimate) ==0):
#if(acontact['id']==170):
#display(npts,line.coords[0][0],line.coords[1][0])
dlsx=line.coords[0][0]-line.coords[1][0]
dlsy=line.coords[0][1]-line.coords[1][1]
if(not line.coords[0][0]==line.coords[1][0] or not line.coords[0][1]==line.coords[1][1]):
lsx=dlsx/sqrt((dlsx*dlsx)+(dlsy*dlsy))
lsy=dlsy/sqrt((dlsx*dlsx)+(dlsy*dlsy))
x[i]=line.coords[1][0]+(dlsx/2)
y[i]=line.coords[1][1]+(dlsy/2)
angle=degrees(atan2(lsx,lsy))
l[i]=lsx
m[i]=lsy
locations=[(x[i],y[i])] #doesn't like point right on edge?
height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations)
if(str(acontact[c_l['g']])=='None'):
ostr="{},{},{},{},{},{},{},{}\n"\
.format(x[i],y[i],height,angle%180,lsx,lsy,acontact[c_l['c']].replace(" ","_").replace("-","_"),acontact[c_l['c']].replace(" ","_").replace("-","_"))
#ostr=str(x[i])+","+str(y[i])+","+str(height)+","+str(angle%180)+","+str(lsx)+","+str(lsy)+","+acontact[c_l['c']].replace(" ","_").replace("-","_")+","+acontact[c_l['c']].replace(" ","_").replace("-","_")+"\n"
else:
ostr="{},{},{},{},{},{},{},{}\n"\
.format(x[i],y[i],height,angle%180,lsx,lsy,acontact[c_l['c']].replace(" ","_").replace("-","_"),acontact[c_l['g']].replace(" ","_").replace("-","_"))
#ostr=str(x[i])+","+str(y[i])+","+str(height)+","+str(angle%180)+","+str(lsx)+","+str(lsy)+","+acontact[c_l['c']].replace(" ","_").replace("-","_")+","+acontact[c_l['g']].replace(" ","_").replace("-","_")+"\n"
f.write(ostr)
npts=npts+1
i=i+1
else:
#display(acontact.geometry,acontact.geometry.coords)
#for line in acontact: # loop through line segments in LineString
if( m2l_utils.mod_safe(i,decimate) ==0):
dlsx=acontact.geometry.coords[0][0]-acontact.geometry.coords[1][0]
dlsy=acontact.geometry.coords[0][1]-acontact.geometry.coords[1][1]
if(not acontact.geometry.coords[0][0]==acontact.geometry.coords[1][0]
or not acontact.geometry.coords[0][1]==acontact.geometry.coords[1][1]):
lsx=dlsx/sqrt((dlsx*dlsx)+(dlsy*dlsy))
lsy=dlsy/sqrt((dlsx*dlsx)+(dlsy*dlsy))
x[i]=acontact.geometry.coords[1][0]+(dlsx/2)
y[i]=acontact.geometry.coords[1][1]+(dlsy/2)
angle=degrees(atan2(lsx,lsy))
l[i]=lsx
m[i]=lsy
locations=[(x[i],y[i])] #doesn't like point right on edge?
height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations)
if(str(acontact[c_l['g']])=='None'):
ostr="{},{},{},{},{},{},{},{}\n"\
.format(x[i],y[i],height,angle%180,lsx,lsy,acontact[c_l['c']].replace(" ","_").replace("-","_"),acontact[c_l['c']].replace(" ","_").replace("-","_"))
#ostr=str(x[i])+","+str(y[i])+","+str(height)+","+str(angle%180)+","+str(lsx)+","+str(lsy)+","+acontact[c_l['c']].replace(" ","_").replace("-","_")+","+acontact[c_l['c']].replace(" ","_").replace("-","_")+"\n"
else:
ostr="{},{},{},{},{},{},{},{}\n"\
.format(x[i],y[i],height,angle%180,lsx,lsy,acontact[c_l['c']].replace(" ","_").replace("-","_"),acontact[c_l['g']].replace(" ","_").replace("-","_"))
#ostr=str(x[i])+","+str(y[i])+","+str(height)+","+str(angle%180)+","+str(lsx)+","+str(lsy)+","+acontact[c_l['c']].replace(" ","_").replace("-","_")+","+acontact[c_l['g']].replace(" ","_").replace("-","_")+"\n"
#print(ostr)
f.write(ostr)
#print(npts,dlsx,dlsy)
npts=npts+1
i=i+1
j=j+1
f.close()
print(npts,'points saved to',tmp_path+'raw_contacts.csv')
####################################
# combine interpolated contact information (to provide l,m with interpolated dip,dipdirection data (to provide n)
#
# join_contacts_and_orientations(combo_file,geology_file,tmp_path,dtm_reproj_file,c_l,lo,mo,no,lc,mc,xy,dst_crs,bbox,fault_flag)
# combo_file path to temporary combined information geology_file path to basal contacts layer
# tmp_path directory of temporary outputs from m2l
# dtm_reproj_file path to reprojected dtm file
# c_l dictionary of codes and labels specific to input geo information layers
# lo,mo,no 3D direction cosines of interpolated orientations
# lc,mc 2D direction cosines of interpolated contacts
# xy interpolated orientations (used to get x,y locations only) dst_crs Coordinate Reference System of destination geotif (any length-based projection)
# bbox bounding box of region of interest
# fault_flag toggle whether calc for near-fault orientations or not
#
# Combine interpolation orientations with interpolated basal contacts layers to produce regular grid of interpolated dip, dip direction estimates
# Uses normalised direction cosines (l,m,n):
# -- l,m from RBF of basal contact orientations -- signs of l & m from misorientation with RBF of orientation data and -- n from RBF of orientation data
#
# Useful for adding data where no orientations are available (e.g. in fault bounded domains) and for calculating true thickness of layers. Assumes a 2D plane of data, but if 3D RBF was calulated and projected contact info was used it should apply with topography too.
####################################
def join_contacts_and_orientations(combo_file,geology_file,output_path,dtm_reproj_file,dtb,dtb_null,cover_map,c_l,lo,mo,no,lc,mc,xy,dst_crs,bbox,fault_flag):
f=open(combo_file,'w')
f.write('x,y,dip,dipdirection,misorientation,dotproduct\n')
for i in range(0,len(lc)):
#print(mc[i,2],lc[i,2],lo[i,2],mo[i,2],no[i,2])
scale=sqrt(1-pow(no[i,2],2)) #scaling contact dircos to *include* dip info
lcscaled=scale*-mc[i,2] #includes 90 rotation to account for orthogonality of contact and dip direction
mcscaled=scale*lc[i,2]
scale2=sqrt(pow(lo[i,2],2)+pow(mo[i,2],2)) #scaling dip dipdir dircos to *exclude* dip info
if(scale2>0.0):
loscaled=lo[i,2]/scale2
moscaled=mo[i,2]/scale2
else:
loscaled=0
moscaled=0
dotproduct=(-mc[i,2]*loscaled)+(lc[i,2]*moscaled) #includes 90 rotation to account for orthogonality of contact and dip direction
if(dotproduct<0):
lcscaled=-lcscaled
mcscaled=-mcscaled
misorientation=degrees(acos(dotproduct))
dip,dipdir=m2l_utils.dircos2ddd(lcscaled,mcscaled,no[i,2])
ostr="{},{},{},{},{},{}\n"\
.format(xy[i,0],xy[i,1],int(dip),int(dipdir),int(misorientation),dotproduct)
#ostr=str(xy[i,0])+','+str(xy[i,1])+','+str(int(dip))+','+str(int(dipdir))+','+str(int(misorientation))+','+str(dotproduct)+'\n'
f.write(ostr)
f.close()
geology = gpd.read_file(geology_file,bbox=bbox)
geology.crs=dst_crs
geology = m2l_utils.explode(geology)
data = pd.read_csv(combo_file)
geometry = [Point(xy) for xy in zip(data['x'], data['y'])]
gdf = GeoDataFrame(data, crs=dst_crs, geometry=geometry)
gdf.crs=dst_crs
print(gdf.crs,geology.crs)
structure_code = gpd.sjoin(gdf, geology, how="left", op="within")
dtm = rasterio.open(dtm_reproj_file)
if(fault_flag):
f=open(output_path+'f_combo_full.csv','w')
else:
f=open(output_path+'combo_full.csv','w')
f.write('X,Y,Z,azimuth,dip,polarity,formation\n')
last_code=''
for indx,a_point in structure_code.iterrows():
locations=[(a_point['x'],a_point['y'])]
height=m2l_utils.value_from_dtm_dtb(dtm,dtb,dtb_null,cover_map,locations)
ostr=str(a_point['x'])+','
ostr=ostr+str(a_point['y'])+','
ostr=ostr+str(height)+','+str(int(a_point['dipdirection']))+','
ostr=ostr+str(int(a_point['dip']))+',1,'
ostr=ostr+str(a_point[c_l['c']]).replace("-","_").replace(" ","_")+'\n'
if(not str(a_point[c_l['c']])=='nan' ):
f.write(ostr)
last_code=a_point[c_l['c']]
f.close()
if(fault_flag):
print("contacts and orientations interpolated as dip dip direction",output_path+'f_combo_full.csv')
else:
print("contacts and orientations interpolated as dip dip direction",output_path+'combo_full.csv')
######################################
# Interpolate dipd,dipdirection data from shapefile usin fold axial traces as additional constraints
# interpolate_orientations_with_fat(structure_file,output_path,bbox,c_l,this_gcode,calc,gridx,gridy)
# structure_file path to orientation layer
# output_path directory for outputs from m2l
# bbox bounding box of region of interest
# c_l dictionary of codes and labels specific to input geo information layers
# this_gcode list of groups whose orientation data will be interpolated
# calc interpolation scheme one of 'simple_idw', 'scipy_idw', 'scipy_rbf'
# gridx,gridy number of cols & rows in interpolation grid
#
# Interpolate orientation layer to produce regular grid of l,m,n direction cosines
# Can choose between various RBF and IDW options
# The purpose of these interpolations and associated code is to help in three cases:
# -- Providing estimated dips and contacts in fault-bounded domains where no structural data are available
# -- Needed to estimate true thickness of formations
# -- Useful for poulating parts of maps where little structural data is available
######################################
def interpolate_orientations_with_fat(structure_file,output_path,bbox,c_l,this_gcode,calc,gridx,gridy):
structure = gpd.read_file(structure_file,bbox=bbox)
fat_orientations=pd.read_csv(output_path+'fold_axial_trace_orientations2.csv',",")
# -*- coding: utf-8 -*-
"""
@author: hkaneko
"""
import sys
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from rdkit import Chem
from rdkit.Chem import AllChem, BRICS, Descriptors
from rdkit.ML.Descriptors import MoleculeDescriptors
import sample_functions
from sklearn import metrics
from sklearn import svm
from sklearn.cross_decomposition import PLSRegression
from sklearn.model_selection import cross_val_predict, GridSearchCV
# Set y_name below to one of 'boiling_point', 'logS', 'melting_point', 'pIC50', 'pIGC50'.
# The file descriptors_with_[y_name].csv is read in as the dataset and used for the calculation.
# If you change y_name to another name and prepare a descriptors_with_[y_name].csv file yourself
# with sample_program_6_8_0_csv.py or sample_program_6_8_0_sdf.py, in the same format as the
# other files, the calculation can be run in exactly the same way.
y_name = 'logS'
# 'boiling_point' : boiling-point dataset
# 'logS' : aqueous-solubility dataset
# 'melting_point' : melting-point dataset
# 'pIC50' : pharmacological-activity dataset
# 'pIGC50' : environmental-toxicity dataset
structures_name = 'file' # 'file' or 'brics' or 'r_group' or 'descriptors'
# 'file' : read chemical structures for prediction and estimate their y values.
# Set file_name_for_prediction to the file name of the structures to predict,
# either a csv or an sdf file. Samples: molecules_for_prediction.csv,
# molecules_for_prediction.sdf, molecules_estimated_pIC50_positive.csv.
#
# 'brics' : estimate y values of chemical structures generated with the BRICS algorithm.
# Set file_name_of_seed_structures to the dataset of seed structures used for generation.
#
# 'r_group' : randomly generate R-group chemical structures and estimate their y values.
# To generate the structures, set file_name_of_main_fragments to the main-scaffold fragment file
# and file_name_of_sub_fragments to the side-chain fragment file. Both must be SMILES files.
#
# 'descriptors' : read a descriptor dataset of the structures to predict and estimate their y values.
# Set file_name_of_descriptors_for_prediction to the csv file name of that dataset.
# This file must be computed beforehand with sample_program_6_8_6_descriotprs_for_prediction.py.
file_name_for_prediction = 'molecules_for_prediction.csv' # 'file', 'descriptors' file name of the dataset to predict
#file_name_for_prediction = 'molecules_estimated_pIC50_positive.csv' # 'file', 'descriptors' file name of the dataset to predict
file_name_of_seed_structures = 'molecules_with_{0}.csv'.format(y_name) # 'brics' file of seed structures for generation; a csv or sdf file
#file_name_of_seed_structures = 'molecules_for_prediction.csv' # 'brics' file of seed structures for generation; a csv or sdf file
file_name_of_main_fragments = 'sample_main_fragments.smi' # 'r_group' file of main-scaffold fragments; 'sample_main_fragments.smi' is provided as a sample
#file_name_of_main_fragments = 'sample_main_fragments_for_pIC50.smi' # 'r_group' file of main-scaffold fragments; 'sample_main_fragments.smi' is provided as a sample
file_name_of_sub_fragments = 'sample_sub_fragments.smi' # 'r_group' file of side-chain fragments; 'sample_sub_fragments.smi' is provided as a sample
number_of_generated_structures = 10000 # 'brics', 'r_group' number of chemical structures to generate
file_name_of_descriptors_for_prediction = 'descriptors_of_molecules_for_prediction.csv' # 'descriptors' file name of the descriptor dataset
method_name = 'svr' # 'pls' or 'svr'
ad_method_name = 'ensemble' # 'ensemble' or 'ocsvm' or 'no' (no applicability domain)
add_nonlinear_terms_flag = False # True (add squared and cross terms) or False (do not add them)
number_of_bins = 50 # number of histogram bins for the estimated y values
fold_number = 5 # N in N-fold cross-validation
max_number_of_principal_components = 30 # maximum number of principal components to use
svr_cs = 2 ** np.arange(-5, 11, dtype=float) # candidates for C
svr_epsilons = 2 ** np.arange(-10, 1, dtype=float) # candidates for epsilon
svr_gammas = 2 ** np.arange(-20, 11, dtype=float) # candidates for gamma
ocsvm_nu = 0.003 # nu in OCSVM: lower bound on the fraction of support vectors among the training samples
ocsvm_gammas = 2 ** np.arange(-20, 11, dtype=float) # candidates for gamma
number_of_submodels = 50 # number of submodels
rate_of_selected_x_variables = 0.8 # fraction of explanatory variables selected in each sub-dataset; greater than 0 and less than 1
if structures_name != 'file' and structures_name != 'brics' and structures_name != 'r_group' and structures_name != 'descriptors':
sys.exit('There is no structure source (or generation mode) for prediction called \'{0}\'. Please check structures_name.'.format(structures_name))
if method_name != 'pls' and method_name != 'svr':
sys.exit('There is no regression method called \'{0}\'. Please check method_name.'.format(method_name))
if ad_method_name != 'ensemble' and ad_method_name != 'ocsvm' and ad_method_name != 'no':
sys.exit('There is no applicability-domain (AD) method called \'{0}\'. Please check ad_method_name.'.format(ad_method_name))
dataset = pd.read_csv('descriptors_with_{0}.csv'.format(y_name), index_col=0) # read the dataset of property/activity values and descriptors
y = dataset.iloc[:, 0]
original_x = dataset.iloc[:, 1:]
original_x = original_x.replace(np.inf, np.nan).fillna(np.nan) # replace inf with NaN
nan_variable_flags = original_x.isnull().any() # variables that contain NaN
original_x = original_x.drop(original_x.columns[nan_variable_flags], axis=1) # drop variables that contain NaN
# Drop explanatory variables whose standard deviation is 0
std_0_variable_flags = original_x.std() == 0
original_x = original_x.drop(original_x.columns[std_0_variable_flags], axis=1)
if add_nonlinear_terms_flag:
x = pd.read_csv('x_{0}.csv'.format(y_name), index_col=0) # read the dataset of property/activity values and descriptors
# x = sample_functions.add_nonlinear_terms(x) # add squared and cross terms of the explanatory variables
# Drop explanatory variables whose standard deviation is 0
std_0_nonlinear_variable_flags = x.std() == 0
x = x.drop(x.columns[std_0_nonlinear_variable_flags], axis=1)
else:
x = original_x.copy()
# Autoscaling (standardize each variable to zero mean and unit variance)
autoscaled_original_x = (original_x - original_x.mean()) / original_x.std()
autoscaled_x = (x - x.mean()) / x.std()
autoscaled_y = (y - y.mean()) / y.std()
if ad_method_name == 'ocsvm':
# Optimize gamma by maximizing the variance of the Gram matrix
optimal_ocsvm_gamma = sample_functions.gamma_optimization_with_variance(autoscaled_x, ocsvm_gammas)
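# (Hedged aside, not the original sample_functions implementation) the gamma optimization used
# above typically has this form: choose the RBF gamma whose Gram matrix has the largest variance.
# def gamma_optimization_with_variance(x, gammas):
#     sq_dist = ((x.values[:, np.newaxis, :] - x.values[np.newaxis, :, :]) ** 2).sum(axis=2)
#     variances = [np.exp(-gamma * sq_dist).var(ddof=1) for gamma in gammas]
#     return gammas[int(np.argmax(variances))]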
if method_name == 'pls':
# Optimize the number of components by cross-validation
components = [] # empty list that will collect the candidate numbers of components
r2_in_cv_all = [] # empty list that will collect the cross-validated r2 for each number of components
for component in range(1, min(np.linalg.matrix_rank(autoscaled_x), max_number_of_principal_components) + 1):
# PLS
model = PLSRegression(n_components=component) # declare the PLS model
estimated_y_in_cv = pd.DataFrame(cross_val_predict(model, autoscaled_x, autoscaled_y,
cv=fold_number)) # compute the cross-validated estimates and convert them to a DataFrame
estimated_y_in_cv = estimated_y_in_cv * y.std() + y.mean() # undo the autoscaling
r2_in_cv = metrics.r2_score(y, estimated_y_in_cv) # compute r2
print(component, r2_in_cv) # show the number of components and r2
r2_in_cv_all.append(r2_in_cv) # append r2
components.append(component) # append the number of components
# Plot the cross-validated r2 against the number of components; the maximum gives the optimal number
optimal_component_number = sample_functions.plot_and_selection_of_hyperparameter(components, r2_in_cv_all,
'number of components',
'cross-validated r2')
print('\nOptimal number of components from CV :', optimal_component_number)
# PLS
model = PLSRegression(n_components=optimal_component_number) # declare the model
elif method_name == 'svr':
# Optimize gamma by maximizing the variance of the Gram matrix
if ad_method_name == 'ocsvm':
optimal_svr_gamma = optimal_ocsvm_gamma.copy()
else:
optimal_svr_gamma = sample_functions.gamma_optimization_with_variance(autoscaled_x, svr_gammas)
# Optimize epsilon by cross-validation
model_in_cv = GridSearchCV(svm.SVR(kernel='rbf', C=3, gamma=optimal_svr_gamma), {'epsilon': svr_epsilons},
cv=fold_number, verbose=2)
model_in_cv.fit(autoscaled_x, autoscaled_y)
optimal_svr_epsilon = model_in_cv.best_params_['epsilon']
# Optimize C by cross-validation
model_in_cv = GridSearchCV(svm.SVR(kernel='rbf', epsilon=optimal_svr_epsilon, gamma=optimal_svr_gamma),
{'C': svr_cs}, cv=fold_number, verbose=2)
model_in_cv.fit(autoscaled_x, autoscaled_y)
optimal_svr_c = model_in_cv.best_params_['C']
# Optimize gamma by cross-validation
model_in_cv = GridSearchCV(svm.SVR(kernel='rbf', epsilon=optimal_svr_epsilon, C=optimal_svr_c),
{'gamma': svr_gammas}, cv=fold_number, verbose=2)
model_in_cv.fit(autoscaled_x, autoscaled_y)
optimal_svr_gamma = model_in_cv.best_params_['gamma']
# Optimized C, epsilon, gamma
print('C : {0}\nε : {1}\nGamma : {2}'.format(optimal_svr_c, optimal_svr_epsilon, optimal_svr_gamma))
# SVR
model = svm.SVR(kernel='rbf', C=optimal_svr_c, epsilon=optimal_svr_epsilon, gamma=optimal_svr_gamma) # declare the model
model.fit(autoscaled_x, autoscaled_y) # train the model
if method_name == 'pls':
# Standard regression coefficients
standard_regression_coefficients = pd.DataFrame(model.coef_, index=x.columns,
columns=['standard_regression_coefficients'])
standard_regression_coefficients.to_csv(
'pls_standard_regression_coefficients.csv') # save to a csv file; note that an existing file with the same name will be overwritten
# Chemical structures for prediction
if structures_name == 'file': # read the chemical structures to predict
if file_name_for_prediction[-4:] == '.csv':
dataset_prediction = pd.read_csv(file_name_for_prediction, index_col=0) # read the SMILES-annotated dataset
smiles_prediction = dataset_prediction.iloc[:, 0] # SMILES of the molecules
print('Number of molecules :', len(smiles_prediction))
molecules_prediction = [Chem.MolFromSmiles(smiles_i) for smiles_i in smiles_prediction]
elif file_name_for_prediction[-4:] == '.sdf':
molecules_prediction = Chem.SDMolSupplier(file_name_for_prediction) # read the sdf file
print('Number of molecules :', len(molecules_prediction))
elif structures_name == 'brics': # generate chemical structures with the BRICS algorithm
if file_name_of_seed_structures[-4:] == '.csv': # read the molecules as SMILES
dataset_seed = pd.read_csv(file_name_of_seed_structures, index_col=0)
smiles_seed = dataset_seed.iloc[:, 0] # SMILES of the molecules
molecules = [Chem.MolFromSmiles(smiles_i) for smiles_i in smiles_seed]
elif file_name_of_seed_structures[-4:] == '.sdf': # read the molecules from an SDF file
molecules = Chem.SDMolSupplier(file_name_of_seed_structures)
# Conversion to fragments
print('Number of molecules read :', len(molecules))
print('Decomposing into fragments')
fragments = set()
for molecule in molecules:
fragment = BRICS.BRICSDecompose(molecule, minFragmentSize=1)
fragments.update(fragment)
print('Number of generated fragments :', len(fragments))
generated_structures = BRICS.BRICSBuild([Chem.MolFromSmiles(fragment) for fragment in fragments])
# Store the generated molecules in a list
molecules_prediction = []
for index, generated_structure in enumerate(generated_structures):
print(index + 1, '/', number_of_generated_structures)
generated_structure.UpdatePropertyCache(True)
AllChem.Compute2DCoords(generated_structure)
molecules_prediction.append(generated_structure)
if index + 1 >= number_of_generated_structures:
break
elif structures_name == 'r_group': # generate R-group chemical structures
print('Starting chemical structure generation')
smiles_prediction = sample_functions.structure_generation_based_on_r_group_random(file_name_of_main_fragments,
file_name_of_sub_fragments,
number_of_generated_structures)
molecules_prediction = [Chem.MolFromSmiles(smiles_i) for smiles_i in smiles_prediction]
if structures_name == 'descriptors':
original_x_prediction = pd.read_csv(file_name_of_descriptors_for_prediction, index_col=0) # read the descriptor dataset of the structures to predict
else:
# Compute the descriptors
print('Starting descriptor calculation')
# Get the names of the descriptors to compute
descriptor_names = []
for descriptor_information in Descriptors.descList:
descriptor_names.append(descriptor_information[0])
print('Number of descriptors to compute :', len(descriptor_names))
descriptor_calculator = MoleculeDescriptors.MolecularDescriptorCalculator(descriptor_names)
# For each molecule, append the computed descriptor values and its SMILES to the lists
descriptors_of_molecules_prediction, smiles_prediction = [], []
for index, molecule_prediction in enumerate(molecules_prediction):
print(index + 1, '/', len(molecules_prediction))
if molecule_prediction is not None:
smiles_prediction.append(Chem.MolToSmiles(molecule_prediction))
AllChem.Compute2DCoords(molecule_prediction)
descriptors_of_molecules_prediction.append(descriptor_calculator.CalcDescriptors(molecule_prediction))
if structures_name == 'file' and file_name_for_prediction[-4:] == '.csv':
original_x_prediction = pd.DataFrame(descriptors_of_molecules_prediction, index=dataset_prediction.index, columns=descriptor_names)
else:
original_x_prediction = pd.DataFrame(descriptors_of_molecules_prediction, index=smiles_prediction, columns=descriptor_names)
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os
import glob
import subprocess
from libraries.lib_percentiles import *
from libraries.lib_gtap_to_final import gtap_to_final
from libraries.lib_common_plotting_functions import greys, quint_colors, quint_labels
from libraries.lib_country_params import get_FD_scale_fac,iso_to_name
from libraries.lib_get_hh_survey import get_hh_survey#, get_miembros_hogar
from libraries.lib_survey_categories import get_dict_gtap_to_final
from libraries.lib_results_to_excel import save_to_results_file
from matplotlib.ticker import FormatStrFormatter
import matplotlib as mpl
mpl.rcParams['hatch.linewidth'] = 0.2
import seaborn as sns
div_pal = sns.color_palette('BrBG', n_colors=11)
def plot_expenditures_by_category(pais,hies_FD,hies_FD_tot):
out_dir = 'output/'
if pais == 'brb': out_dir = '/Users/brian/Desktop/Dropbox/IDB/Barbados/output/'
####################
# Plot expenditures by category
# --> as fraction of total expenditures
hies_FD = hies_FD.reset_index().set_index(['cod_hogar','quintile'])
hies_FD_tot = hies_FD_tot.reset_index().set_index(['cod_hogar','quintile'])
final_FD_quints = pd.DataFrame(index=hies_FD_tot.sum(level='quintile').index).sort_index()
# Reset df
do_not_plot = []
plt.figure(figsize=(6,6))
fdict = get_dict_gtap_to_final()
for _h in fdict:
hies_FD_tot[_h] = hies_FD[[fdict[_h][1]]].sum(axis=1)
final_FD_quints[_h] = 100.*(hies_FD_tot[['hhwgt',_h]].prod(axis=1)/hies_FD_tot['totex_hh']).sum(level='quintile')/hies_FD_tot['hhwgt'].sum(level='quintile')
_ = final_FD_quints.T.copy()
_.columns = ['Q1','Q2','Q3','Q4','Q5']
##########################################################################################
# Record sample (all countries) stats in out_dir+'all_countries/hh_expenditures_table.csv'
try: hhexp = pd.read_csv(out_dir+'all_countries/hh_expenditures_table.csv').set_index('category')
except: hhexp = pd.DataFrame({pais.upper():0,'category':[fdict[i][1] for i in fdict]},index=None).set_index('category')
for _ex in fdict:
hhexp.loc[fdict[_ex][1],pais.upper()] = _.loc[_ex].mean()
try: hhexp.to_csv(out_dir+'all_countries/hh_expenditures_table.csv')
except: pass
##########################################################################################
##########################################################################################
# Record sample (all countries) stats in out_dir+'all_countries/hh_regressivity_table.csv'
for _q in ['Q1','Q2','Q3','Q4']:
try: hhreg = pd.read_csv(out_dir+'all_countries/hh_regressivity_table_'+_q+'.csv').set_index('category')
except: hhreg = pd.DataFrame({pais.upper():0,'category':[fdict[i][1] for i in fdict]},index=None).set_index('category')
for _ex in fdict:
hhreg.loc[fdict[_ex][1],pais.upper()] = _.loc[_ex,'Q1']/_.loc[_ex,'Q5']
try: hhreg.to_csv(out_dir+'all_countries/hh_regressivity_table_'+_q+'.csv')
except: pass
##########################################################################################
_ = _[['Q1','Q5']].T.sort_values(by='Q1',axis=1)
null_col = []
for _c in _:
if round(_[_c].mean(),1)==0: null_col.append(_c)
if _[_c].mean()<0.1: do_not_plot.append(_c)
_ = _.drop(null_col,axis=1)
final_FD_quints.to_csv(out_dir+'expenditures/'+pais+'_gasto_by_cat_and_quint.csv')
col_wid=_.shape[1]/2
ax = plt.barh(np.arange(0,_.shape[1],1)*col_wid,_.iloc[0],color=sns.color_palette('BrBG', n_colors=11)[2],height=2.5)
plt.barh(np.arange(0,_.shape[1],1)*col_wid+2.5,_.iloc[1],color=sns.color_palette('BrBG', n_colors=11)[8],height=2.5)
plt.gca().grid(False)
sns.despine(bottom=True)
plt.gca().set_yticks(np.arange(0,_.shape[1],1)*col_wid+1)
plt.gca().set_yticklabels([fdict[_h][1] for _h in _.columns],ha='right',fontsize=10,weight='light',color=greys[7])
plt.gca().set_xticklabels([])
ax = plt.gca()
_y = [0.,0.]
rects = ax.patches
for rect in rects:
if (rect.get_y()+rect.get_height()/2.) > _y[0]:
_y.append(rect.get_y()+rect.get_height()/2.);_y.sort();_y.pop(0)
for rect in rects:
_w = rect.get_width()
pct = ''
if (rect.get_y()+rect.get_height()/2.) in _y: pct = '%'
ax.annotate(str(round(_w,1))+pct,xy=(rect.get_x()+rect.get_width()+0.5, rect.get_y()+rect.get_height()/2.-0.1),
ha='left', va='center',color=greys[7],fontsize=7,zorder=100,clip_on=False,style='italic')
ax.annotate('Wealthiest quintile',xy=(0.8,_y[1]),ha='left',va='center',color=greys[0],fontsize=7,zorder=100,style='italic')
ax.annotate('Poorest quintile',xy=(0.8,_y[0]),ha='left',va='center',color=greys[7],fontsize=7,zorder=100,style='italic')
plt.title('Household expenditures in '+iso_to_name[pais],weight='bold',color=greys[7],fontsize=12,loc='right')
plt.draw()
try:
plt.gcf().savefig(out_dir+'expenditures/'+pais+'_gastos_all_categories.pdf',format='pdf',bbox_inches='tight')
plt.gcf().savefig(out_dir+'expenditures/'+pais+'_gastos_all_categories.png',format='png',bbox_inches='tight')
except: pass
plt.cla(); plt.close('all')
return hies_FD,hies_FD_tot,null_col
def plot_gtap_exp(pais,do_tax_food=True,verbose=False):
out_dir = 'output/'
if pais == 'brb': out_dir = '/Users/brian/Desktop/Dropbox/IDB/Barbados/output/'
############################
# Kuishuang's code (mostly):
# load household survey data
hh_hhsector = get_hh_survey(pais)
hh_hhsector = hh_hhsector.drop([i for i in hh_hhsector.columns if 'ing' in i or 'ict' in i],axis=1)
#hh_hhsector = hh_hhsector.fillna(1E5)#flag
if verbose: print(hh_hhsector.shape)
# load bridge matrix
xl = pd.ExcelFile('consumption_and_household_surveys/2017-10-13/Bridge_matrix_consumption_items_to_GTAP_power_sectors.xlsx')
if pais in xl.sheet_names: # all sheet names
print('using '+pais+' tab')
bridge_to_use = xl.parse(pais).fillna(0).drop(['Item_english'],axis = 1).set_index('Item') # read the specific sheet
else:
if verbose: print('using default tab')
bridge_to_use = xl.parse('nae_of_default_tab').fillna(0).drop(['Item_english'],axis = 1).set_index('Item')
cols_to_drop = []
for i in bridge_to_use.columns:
if verbose: print(i,bridge_to_use[i].sum())
if bridge_to_use[i].sum(axis=0)==0:
cols_to_drop.append(i)
bridge_to_use = bridge_to_use.drop(cols_to_drop,axis=1)
# household survey in GTAP sectors
hh_gtap_sector = hh_hhsector[bridge_to_use.index].fillna(0).dot(bridge_to_use)
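# ^ bridge_to_use maps survey items to GTAP power sectors, so the dot product reallocates each
# household's per-item spending into sector spending: (hh x items) . (items x sectors) -> (hh x sectors).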
hh_gtap_sector = hh_gtap_sector.reset_index()
try: hh_gtap_sector['cod_hogar'] = hh_gtap_sector['cod_hogar'].astype('int')
except: hh_gtap_sector['cod_hogar'] = hh_gtap_sector['cod_hogar'].astype('str')
hh_gtap_sector = hh_gtap_sector.reset_index().set_index('cod_hogar')
## Run test.
#print(hh_hhsector.columns)
#print(hh_hhsector.head())
#_hh_hhsector = hh_hhsector.copy()
#for _c in _hh_hhsector.columns:
# if _c != 'gasto_ali':#and _c != 'gasto_alihogar':
# _hh_hhsector[_c] = 0
#_hh_gtap_sector = _hh_hhsector[bridge_to_use.index].fillna(0).dot(bridge_to_use)
if verbose: print(hh_gtap_sector.head(8))
# calculate each household's share of national consumption, by category
hh_share = (hh_gtap_sector.mul(hh_hhsector.factor_expansion, axis=0).fillna(0))/(hh_gtap_sector.mul(hh_hhsector.factor_expansion, axis=0).fillna(0).sum())
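# ^ numerator: spending scaled up by the survey weight (factor_expansion); denominator: the
# resulting national total per sector, so each column of hh_share sums to 1 (household h's
# share of national consumption in that sector).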
# Read household consumption vector from GTAP
_iot_code = pais if pais != 'brb' else 'xcb'
try:
hh_fd_file = 'GTAP_power_IO_tables_with_imports/Household_consumption_both_domestic_import.xlsx'
household_FD = get_FD_scale_fac(pais)*pd.read_excel(hh_fd_file,index_col=[0])[_iot_code].squeeze()
except:
if pais == 'brb': household_FD = get_FD_scale_fac(pais)*pd.read_excel('GTAP_power_IO_tables/xcbIOT.xlsx',sheet_name='Final_Demand',index_col=[0])['Hou'].squeeze()
else: assert(False)
# ^ get_FD_scale_fac(pais) != 1. ONLY IF pais == 'brb'
# Final demand matrix
hh_FD = household_FD*hh_share.fillna(0)
for i in hh_FD.columns: hh_FD[i]/=hh_hhsector['factor_expansion']
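# ^ hh_FD distributes the national household FD vector across households in proportion to hh_share
# (weighted totals); dividing by factor_expansion then converts it back to per-household spending.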
if verbose:
print(household_FD.head())
print(hh_FD.head(5))
####################
# Use gtap_to_final script to translate both expenditures & cc into HIES cats
hies_FD, hies_FD_tot, hies_sf = gtap_to_final(hh_hhsector,hh_FD,pais,verbose=True)
# Now, this df should be consistent with the FD vector
if verbose:
print((hh_FD.sum(axis=1)*hh_hhsector['factor_expansion']).sum())
print(hies_FD_tot[['totex_hh','hhwgt']].prod(axis=1).sum())
print('FD:',round(hies_FD_tot[['totex_hh','hhwgt']].prod(axis=1).sum(),3),round((hh_FD.sum(axis=1)*hh_hhsector['factor_expansion']).sum(),3))
assert(hies_FD_tot[['totex_hh','hhwgt']].prod(axis=1).sum()/(hh_FD.sum(axis=1)*hh_hhsector['factor_expansion']).sum()>0.999)
assert(hies_FD_tot[['totex_hh','hhwgt']].prod(axis=1).sum()/(hh_FD.sum(axis=1)*hh_hhsector['factor_expansion']).sum()<1.001)
####################
####################
if pais == 'brb':
energy_tax_total = get_FD_scale_fac(pais)*pd.read_csv('/Users/brian/Desktop/Dropbox/IDB/Barbados/output/tax_cost_to_hh_in_gtap_cats.csv').set_index('cod_hogar')
final_CC,wgts,_ = gtap_to_final(hh_hhsector,energy_tax_total,pais)
hhwgts = wgts[['pcwgt','hhwgt','hhsize']].copy().dropna()
final_CC_ind = final_CC.copy()
final_CC_CO2 = final_CC.copy()
final_CC_nonCO2 = final_CC.copy()
for col in final_CC_nonCO2.columns: final_CC_nonCO2[col].values[:] = 0
final_CC_dir = final_CC.copy()
for col in final_CC_dir.columns: final_CC_dir[col].values[:] = 0
#print(hhwgts.shape[0],hhwgts.dropna().shape[0])
# HACK: ^ should be no NAs in this df
else:
# Indirect carbon costs - CO2
ccdf_ind_CO2 = get_FD_scale_fac(pais)*pd.read_csv(out_dir+'carbon_cost/CC_per_hh_indirect_'+pais+'_CO2.csv').set_index('cod_hogar')
# Indirect carbon costs - non-CO2
ccdf_ind_nonCO2 = get_FD_scale_fac(pais)*pd.read_csv(out_dir+'carbon_cost/CC_per_hh_indirect_'+pais+'_nonCO2.csv').set_index('cod_hogar')
# Indirect carbon costs (allGHG)
ccdf_ind = get_FD_scale_fac(pais)*pd.read_csv(out_dir+'carbon_cost/CC_per_hh_indirect_'+pais+'_allGHG.csv').set_index('cod_hogar')
# Direct carbon costs (allGHG)
ccdf_dir = get_FD_scale_fac(pais)*pd.read_csv(out_dir+'carbon_cost/CC_per_hh_direct_'+pais+'_allGHG.csv').set_index('cod_hogar')
# ^ these files are per household (multiply by factor_expansion for total)
# HACK
_bypass = pd.DataFrame(index=ccdf_ind.index.copy())
hacker_dict = {'col':['frac_gas'],
'gtm':['frac_gas'],
'pan':['frac_gas'],
'hnd':['frac_gas'],
'nic':['frac_gas','frac_water'],
'pry':['frac_gas','frac_electricity']}
if pais in hacker_dict:
for _set in hacker_dict[pais]:
_gtap_cols = get_dict_gtap_to_final()[_set][0]
_i = [i for i in _gtap_cols if i in ccdf_ind.columns]
_d = [d for d in _gtap_cols if d in ccdf_dir.columns]
_bypass[_set] = ccdf_ind[_i].sum(axis=1) + ccdf_dir[_d].sum(axis=1)
_bypass[_set] *= hh_hhsector['factor_expansion']
try:
ccdf_ind_CO2[_i] = [0,0]
ccdf_ind_nonCO2[_i] = [0,0]
ccdf_ind[_i] = [0,0]
except: ccdf_ind_CO2[_i],ccdf_ind_nonCO2[_i],ccdf_ind[_i] = 0,0,0
try: ccdf_dir[_d] = [0,0]
except: ccdf_dir[_d] = 0
_bypass = _bypass.sum()*1E-6*get_FD_scale_fac(pais)
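# ^ HACK bypass: for the countries listed in hacker_dict, these sectors' direct+indirect carbon
# costs are zeroed out of the per-sector frames and kept aside here as national totals (x1e-6,
# matching the /1E6 applied to table_value_n below); they are substituted into the price-increase
# table whenever the survey-based numerator comes out zero.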
if not do_tax_food:
ccdf_ind_CO2[['pdr','wht','gro','v_f','osd','c_b','ocr','ctl','oap','rmk','fsh','cmt','omt','vol','mil','pcr','sgr','ofd','b_t']] = 0
ccdf_ind_nonCO2[['pdr','wht','gro','v_f','osd','c_b','ocr','ctl','oap','rmk','fsh','cmt','omt','vol','mil','pcr','sgr','ofd','b_t']] = 0
ccdf_ind[['pdr','wht','gro','v_f','osd','c_b','ocr','ctl','oap','rmk','fsh','cmt','omt','vol','mil','pcr','sgr','ofd','b_t']] = 0
# No food categories in ccdf_dir
final_CC_ind,wgts,_ = gtap_to_final(hh_hhsector,ccdf_ind,pais)
final_CC_dir,wgts,_ = gtap_to_final(hh_hhsector,ccdf_dir,pais)
final_CC = final_CC_ind + final_CC_dir
#final_CC_tot = final_CC_ind_tot + final_CC_dir_tot
final_CC_ind_CO2,wgts,_ = gtap_to_final(hh_hhsector,ccdf_ind_CO2,pais)
final_CC_CO2 = final_CC_ind_CO2 + final_CC_dir
#final_CC_tot_CO2 = final_CC_ind_tot_CO2 + final_CC_dir_tot
final_CC_nonCO2,wgts,_ = gtap_to_final(hh_hhsector,ccdf_ind_nonCO2,pais)
hhwgts = wgts[['pcwgt','hhwgt','hhsize']].copy()
if verbose:
#print('FD:',round(hhwgts[['totex_hh','hhwgt']].prod(axis=1).sum(),1),round((hh_FD.sum(axis=1)*hh_hhsector['factor_expansion']).sum(),3))
print('Direct costs:',round((final_CC_dir.sum(axis=1)*hh_hhsector['factor_expansion']).sum(),1),
round((ccdf_dir.sum(axis=1)*hh_hhsector['factor_expansion']).sum(),1))
print('Indirect cost:',round((final_CC_ind.sum(axis=1)*hh_hhsector['factor_expansion']).sum(),1),
round((ccdf_ind.sum(axis=1)*hh_hhsector['factor_expansion']).sum(),1))
assert((final_CC_dir.sum(axis=1)*hhwgts['hhwgt']).sum()/(ccdf_dir.sum(axis=1)*hh_hhsector['factor_expansion']).sum()>0.99)
assert((final_CC_dir.sum(axis=1)*hhwgts['hhwgt']).sum()/(ccdf_dir.sum(axis=1)*hh_hhsector['factor_expansion']).sum()<1.01)
assert((final_CC_ind.sum(axis=1)*hhwgts['hhwgt']).sum()/(ccdf_ind.sum(axis=1)*hh_hhsector['factor_expansion']).sum()>0.99)
assert((final_CC_ind.sum(axis=1)*hhwgts['hhwgt']).sum()/(ccdf_ind.sum(axis=1)*hh_hhsector['factor_expansion']).sum()<1.01)
# 5 dataframes with results in them
# --> final_CC
# --> final_CC_CO2 & final_CC_nonCO2
# --> final_CC_ind & final_CC_dir
#hhwgts = wgts[['pcwgt','hhwgt','hhsize']].copy()
# ^ plus this, with necessary weights
#########################
# Assign decile based on totex (household expenditures, mapped to gtap)
hies_FD_tot['pais'] = pais
if 'quintile' not in hies_FD_tot.columns:
_deciles=np.arange(0.10, 1.01, 0.10)
_quintiles=np.arange(0.20, 1.01, 0.20)
hies_FD_tot = hies_FD_tot.groupby('pais',sort=True).apply(lambda x:match_percentiles(x,perc_with_spline(reshape_data(x.totex_pc),reshape_data(x.pcwgt),_deciles),'decile','totex_pc'))
hies_FD_tot = hies_FD_tot.groupby('pais',sort=True).apply(lambda x:match_percentiles(x,perc_with_spline(reshape_data(x.totex_pc),reshape_data(x.pcwgt),_quintiles),'quintile','totex_pc'))
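# ^ perc_with_spline/match_percentiles (helpers defined elsewhere in this module) assign each
# household to pcwgt-weighted per-capita-expenditure deciles and quintiles; the temporary 'pais'
# column exists only so the helpers can be applied via groupby.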
hies_FD_tot = hies_FD_tot.drop(['pais'],axis=1)
hies_FD['decile'] = hies_FD_tot['decile'].copy()
hies_FD['quintile'] = hies_FD_tot['quintile'].copy()
###################################
# Price hikes in all goods due to gasoline increase (% of current price)
fdict = get_dict_gtap_to_final()
try: df = pd.read_csv(out_dir+'all_countries/price_increase_full.csv').set_index('category')
except: df = pd.DataFrame({pais.upper():0,'category':[fdict[i][1] for i in fdict]},index=None).set_index('category')
for i in fdict:
table_value = None
gtap_cat_array = get_dict_gtap_to_final()[i][0]
#table_value_n = (final_CC_ind_tot['hhwgt']*(final_CC_ind[fdict[i][0]].sum(axis=1)+final_CC_dir[fdict[i][0]].sum(axis=1))/1E6).sum()
# ^ this is already zero when there's no data in the survey
if pais == 'brb':
table_value_n = energy_tax_total[[_g for _g in gtap_cat_array if _g in energy_tax_total.columns]].sum(axis=1).sum()
table_value_d = get_FD_scale_fac(pais)*float(pd.read_excel('GTAP_power_IO_tables/xcbIOT.xlsx',sheet_name='Final_Demand',index_col=[0])['Hou'].squeeze()[gtap_cat_array].sum())
# ^ get_FD_scale_fac(pais) != 1. ONLY IF pais == 'brb'
else:
table_value_n = ((ccdf_ind[[_g for _g in gtap_cat_array if _g in ccdf_ind.columns]].sum(axis=1)
+ccdf_dir[[_g for _g in gtap_cat_array if _g in ccdf_dir.columns]].sum(axis=1))*hh_hhsector['factor_expansion']).sum()/1E6
#table_value_d = get_FD_scale_fac(pais)*float(pd.read_excel('GTAP_power_IO_tables/'
# +_iot_code+'IOT.xlsx','Final_Demand',index_col=[0])['Hou'].squeeze()[gtap_cat_array].sum())
_fname = 'GTAP_power_IO_tables_with_imports/Household_consumption_both_domestic_import.xlsx'
table_value_d = get_FD_scale_fac(pais)*float(pd.read_excel(_fname,index_col=[0])[pais].squeeze()[gtap_cat_array].sum())
# ^ get_FD_scale_fac(pais) != 1 only if pais == 'brb', so this scale factor is a no-op in this (non-brb) branch and could be deleted
if table_value_n == 0 and table_value_d != 0:
print('BYPASS:',pais,_bypass)
try: table_value_n = float(_bypass[i])
except: pass
# throw results...look how clever we are!
if verbose:
print(i,table_value_n,table_value_d)
print('ind:',(ccdf_ind[[_g for _g in gtap_cat_array if _g in ccdf_ind.columns]].sum(axis=1)*hh_hhsector['factor_expansion']).sum()/1E6)
print('dir:',(ccdf_dir[[_g for _g in gtap_cat_array if _g in ccdf_dir.columns]].sum(axis=1)*hh_hhsector['factor_expansion']).sum()/1E6)
table_value = round(100*table_value_n/table_value_d,1)
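# ^ table_value = carbon-tax cost in this category as a percentage of baseline household final
# demand in the same category, i.e. the implied relative price increase (numerator and
# denominator are presumed to be in the same units, millions of the same currency).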
df.loc[fdict[i][1],pais.upper()] = table_value
if pais == 'brb':
df['BRB']/=1000.
df.loc['Petroleum, gasoline & diesel'] = 6.2
# HACK: don't understand why *=1/1000. would be justified; haven't checked units
# HACK: not sure why 'Petroleum, gasoline & diesel' doesn't come through analysis
_df = df.sort_values(pais.upper(),ascending=False).drop([fdict[i][1] for i in cols_to_drop])[pais.upper()]
_df.name = '[%]'
_df.index.name = 'Relative increase'
_df.round(1).to_latex(out_dir+'latex/pct_change_'+pais.lower()+'.tex')
with open(out_dir+'latex/pct_change_'+pais.lower()+'.tex', 'r') as f:
with open(out_dir+'latex/out_pct_change_'+pais.lower()+'.tex', 'w') as f2:
f2.write(r'\documentclass[10pt]{article}'+'\n')
f2.write(r'\usepackage{amssymb} %maths'+'\n')
f2.write(r'\usepackage{amsmath} %maths'+'\n')
f2.write(r'\usepackage{booktabs}'+'\n')
f2.write(r'\begin{document}'+'\n')
f2.write(f.read())
f2.write(r'\end{document}')
f2.close()
subprocess.call('cd '+out_dir+'latex/; pdflatex out_pct_change_'+pais.lower()+'.tex',shell=True)
for f in glob.glob(out_dir+'latex/*.aux'): os.remove(f)
for f in glob.glob(out_dir+'latex/*.log'): os.remove(f)
for f in glob.glob(out_dir+'latex/out_*.tex'): os.remove(f)
if pais != 'brb': df.to_csv('output/all_countries/price_increase_full.csv')
hies_FD,hies_FD_tot,cols_to_drop = plot_expenditures_by_category(pais,hies_FD,hies_FD_tot)
###################################
# Current spending on all energy (electricity, petroleum, gasoline, diesel, natural gas, & coal), as % of totex
energy_categories = [fdict['frac_fuels'][1],fdict['frac_gas'][1],fdict['frac_char'][1]]
# ^ includes: gasto_tcomb = Household expenditure on transportation fuels
# ^ gasto_vpgk = Household expenditure on petroleum, gasoline and kerosene for domestic use
# ^ gasto_vlp = Household expenditure on liquified petroleum gas for domestic use
# ^ gasto_vdi = Household expenditure on diesel for domestic use
final_FD_quints = pd.DataFrame(index=hies_FD.reset_index().set_index('quintile').sum(level='quintile').index).sort_index()
final_FD_quints['Direct fuel consumption'] = 100.*((hies_FD_tot['hhwgt']*hies_FD[energy_categories].sum(axis=1)/hies_FD_tot['totex_hh']).sum(level='quintile')
/hies_FD_tot['hhwgt'].sum(level='quintile'))
_hack = final_CC_dir.copy()
_hack['quintile'] = hies_FD_tot.reset_index('quintile')['quintile'].copy()
_hack = _hack.reset_index().set_index(['cod_hogar','quintile'])
final_FD_quints['Direct fuel consumption tax'] = (100./1E6*(_hack.sum(axis=1)*hhwgts['hhwgt']).sum(level='quintile')
/hies_FD_tot[['totex_pc','pcwgt']].prod(axis=1).sum(level='quintile'))
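# ^ second series: the direct carbon-tax burden per quintile as a percentage of that quintile's
# total expenditures; the 1/1E6 factor appears to reconcile the carbon-cost units with the
# expenditure units.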
final_FD_quints.plot(final_FD_quints.index,'Direct fuel consumption',kind='bar',color=quint_colors,legend=False)
plt.gca().set_xticklabels(quint_labels,ha='center',rotation=0)
plt.ylabel('Direct fuel consumption [% of total expenditures]',fontsize=11,weight='bold',labelpad=8)
plt.xlabel('')
plt.ylim([0,final_FD_quints[['Direct fuel consumption','Direct fuel consumption tax']].sum(axis=1).max()*1.05])
rects = plt.gca().patches
for rect in rects:
_w = rect.get_height()
plt.gca().annotate(str(round(_w,1))+'%',xy=(rect.get_x()+rect.get_width()/2, rect.get_y()+rect.get_height()+0.025),
ha='center', va='bottom',color='black',fontsize=8,weight='bold',clip_on=False)
plt.gca().grid(False)
sns.despine()
plt.draw()
plt.gcf().savefig(out_dir+'expenditures/'+pais+'_gasoline_as_pct_by_quintile.pdf',format='pdf',bbox_inches='tight')
plt.gcf().savefig(out_dir+'expenditures/'+pais+'_gasoline_as_pct_by_quintile.png',format='png',bbox_inches='tight')
############################
# Try to plot tax on top of expenditures
#ax = plt.gca()
plt.cla()
final_FD_quints.plot(final_FD_quints.index,'Direct fuel consumption',kind='bar',color=quint_colors,legend=False)
# Below labels the total cost, etc, by quintile
if False:
rects = plt.gca().patches
for rect in rects:
_w = rect.get_height()
plt.gca().annotate(str(round(_w,1))+'%',xy=(rect.get_x()+rect.get_width()-0.025, rect.get_y()+rect.get_height()/2.),
ha='right', va='center',color='black',fontsize=8,weight='bold',clip_on=False)
final_FD_quints.plot(final_FD_quints.index,'Direct fuel consumption tax',kind='bar',color=sns.color_palette('Set1', n_colors=9)[5],legend=False,bottom=final_FD_quints['Direct fuel consumption'],ax=plt.gca())
plt.ylim([0,final_FD_quints[['Direct fuel consumption','Direct fuel consumption tax']].sum(axis=1).max()*1.05])
plt.gca().grid(False)
sns.despine()
plt.gca().set_xticklabels(quint_labels,ha='center',rotation=0)
plt.ylabel('Direct fuel consumption [% of total expenditures]',fontsize=11,weight='bold',labelpad=8)
plt.xlabel('')
plt.gcf().savefig(out_dir+'expenditures/'+pais+'_gasoline_as_pct_by_quintile_with_tax.pdf',format='pdf',bbox_inches='tight')
plt.gcf().savefig(out_dir+'expenditures/'+pais+'_gasoline_as_pct_by_quintile_with_tax.png',format='png',bbox_inches='tight')
plt.cla()
###################################
# Put quintile info into final_CC_tot, final_CC_tot_CO2, final_CC_tot_nonCO2
hies_FD_tot = hies_FD_tot.reset_index().set_index('cod_hogar')
try: hies_FD_tot['quintile'] = hies_FD_tot['quintile'].astype('int')
except: hies_FD_tot['quintile'] = hies_FD_tot['quintile'].astype('str')
#
hhwgts['quintile'] = hies_FD_tot['quintile'].copy()
hhwgts = hhwgts.reset_index().set_index(['cod_hogar','quintile'])
#
final_CC['quintile'] = hies_FD_tot['quintile'].copy()
final_CC = final_CC.reset_index().set_index(['cod_hogar','quintile'])
#
try:
final_CC_ind['quintile'] = hies_FD_tot['quintile'].copy()
final_CC_ind = final_CC_ind.reset_index().set_index(['cod_hogar','quintile'])
#
final_CC_dir['quintile'] = hies_FD_tot['quintile'].copy()
final_CC_dir = final_CC_dir.reset_index().set_index(['cod_hogar','quintile'])
#
final_CC_CO2['quintile'] = hies_FD_tot['quintile'].copy()
final_CC_CO2 = final_CC_CO2.reset_index().set_index(['cod_hogar','quintile'])
#
final_CC_nonCO2['quintile'] = hies_FD_tot['quintile'].copy()
final_CC_nonCO2 = final_CC_nonCO2.reset_index().set_index(['cod_hogar','quintile'])
#
except: pass
# ^ this (t/e) pair is for pais != 'brb'
hies_FD_tot = hies_FD_tot.reset_index().set_index(['cod_hogar','quintile'])
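# ^ hhwgts, final_CC (and, where defined, its ind/dir/CO2/nonCO2 variants) and hies_FD_tot now
# share a (cod_hogar, quintile) MultiIndex, so the aggregations below can sum over 'quintile'.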
##########################################################################################
# Record sample (all countries) stats in hh_tax_cost_table.csv
# total cost
try: hhcost_t = pd.read_csv('output/all_countries/hh_tax_cost_table.csv').set_index('quintile')
except: hhcost_t = pd.DataFrame({pais.upper():0,'quintile':['Q1','Q2','Q3','Q4','Q5']},index=None).set_index('quintile')
# Direct
try: hhcost_d = pd.read_csv('output/all_countries/hh_direct_tax_cost_table.csv').set_index('quintile')
except: hhcost_d = pd.DataFrame({pais.upper():0,'quintile':['Q1','Q2','Q3','Q4','Q5']},index=None).set_index('quintile')
# Indirect
try: hhcost_i = pd.read_csv('output/all_countries/hh_indirect_tax_cost_table.csv').set_index('quintile')
except: hhcost_i = pd.DataFrame({pais.upper():0,'quintile':['Q1','Q2','Q3','Q4','Q5']},index=None).set_index('quintile')
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from pandas import (Series, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import pandas as pd
from pandas import compat
from pandas._libs import (groupby as libgroupby, algos as libalgos,
hashtable as ht)
from pandas._libs.hashtable import unique_label_indices
from pandas.compat import lrange, range
import pandas.core.algorithms as algos
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.core.dtypes.dtypes import CategoricalDtype as CDT
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.util.testing import assert_almost_equal
class TestMatch(object):
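# algos.match(to_match, values) returns, for each element of to_match, its position in `values`,
# filling -1 (or the supplied fill value) where an element is absent.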
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_series_equal(result, expected)
s = Series(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(s, [2, 4], np.nan))
expected = Series(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_series_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_series_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, uniques = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
uniques, np.array(['a', 'b', 'c'], dtype=object))
labels, uniques = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Series(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(uniques, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Series([v1, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(uniques, exp)
# period
v1 = pd.Period('201302', freq='M')
v2 = pd.Period('201303', freq='M')
x = Series([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
# GH 5986
v1 = pd.to_timedelta('1 day 1 min')
v2 = pd.to_timedelta('1 day')
x = Series([v1, v2, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should map to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(len(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key),
expected == na_sentinel)
# nan still maps to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = pd.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if pd._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, 1], dtype=np.uint64)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, -1], dtype=object)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_uniques = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
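# entries equal to na_value are treated as missing: they receive label -1 and are excluded
# from the uniques array (hence expected_uniques = data[[1, 3]]).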
expected_uniques = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).astype('O')
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
len(algos.unique(lst))
def test_on_index_object(self):
mindex = pd.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = mindex.values
expected.sort()
mindex = mindex.repeat(2)
result = pd.unique(mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = pd.to_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.unique(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(dt_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = pd.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.unique(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(td_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.unique(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = pd.unique(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.unique()
tm.assert_categorical_equal(result, expected)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.unique()
tm.assert_categorical_equal(result, expected_o)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected_o)
# Series of categorical dtype
s = Series(Categorical(list('baabc')), name='foo')
result = s.unique()
tm.assert_categorical_equal(result, expected)
result = pd.unique(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.unique()
tm.assert_index_equal(result, expected)
result = pd.unique(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Series(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).unique()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).unique()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(
Series(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = pd.unique(Series([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = pd.unique(Series([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = pd.unique(Series([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Series(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = pd.unique(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.isin(1, 1))
pytest.raises(TypeError, lambda: algos.isin(1, [1]))
pytest.raises(TypeError, lambda: algos.isin([1], 1))
def test_basic(self):
result = algos.isin([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), Series([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), Series(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = pd.date_range('20130101', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = pd.timedelta_range('1 day', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
s = pd.date_range('20000101', periods=2000000, freq='s').values
result = algos.isin(s, s[0:2])
expected = np.zeros(len(s), dtype=bool)
expected[0] = True
expected[1] = True
tm.assert_numpy_array_equal(result, expected)
def test_categorical_from_codes(self):
# GH 16639
vals = np.array([0, 1, 2, 0])
cats = ['a', 'b', 'c']
Sd = Series(Categorical(1).from_codes(vals, cats))
St = Series(Categorical(1).from_codes(np.array([0, 1]), cats))
expected = np.array([True, True, False, True])
result = algos.isin(Sd, St)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_empty(self, empty):
# see gh-16991
vals = Index(["a", "b"])
expected = np.array([False, False])
result = algos.isin(vals, empty)
tm.assert_numpy_array_equal(expected, result)
class TestValueCounts(object):
def test_value_counts(self):
np.random.seed(1234)
from pandas.core.reshape.tile import cut
arr = np.random.randn(4)
factor = cut(arr, 4)
# assert isinstance(factor, n)
result = algos.value_counts(factor)
breaks = [-1.194, -0.535, 0.121, 0.777, 1.433]
index = IntervalIndex.from_breaks(breaks).astype(CDT(ordered=True))
expected = Series([1, 1, 1, 1], index=index)
tm.assert_series_equal(result.sort_index(), expected.sort_index())
def test_value_counts_bins(self):
s = [1, 2, 3, 4]
result = algos.value_counts(s, bins=1)
expected = Series([4],
index=IntervalIndex.from_tuples([(0.996, 4.0)]))
tm.assert_series_equal(result, expected)
result = algos.value_counts(s, bins=2, sort=False)
expected = Series([2, 2],
index=IntervalIndex.from_tuples([(0.996, 2.5),
(2.5, 4.0)]))
tm.assert_series_equal(result, expected)
def test_value_counts_dtypes(self):
result = algos.value_counts([1, 1.])
assert len(result) == 1
result = algos.value_counts([1, 1.], bins=1)
assert len(result) == 1
result = algos.value_counts(Series([1, 1., '1'])) # object
assert len(result) == 2
pytest.raises(TypeError, lambda s: algos.value_counts(s, bins=1),
['1', 1])
def test_value_counts_nat(self):
td = Series([np.timedelta64(10000), pd.NaT], dtype='timedelta64[ns]')
dt = pd.to_datetime(['NaT', '2014-01-01'])
for s in [td, dt]:
vc = algos.value_counts(s)
vc_with_na = algos.value_counts(s, dropna=False)
assert len(vc) == 1
assert len(vc_with_na) == 2
exp_dt = Series({Timestamp('2014-01-01 00:00:00'): 1})
tm.assert_series_equal(algos.value_counts(dt), exp_dt)
# TODO same for (timedelta)
def test_value_counts_datetime_outofbounds(self):
# GH 13663
s = Series([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1),
datetime(3000, 1, 1), datetime(3000, 1, 1)])
res = s.value_counts()
exp_index = Index([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(6000, 1, 1)], dtype=object)
exp = Series([3, 2, 1], index=exp_index)
tm.assert_series_equal(res, exp)
# GH 12424
res = pd.to_datetime(Series(['2362-01-01', np.nan]),
errors='ignore')
exp = Series(['2362-01-01', np.nan], dtype=object)
tm.assert_series_equal(res, exp)
def test_categorical(self):
s = Series(Categorical(list('aaabbc')))
result = s.value_counts()
expected = Series([3, 2, 1], index=CategoricalIndex(['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
# preserve order?
s = s.cat.as_ordered()
result = s.value_counts()
expected.index = expected.index.as_ordered()
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_nans(self):
s = Series(Categorical(list('aaaaabbbcc'))) # 4,3,2,1 (nan)
s.iloc[1] = np.nan
result = s.value_counts()
expected = Series([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = Series([
4, 3, 2, 1
], index=CategoricalIndex(['a', 'b', 'c', np.nan]))
tm.assert_series_equal(result, expected, check_index_type=True)
# out of order
s = Series(Categorical(
list('aaaaabbbcc'), ordered=True, categories=['b', 'a', 'c']))
s.iloc[1] = np.nan
result = s.value_counts()
expected = Series([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['b', 'a', 'c'], ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = Series([4, 3, 2, 1], index=CategoricalIndex(
['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_zeroes(self):
# keep the `d` category with 0
s = Series(Categorical(
list('bbbaac'), categories=list('abcd'), ordered=True))
result = s.value_counts()
expected = Series([3, 2, 1, 0], index=Categorical(
['b', 'a', 'c', 'd'], categories=list('abcd'), ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
def test_dropna(self):
# https://github.com/pandas-dev/pandas/issues/9443#issuecomment-73719328
tm.assert_series_equal(
Series([True, True, False]).value_counts(dropna=True),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False]).value_counts(dropna=False),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False, None]).value_counts(dropna=True),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False, None]).value_counts(dropna=False),
Series([2, 1, 1], index=[True, False, np.nan]))
tm.assert_series_equal(
Series([10.3, 5., 5.]).value_counts(dropna=True),
Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
Series([10.3, 5., 5.]).value_counts(dropna=False),
Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
Series([10.3, 5., 5., None]).value_counts(dropna=True),
Series([2, 1], index=[5., 10.3]))
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
result = Series([10.3, 5., 5., None]).value_counts(dropna=False)
expected = Series([2, 1, 1], index=[5., 10.3, np.nan])
tm.assert_series_equal(result, expected)
def test_value_counts_normalized(self):
# GH12558
s = Series([1, 2, np.nan, np.nan, np.nan])
dtypes = (np.float64, np.object, 'M8[ns]')
for t in dtypes:
s_typed = s.astype(t)
result = s_typed.value_counts(normalize=True, dropna=False)
expected = Series([0.6, 0.2, 0.2],
index=Series([np.nan, 2.0, 1.0], dtype=t))
tm.assert_series_equal(result, expected)
result = s_typed.value_counts(normalize=True, dropna=True)
expected = Series([0.5, 0.5],
index=Series([2.0, 1.0], dtype=t))
tm.assert_series_equal(result, expected)
def test_value_counts_uint64(self):
arr = np.array([2**63], dtype=np.uint64)
expected = Series([1], index=[2**63])
result = algos.value_counts(arr)
tm.assert_series_equal(result, expected)
arr = np.array([-1, 2**63], dtype=object)
expected = Series([1, 1], index=[-1, 2**63])
result = algos.value_counts(arr)
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
tm.assert_series_equal(result, expected)
class TestDuplicated(object):
def test_duplicated_with_nas(self):
keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object)
result = algos.duplicated(keys)
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='first')
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='last')
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep=False)
expected = np.array([True, False, True, True, False, True])
tm.assert_numpy_array_equal(result, expected)
keys = np.empty(8, dtype=object)
for i, t in enumerate(zip([0, 0, np.nan, np.nan] * 2,
[0, np.nan, 0, np.nan] * 2)):
keys[i] = t
result = algos.duplicated(keys)
falses = [False] * 4
trues = [True] * 4
expected = np.array(falses + trues)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='last')
expected = np.array(trues + falses)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep=False)
expected = np.array(trues + trues)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize('case', [
np.array([1, 2, 1, 5, 3,
2, 4, 1, 5, 6]),
np.array([1.1, 2.2, 1.1, np.nan, 3.3,
2.2, 4.4, 1.1, np.nan, 6.6]),
pytest.param(np.array([1 + 1j, 2 + 2j, 1 + 1j, 5 + 5j, 3 + 3j,
2 + 2j, 4 + 4j, 1 + 1j, 5 + 5j, 6 + 6j]),
marks=pytest.mark.xfail(reason="Complex bug. GH 16399")
),
np.array(['a', 'b', 'a', 'e', 'c',
'b', 'd', 'a', 'e', 'f'], dtype=object),
np.array([1, 2**63, 1, 3**5, 10, 2**63, 39, 1, 3**5, 7],
dtype=np.uint64),
])
def test_numeric_object_likes(self, case):
exp_first = np.array([False, False, True, False, False,
True, False, True, True, False])
exp_last = np.array([True, True, True, True, False,
False, False, False, False, False])
exp_false = exp_first | exp_last
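# keep=False flags every entry whose value occurs more than once, i.e. the union of the
# keep='first' and keep='last' masks.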
res_first = algos.duplicated(case, keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = algos.duplicated(case, keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = algos.duplicated(case, keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# index
for idx in [Index(case), Index(case, dtype='category')]:
res_first = idx.duplicated(keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = idx.duplicated(keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# series
for s in [Series(case), Series(case, dtype='category')]:
res_first = s.duplicated(keep='first')
tm.assert_series_equal(res_first, Series(exp_first))
res_last = s.duplicated(keep='last')
tm.assert_series_equal(res_last, Series(exp_last))
res_false = s.duplicated(keep=False)
tm.assert_series_equal(res_false, Series(exp_false))
def test_datetime_likes(self):
dt = ['2011-01-01', '2011-01-02', '2011-01-01', 'NaT', '2011-01-03',
'2011-01-02', '2011-01-04', '2011-01-01', 'NaT', '2011-01-06']
td = ['1 days', '2 days', '1 days', 'NaT', '3 days',
'2 days', '4 days', '1 days', 'NaT', '6 days']
cases = [np.array([Timestamp(d) for d in dt]),
np.array([Timestamp(d, tz='US/Eastern') for d in dt]),
np.array([pd.Period(d, freq='D') for d in dt]),
np.array([np.datetime64(d) for d in dt]),
np.array([pd.Timedelta(d) for d in td])]
exp_first = np.array([False, False, True, False, False,
True, False, True, True, False])
exp_last = np.array([True, True, True, True, False,
False, False, False, False, False])
exp_false = exp_first | exp_last
for case in cases:
res_first = algos.duplicated(case, keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = algos.duplicated(case, keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = algos.duplicated(case, keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# index
for idx in [Index(case), Index(case, dtype='category'),
Index(case, dtype=object)]:
res_first = idx.duplicated(keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = idx.duplicated(keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# series
for s in [Series(case), Series(case, dtype='category'),
Series(case, dtype=object)]:
res_first = s.duplicated(keep='first')
tm.assert_series_equal(res_first, Series(exp_first))
res_last = s.duplicated(keep='last')
tm.assert_series_equal(res_last, Series(exp_last))
res_false = s.duplicated(keep=False)
tm.assert_series_equal(res_false, Series(exp_false))
def test_unique_index(self):
cases = [Index([1, 2, 3]), pd.RangeIndex(0, 3)]
for case in cases:
assert case.is_unique
tm.assert_numpy_array_equal(case.duplicated(),
np.array([False, False, False]))
@pytest.mark.parametrize('arr, unique', [
([(0, 0), (0, 1), (1, 0), (1, 1), (0, 0), (0, 1), (1, 0), (1, 1)],
[(0, 0), (0, 1), (1, 0), (1, 1)]),
([('b', 'c'), ('a', 'b'), ('a', 'b'), ('b', 'c')],
[('b', 'c'), ('a', 'b')]),
([('a', 1), ('b', 2), ('a', 3), ('a', 1)],
[('a', 1), ('b', 2), ('a', 3)]),
])
def test_unique_tuples(self, arr, unique):
# https://github.com/pandas-dev/pandas/issues/16519
expected = np.empty(len(unique), dtype=object)
expected[:] = unique
result = pd.unique(arr)
tm.assert_numpy_array_equal(result, expected)
class GroupVarTestMixin(object):
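# Mixin: concrete subclasses (TestGroupVarFloat64/32 below) supply `algo`, `dtype` and `rtol`;
# each test fills `out`/`counts` in place via the cython group_var kernel and compares against
# a numpy per-group sample variance (ddof=1).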
def test_group_var_generic_1d(self):
prng = RandomState(1234)
out = (np.nan * np.ones((5, 1))).astype(self.dtype)
counts = np.zeros(5, dtype='int64')
values = 10 * prng.rand(15, 1).astype(self.dtype)
labels = np.tile(np.arange(5), (3, )).astype('int64')
expected_out = (np.squeeze(values)
.reshape((5, 3), order='F')
.std(axis=1, ddof=1) ** 2)[:, np.newaxis]
expected_counts = counts + 3
self.algo(out, counts, values, labels)
assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_1d_flat_labels(self):
prng = RandomState(1234)
out = (np.nan * np.ones((1, 1))).astype(self.dtype)
counts = np.zeros(1, dtype='int64')
values = 10 * prng.rand(5, 1).astype(self.dtype)
labels = np.zeros(5, dtype='int64')
expected_out = np.array([[values.std(ddof=1) ** 2]])
expected_counts = counts + 5
self.algo(out, counts, values, labels)
assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_all_finite(self):
prng = RandomState(1234)
out = (np.nan * np.ones((5, 2))).astype(self.dtype)
counts = np.zeros(5, dtype='int64')
values = 10 * prng.rand(10, 2).astype(self.dtype)
labels = np.tile(np.arange(5), (2, )).astype('int64')
expected_out = np.std(values.reshape(2, 5, 2), ddof=1, axis=0) ** 2
expected_counts = counts + 2
self.algo(out, counts, values, labels)
assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_some_nan(self):
prng = RandomState(1234)
out = (np.nan * np.ones((5, 2))).astype(self.dtype)
counts = np.zeros(5, dtype='int64')
values = 10 * prng.rand(10, 2).astype(self.dtype)
values[:, 1] = np.nan
labels = np.tile(np.arange(5), (2, )).astype('int64')
expected_out = np.vstack([values[:, 0]
.reshape(5, 2, order='F')
.std(ddof=1, axis=1) ** 2,
np.nan * np.ones(5)]).T.astype(self.dtype)
expected_counts = counts + 2
self.algo(out, counts, values, labels)
tm.assert_almost_equal(out, expected_out, check_less_precise=6)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_constant(self):
# Regression test from GH 10448.
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0], dtype='int64')
values = 0.832845131556193 * np.ones((3, 1), dtype=self.dtype)
labels = np.zeros(3, dtype='int64')
self.algo(out, counts, values, labels)
assert counts[0] == 3
assert out[0, 0] >= 0
tm.assert_almost_equal(out[0, 0], 0.0)
class TestGroupVarFloat64(GroupVarTestMixin):
__test__ = True
algo = libgroupby.group_var_float64
dtype = np.float64
rtol = 1e-5
def test_group_var_large_inputs(self):
prng = RandomState(1234)
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0], dtype='int64')
values = (prng.rand(10 ** 6) + 10 ** 12).astype(self.dtype)
values.shape = (10 ** 6, 1)
labels = np.zeros(10 ** 6, dtype='int64')
self.algo(out, counts, values, labels)
assert counts[0] == 10 ** 6
tm.assert_almost_equal(out[0, 0], 1.0 / 12, check_less_precise=True)
class TestGroupVarFloat32(GroupVarTestMixin):
__test__ = True
algo = libgroupby.group_var_float32
dtype = np.float32
rtol = 1e-2
class TestHashTable(object):
def test_lookup_nan(self):
xs = np.array([2.718, 3.14, np.nan, -7, 5, 2, 3])
m = ht.Float64HashTable()
m.map_locations(xs)
tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs),
dtype=np.int64))
def test_lookup_overflow(self):
xs = np.array([1, 2, 2**63], dtype=np.uint64)
m = ht.UInt64HashTable()
m.map_locations(xs)
tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs),
dtype=np.int64))
def test_get_unique(self):
s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(s.unique(), exp)
def test_vector_resize(self):
# Test for memory errors after internal vector
# reallocations (pull request #7157)
def _test_vector_resize(htable, uniques, dtype, nvals, safely_resizes):
vals = np.array(np.random.randn(1000), dtype=dtype)
# get_labels may append to uniques
htable.get_labels(vals[:nvals], uniques, 0, -1)
# to_array() set an external_view_exists flag on uniques.
tmp = uniques.to_array()
oldshape = tmp.shape
# subsequent get_labels() calls can no longer append to it
# (for all but StringHashTables + ObjectVector)
if safely_resizes:
htable.get_labels(vals, uniques, 0, -1)
else:
with pytest.raises(ValueError) as excinfo:
htable.get_labels(vals, uniques, 0, -1)
assert str(excinfo.value).startswith('external reference')
uniques.to_array() # should not raise here
assert tmp.shape == oldshape
test_cases = [
(ht.PyObjectHashTable, ht.ObjectVector, 'object', False),
(ht.StringHashTable, ht.ObjectVector, 'object', True),
(ht.Float64HashTable, ht.Float64Vector, 'float64', False),
(ht.Int64HashTable, ht.Int64Vector, 'int64', False),
(ht.UInt64HashTable, ht.UInt64Vector, 'uint64', False)]
for (tbl, vect, dtype, safely_resizes) in test_cases:
# resizing to empty is a special case
_test_vector_resize(tbl(), vect(), dtype, 0, safely_resizes)
_test_vector_resize(tbl(), vect(), dtype, 10, safely_resizes)
def test_quantile():
s = Series(np.random.randn(100))
result = algos.quantile(s, [0, .25, .5, .75, 1.])
expected = algos.quantile(s.values, [0, .25, .5, .75, 1.])
tm.assert_almost_equal(result, expected)
def test_unique_label_indices():
a = np.random.randint(1, 1 << 10, 1 << 15).astype('i8')
left = unique_label_indices(a)
right = np.unique(a, return_index=True)[1]
tm.assert_numpy_array_equal(left, right,
check_dtype=False)
a[np.random.choice(len(a), 10)] = -1
left = unique_label_indices(a)
right = np.unique(a, return_index=True)[1][1:]
tm.assert_numpy_array_equal(left, right,
check_dtype=False)
class TestRank(object):
@td.skip_if_no_scipy
def test_scipy_compat(self):
from scipy.stats import rankdata
def _check(arr):
mask = ~np.isfinite(arr)
arr = arr.copy()
result = libalgos.rank_1d_float64(arr)
arr[mask] = np.inf
exp = rankdata(arr)
exp[mask] = nan
assert_almost_equal(result, exp)
_check(np.array([nan, nan, 5., 5., 5., nan, 1, 2, 3, nan]))
_check(np.array([4., nan, 5., 5., 5., nan, 1, 2, 4., nan]))
def test_basic(self):
exp = np.array([1, 2], dtype=np.float64)
for dtype in np.typecodes['AllInteger']:
s = Series([1, 100], dtype=dtype)
tm.assert_numpy_array_equal(algos.rank(s), exp)
def test_uint64_overflow(self):
exp = np.array([1, 2], dtype=np.float64)
for dtype in [np.float64, np.uint64]:
s = Series([1, 2**63], dtype=dtype)
tm.assert_numpy_array_equal(algos.rank(s), exp)
def test_too_many_ndims(self):
arr = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]])
msg = "Array with ndim > 2 are not supported"
with tm.assert_raises_regex(TypeError, msg):
algos.rank(arr)
def test_pad_backfill_object_segfault():
old = np.array([], dtype='O')
new = np.array([datetime(2010, 12, 31)], dtype='O')
result = libalgos.pad_object(old, new)
expected = np.array([-1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = libalgos.pad_object(new, old)
expected = np.array([], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = libalgos.backfill_object(old, new)
expected = np.array([-1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = libalgos.backfill_object(new, old)
expected = np.array([], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
def test_arrmap():
values = np.array(['foo', 'foo', 'bar', 'bar', 'baz', 'qux'], dtype='O')
result = libalgos.arrmap_object(values, lambda x: x in ['foo', 'bar'])
assert (result.dtype == np.bool_)
class TestTseriesUtil(object):
def test_combineFunc(self):
pass
def test_reindex(self):
pass
def test_isna(self):
pass
def test_groupby(self):
pass
def test_groupby_withnull(self):
pass
def test_backfill(self):
old = Index([1, 5, 10])
new = Index(lrange(12))
filler = libalgos.backfill_int64(old.values, new.values)
expect_filler = np.array([0, 0, 1, 1, 1, 1,
2, 2, 2, 2, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(filler, expect_filler)
# corner case
old = Index([1, 4])
new = Index(lrange(5, 10))
filler = libalgos.backfill_int64(old.values, new.values)
expect_filler = np.array([-1, -1, -1, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(filler, expect_filler)
def test_pad(self):
old = Index([1, 5, 10])
new = Index(lrange(12))
filler = libalgos.pad_int64(old.values, new.values)
expect_filler = np.array([-1, 0, 0, 0, 0, 1,
1, 1, 1, 1, 2, 2], dtype=np.int64)
tm.assert_numpy_array_equal(filler, expect_filler)
# corner case
old = Index([5, 10])
new = Index(lrange(5))
filler = libalgos.pad_int64(old.values, new.values)
expect_filler = np.array([-1, -1, -1, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(filler, expect_filler)
def test_is_lexsorted():
failure = [
    np.array([3] * 31 + [2] * 31 + [1] * 31 + [0] * 31, dtype='int64'),
    np.array(list(range(30, -1, -1)) * 4, dtype='int64'),
]
assert (not libalgos.is_lexsorted(failure))
def test_groupsort_indexer():
a = np.random.randint(0, 1000, 100).astype(np.int64)
b = np.random.randint(0, 1000, 100).astype(np.int64)
result = libalgos.groupsort_indexer(a, 1000)[0]
# need to use a stable sort
# np.argsort returns int, groupsort_indexer
# always returns int64
expected = np.argsort(a, kind='mergesort')
expected = expected.astype(np.int64)
tm.assert_numpy_array_equal(result, expected)
# compare with lexsort
# np.lexsort returns int, groupsort_indexer
# always returns int64
key = a * 1000 + b
result = libalgos.groupsort_indexer(key, 1000000)[0]
expected = np.lexsort((b, a))
expected = expected.astype(np.int64)
tm.assert_numpy_array_equal(result, expected)
def test_infinity_sort():
# GH 13445
# numpy's argsort can be unhappy if something is less than
# itself. Instead, let's give our infinities a self-consistent
# ordering, but outside the float extended real line.
Inf = libalgos.Infinity()
NegInf = libalgos.NegInfinity()
ref_nums = [NegInf, float("-inf"), -1e100, 0, 1e100, float("inf"), Inf]
assert all(Inf >= x for x in ref_nums)
assert all(Inf > x or x is Inf for x in ref_nums)
assert Inf >= Inf and Inf == Inf
assert not Inf < Inf and not Inf > Inf
assert libalgos.Infinity() == libalgos.Infinity()
assert not libalgos.Infinity() != libalgos.Infinity()
assert all(NegInf <= x for x in ref_nums)
assert all(NegInf < x or x is NegInf for x in ref_nums)
assert NegInf <= NegInf and NegInf == NegInf
assert not NegInf < NegInf and not NegInf > NegInf
assert libalgos.NegInfinity() == libalgos.NegInfinity()
assert not libalgos.NegInfinity() != libalgos.NegInfinity()
for perm in permutations(ref_nums):
assert sorted(perm) == ref_nums
# smoke tests
np.array([libalgos.Infinity()] * 32).argsort()
np.array([libalgos.NegInfinity()] * 32).argsort()
def test_infinity_against_nan():
Inf = libalgos.Infinity()
NegInf = libalgos.NegInfinity()
assert not Inf > np.nan
assert not Inf >= np.nan
assert not Inf < np.nan
assert not Inf <= np.nan
assert not Inf == np.nan
assert Inf != np.nan
assert not NegInf > np.nan
assert not NegInf >= np.nan
assert not NegInf < np.nan
assert not NegInf <= np.nan
assert not NegInf == np.nan
assert NegInf != np.nan
def test_ensure_platform_int():
arr = np.arange(100, dtype=np.intp)
result = libalgos.ensure_platform_int(arr)
assert (result is arr)
def test_int64_add_overflow():
# see gh-14068
msg = "Overflow in int64 addition"
m = np.iinfo(np.int64).max
n = np.iinfo(np.int64).min
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), m)
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([n, n]), n)
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([n, n]), np.array([n, n]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, n]), np.array([n, n]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
arr_mask=np.array([False, True]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
b_mask=np.array([False, True]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
arr_mask=np.array([False, True]),
b_mask=np.array([False, True]))
with tm.assert_raises_regex(OverflowError, msg):
with tm.assert_produces_warning(RuntimeWarning):
algos.checked_add_with_arr(np.array([m, m]),
np.array([np.nan, m]))
# Check that the nan boolean arrays override whether or not
# the addition overflows. We don't check the result but just
# the fact that an OverflowError is not raised.
with pytest.raises(AssertionError):
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
arr_mask=np.array([True, True]))
with pytest.raises(AssertionError):
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
b_mask=np.array([True, True]))
with pytest.raises(AssertionError):
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
arr_mask=np.array([True, False]),
b_mask=np.array([False, True]))
class TestMode(object):
def test_no_mode(self):
exp = Series([], dtype=np.float64)
tm.assert_series_equal(algos.mode([]), exp)
def test_mode_single(self):
# GH 15714
exp_single = [1]
data_single = [1]
exp_multi = [1]
data_multi = [1, 1]
for dt in np.typecodes['AllInteger'] + np.typecodes['Float']:
s = Series(data_single, dtype=dt)
exp = Series(exp_single, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
s = Series(data_multi, dtype=dt)
exp = Series(exp_multi, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
exp = Series([1], dtype=np.int)
tm.assert_series_equal(algos.mode([1]), exp)
exp = Series(['a', 'b', 'c'], dtype=np.object)
tm.assert_series_equal(algos.mode(['a', 'b', 'c']), exp)
def test_number_mode(self):
exp_single = [1]
data_single = [1] * 5 + [2] * 3
exp_multi = [1, 3]
data_multi = [1] * 5 + [2] * 3 + [3] * 5
for dt in np.typecodes['AllInteger'] + np.typecodes['Float']:
s = Series(data_single, dtype=dt)
exp = Series(exp_single, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
s = Series(data_multi, dtype=dt)
exp = Series(exp_multi, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
def test_strobj_mode(self):
exp = ['b']
data = ['a'] * 2 + ['b'] * 3
s = Series(data, dtype='c')
exp = Series(exp, dtype='c')
tm.assert_series_equal(algos.mode(s), exp)
exp = ['bar']
data = ['foo'] * 2 + ['bar'] * 3
for dt in [str, object]:
s = Series(data, dtype=dt)
exp = Series(exp, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
import numpy as np
import pytest
from pandas import DataFrame, Series, concat, isna, notna
import pandas._testing as tm
import pandas.tseries.offsets as offsets
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs",
[
[np.mean, "mean", {}],
[np.nansum, "sum", {}],
[lambda x: np.isfinite(x).astype(float).sum(), "count", {}],
[np.median, "median", {}],
[np.min, "min", {}],
[np.max, "max", {}],
[lambda x: np.std(x, ddof=1), "std", {}],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}],
[lambda x: np.var(x, ddof=1), "var", {}],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}],
],
)
def test_series(series, compare_func, roll_func, kwargs):
result = getattr(series.rolling(50), roll_func)(**kwargs)
assert isinstance(result, Series)
tm.assert_almost_equal(result.iloc[-1], compare_func(series[-50:]))
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs",
[
[np.mean, "mean", {}],
[np.nansum, "sum", {}],
[lambda x: np.isfinite(x).astype(float).sum(), "count", {}],
[np.median, "median", {}],
[np.min, "min", {}],
[np.max, "max", {}],
[lambda x: np.std(x, ddof=1), "std", {}],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}],
[lambda x: np.var(x, ddof=1), "var", {}],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}],
],
)
def test_frame(raw, frame, compare_func, roll_func, kwargs):
result = getattr(frame.rolling(50), roll_func)(**kwargs)
assert isinstance(result, DataFrame)
tm.assert_series_equal(
result.iloc[-1, :],
frame.iloc[-50:, :].apply(compare_func, axis=0, raw=raw),
check_names=False,
)
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs, minp",
[
[np.mean, "mean", {}, 10],
[np.nansum, "sum", {}, 10],
[lambda x: np.isfinite(x).astype(float).sum(), "count", {}, 0],
[np.median, "median", {}, 10],
[np.min, "min", {}, 10],
[np.max, "max", {}, 10],
[lambda x: np.std(x, ddof=1), "std", {}, 10],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}, 10],
[lambda x: np.var(x, ddof=1), "var", {}, 10],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}, 10],
],
)
def test_time_rule_series(series, compare_func, roll_func, kwargs, minp):
win = 25
ser = series[::2].resample("B").mean()
series_result = getattr(ser.rolling(window=win, min_periods=minp), roll_func)(
**kwargs
)
last_date = series_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_series = series[::2].truncate(prev_date, last_date)
tm.assert_almost_equal(series_result[-1], compare_func(trunc_series))
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs, minp",
[
[np.mean, "mean", {}, 10],
[np.nansum, "sum", {}, 10],
[lambda x: np.isfinite(x).astype(float).sum(), "count", {}, 0],
[np.median, "median", {}, 10],
[np.min, "min", {}, 10],
[np.max, "max", {}, 10],
[lambda x: np.std(x, ddof=1), "std", {}, 10],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}, 10],
[lambda x: np.var(x, ddof=1), "var", {}, 10],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}, 10],
],
)
def test_time_rule_frame(raw, frame, compare_func, roll_func, kwargs, minp):
win = 25
frm = frame[::2].resample("B").mean()
frame_result = getattr(frm.rolling(window=win, min_periods=minp), roll_func)(
**kwargs
)
last_date = frame_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_frame = frame[::2].truncate(prev_date, last_date)
tm.assert_series_equal(
frame_result.xs(last_date),
trunc_frame.apply(compare_func, raw=raw),
check_names=False,
)
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs",
[
[np.mean, "mean", {}],
[np.nansum, "sum", {}],
[np.median, "median", {}],
[np.min, "min", {}],
[np.max, "max", {}],
[lambda x: np.std(x, ddof=1), "std", {}],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}],
[lambda x: np.var(x, ddof=1), "var", {}],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}],
],
)
def test_nans(compare_func, roll_func, kwargs):
obj = Series(np.random.randn(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
result = getattr(obj.rolling(50, min_periods=30), roll_func)(**kwargs)
tm.assert_almost_equal(result.iloc[-1], compare_func(obj[10:-10]))
# min_periods is working correctly
result = getattr(obj.rolling(20, min_periods=15), roll_func)(**kwargs)
assert isna(result.iloc[23])
assert not isna(result.iloc[24])
assert not isna(result.iloc[-6])
assert isna(result.iloc[-5])
obj2 = Series(np.random.randn(20))
result = getattr(obj2.rolling(10, min_periods=5), roll_func)(**kwargs)
assert isna(result.iloc[3])
assert notna(result.iloc[4])
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, inspect
import pymysql
pymysql.install_as_MySQLdb()
import pandas as pd
from flask import Flask, jsonify
import datetime as dt
from splinter import Browser
from bs4 import BeautifulSoup
import time
Base = automap_base()
engine = create_engine('sqlite:///./data/FPA_FOD_20170508.sqlite')
Base.metadata.create_all(engine)
session = Session(engine)
# Store data in dataframe
df = pd.read_sql('SELECT fire_year,fire_name, fips_name, fire_size, stat_cause_descr, latitude, longitude, fips_code, DISCOVERY_DATE, CONT_DATE FROM Fires WHERE state == "CA" AND fire_year >= 2010 and fire_year <= 2014 and fire_size > 1000 and county <> "none"', engine)
merge_df = df.rename(index=str,columns={"FIRE_YEAR":"Fire Year","FIRE_NAME":"Fire Name","FIRE_SIZE":"Acres Burned",
"STAT_CAUSE_DESCR":"Fire Cause","LATITUDE":"Latitude","LONGITUDE":"Longitude",
"FIPS_CODE":"FIPS Code","FIPS_NAME":"County","DISCOVERY_DATE":"Start Date",
"CONT_DATE":"Containment Date"})
merge_df = merge_df[["Fire Year","Fire Name","Acres Burned","Fire Cause","Latitude","Longitude","FIPS Code","County","Start Date","Containment Date"]]
merge_df["Number of Days"] = ""
# Web Scraping
browser = Browser("chrome", executable_path='chromedriver.exe', headless=True)
# 2015 data
url = "https://en.wikipedia.org/wiki/2015_California_wildfires"
url = "https://en.wikipedia.org/wiki/2015_California_wildfires"
df_2015_list = pd.read_html(url)
df_2015_list[1].columns = df_2015_list[1].iloc[0]
df_2015_clean = df_2015_list[1]
df_2015_clean.drop(0,axis=0,inplace=True)
df_2015_clean.reset_index(inplace=True)
df_2015_clean.drop(["index","Notes","Ref","Km2"],axis=1,inplace=True)
df_2015_clean["Fire Year"] = 2015
df_2015_clean = df_2015_clean.rename(index=str,columns={"Name":"Fire Name","Acres":"Acres Burned"})
df_2015_clean["Fire Cause"] = ""
df_2015_clean["Latitude"] = ""
df_2015_clean["Longitude"] = ""
df_2015_clean["FIPS Code"] = ""
df_2015_clean["Number of Days"] = (pd.to_datetime(df_2015_clean["Containment Date"]) - pd.to_datetime(df_2015_clean["Start Date"])).dt.days
# 2016 data
url = "https://en.wikipedia.org/wiki/2016_California_wildfires"
df_2016_list = pd.read_html(url)
df_2016_list[1].columns = df_2016_list[1].iloc[0]
df_2016_clean = df_2016_list[1]
df_2016_clean.drop(0,axis=0,inplace=True)
df_2016_clean.reset_index(inplace=True)
df_2016_clean.drop(["index","Notes","Ref"],axis=1,inplace=True)
df_2016_clean["Fire Year"] = 2016
df_2016_clean = df_2016_clean.rename(index=str,columns={"Name":"Fire Name","Acres":"Acres Burned"})
df_2016_clean["Fire Cause"] = ""
df_2016_clean["Latitude"] = ""
df_2016_clean["Longitude"] = ""
df_2016_clean["FIPS Code"] = ""
df_2016_clean["Number of Days"] = (pd.to_datetime(df_2016_clean["Containment Date"]) - | pd.to_datetime(df_2016_clean["Start Date"]) | pandas.to_datetime |
import functools
import inspect
import os
from functools import singledispatch
from typing import Callable, Collection, Iterable, List, Union
import joblib
import numpy as np
import pandas as pd
import requests
from IPython.display import display
from numpy import ndarray
from pandas.api.types import (
is_categorical_dtype,
is_float,
is_hashable,
is_integer,
is_list_like,
)
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from tqdm.notebook import tqdm
from sklearn.preprocessing import FunctionTransformer
from sklearn.utils import check_consistent_length, compute_sample_weight, deprecated
from ndg_tools._validation import _check_1d
from ndg_tools.typing import FrameOrSeries, ArrayLike
from fuzzywuzzy.process import dedupe, extractOne
from fuzzywuzzy import fuzz
def get_columns(data: DataFrame, subset: Union[str, Iterable[str]]):
if subset is None:
pass
elif isinstance(subset, str):
data = data.loc[:, [subset]].copy()
elif isinstance(subset, Iterable):
data = data.loc[:, list(subset)].copy()
else:
raise TypeError(
f"Expected str or iterable of str, got {type(subset).__name__}."
)
return data
def numeric_cols(data: pd.DataFrame) -> list:
"""Returns a list of all numeric column names.
Parameters
----------
data : DataFrame
DataFrame to get column names from.
Returns
-------
list
All and only the numeric column names.
"""
return data.select_dtypes("number").columns.to_list()
def true_numeric_cols(data: pd.DataFrame, min_unique=3) -> list:
"""Returns numeric columns with at least `min_unique` unique values.
Parameters
----------
data : DataFrame
DataFrame to get column names from.
Returns
-------
list
Numeric column names.
"""
num = data.select_dtypes("number")
return num.columns[min_unique <= num.nunique()].to_list()
def hashable_cols(data: pd.DataFrame) -> list:
valid_idx = data.apply(lambda x: x.first_valid_index() or x.index[0])
test_row = data.loc[valid_idx].fillna(method="bfill").iloc[0]
hashable = data.columns[test_row.map(is_hashable)]
return hashable.to_list()
def cat_cols(data: pd.DataFrame, min_cats: int = None, max_cats: int = None) -> list:
"""Returns a list of categorical column names.
Parameters
----------
data : DataFrame
DataFrame to get column names from.
min_cats : int, optional
Minimum number of categories, by default None.
max_cats : int, optional
Maximum number of categories, by default None.
Returns
-------
list
Categorical column names.
"""
cats = data.select_dtypes("category")
cat_counts = cats.nunique()
if min_cats is None:
min_cats = cat_counts.min()
if max_cats is None:
max_cats = cat_counts.max()
keep = (min_cats <= cat_counts) & (cat_counts <= max_cats)
return cats.columns[keep].to_list()
def multicat_cols(data: pd.DataFrame) -> list:
"""Returns column names of categoricals with 3+ categories.
Parameters
----------
data : DataFrame
DataFrame to get column names from.
Returns
-------
list
Categorical (3+) column names.
"""
cats = data.select_dtypes("category")
return cats.columns[3 <= cats.nunique()].to_list()
def noncat_cols(data: pd.DataFrame) -> list:
"""Returns a list of all non-categorical column names.
Parameters
----------
data : DataFrame
DataFrame to get column names from.
Returns
-------
list
All and only the non-categorical column names.
"""
return data.columns.drop(cat_cols(data)).to_list()
def binary_cols(data: pd.DataFrame) -> list:
"""Returns a list of columns with exactly 2 unique values.
Parameters
----------
data : DataFrame
DataFrame to get column names from.
Returns
-------
list
All and only the binary column names.
"""
return data.columns[data.nunique() == 2].to_list()
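# Illustrative sketch of the column-selector helpers above (the toy frame is an
# assumption, not project data); expected results are noted in comments.
_demo_df = pd.DataFrame(
    {
        "age": [25, 32, 40],                                # numeric, 3 unique values
        "flag": [0, 1, 0],                                  # numeric, binary
        "color": pd.Categorical(["red", "blue", "green"]),  # categorical, 3 categories
    }
)
# numeric_cols(_demo_df)      -> ['age', 'flag']
# true_numeric_cols(_demo_df) -> ['age']    (flag has only 2 unique values)
# binary_cols(_demo_df)       -> ['flag']
# cat_cols(_demo_df) and multicat_cols(_demo_df) -> ['color']
# noncat_cols(_demo_df)       -> ['age', 'flag']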
def get_defaults(func: Callable) -> dict:
"""Returns dict of parameters with their default values, if any.
Parameters
----------
func : Callable
Callable to look up parameters for.
Returns
-------
dict
Parameters with default values, if any.
Raises
------
TypeError
`callable` must be Callable.
"""
if not isinstance(func, Callable):
raise TypeError(f"`callable` must be Callable, not {type(func)}")
params = pd.Series(inspect.signature(func).parameters)
defaults = params.map(lambda x: x.default)
return defaults.to_dict()
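# Minimal sketch of get_defaults() on a hypothetical function (the function and
# its parameters are illustrative assumptions).
def _demo_greet(name, punctuation="!", times=1):
    return (name + punctuation) * times
# get_defaults(_demo_greet) -> {'name': inspect.Parameter.empty,
#                               'punctuation': '!', 'times': 1}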
def get_param_names(func: Callable, include_self=False) -> list:
"""Returns list of parameter names.
Parameters
----------
func : Callable
Callable to look up parameter names for.
Returns
-------
list
List of parameter names.
"""
params = list(inspect.signature(func).parameters.keys())
if "self" in params:
params.remove("self")
return params
def pandas_heatmap(
frame: pd.DataFrame,
subset=None,
na_rep="",
precision=3,
cmap="vlag",
low=0,
high=0,
vmin=None,
vmax=None,
axis=None,
):
"""Style DataFrame as a heatmap."""
table = frame.style.background_gradient(
subset=subset, cmap=cmap, low=low, high=high, vmin=vmin, vmax=vmax, axis=axis
)
table.set_na_rep(na_rep)
table.set_precision(precision)
return table
def filter_pipe(
data: FrameOrSeries,
like: List[str] = None,
regex: List[str] = None,
axis: int = None,
) -> FrameOrSeries:
"""Subset the DataFrame or Series labels with more than one filter at once.
Parameters
----------
data: DataFrame or Series
DataFrame or Series to filter labels on.
like : list of str
Keep labels from axis for which "like in label == True".
regex : list of str
Keep labels from axis for which re.search(regex, label) == True.
axis : {0 or ‘index’, 1 or ‘columns’, None}, default None
The axis to filter on, expressed either as an index (int)
or axis name (str). By default this is the info axis,
'index' for Series, 'columns' for DataFrame.
Returns
-------
Dataframe or Series
Subset of `data`.
"""
if like and regex:
raise ValueError("Cannot pass both `like` and `regex`")
elif like:
if isinstance(like, str):
like = [like]
for exp in like:
data = data.filter(like=exp, axis=axis)
elif regex:
if isinstance(regex, str):
regex = [regex]
for exp in regex:
data = data.filter(regex=exp, axis=axis)
else:
raise ValueError("Must pass either `like` or `regex` but not both")
return data
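# Minimal sketch of filter_pipe() (toy frame assumed): keep only columns whose
# names contain every substring given in `like`.
_demo_wide = pd.DataFrame(columns=["price_usd", "price_eur", "volume_usd"], dtype=float)
_demo_sub = filter_pipe(_demo_wide, like=["price", "usd"], axis=1)
# _demo_sub.columns -> ['price_usd']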
def title(snake_case: str):
"""Format snake case string as title."""
return snake_case.replace("_", " ").strip().title()
def title_mode(data: pd.DataFrame):
"""Return copy of `data` with strings formatted as titles."""
result = data.copy()
result.update(result.select_dtypes("object").applymap(title))
for label, column in result.select_dtypes("category").items():
result[label] = column.cat.rename_categories(title)
if result.columns.dtype == "object":
result.columns = result.columns.map(title)
if result.index.dtype == "object":
result.index = result.index.map(title)
return result
def cartesian(*arrays: ArrayLike) -> np.ndarray:
"""Returns the Cartesian product of some 1d arrays.
Returns
-------
ndarray
Cartesian product.
"""
arrays = list(arrays)
for i, array in enumerate(arrays):
array = np.asarray(array)
arrays[i] = array
_check_1d(array)
return np.array(np.meshgrid(*arrays)).T.reshape(-1, len(arrays))
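# Minimal sketch of cartesian() (toy numeric inputs assumed): each row of the
# result is one (x, y) combination drawn from the two input arrays.
_demo_grid = cartesian([1, 2], [10, 20])
# _demo_grid -> array([[ 1, 10], [ 1, 20], [ 2, 10], [ 2, 20]])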
def broad_corr(frame: pd.DataFrame, other: pd.DataFrame) -> pd.DataFrame:
"""Get correlations between features of one frame with those of another.
Parameters
----------
frame : DataFrame
First DataFrame.
other : DataFrame
Second DataFrame.
Returns
-------
DataFrame
Pearson correlations.
"""
return other.apply(lambda x: frame.corrwith(x))
def swap_index(data: pd.Series) -> pd.Series:
"""Swap index and values.
Parameters
----------
data : Series
Series for swapping index and values.
Returns
-------
Series
Swapped Series.
"""
return pd.Series(data.index, index=data.values, name=data.name, copy=True)
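# Minimal sketch of swap_index() (toy Series assumed): turn a label -> count
# mapping into a count -> label lookup.
_demo_counts = pd.Series([10, 20], index=["a", "b"], name="count")
_demo_lookup = swap_index(_demo_counts)
# _demo_lookup has index [10, 20] and values ['a', 'b']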
def explicit_sort(
data: FrameOrSeries,
*,
order: list,
mode: str = "values",
inplace: bool = False,
**kwargs,
) -> FrameOrSeries:
"""Sort DataFrame or Series values in explicitly specified order.
Parameters
----------
data : FrameOrSeries
Data structure to sort.
order : list
List specifying sort order.
mode : str, optional
Whether to sort 'values' (default) or 'index'.
inplace : bool, optional
Perform operation in place; False by default.
Returns
-------
FrameOrSeries
Sorted data structure or None if `inplace` is set.
"""
order = list(order)
mode = mode.lower()
if mode not in {"values", "index"}:
raise ValueError("`mode` must be 'values' or 'index'")
# Define vectorized key function
get_rank = np.vectorize(lambda x: order.index(x))
# Sort according to mode
if mode == "values":
data = data.sort_values(key=get_rank, inplace=inplace, **kwargs)
else:
data = data.sort_index(key=get_rank, inplace=inplace, **kwargs)
# Return copy or None
return data
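# Minimal sketch of explicit_sort() (toy Series assumed): order values by a
# hand-specified ranking rather than the default lexical sort.
_demo_sizes = pd.Series(["large", "small", "medium"])
_demo_ordered = explicit_sort(_demo_sizes, order=["small", "medium", "large"])
# _demo_ordered.tolist() -> ['small', 'medium', 'large']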
def bitgen(seed: Union[None, int, ArrayLike] = None):
return np.random.default_rng(seed).bit_generator
@singledispatch
def get_func_name(
func: Union[
Callable,
FunctionTransformer,
Collection[Callable],
Collection[FunctionTransformer],
]
) -> Union[str, Collection[str]]:
"""Get function name(s) from function-like objects.
Parameters
----------
func : Callable, FunctionTransformer, collection of
Function-like object(s) to get names of.
Returns
-------
str or collection of
Function name(s).
"""
if hasattr(func, "pyfunc"):
name = get_func_name(func.pyfunc)
elif hasattr(func, "func"):
name = get_func_name(func.func)
elif hasattr(func, "__wrapped__"):
name = get_func_name(func.__wrapped__)
elif isinstance(func, Callable):
name = func.__name__
else:
raise TypeError(
f"Expected Callable or FunctionTransformer but encountered {type(func)}."
)
return name
@get_func_name.register
def _(func: FunctionTransformer) -> str:
return get_func_name(func.func)
@get_func_name.register
def _(func: Series) -> pd.Series:
return func.map(get_func_name)
@get_func_name.register
def _(func: ndarray) -> ndarray:
return flat_map(get_func_name, func)
@get_func_name.register
def _(func: list) -> list:
return [get_func_name(x) for x in func]
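# Minimal sketch of get_func_name() (a numpy ufunc and a FunctionTransformer are
# used purely as illustrative inputs).
_demo_names = get_func_name([np.log1p, FunctionTransformer(np.log1p)])
# _demo_names -> ['log1p', 'log1p']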
@singledispatch
def implode(
data: FrameOrSeries, column: Union[str, List[str]] = None, allow_dups=False
) -> FrameOrSeries:
"""Retract "exploded" DataFrame or Series into container of nested lists.
Parameters
----------
data : DataFrame or Series
Exploded data structure.
Returns
-------
DataFrame or Series (same as input)
Frame with values retracted into list-likes.
"""
raise TypeError(f"Expected DataFrame or Series, got {type(data).__name__}.")
@implode.register
def _(data: Series, column: Union[str, List[str]] = None, allow_dups=False) -> Series:
"""Dispatch for Series."""
if not allow_dups:
data = (
data.reset_index()
.drop_duplicates()
.set_index(data.index.name or "index")
.squeeze()
)
return data.groupby(data.index).agg(lambda x: x.to_list())
@implode.register
def _(
data: DataFrame, columns: Union[str, List[str]] = None, allow_dups=False
) -> DataFrame:
"""Dispatch for DataFrame"""
if columns is None:
raise ValueError("Must pass `columns` if input is DataFrame.")
if isinstance(columns, str):
columns = [columns]
imploded = {x: implode(data.loc[:, x], allow_dups=allow_dups) for x in columns}
data = data.loc[~data.index.duplicated()].copy()
return data.assign(**imploded)
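# Minimal sketch of implode() on an "exploded" Series (toy data assumed): rows
# sharing an index label are gathered back into lists.
_demo_exploded = pd.Series([1, 2, 3, 4], index=["a", "a", "b", "b"], name="vals")
_demo_nested = implode(_demo_exploded, allow_dups=True)
# _demo_nested has index ['a', 'b'] and values [1, 2] and [3, 4]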
@singledispatch
def expand(
data: Union[DataFrame, Series], column: str = None, labels: List[str] = None
) -> DataFrame:
"""Expand a column of length-N list-likes into N columns.
Parameters
----------
data : Series or DataFrame
Series or DataFrame with column to expand.
column : str, optional
Column of length-N list-likes to expand into N columns, by default None.
Only relevant for DataFrame input.
labels : list of str, optional
Labels for new columns (must provide N labels), by default None
Returns
-------
DataFrame
Expanded frame.
"""
# This is the fallback dispatch.
raise TypeError(f"Expected Series or DataFrame, got {type(data)}.")
@expand.register
def _(data: Series, column: str = None, labels: List[str] = None) -> DataFrame:
"""Dispatch for Series. Expands into DataFrame."""
if not data.map(is_list_like).all():
raise ValueError("Elements must all be list-like")
lengths = data.str.len()
if not (lengths == lengths.iloc[0]).all():
raise ValueError("List-likes must all be same length")
col_data = list(zip(*data))
if labels is not None:
if len(labels) != len(col_data):
raise ValueError("Number of `labels` must equal number of new columns")
else:
labels = range(len(col_data))
if data.name is not None:
labels = [f"{data.name}_{x}" for x in labels]
col_data = dict(zip(labels, col_data))
return DataFrame(col_data, index=data.index)
@expand.register
def _(data: DataFrame, column: str = None, labels: List[str] = None) -> DataFrame:
"""Dispatch for DataFrame. Returns DataFrame."""
if data.columns.value_counts()[column] > 1:
raise ValueError("`column` must be unique in DataFrame")
if column is None:
raise ValueError("Must pass `column` if input is DataFrame")
expanded = expand(data.loc[:, column], labels=labels)
insert_at = data.columns.get_loc(column)
data = data.drop(columns=column)
for i, label in enumerate(expanded.columns):
data.insert(
insert_at + i, label, expanded.loc[:, label], allow_duplicates=False
)
return data
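# Minimal sketch of expand() (toy frame assumed): a column of length-2 tuples is
# replaced in place by two new columns.
_demo_pts = pd.DataFrame({"pt": [(1, 2), (3, 4)], "label": ["a", "b"]})
_demo_flat = expand(_demo_pts, column="pt", labels=["x", "y"])
# _demo_flat.columns -> ['pt_x', 'pt_y', 'label']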
def flat_map(func: Callable, arr: np.ndarray, **kwargs):
# Record shape
shape = arr.shape
# Make list
flat = [func(x, **kwargs) for x in arr.flat]
# Construct flat array
arr = np.array(flat, dtype=arr.dtype)
# Reshape in original shape
return arr.reshape(shape)
@singledispatch
def prune_categories(
data: FrameOrSeries,
column: str = None,
cut=None,
qcut=None,
inclusive=True,
show_report=True,
):
raise TypeError(f"`data` must be Series or DataFrame, got {type(data).__name__}.")
@prune_categories.register
def _(
data: Series,
column: str = None,
cut=None,
qcut=None,
inclusive=True,
show_report=True,
):
if column is not None:
raise UserWarning("Param `column` is irrelevant for Series input.")
if cut is not None:
if isinstance(cut, float):
assert 0.0 <= cut <= 1.0
counts = data.value_counts(True)
elif isinstance(cut, int):
assert 0 <= cut <= data.size
counts = data.value_counts()
elif qcut is not None:
assert 0.0 <= qcut <= 1.0
counts = data.value_counts()
cut = counts.quantile(qcut)
else:
raise ValueError("Must provide either `cut` or `qcut`.")
# Slice out categories to keep
keep = counts.loc[counts >= cut if inclusive else counts > cut]
keep = set(keep.index)
data = data.loc[data.isin(keep)].copy()
# Remove unused categories if necessary
if is_categorical_dtype(data):
data = data.cat.remove_unused_categories()
if show_report:
if set(counts.index) == keep:
print("No categories dropped.\n")
else:
report = counts.to_frame("Support")
status = pd.Series(data="dropped", index=counts.index, name="Status")
status[keep] = "retained"
report = pd.merge(status, report, left_index=True, right_index=True)
print(repr(report) + "\n")
return data
@prune_categories.register
def _(
data: DataFrame,
column: str = None,
cut=None,
qcut=None,
inclusive=True,
show_report=True,
):
if column is None:
raise ValueError("Must specify `column` for DataFrame input.")
# Slice out cat variable, reset index to integer range
cats = data.loc[:, column].reset_index(drop=True)
# Eliminate small cats using Series dispatch
cats = prune_categories(
cats,
column=None,
cut=cut,
qcut=qcut,
inclusive=inclusive,
show_report=show_report,
)
# Slice out surviving rows by integer location
data = data.iloc[cats.index].copy()
# Remove unused categories if necessary
if is_categorical_dtype(cats):
data[column] = data.loc[:, column].cat.remove_unused_categories()
return data
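# Minimal sketch of prune_categories() on a Series (toy data assumed): drop
# every category observed fewer than 2 times.
_demo_cats = pd.Series(["a", "a", "b", "b", "c"])
_demo_pruned = prune_categories(_demo_cats, cut=2, show_report=False)
# _demo_pruned.tolist() -> ['a', 'a', 'b', 'b']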
@singledispatch
def dedupe_categories(
data: FrameOrSeries,
column: str = None,
thresh=90,
scorer="token_sort_ratio",
merge=True,
show_report=True,
):
raise TypeError(f"`data` must be Series or DataFrame, got {type(data).__name__}.")
@dedupe_categories.register
def _(
data: Series,
column: str = None,
thresh=90,
scorer="token_sort_ratio",
merge=True,
show_report=True,
):
if column is not None:
raise UserWarning("Param `column` is irrelevant for Series input.")
if callable(scorer):
pass
elif not hasattr(fuzz, scorer):
raise ValueError(f"'{scorer}' is not a recognized scorer.")
else:
scorer = getattr(fuzz, scorer)
unique = set(data)
deduped = set(dedupe(unique, threshold=thresh, scorer=scorer))
dropped = unique - deduped
if merge:
# Recomputes distance to get merge target (wasteful)
repl = {x: extractOne(x, deduped, scorer=scorer)[0] for x in dropped}
data = data.replace(repl)
else:
data = data.loc[~data.isin(dropped)]
if is_categorical_dtype(data):
    data = data.cat.remove_unused_categories()
from numpy import sqrt, mean
from collections import Iterable
from functools import wraps
import six
import pandas as pd
import numpy as np
import re
import warnings
from IPython.display import display
from pandas.tseries.offsets import CustomBusinessDay, Day, BusinessDay
from scipy.stats import mode
class NonMatchingTimezoneError(Exception):
pass
class MaxLossExceededError(Exception):
pass
def rethrow(exception, additional_message):
"""
Re-raise the last exception that was active in the current scope
without losing the stacktrace but adding an additional message.
This is hacky because it has to be compatible with both python 2/3
"""
e = exception
m = additional_message
if not e.args:
e.args = (m,)
else:
e.args = (e.args[0] + m,) + e.args[1:]
raise e
def non_unique_bin_edges_error(func):
"""
Give user a more informative error in case it is not possible
to properly calculate quantiles on the input dataframe (factor)
"""
message = """
An error occurred while computing bins/quantiles on the input provided.
This usually happens when the input contains too many identical
values and they span more than one quantile. The quantiles are chosen
to have the same number of records each, but the same value cannot span
multiple quantiles. Possible workarounds are:
1 - Decrease the number of quantiles
2 - Specify a custom quantiles range, e.g. [0, .50, .75, 1.] to get unequal
number of records per quantile
3 - Use 'bins' option instead of 'quantiles', 'bins' chooses the
buckets to be evenly spaced according to the values themselves, while
'quantiles' forces the buckets to have the same number of records.
4 - for factors with discrete values use the 'bins' option with custom
ranges and create a range for each discrete value
Please see utils.get_clean_factor_and_forward_returns documentation for
full documentation of 'bins' and 'quantiles' options.
"""
def dec(*args, **kwargs):
try:
return func(*args, **kwargs)
except ValueError as e:
if 'Bin edges must be unique' in str(e):
rethrow(e, message)
raise
return dec
@non_unique_bin_edges_error
def quantize_factor(factor_data,
quantiles=5,
bins=None,
by_group=False,
no_raise=False,
zero_aware=False):
"""
Computes period wise factor quantiles.
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to, and
(optionally) the group the asset belongs to.
- See full explanation in utils.get_clean_factor_and_forward_returns
quantiles : int or sequence[float]
Number of equal-sized quantile buckets to use in factor bucketing.
Alternately sequence of quantiles, allowing non-equal-sized buckets
e.g. [0, .10, .5, .90, 1.] or [.05, .5, .95]
Only one of 'quantiles' or 'bins' can be not-None
bins : int or sequence[float]
Number of equal-width (valuewise) bins to use in factor bucketing.
Alternately sequence of bin edges allowing for non-uniform bin width
e.g. [-4, -2, -0.5, 0, 10]
Only one of 'quantiles' or 'bins' can be not-None
by_group : bool, optional
If True, compute quantile buckets separately for each group.
no_raise: bool, optional
If True, no exceptions are thrown and the values for which the
exception would have been thrown are set to np.NaN
zero_aware : bool, optional
If True, compute quantile buckets separately for positive and negative
signal values. This is useful if your signal is centered and zero is
the separation between long and short signals, respectively.
Returns
-------
factor_quantile : pd.Series
Factor quantiles indexed by date and asset.
"""
if not ((quantiles is not None and bins is None) or
(quantiles is None and bins is not None)):
raise ValueError('Either quantiles or bins should be provided')
if zero_aware and not (isinstance(quantiles, int)
or isinstance(bins, int)):
msg = ("zero_aware should only be True when quantiles or bins is an"
" integer")
raise ValueError(msg)
def quantile_calc(x, _quantiles, _bins, _zero_aware, _no_raise):
try:
if _quantiles is not None and _bins is None and not _zero_aware:
return pd.qcut(x, _quantiles, labels=False) + 1
elif _quantiles is not None and _bins is None and _zero_aware:
pos_quantiles = pd.qcut(x[x >= 0], _quantiles // 2,
labels=False) + _quantiles // 2 + 1
neg_quantiles = pd.qcut(x[x < 0], _quantiles // 2,
labels=False) + 1
return pd.concat([pos_quantiles, neg_quantiles]).sort_index()
elif _bins is not None and _quantiles is None and not _zero_aware:
return pd.cut(x, _bins, labels=False) + 1
elif _bins is not None and _quantiles is None and _zero_aware:
pos_bins = pd.cut(x[x >= 0], _bins // 2,
labels=False) + _bins // 2 + 1
neg_bins = pd.cut(x[x < 0], _bins // 2,
labels=False) + 1
return pd.concat([pos_bins, neg_bins]).sort_index()
except Exception as e:
if _no_raise:
return pd.Series(index=x.index)
raise e
grouper = [factor_data.index.get_level_values('date')]
if by_group:
grouper.append('group')
factor_quantile = factor_data.groupby(grouper)['factor'] \
.apply(quantile_calc, quantiles, bins, zero_aware, no_raise)
factor_quantile.name = 'factor_quantile'
return factor_quantile.dropna()
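# Minimal sketch of quantize_factor() (tiny synthetic factor_data assumed):
# one date, four assets, bucketed into two equal-sized quantiles.
_demo_idx = pd.MultiIndex.from_product(
    [pd.to_datetime(["2014-01-01"]), ["A", "B", "C", "D"]], names=["date", "asset"]
)
_demo_factor_data = pd.DataFrame({"factor": [0.1, 0.4, 0.7, 0.9]}, index=_demo_idx)
_demo_quantiles = quantize_factor(_demo_factor_data, quantiles=2)
# per-asset quantile labels -> 1, 1, 2, 2 (Series named 'factor_quantile')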
def infer_trading_calendar(factor_idx, prices_idx):
"""
Infer the trading calendar from factor and price information.
Parameters
----------
factor_idx : pd.DatetimeIndex
The factor datetimes for which we are computing the forward returns
prices_idx : pd.DatetimeIndex
The prices datetimes associated with the factor data
Returns
-------
calendar : pd.DateOffset
"""
full_idx = factor_idx.union(prices_idx)
traded_weekdays = []
holidays = []
days_of_the_week = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
for day, day_str in enumerate(days_of_the_week):
weekday_mask = (full_idx.dayofweek == day)
# drop days of the week that are not traded at all
if not weekday_mask.any():
continue
traded_weekdays.append(day_str)
# look for holidays
used_weekdays = full_idx[weekday_mask].normalize()
all_weekdays = pd.date_range(full_idx.min(), full_idx.max(),
freq=CustomBusinessDay(weekmask=day_str)
).normalize()
_holidays = all_weekdays.difference(used_weekdays)
_holidays = [timestamp.date() for timestamp in _holidays]
holidays.extend(_holidays)
traded_weekdays = ' '.join(traded_weekdays)
return CustomBusinessDay(weekmask=traded_weekdays, holidays=holidays)
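# Minimal sketch of infer_trading_calendar() (toy weekday-only indexes assumed):
# contiguous business-day indexes yield a Mon-Fri calendar with no holidays.
_demo_factor_idx = pd.date_range("2014-01-06", "2014-01-10", freq="B")
_demo_prices_idx = pd.date_range("2014-01-06", "2014-01-17", freq="B")
_demo_cal = infer_trading_calendar(_demo_factor_idx, _demo_prices_idx)
# _demo_cal -> CustomBusinessDay with weekmask 'Mon Tue Wed Thu Fri'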
def compute_forward_returns(factor,
prices,
periods=(1, 5, 10),
filter_zscore=None,
cumulative_returns=True):
"""
Finds the N period forward returns (as percent change) for each asset
provided.
Parameters
----------
factor : pd.Series - MultiIndex
A MultiIndex Series indexed by timestamp (level 0) and asset
(level 1), containing the values for a single alpha factor.
- See full explanation in utils.get_clean_factor_and_forward_returns
prices : pd.DataFrame
Pricing data to use in forward price calculation.
Assets as columns, dates as index. Pricing data must
span the factor analysis time period plus an additional buffer window
that is greater than the maximum number of expected periods
in the forward returns calculations.
periods : sequence[int]
periods to compute forward returns on.
filter_zscore : int or float, optional
Sets forward returns greater than X standard deviations
from the mean to nan. Set it to 'None' to avoid filtering.
Caution: this outlier filtering incorporates lookahead bias.
cumulative_returns : bool, optional
If True, forward returns columns will contain cumulative returns.
Setting this to False is useful if you want to analyze how predictive
a factor is for a single forward day.
Returns
-------
forward_returns : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by timestamp (level 0) and asset
(level 1), containing the forward returns for assets.
Forward returns column names follow the format accepted by
pd.Timedelta (e.g. '1D', '30m', '3h15m', '1D1h', etc).
'date' index freq property (forward_returns.index.levels[0].freq)
will be set to a trading calendar (pandas DateOffset) inferred
from the input data (see infer_trading_calendar for more details).
"""
factor_dateindex = factor.index.levels[0]
if factor_dateindex.tz != prices.index.tz:
raise NonMatchingTimezoneError("The timezone of 'factor' is not the "
"same as the timezone of 'prices'. See "
"the pandas methods tz_localize and "
"tz_convert.")
freq = infer_trading_calendar(factor_dateindex, prices.index)
factor_dateindex = factor_dateindex.intersection(prices.index)
if len(factor_dateindex) == 0:
raise ValueError("Factor and prices indices don't match: make sure "
"they have the same convention in terms of datetimes "
"and symbol-names")
# chop prices down to only the assets we care about (= unique assets in
# `factor`). we could modify `prices` in place, but that might confuse
# the caller.
prices = prices.filter(items=factor.index.levels[1])
raw_values_dict = {}
column_list = []
for period in sorted(periods):
if cumulative_returns:
returns = prices.pct_change(period)
else:
returns = prices.pct_change()
forward_returns = \
returns.shift(-period).reindex(factor_dateindex)
if filter_zscore is not None:
mask = abs(
forward_returns - forward_returns.mean()
) > (filter_zscore * forward_returns.std())
forward_returns[mask] = np.nan
#
# Find the period length, which will be the column name. We'll test
# several entries in order to find out the most likely period length
# (in case the user passed inconsistent data)
#
days_diffs = []
for i in range(30):
if i >= len(forward_returns.index):
break
p_idx = prices.index.get_loc(forward_returns.index[i])
if p_idx is None or p_idx < 0 or (
p_idx + period) >= len(prices.index):
continue
start = prices.index[p_idx]
end = prices.index[p_idx + period]
period_len = diff_custom_calendar_timedeltas(start, end, freq)
days_diffs.append(period_len.components.days)
delta_days = period_len.components.days - mode(days_diffs).mode[0]
period_len -= pd.Timedelta(days=delta_days)
label = timedelta_to_string(period_len)
column_list.append(label)
raw_values_dict[label] = np.concatenate(forward_returns.values)
df = pd.DataFrame.from_dict(raw_values_dict)
df.set_index(
pd.MultiIndex.from_product(
[factor_dateindex, prices.columns],
names=['date', 'asset']
),
inplace=True
)
df = df.reindex(factor.index)
# now set the columns correctly
df = df[column_list]
df.index.levels[0].freq = freq
df.index.set_names(['date', 'asset'], inplace=True)
return df
def backshift_returns_series(series, N):
"""Shift a multi-indexed series backwards by N observations in
the first level.
This can be used to convert backward-looking returns into a
forward-returns series.
"""
ix = series.index
dates, sids = ix.levels
date_labels, sid_labels = map(np.array, ix.labels)
# Output date labels will contain the all but the last N dates.
new_dates = dates[:-N]
# Output data will remove the first M rows, where M is the index of the
# last record with one of the first N dates.
cutoff = date_labels.searchsorted(N)
new_date_labels = date_labels[cutoff:] - N
new_sid_labels = sid_labels[cutoff:]
new_values = series.values[cutoff:]
assert new_date_labels[0] == 0
new_index = pd.MultiIndex(
levels=[new_dates, sids],
labels=[new_date_labels, new_sid_labels],
sortorder=1,
names=ix.names,
)
return pd.Series(data=new_values, index=new_index)
def demean_forward_returns(factor_data, grouper=None):
"""
Convert forward returns to returns relative to mean
period wise all-universe or group returns.
Group-wise normalization incorporates the assumption of a
group neutral portfolio constraint and thus allows the
factor to be evaluated across groups.
For example, if AAPL 5 period return is 0.1% and mean 5 period
return for the Technology stocks in our universe was 0.5% in the
same period, the group adjusted 5 period return for AAPL in this
period is -0.4%.
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
Forward returns indexed by date and asset.
Separate column for each forward return window.
grouper : list
If True, demean according to group.
Returns
-------
adjusted_forward_returns : pd.DataFrame - MultiIndex
DataFrame of the same format as the input, but with each
security's returns normalized by group.
"""
factor_data = factor_data.copy()
# from IPython.display import display
# display(factor_data)
if not grouper:
grouper = factor_data.index.get_level_values('date')
cols = get_forward_returns_columns(factor_data.columns)
# factor_data[cols] = factor_data.groupby(grouper)[cols] \
# .transform(lambda x: x - x.mean())
factor_data[cols] = factor_data.groupby(
grouper, as_index=False
)[cols.append(pd.Index(['weights']))].apply(
lambda x: x[cols].subtract(
np.average(x[cols], axis=0,
weights=x['weights'].fillna(0.0).values),
axis=1
)
)
return factor_data
def print_table(table, name=None, fmt=None):
"""
Pretty print a pandas DataFrame.
Uses HTML output if running inside Jupyter Notebook, otherwise
formatted text output.
Parameters
----------
table : pd.Series or pd.DataFrame
Table to pretty-print.
name : str, optional
Table name to display in upper left corner.
fmt : str, optional
Formatter to use for displaying table elements.
E.g. '{0:.2f}%' for displaying 100 as '100.00%'.
Restores original setting after displaying.
"""
if isinstance(table, pd.Series):
table = pd.DataFrame(table)
if isinstance(table, pd.DataFrame):
table.columns.name = name
prev_option = pd.get_option('display.float_format')
if fmt is not None:
pd.set_option('display.float_format', lambda x: fmt.format(x))
display(table)
if fmt is not None:
pd.set_option('display.float_format', prev_option)
def get_clean_factor(factor,
forward_returns,
groupby=None,
binning_by_group=False,
quantiles=5,
bins=None,
groupby_labels=None,
max_loss=0.35,
zero_aware=False):
"""
Formats the factor data, forward return data, and group mappings into a
DataFrame that contains aligned MultiIndex indices of timestamp and asset.
The returned data will be formatted to be suitable for Alphalens functions.
It is safe to skip a call to this function and still make use of Alphalens
functionalities as long as the factor data conforms to the format returned
from get_clean_factor_and_forward_returns and documented here
Parameters
----------
factor : pd.Series - MultiIndex
A MultiIndex Series indexed by timestamp (level 0) and asset
(level 1), containing the values for a single alpha factor.
::
-----------------------------------
date | asset |
-----------------------------------
| AAPL | 0.5
-----------------------
| BA | -1.1
-----------------------
2014-01-01 | CMG | 1.7
-----------------------
| DAL | -0.1
-----------------------
| LULU | 2.7
-----------------------
forward_returns : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by timestamp (level 0) and asset
(level 1), containing the forward returns for assets.
Forward returns column names must follow the format accepted by
pd.Timedelta (e.g. '1D', '30m', '3h15m', '1D1h', etc).
'date' index freq property must be set to a trading calendar
(pandas DateOffset), see infer_trading_calendar for more details.
This information is currently used only in cumulative returns
computation
::
---------------------------------------
| | 1D | 5D | 10D
---------------------------------------
date | asset | | |
---------------------------------------
| AAPL | 0.09|-0.01|-0.079
----------------------------
| BA | 0.02| 0.06| 0.020
----------------------------
2014-01-01 | CMG | 0.03| 0.09| 0.036
----------------------------
| DAL |-0.02|-0.06|-0.029
----------------------------
| LULU |-0.03| 0.05|-0.009
----------------------------
groupby : pd.Series - MultiIndex or dict
Either A MultiIndex Series indexed by date and asset,
containing the period wise group codes for each asset, or
a dict of asset to group mappings. If a dict is passed,
it is assumed that group mappings are unchanged for the
entire time period of the passed factor data.
binning_by_group : bool
If True, compute quantile buckets separately for each group.
This is useful when the range of factor values varies considerably
across groups, so it is wise to make the binning group relative.
You should probably enable this if the factor is intended
to be analyzed for a group neutral portfolio
quantiles : int or sequence[float]
Number of equal-sized quantile buckets to use in factor bucketing.
Alternately sequence of quantiles, allowing non-equal-sized buckets
e.g. [0, .10, .5, .90, 1.] or [.05, .5, .95]
Only one of 'quantiles' or 'bins' can be not-None
bins : int or sequence[float]
Number of equal-width (valuewise) bins to use in factor bucketing.
Alternately sequence of bin edges allowing for non-uniform bin width
e.g. [-4, -2, -0.5, 0, 10]
Chooses the buckets to be evenly spaced according to the values
themselves. Useful when the factor contains discrete values.
Only one of 'quantiles' or 'bins' can be not-None
groupby_labels : dict
A dictionary keyed by group code with values corresponding
to the display name for each group.
max_loss : float, optional
Maximum percentage (0.00 to 1.00) of factor data dropping allowed,
computed comparing the number of items in the input factor index and
the number of items in the output DataFrame index.
Factor data can be partially dropped due to being flawed itself
(e.g. NaNs), not having provided enough price data to compute
forward returns for all factor values, or because it is not possible
to perform binning.
Set max_loss=0 to avoid Exceptions suppression.
zero_aware : bool, optional
If True, compute quantile buckets separately for positive and negative
signal values. This is useful if your signal is centered and zero is
the separation between long and short signals, respectively.
'quantiles' is None.
Returns
-------
merged_data : pd.DataFrame - MultiIndex
A MultiIndex Series indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to, and
(optionally) the group the asset belongs to.
- forward returns column names follow the format accepted by
pd.Timedelta (e.g. '1D', '30m', '3h15m', '1D1h', etc)
- 'date' index freq property (merged_data.index.levels[0].freq) is the
same as that of the input forward returns data. This is currently
used only in cumulative returns computation
::
-------------------------------------------------------------------
| | 1D | 5D | 10D |factor|group|factor_quantile
-------------------------------------------------------------------
date | asset | | | | | |
-------------------------------------------------------------------
| AAPL | 0.09|-0.01|-0.079| 0.5 | G1 | 3
--------------------------------------------------------
| BA | 0.02| 0.06| 0.020| -1.1 | G2 | 5
--------------------------------------------------------
2014-01-01 | CMG | 0.03| 0.09| 0.036| 1.7 | G2 | 1
--------------------------------------------------------
| DAL |-0.02|-0.06|-0.029| -0.1 | G3 | 5
--------------------------------------------------------
| LULU |-0.03| 0.05|-0.009| 2.7 | G1 | 2
--------------------------------------------------------
"""
initial_amount = float(len(factor.index))
factor_copy = factor.copy()
factor_copy.index = factor_copy.index.rename(['date', 'asset'])
factor_copy = factor_copy[np.isfinite(factor_copy)]
merged_data = forward_returns.copy()
merged_data['factor'] = factor_copy
if groupby is not None:
if isinstance(groupby, dict):
diff = set(factor_copy.index.get_level_values(
'asset')) - set(groupby.keys())
if len(diff) > 0:
raise KeyError(
"Assets {} not in group mapping".format(
list(diff)))
ss = pd.Series(groupby)
groupby = pd.Series(index=factor_copy.index,
data=ss[factor_copy.index.get_level_values(
'asset')].values)
if groupby_labels is not None:
diff = set(groupby.values) - set(groupby_labels.keys())
if len(diff) > 0:
raise KeyError(
"groups {} not in passed group names".format(
list(diff)))
sn = pd.Series(groupby_labels)
groupby = pd.Series(index=groupby.index,
data=sn[groupby.values].values)
merged_data['group'] = groupby.astype('category')
merged_data = merged_data.dropna()
fwdret_amount = float(len(merged_data.index))
no_raise = False if max_loss == 0 else True
quantile_data = quantize_factor(
merged_data,
quantiles,
bins,
binning_by_group,
no_raise,
zero_aware
)
merged_data['factor_quantile'] = quantile_data
merged_data = merged_data.dropna()
binning_amount = float(len(merged_data.index))
tot_loss = (initial_amount - binning_amount) / initial_amount
fwdret_loss = (initial_amount - fwdret_amount) / initial_amount
bin_loss = tot_loss - fwdret_loss
print("Dropped %.1f%% entries from factor data: %.1f%% in forward "
"returns computation and %.1f%% in binning phase "
"(set max_loss=0 to see potentially suppressed Exceptions)." %
(tot_loss * 100, fwdret_loss * 100, bin_loss * 100))
if tot_loss > max_loss:
message = ("max_loss (%.1f%%) exceeded %.1f%%, consider increasing it."
% (max_loss * 100, tot_loss * 100))
raise MaxLossExceededError(message)
else:
print("max_loss is %.1f%%, not exceeded: OK!" % (max_loss * 100))
return merged_data
def get_clean_factor_and_forward_returns(factor,
prices,
groupby=None,
binning_by_group=False,
quantiles=5,
bins=None,
periods=(1, 5, 10),
filter_zscore=20,
groupby_labels=None,
max_loss=0.35,
zero_aware=False,
cumulative_returns=True):
"""
Formats the factor data, pricing data, and group mappings into a DataFrame
that contains aligned MultiIndex indices of timestamp and asset. The
returned data will be formatted to be suitable for Alphalens functions.
It is safe to skip a call to this function and still make use of Alphalens
functionalities as long as the factor data conforms to the format returned
from get_clean_factor_and_forward_returns and documented here
Parameters
----------
factor : pd.Series - MultiIndex
A MultiIndex Series indexed by timestamp (level 0) and asset
(level 1), containing the values for a single alpha factor.
::
-----------------------------------
date | asset |
-----------------------------------
| AAPL | 0.5
-----------------------
| BA | -1.1
-----------------------
2014-01-01 | CMG | 1.7
-----------------------
| DAL | -0.1
-----------------------
| LULU | 2.7
-----------------------
prices : pd.DataFrame
A wide form Pandas DataFrame indexed by timestamp with assets
in the columns.
Pricing data must span the factor analysis time period plus an
additional buffer window that is greater than the maximum number
of expected periods in the forward returns calculations.
It is important to pass the correct pricing data depending on when
your signal was generated, so as to avoid lookahead bias or delayed
calculations.
'Prices' must contain at least an entry for each timestamp/asset
combination in 'factor'. This entry should reflect the buy price
for the assets and usually it is the next available price after the
factor is computed but it can also be a later price if the factor is
meant to be traded later (e.g. if the factor is computed at market
open but traded 1 hour after market open the price information should
be 1 hour after market open).
'Prices' must also contain entries for timestamps following each
timestamp/asset combination in 'factor', as many more timestamps
as the maximum value in 'periods'. The asset price after 'period'
timestamps will be considered the sell price for that asset when
computing 'period' forward returns.
::
----------------------------------------------------
| AAPL | BA | CMG | DAL | LULU |
----------------------------------------------------
Date | | | | | |
----------------------------------------------------
2014-01-01 |605.12| 24.58| 11.72| 54.43 | 37.14 |
----------------------------------------------------
2014-01-02 |604.35| 22.23| 12.21| 52.78 | 33.63 |
----------------------------------------------------
2014-01-03 |607.94| 21.68| 14.36| 53.94 | 29.37 |
----------------------------------------------------
groupby : pd.Series - MultiIndex or dict
Either A MultiIndex Series indexed by date and asset,
containing the period wise group codes for each asset, or
a dict of asset to group mappings. If a dict is passed,
it is assumed that group mappings are unchanged for the
entire time period of the passed factor data.
binning_by_group : bool
If True, compute quantile buckets separately for each group.
This is useful when the range of factor values varies considerably
across groups, so it is wise to make the binning group relative.
You should probably enable this if the factor is intended
to be analyzed for a group neutral portfolio
quantiles : int or sequence[float]
Number of equal-sized quantile buckets to use in factor bucketing.
Alternately sequence of quantiles, allowing non-equal-sized buckets
e.g. [0, .10, .5, .90, 1.] or [.05, .5, .95]
Only one of 'quantiles' or 'bins' can be not-None
bins : int or sequence[float]
Number of equal-width (valuewise) bins to use in factor bucketing.
Alternately sequence of bin edges allowing for non-uniform bin width
e.g. [-4, -2, -0.5, 0, 10]
Chooses the buckets to be evenly spaced according to the values
themselves. Useful when the factor contains discrete values.
Only one of 'quantiles' or 'bins' can be not-None
periods : sequence[int]
periods to compute forward returns on.
filter_zscore : int or float, optional
Sets forward returns greater than X standard deviations
from the mean to nan. Set it to 'None' to avoid filtering.
Caution: this outlier filtering incorporates lookahead bias.
groupby_labels : dict
A dictionary keyed by group code with values corresponding
to the display name for each group.
max_loss : float, optional
Maximum percentage (0.00 to 1.00) of factor data dropping allowed,
computed comparing the number of items in the input factor index and
the number of items in the output DataFrame index.
Factor data can be partially dropped due to being flawed itself
(e.g. NaNs), not having provided enough price data to compute
forward returns for all factor values, or because it is not possible
to perform binning.
Set max_loss=0 to avoid Exceptions suppression.
zero_aware : bool, optional
If True, compute quantile buckets separately for positive and negative
signal values. This is useful if your signal is centered and zero is
the separation between long and short signals, respectively.
cumulative_returns : bool, optional
If True, forward returns columns will contain cumulative returns.
Setting this to False is useful if you want to analyze how predictive
a factor is for a single forward day.
Returns
-------
merged_data : pd.DataFrame - MultiIndex
A MultiIndex Series indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to, and
(optionally) the group the asset belongs to.
- forward returns column names follow the format accepted by
pd.Timedelta (e.g. '1D', '30m', '3h15m', '1D1h', etc)
- 'date' index freq property (merged_data.index.levels[0].freq) will be
set to a trading calendar (pandas DateOffset) inferred from the input
data (see infer_trading_calendar for more details). This is currently
used only in cumulative returns computation
::
-------------------------------------------------------------------
| | 1D | 5D | 10D |factor|group|factor_quantile
-------------------------------------------------------------------
date | asset | | | | | |
-------------------------------------------------------------------
| AAPL | 0.09|-0.01|-0.079| 0.5 | G1 | 3
--------------------------------------------------------
| BA | 0.02| 0.06| 0.020| -1.1 | G2 | 5
--------------------------------------------------------
2014-01-01 | CMG | 0.03| 0.09| 0.036| 1.7 | G2 | 1
--------------------------------------------------------
| DAL |-0.02|-0.06|-0.029| -0.1 | G3 | 5
--------------------------------------------------------
| LULU |-0.03| 0.05|-0.009| 2.7 | G1 | 2
--------------------------------------------------------
See Also
--------
utils.get_clean_factor
For use when forward returns are already available.
"""
forward_returns = compute_forward_returns(
factor,
prices,
periods,
filter_zscore,
cumulative_returns,
)
factor_data = get_clean_factor(factor, forward_returns, groupby=groupby,
groupby_labels=groupby_labels,
quantiles=quantiles, bins=bins,
binning_by_group=binning_by_group,
max_loss=max_loss, zero_aware=zero_aware)
return factor_data
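# Illustrative usage sketch (added for exposition, not part of the original source).
# It assumes the enclosing function above is named get_clean_factor_and_forward_returns,
# as in alphalens, and that `my_factor` / `my_prices` are shaped as the docstring describes.
def _example_clean_factor_usage(my_factor, my_prices):
    # Quintile bucketing with 1-, 5- and 10-day forward returns.
    return get_clean_factor_and_forward_returns(
        my_factor,
        my_prices,
        quantiles=5,
        periods=(1, 5, 10),
    )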
def rate_of_returns(ret, period):
return ((np.nansum(ret) + 1)**(1. / period)) - 1
def rate_of_return(period_ret, base_period):
"""
Convert returns to 'one_period_len' rate of returns: that is the value the
returns would have every 'one_period_len' if they had grown at a steady
rate
Parameters
----------
period_ret: pd.DataFrame
DataFrame containing returns values with column headings representing
the return period.
base_period: string
The base period length used in the conversion
It must follow pandas.Timedelta constructor format (e.g. '1 days',
'1D', '30m', '3h', '1D1h', etc)
Returns
-------
pd.DataFrame
DataFrame in same format as input but with 'one_period_len' rate of
returns values.
"""
period_len = period_ret.name
conversion_factor = (pd.Timedelta(base_period) /
pd.Timedelta(period_len))
return period_ret.add(1).pow(conversion_factor).sub(1)
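# Illustrative sketch (not in the original source): convert a column of 5-day returns
# into the equivalent steady 1-day rate. The sample values are made up.
def _example_rate_of_return():
    five_day = pd.Series([0.10, -0.02], name='5D')
    return rate_of_return(five_day, '1D')  # ~0.0192 and ~-0.0040 per day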
def std_conversion(period_std, base_period):
"""
one_period_len standard deviation (or standard error) approximation
Parameters
----------
period_std: pd.DataFrame
DataFrame containing standard deviation or standard error values
with column headings representing the return period.
base_period: string
The base period length used in the conversion
It must follow pandas.Timedelta constructor format (e.g. '1 days',
'1D', '30m', '3h', '1D1h', etc)
Returns
-------
pd.DataFrame
DataFrame in same format as input but with one-period
standard deviation/error values.
"""
period_len = period_std.name
conversion_factor = (pd.Timedelta(period_len) /
pd.Timedelta(base_period))
return period_std / np.sqrt(conversion_factor)
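# Illustrative sketch (not in the original source): scale a 5-day standard error down
# to its 1-day equivalent, i.e. divide by sqrt(5).
def _example_std_conversion():
    five_day_std = pd.Series([0.05, 0.02], name='5D')
    return std_conversion(five_day_std, '1D')  # ~0.0224 and ~0.0089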
def get_forward_returns_columns(columns):
"""
Return only the forward-returns columns from 'columns'.
"""
pattern = re.compile(r"^(return\(.+\))$", re.IGNORECASE)
valid_columns = [(pattern.match(col) is not None) for col in columns]
return columns[valid_columns]
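# Illustrative sketch (not in the original source): only columns matching the
# 'return(...)' naming pattern are kept.
def _example_get_forward_returns_columns():
    cols = pd.Index(['return(1D)', 'return(5D)', 'factor', 'group'])
    return get_forward_returns_columns(cols)  # Index(['return(1D)', 'return(5D)'])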
def timedelta_to_string(timedelta):
"""
Utility that converts a pandas.Timedelta to a string representation
compatible with pandas.Timedelta constructor format
Parameters
----------
timedelta: pd.Timedelta
Returns
-------
string
string representation of 'timedelta'
"""
c = timedelta.components
format = ''
if c.days != 0:
format += '%dD' % c.days
if c.hours > 0:
format += '%dh' % c.hours
if c.minutes > 0:
format += '%dm' % c.minutes
if c.seconds > 0:
format += '%ds' % c.seconds
if c.milliseconds > 0:
format += '%dms' % c.milliseconds
if c.microseconds > 0:
format += '%dus' % c.microseconds
if c.nanoseconds > 0:
format += '%dns' % c.nanoseconds
return format
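# Illustrative sketch (not in the original source): the string produced is compatible
# with the pd.Timedelta constructor, so it can round-trip.
def _example_timedelta_to_string():
    return timedelta_to_string(pd.Timedelta('1 days 02:30:00'))  # '1D2h30m'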
def timedelta_strings_to_integers(sequence):
"""
Converts pandas string representations of timedeltas into integers of days.
Parameters
----------
sequence : iterable
List or array of timedelta string representations, e.g. ['1D', '5D'].
Returns
-------
sequence : list
Integer days corresponding to the input sequence, e.g. [1, 5].
"""
return list(map(lambda x: pd.Timedelta(x).days, sequence))
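# Illustrative sketch (not in the original source): note that intraday strings shorter
# than one day truncate to 0 because only the .days component is kept.
def _example_timedelta_strings_to_integers():
    return timedelta_strings_to_integers(['1D', '5D', '3h'])  # [1, 5, 0]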
def add_custom_calendar_timedelta(input, timedelta, freq):
"""
Add timedelta to 'input' taking into consideration custom frequency, which
is used to deal with custom calendars, such as a trading calendar
Parameters
----------
input : pd.DatetimeIndex or pd.Timestamp
timedelta : pd.Timedelta
freq : pd.DateOffset (CustomBusinessDay, Day or BusinessDay)
Returns
-------
pd.DatetimeIndex or pd.Timestamp
input + timedelta
"""
if not isinstance(freq, (Day, BusinessDay, CustomBusinessDay)):
raise ValueError("freq must be Day, BDay or CustomBusinessDay")
days = timedelta.components.days
offset = timedelta - pd.Timedelta(days=days)
return input + freq * days + offset
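# Illustrative sketch (not in the original source): shifting a Friday by a 2-day
# timedelta under a BusinessDay frequency lands on the following Tuesday. BusinessDay
# is assumed to be imported at module level (it is referenced in the check above).
def _example_add_custom_calendar_timedelta():
    friday = pd.Timestamp('2016-01-08')  # a Friday
    return add_custom_calendar_timedelta(friday, pd.Timedelta('2D'), BusinessDay())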
def diff_custom_calendar_timedeltas(start, end, freq):
"""
Compute the difference between two timestamps, taking into consideration a
custom frequency, which is used to deal with custom calendars such as a
trading calendar
Parameters
----------
start : pd.Timestamp
end : pd.Timestamp
freq : pd.DateOffset (CustomBusinessDay, Day or BusinessDay); see infer_trading_calendar
Returns
-------
pd.Timedelta
end - start
"""
if not isinstance(freq, (Day, BusinessDay, CustomBusinessDay)):
raise ValueError("freq must be Day, BusinessDay or CustomBusinessDay")
weekmask = getattr(freq, 'weekmask', None)
holidays = getattr(freq, 'holidays', None)
if weekmask is None and holidays is None:
if isinstance(freq, Day):
weekmask = 'Mon Tue Wed Thu Fri Sat Sun'
holidays = []
elif isinstance(freq, BusinessDay):
weekmask = 'Mon Tue Wed Thu Fri'
holidays = []
if weekmask is not None and holidays is not None:
# we prefer this method as it is faster
actual_days = np.busday_count(np.array(start).astype('datetime64[D]'),
np.array(end).astype('datetime64[D]'),
weekmask, holidays)
else:
# default, it is slow
actual_days = | pd.date_range(start, end, freq=freq) | pandas.date_range |
# -*- coding: utf-8 -*-
from nevergrad.optimization import optimizerlib
from .benchmark_functions import REGISTRY_OF_FUNCS as func_registry
from nevergrad.optimization import registry as algo_registry
import random
import math
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from timeit import default_timer as timer
from datetime import date
import io
import os
import sys
from contextlib import redirect_stdout
from functools import reduce
from cycler import cycler
#Setting plot params
use_tex = False #use True if LaTeX compiler installed locally
#Preferably use before generating the final plots since it is quite slow
plt.rcParams['figure.figsize'] = (12, 8)
plt.rc('text', usetex=use_tex)
plt.rc('font', family='serif')
plt.rc('font', size=12)
plt.rcParams['figure.dpi'] = 100
plt.rcParams['savefig.dpi'] = 600
def noisify(val, eps):
"""
Add a Gaussian White noise to function value
"""
val_noisy = (1 + random.gauss(mu=0,sigma=eps)) * (1-val)
return val_noisy
def logify(val_noisy):
"""
Apply Log10 to function value
If the value is negative, return it unchanged
If the value is greater than or equal to 1, return log10(val)
If 0 <= val < 1, return log10(max(delta, val)) to avoid taking log of zero
MIGHT BE A SOURCE OF ARTEFACT. TO BE INVESTIGATED
"""
delta = 1e-20
if val_noisy < 0:
return val_noisy
elif val_noisy >= 1:
return math.log10(val_noisy)
else:
return math.log10(max(delta, val_noisy))
def scalify(x):
"""
Scale the value using arctan to [0,1]
"""
return (np.arctan(x)+(np.pi)/2)/(np.pi)
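# Illustrative sketch (not in the original source): scalify squashes any real value
# into (0, 1); zero maps to 0.5 and large magnitudes approach the interval ends.
def _example_scalify():
    return [scalify(x) for x in (-1e6, 0.0, 1e6)]  # ~[3.2e-07, 0.5, 0.9999997]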
def makeDF(arr_results, loc=0):
"""
Make a DataFrame from the numpy array of dicts
If part of the array of dicts has already been converted to a DataFrame,
then start the conversion from the loc position onwards
"""
list_for_DF = []
count = 0
for result in arr_results:
if not count < loc:
exp_data = result.pop('exp_data')
DF_evals = pd.DataFrame(exp_data)
result['exp_data'] = DF_evals
result['n_evals']=len(exp_data)
result['f_min']=exp_data[-1]['f_min']
list_for_DF.append(result)
count +=1
results_DF = pd.DataFrame(list_for_DF)
return results_DF
def isin_row(master, lookup, cols):
"""
isin_row takes a master DataFrame, a lookup DataFrame and a list of columns, and
checks row-wise whether each row of the master matches the lookup row on those columns.
Uses an extension of the built-in pandas isin() function to perform a row-wise isin,
checking every row in the DataFrame for the given columns.
"""
return reduce(lambda master, lookup:master&lookup, [master[f].isin(lookup[f]) for f in cols])
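# Illustrative sketch (not in the original source): check which rows of `master`
# match the single lookup row on the chosen columns.
def _example_isin_row():
    master = pd.DataFrame({'algo': ['CMA', 'PSO'], 'dim': [2, 3]})
    lookup_row = pd.DataFrame({'algo': ['PSO'], 'dim': [3]})
    return isin_row(master, lookup_row, cols=['algo', 'dim'])  # [False, True]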
def lookup(algo, func, dim, eps, log_flag, n_evals, starter, master):
"""
Use isin_row() to check whether the row formed by the passed parameters exists in the dataframe.
Chief application is to check whether the current experiment iteration has already been performed.
DO NOT pass the full dataframe, drop the exp_data and min_params columns
"""
check_data = pd.DataFrame({'algo': algo,
'dim': dim,
'func': func,
'log': log_flag,
'n_evals': n_evals,
'noise_level': eps,
'starter': starter
}, index = [0])
check_result = False
if True in list(isin_row(master, check_data, cols = ['algo',
'dim',
'func',
'log',
'n_evals',
'noise_level',
'starter'])):
check_result = True
return check_result
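# Illustrative sketch (not in the original source): query whether one experiment
# configuration is already present in a previously saved results frame. As required
# above, `master_results` must have the exp_data / min_params columns dropped.
def _example_lookup(master_results):
    return lookup('DiagonalCMA', 'rosenbrock', dim=2, eps=0.3, log_flag=True,
                  n_evals=1000, starter=0.0, master=master_results)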
def cust_optim(optim_algo, obj_func, dim, eps, log_flag, starter, budget):
"""
Use one of the optimizers from Nevergrad in the ask and tell interface
Log every function call
Return a list of dicts with the recommended min val parameters and the log of function calls
The logified value (if done) is sent only to the optimizer, all other data logging happens with
the noisy value - POSSIBLE SOURCE OF ARTEFACT, CHECK REQD. DISABLED FOR NOW
The f_min stores the current lowest attained value at the time of that evaluation number
"""
np.random.seed(0)
random.seed(0)
optimizer = optim_algo(instrumentation=dim, budget=budget)
evaluations_data = []
f_min = 0.0
initial = [starter]*dim
for i in range(optimizer.budget):
if i==0:
x = initial
dummy_x = optimizer.ask()
params = np.asarray(x)
final_val = scalify(obj_func(params))
f_min = final_val
else:
x = optimizer.ask()
params = np.asarray(*x.args)
value = obj_func(params)
noisy_val = scalify((noisify(value, eps)))
f_min = min(f_min, noisy_val)
final_val = noisy_val
# if(log_flag):
# final_val = logify(noisy_val)
# else:
# final_val = noisy_val
params_candidate = optimizer.create_candidate.from_call(params)
optimizer.tell(params_candidate, final_val)
#format in which function calls get stored
temp_evaluations_data = {'params':params,
'f_val':final_val,
'eval_no':i,
'f_min':f_min}
evaluations_data.append(temp_evaluations_data)
recommendation = optimizer.provide_recommendation()
opt_output = {'params':recommendation,
'f_vals':evaluations_data
}
return opt_output
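# Illustrative sketch (not in the original source): run one optimizer on one benchmark
# function. It assumes the registries imported at the top behave like dicts keyed by
# name; the chosen names, budget and noise level are arbitrary.
def _example_cust_optim():
    optim_algo = algo_registry['DiagonalCMA']
    obj_func = func_registry['rosenbrock']
    return cust_optim(optim_algo, obj_func, dim=2, eps=0.3,
                      log_flag=True, starter=0.0, budget=100)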
def run_exp(algo_list = ['DiagonalCMA'],
func_list = ['rosenbrock'],
dim_list = [2, 3, 5],
eps_list = [0.3],
log_list = [True],
EVAL_BUDGET = 1000,
RESULTS_FILE = "results_df-default.pkl",
file_type = 'pkl',
initial_vals = [0.0],
save_interval = 1800):
"""
Run an expt for a given set of funcs, algos, dimensions, noise-levels etc.
Looks up whether each experiment iteration is already present in the supplied dataframe
Saves a pickled dataframe of the logged function values along with relevant associated data
"""
results_list = [] #to store the data from this run of the expt
try:
#read from supplied file previous run of expts
if file_type == 'hdf':
stored_results = pd.read_hdf(RESULTS_FILE)
else:
stored_results = | pd.read_pickle(RESULTS_FILE) | pandas.read_pickle |
import pandas as pd
#app
from connection import db
def _query_series(db=db):
series = db.series
cursor_series = series.find( { } )
return cursor_series
def _dataframe_series():
cursor_series = _query_series()
df_series = | pd.DataFrame(cursor_series) | pandas.DataFrame |
import glob
import os
import sys
import copy
from joblib import Parallel, delayed
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pyabf
from ipfx import feature_extractor
from ipfx import subthresh_features as subt
print("feature extractor loaded")
from .abf_ipfx_dataframes import _build_full_df, _build_sweepwise_dataframe, save_data_frames
from .loadABF import loadABF
from .patch_utils import plotabf, load_protocols, find_non_zero_range
from .QC import run_qc
default_dict = {'start': 0, 'end': 0, 'filter': 0}
def folder_feature_extract(files, param_dict, plot_sweeps=-1, protocol_name='IC1', para=1):
debugplot = 0
running_lab = ['Trough', 'Peak', 'Max Rise (upstroke)', 'Max decline (downstroke)', 'Width']
dfs = pd.DataFrame()
df_spike_count = pd.DataFrame()
df_running_avg_count = pd.DataFrame()
filelist = glob.glob(files + "/**/*.abf", recursive=True)
temp_df_spike_count = Parallel(n_jobs= para)(delayed(preprocess_abf)(f, copy.deepcopy(param_dict), plot_sweeps, protocol_name) for f in filelist)
df_spike_count = pd.concat(temp_df_spike_count, sort=True)
return dfs, df_spike_count, df_running_avg_count
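# Illustrative sketch (not in the original source): extract spike features from every
# ABF file under a folder, in parallel. The folder path and analysis window values
# are placeholder assumptions.
def _example_folder_feature_extract():
    params = {'start': 0.1, 'end': 0.7, 'filter': 0}
    dfs, spike_counts, running_avg = folder_feature_extract(
        '/path/to/abf_folder', params, plot_sweeps=-1, protocol_name='IC1', para=4)
    return spike_counts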
def preprocess_abf(file_path, param_dict, plot_sweeps, protocol_name):
try:
abf = pyabf.ABF(file_path)
if abf.sweepLabelY != 'Clamp Current (pA)' and protocol_name in abf.protocol:
print(file_path + ' import')
temp_spike_df, df, temp_running_bin = analyze_abf(abf, sweeplist=None, plot=plot_sweeps, param_dict=param_dict)
return temp_spike_df
else:
print('Not correct protocol: ' + abf.protocol)
return pd.DataFrame()
except:
return pd.DataFrame()
def analyze_spike_sweep(abf, sweepNumber, param_dict):
abf.setSweep(sweepNumber)
spikext = feature_extractor.SpikeFeatureExtractor(**param_dict)
spiketxt = feature_extractor.SpikeTrainFeatureExtractor(start=param_dict['start'], end=param_dict['end'])
dataT, dataV, dataI = abf.sweepX, abf.sweepY, abf.sweepC
if dataI.shape[0] < dataV.shape[0]:
dataI = np.hstack((dataI, np.full(dataV.shape[0] - dataI.shape[0], 0)))
spike_in_sweep = spikext.process(dataT, dataV, dataI)
spike_train = spiketxt.process(dataT, dataV, dataI, spike_in_sweep)
return spike_in_sweep, spike_train
def analyze_abf(abf, sweeplist=None, plot=-1, param_dict=None):
np.nan_to_num(abf.data, nan=-9999, copy=False)
#If there is more than one sweep, we need to ensure we don't iterate out of range
if sweeplist == None:
if abf.sweepCount > 1:
sweepcount = abf.sweepList
else:
sweepcount = [0]
df = pd.DataFrame()
#Now we walk through the sweeps looking for action potentials
temp_spike_df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
from pathlib import Path
from typing import Dict, Tuple, List
from sqlalchemy import create_engine
import io
import datetime
from data import wpa_scrape
REGEX = '^(Club|TeamName)'
def find_str(row, search_str: str) -> bool:
return row.astype(str).str.contains(search_str, case=False).any()
def find_header(df: pd.DataFrame) -> int:
s = df.apply(find_str, args=(REGEX,), axis=1)
try:
return df.loc[s].index.values[0] + 1
except IndexError:
s = df.columns.str.contains(REGEX, case=False)
if pd.Series(s).sum() > 0:
return 0
else:
return -99
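# Illustrative sketch (not in the original source): the header row is located by
# searching for a cell starting with 'Club' or 'TeamName'; the returned offset is
# meant to be passed as `header=` when re-reading the sheet.
def _example_find_header():
    raw = pd.DataFrame([['Race results', None, None],
                        ['Pos', 'Name', 'Club'],
                        [1, 'A. Runner', 'Harriers']])
    return find_header(raw)  # 2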
def load_excel(xlsx: pd.ExcelFile, sheet_name: str=None) -> pd.DataFrame:
if not sheet_name:
sheet_name = xlsx.sheet_names[0]
df = pd.read_excel(xlsx, sheet_name, nrows=20)
start = find_header(df)
if start == -99:
# Edge case for Cape Peninsula marathon 21 km sheet with no column names
new_df = pd.read_excel(xlsx, sheet_name)
res = (
(df.apply(lambda row: row.astype(str).str.contains('Cape Peninsula')
.any(), axis=1)) &
(df.apply(lambda row: row
.astype(str).str.contains('21km').any(), axis=1))
)
if res.all():
# Put the column headers as the first row and drop the first column
# that is a duplicate of the index
new_df = df.reset_index().T.reset_index().T.iloc[:, 1:]
cols = [
'Race', 'Event', 'Pos', 'FirstName', 'LastName',
'Race No', 'Finish Status', 'Time', 'Age', 'Category',
'Category Pos', 'Gender', 'Gender Pos', 'Club'
]
new_df.columns = cols
else:
new_df = | pd.read_excel(xlsx, sheet_name, header=start) | pandas.read_excel |