prompt
stringlengths 19
1.03M
| completion
stringlengths 4
2.12k
| api
stringlengths 8
90
|
---|---|---|
'''
Load data template. Includes load(), metadata, split_stations(),
remove_upcast() and locals().update().
Inputs:
load() - data/ctd/<DATE>.cnv
metadata() - data/csv/coordenadas_<DATE>.csv
'''
# Dependencies
import pandas as pd
from code.functions import *
saida1 = 'data/ctd/stations_25-01-2017_processed.cnv'
saida2 = 'data/ctd/stations_27-05-2017_processed.cnv'
saida3 = 'data/ctd/stations_08-07-2017_processed.cnv'
saida4 = 'data/ctd/stations_01-10-2017_processed.cnv'
df = pd.read_csv('data/csv/coordenadas.csv', sep=';')
dates = set(df['Data'])
dates = list(dates)
'''
Saída 1 - 25-01-2017
'''
# Loading the data
hd1, hd2, variables, datapoints, alldata = load(saida1)
# Loading metadata
today = dates[2]
stations = list(df.loc[df['Data'] == today]['Ponto'])
lat = list(df.loc[df['Data'] == today]['Lat'])
lon = list(df.loc[df['Data'] == today]['Lon'])
[i.insert(3, i[2]) for i in [stations, lat, lon]]
# Splitting data into different stations
d = split_stations(datapoints, stations, variables, lat, lon)
# Removing upcasts
for st in d:
d[st] = remove_upcast(d[st])
# Creating variables with stations from the dictionary
locals().update(d)
# Let's put them all into lists
st_list = list(d.values())
# Picking out surface and bottom temperatures
top, bot, names = [], [], []
for i in st_list:
top.append(i['sal00:'][0])
bot.append(i['sal00:'][len(i)-1])
names.append(i['STATION'][0])
top, bot, names = pd.Series(top), pd.Series(bot), pd.Series(names)
df2 = pd.DataFrame([top, bot, names])
df2 = df2.transpose()
df2.to_csv('./25-jan-sal.csv')
'''
Saída 2 - 27-05-2017
'''
# Loading the data
hd1, hd2, variables, datapoints, alldata = load(saida2)
# Loading metadata
today = dates[0]
stations = list(df.loc[df['Data'] == today]['Ponto'])
lat = list(df.loc[df['Data'] == today]['Lat'])
lon = list(df.loc[df['Data'] == today]['Lon'])
# This particular day (27-05) there was a test station before sampling.
stations, lat, lon = ['test'] + stations, ['test'] + lat, ['test'] + lon
# Splitting data into different stations
d = split_stations(datapoints, stations, variables, lat, lon)
# Removing upcasts
for st in d:
d[st] = remove_upcast(d[st])
# Creating variables with stations from the dictionary
locals().update(d)
# Let's put them all into lists
st_list = list(d.values())
top, bot, names = [], [], []
for i in st_list:
top.append(i['sal00:'][0])
bot.append(i['sal00:'][len(i)-1])
names.append(i['STATION'][0])
top, bot, names = pd.Series(top), pd.Series(bot), pd.Series(names)
df2 = pd.DataFrame([top, bot, names])
df2 = df2.transpose()
df2.to_csv('./27-may-sal.csv')
'''
Saída 3 - 08-07-2017
'''
# Loading the data
hd1, hd2, variables, datapoints, alldata = load(saida3)
# Loading metadata
today = dates[3]
stations = list(df.loc[df['Data'] == today]['Ponto'])
lat = list(df.loc[df['Data'] == today]['Lat'])
lon = list(df.loc[df['Data'] == today]['Lon'])
# Splitting data into different stations
d = split_stations(datapoints, stations, variables, lat, lon)
# Removing upcasts
for st in d:
d[st] = remove_upcast(d[st])
# Creating variables with stations from the dictionary
locals().update(d)
st_list = list(d.values())
top, bot, names = [], [], []
for i in st_list:
top.append(i['sal00:'][0])
bot.append(i['sal00:'][len(i)-1])
names.append(i['STATION'][0])
top, bot, names = pd.Series(top), pd.Series(bot), pd.Series(names)
df2 = pd.DataFrame([top, bot, names])
df2 = df2.transpose()
df2.to_csv('./08-jul-sal.csv')
'''
Saída 4 - 01-10-2017
'''
# Loading the data
hd1, hd2, variables, datapoints, alldata = load(saida4)
# Loading metadata
today = dates[1]
stations = list(df.loc[df['Data'] == today]['Ponto'])
lat = list(df.loc[df['Data'] == today]['Lat'])
lon = list(df.loc[df['Data'] == today]['Lon'])
[i.insert(3, i[2]) for i in [stations, lat, lon]]
# Splitting data into different stations
d = split_stations(datapoints, stations, variables, lat, lon)
# Removing upcasts
for st in d:
d[st] = remove_upcast(d[st])
# Creating variables with stations from the dictionary
locals().update(d)
st_list = list(d.values())
top, bot, names = [], [], []
for i in st_list:
top.append(i['sal00:'][0])
bot.append(i['sal00:'][len(i)-1])
names.append(i['STATION'][0])
top, bot, names = | pd.Series(top) | pandas.Series |
import pytest
import numpy as np
import pandas as pd
from pandas import Categorical, Series, CategoricalIndex
from pandas.core.dtypes.concat import union_categoricals
from pandas.util import testing as tm
class TestUnionCategoricals(object):
def test_union_categorical(self):
# GH 13361
data = [
(list('abc'), list('abd'), list('abcabd')),
([0, 1, 2], [2, 3, 4], [0, 1, 2, 2, 3, 4]),
([0, 1.2, 2], [2, 3.4, 4], [0, 1.2, 2, 2, 3.4, 4]),
(['b', 'b', np.nan, 'a'], ['a', np.nan, 'c'],
['b', 'b', np.nan, 'a', 'a', np.nan, 'c']),
(pd.date_range('2014-01-01', '2014-01-05'),
pd.date_range('2014-01-06', '2014-01-07'),
pd.date_range('2014-01-01', '2014-01-07')),
(pd.date_range('2014-01-01', '2014-01-05', tz='US/Central'),
pd.date_range('2014-01-06', '2014-01-07', tz='US/Central'),
pd.date_range('2014-01-01', '2014-01-07', tz='US/Central')),
(pd.period_range('2014-01-01', '2014-01-05'),
pd.period_range('2014-01-06', '2014-01-07'),
pd.period_range('2014-01-01', '2014-01-07')),
]
for a, b, combined in data:
for box in [Categorical, CategoricalIndex, Series]:
result = union_categoricals([box(Categorical(a)),
box(Categorical(b))])
expected = Categorical(combined)
tm.assert_categorical_equal(result, expected,
check_category_order=True)
# new categories ordered by appearance
s = Categorical(['x', 'y', 'z'])
s2 = Categorical(['a', 'b', 'c'])
result = union_categoricals([s, s2])
expected = Categorical(['x', 'y', 'z', 'a', 'b', 'c'],
categories=['x', 'y', 'z', 'a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
s = Categorical([0, 1.2, 2], ordered=True)
s2 = Categorical([0, 1.2, 2], ordered=True)
result = union_categoricals([s, s2])
expected = Categorical([0, 1.2, 2, 0, 1.2, 2], ordered=True)
tm.assert_categorical_equal(result, expected)
# must exactly match types
s = Categorical([0, 1.2, 2])
s2 = Categorical([2, 3, 4])
msg = 'dtype of categories must be the same'
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([s, s2])
msg = 'No Categoricals to union'
with tm.assert_raises_regex(ValueError, msg):
union_categoricals([])
def test_union_categoricals_nan(self):
# GH 13759
res = union_categoricals([pd.Categorical([1, 2, np.nan]),
pd.Categorical([3, 2, np.nan])])
exp = Categorical([1, 2, np.nan, 3, 2, np.nan])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical(['A', 'B']),
pd.Categorical(['B', 'B', np.nan])])
exp = Categorical(['A', 'B', 'B', 'B', np.nan])
tm.assert_categorical_equal(res, exp)
val1 = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-03-01'),
pd.NaT]
val2 = [pd.NaT, pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-02-01')]
res = union_categoricals([pd.Categorical(val1), pd.Categorical(val2)])
exp = Categorical(val1 + val2,
categories=[pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-03-01'),
pd.Timestamp('2011-02-01')])
tm.assert_categorical_equal(res, exp)
# all NaN
res = union_categoricals([pd.Categorical([np.nan, np.nan]),
pd.Categorical(['X'])])
exp = Categorical([np.nan, np.nan, 'X'])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical([np.nan, np.nan]),
pd.Categorical([np.nan, np.nan])])
exp = Categorical([np.nan, np.nan, np.nan, np.nan])
tm.assert_categorical_equal(res, exp)
def test_union_categoricals_empty(self):
# GH 13759
res = union_categoricals([pd.Categorical([]),
pd.Categorical([])])
exp = Categorical([])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical([]),
pd.Categorical([1.0])])
exp = Categorical([1.0])
tm.assert_categorical_equal(res, exp)
# to make dtype equal
nanc = pd.Categorical(np.array([np.nan], dtype=np.float64))
res = union_categoricals([nanc,
pd.Categorical([])])
tm.assert_categorical_equal(res, nanc)
def test_union_categorical_same_category(self):
# check fastpath
c1 = Categorical([1, 2, 3, 4], categories=[1, 2, 3, 4])
c2 = Categorical([3, 2, 1, np.nan], categories=[1, 2, 3, 4])
res = union_categoricals([c1, c2])
exp = Categorical([1, 2, 3, 4, 3, 2, 1, np.nan],
categories=[1, 2, 3, 4])
tm.assert_categorical_equal(res, exp)
c1 = Categorical(['z', 'z', 'z'], categories=['x', 'y', 'z'])
c2 = Categorical(['x', 'x', 'x'], categories=['x', 'y', 'z'])
res = union_categoricals([c1, c2])
exp = Categorical(['z', 'z', 'z', 'x', 'x', 'x'],
categories=['x', 'y', 'z'])
tm.assert_categorical_equal(res, exp)
def test_union_categoricals_ordered(self):
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], ordered=False)
msg = 'Categorical.ordered must be the same'
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2])
res = union_categoricals([c1, c1])
exp = Categorical([1, 2, 3, 1, 2, 3], ordered=True)
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3, np.nan], ordered=True)
c2 = Categorical([3, 2], categories=[1, 2, 3], ordered=True)
res = union_categoricals([c1, c2])
exp = Categorical([1, 2, 3, np.nan, 3, 2], ordered=True)
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], categories=[3, 2, 1], ordered=True)
msg = "to union ordered Categoricals, all categories must be the same"
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2])
def test_union_categoricals_ignore_order(self):
# GH 15219
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], ordered=False)
res = union_categoricals([c1, c2], ignore_order=True)
exp = Categorical([1, 2, 3, 1, 2, 3])
tm.assert_categorical_equal(res, exp)
msg = 'Categorical.ordered must be the same'
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2], ignore_order=False)
res = union_categoricals([c1, c1], ignore_order=True)
exp = Categorical([1, 2, 3, 1, 2, 3])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([c1, c1], ignore_order=False)
exp = Categorical([1, 2, 3, 1, 2, 3],
categories=[1, 2, 3], ordered=True)
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3, np.nan], ordered=True)
c2 = Categorical([3, 2], categories=[1, 2, 3], ordered=True)
res = union_categoricals([c1, c2], ignore_order=True)
exp = Categorical([1, 2, 3, np.nan, 3, 2])
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], categories=[3, 2, 1], ordered=True)
res = union_categoricals([c1, c2], ignore_order=True)
exp = Categorical([1, 2, 3, 1, 2, 3])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([c2, c1], ignore_order=True,
sort_categories=True)
exp = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([4, 5, 6], ordered=True)
result = union_categoricals([c1, c2], ignore_order=True)
expected = Categorical([1, 2, 3, 4, 5, 6])
tm.assert_categorical_equal(result, expected)
msg = "to union ordered Categoricals, all categories must be the same"
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2], ignore_order=False)
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2])
def test_union_categoricals_sort(self):
# GH 13846
c1 = Categorical(['x', 'y', 'z'])
c2 = Categorical(['a', 'b', 'c'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['x', 'y', 'z', 'a', 'b', 'c'],
categories=['a', 'b', 'c', 'x', 'y', 'z'])
tm.assert_categorical_equal(result, expected)
# fastpath
c1 = Categorical(['a', 'b'], categories=['b', 'a', 'c'])
c2 = Categorical(['b', 'c'], categories=['b', 'a', 'c'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical(['a', 'b'], categories=['c', 'a', 'b'])
c2 = Categorical(['b', 'c'], categories=['c', 'a', 'b'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
# fastpath - skip resort
c1 = Categorical(['a', 'b'], categories=['a', 'b', 'c'])
c2 = Categorical(['b', 'c'], categories=['a', 'b', 'c'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical(['x', np.nan])
c2 = Categorical([np.nan, 'b'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['x', np.nan, np.nan, 'b'],
categories=['b', 'x'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical([np.nan])
c2 = Categorical([np.nan])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical([np.nan, np.nan], categories=[])
tm.assert_categorical_equal(result, expected)
c1 = Categorical([])
c2 = Categorical([])
result = union_categoricals([c1, c2], sort_categories=True)
expected = | Categorical([]) | pandas.Categorical |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 20 16:00:06 2018
@author: nmei
"""
if __name__ == "__main__":
import os
import pandas as pd
import numpy as np
import utils
import seaborn as sns
sns.set_style('whitegrid')
sns.set_context('poster')
from matplotlib import pyplot as plt
from statsmodels.formula.api import ols
from statsmodels.stats.anova import anova_lm
from itertools import combinations
import pymc3 as pm
import theano.tensor as tt
# define result saving directory
dir_saving = 'consective_transitions'
if not os.path.exists(dir_saving):
os.makedirs(dir_saving)
############################################################################################
df1 = pd.read_csv('e1.csv').iloc[:,1:]
df = df1.copy()
# select the columns that I need
df = df[['blocks.thisN',
'trials.thisN',
'key_resp_2.keys',
'resp.corr',
'resp_mrating.keys',
'participant',]]
# rename the columns
df.columns = ['blocks',
'trials',
'awareness',
'correctness',
'confidence',
'participant',]
# use success, awareness, and confidence as features
np.random.seed(12345)
# use judgement features
feature_names = [
'correctness',
'awareness',
'confidence',
]
target_name = 'awareness'
experiment = 'e1'
# for some of the variables, we need to rescale them to a more preferable range like 0-1
name_for_scale = ['awareness']
# ['ah', 'av', 'bj', 'cm', 'db', 'ddb', 'fcm', 'kf', 'kk', 'ml', 'qa','sk', 'yv']
# get one of the participants' data
transition_matrix,sub = [], []
transition_count = []
for participant,df_sub in df.groupby('participant'):
awareness = df_sub['awareness'].values - 1
with pm.Model() as model:
a = pm.Beta('a', 0.5, 0.5)
yl = pm.Bernoulli('yl', a, observed=awareness)
trace = pm.sample(1000,
step=pm.SMC(),
random_seed=42)
# for 1-back
temp = pd.crosstab(pd.Series(df_sub['awareness'].values[1:],name='N'),
pd.Series(df_sub['awareness'].values[:-1],name='N-1'),
normalize=1)
transition_matrix.append(temp.get_values().flatten())
temp = pd.crosstab(pd.Series(df_sub['awareness'].values[1:],name='N'),
pd.Series(df_sub['awareness'].values[:-1],name='N-1'),)
transition_count.append(temp.get_values().flatten())
sub.append(participant)
df1_transition = pd.DataFrame(transition_matrix,columns=['unaware-unaware',
'aware-unaware',
'unaware-aware',
'aware-aware'])
df1_count = pd.DataFrame(transition_count,columns=['unaware-unaware',
'aware-unaware',
'unaware-aware',
'aware-aware'])
df1_transition['experiment'] = 1
df1_transition['sub'] = sub
df1_count['experiment'] = 1
df1_count['sub'] = sub
##############################################################################
df2 = pd.read_csv('e2.csv').iloc[:,1:]
df = df2.copy()
# select the columns that I need
df = df[['blocks.thisN',
'trials.thisN',
'key_resp_2.keys',
'resp.corr',
'resp_mrating.keys',
'participant',]]
# rename the columns
df.columns = ['blocks',
'trials',
'awareness',
'correctness',
'confidence',
'participant',]
# use success, awareness, and confidence as features
np.random.seed(12345)
# use judgement features
feature_names = [
'correctness',
'awareness',
'confidence',
]
target_name = 'awareness'
experiment = 'e2'
# for some of the variables, we need to rescale them to a more preferable range like 0-1
name_for_scale = ['awareness']
transition_matrix,sub = [],[]
transition_count = []
for participant,df_sub in df.groupby('participant'):
df_sub = df_sub[df_sub['awareness'] != 3]
# for 1-back
temp = pd.crosstab(pd.Series(df_sub['awareness'].values[1:],name='N'),
pd.Series(df_sub['awareness'].values[:-1],name='N-1'),
normalize=1)
transition_matrix.append(temp.get_values().flatten())
temp = pd.crosstab(pd.Series(df_sub['awareness'].values[1:],name='N'),
pd.Series(df_sub['awareness'].values[:-1],name='N-1'),)
transition_count.append(temp.get_values().flatten())
sub.append(participant)
df2_transition = pd.DataFrame(transition_matrix,columns=['unaware-unaware',
'aware-unaware',
'unaware-aware',
'aware-aware'])
df2_count = pd.DataFrame(transition_count,columns=['unaware-unaware',
'aware-unaware',
'unaware-aware',
'aware-aware'])
df2_transition['experiment'] = 2
df2_transition['sub'] = sub
df2_count['experiment'] = 2
df2_count['sub'] = sub
##############################################################################
df3 = pd.read_csv('e3.csv').iloc[:,1:]
df = df3.copy()
# select the columns that I need
df = df[['blocks.thisN',
'trials.thisN',
'key_resp_2.keys',
'resp.corr',
'resp_mrating.keys',
'participant',]]
# rename the columns
df.columns = ['blocks',
'trials',
'awareness',
'correctness',
'confidence',
'participant',]
# use success, awareness, and confidence as features
np.random.seed(12345)
# use judgement features
feature_names = [
'correctness',
'awareness',
'confidence',
]
target_name = 'awareness'
experiment = 'e3'
# for some of the variables, we need to rescale them to a more preferable range like 0-1
name_for_scale = ['awareness']
transition_matrix,sub = [],[]
transition_count = []
for participant,df_sub in df.groupby('participant'):
df_sub = df_sub[df_sub['awareness'] != 3]
# for 1-back
temp = pd.crosstab(pd.Series(df_sub['awareness'].values[1:],name='N'),
pd.Series(df_sub['awareness'].values[:-1],name='N-1'),
normalize=1)
transition_matrix.append(temp.get_values().flatten())
temp = pd.crosstab(pd.Series(df_sub['awareness'].values[1:],name='N'),
pd.Series(df_sub['awareness'].values[:-1],name='N-1'),)
transition_count.append(temp.get_values().flatten())
sub.append(participant)
df3_transition = pd.DataFrame(transition_matrix,columns=['unaware-unaware',
'aware-unaware',
'unaware-aware',
'aware-aware'])
df3_count = pd.DataFrame(transition_count,columns=['unaware-unaware',
'aware-unaware',
'unaware-aware',
'aware-aware'])
df3_transition['experiment'] = 3
df3_transition['sub'] = sub
df3_count['experiment'] = 3
df3_count['sub'] = sub
##################################################################################
df_transition = | pd.concat([df1_transition,df2_transition,df3_transition]) | pandas.concat |
from __future__ import division # brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy.testing as npt
import os.path
import pandas as pd
import pkgutil
import sys
from tabulate import tabulate
import unittest
try:
from StringIO import StringIO
except ImportError:
from io import StringIO, BytesIO
# #find parent directory and import model
# parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# print("parent_dir")
# print(parent_dir)
# sys.path.append(parent_dir)
from ..trex_exe import Trex, TrexOutputs
print("sys.path")
print(sys.path)
# load transposed qaqc data for inputs and expected outputs
# this works for both local nosetests and travis deploy
# input details
try:
if __package__ is not None:
csv_data = pkgutil.get_data(__package__, 'trex_qaqc_in_transpose.csv')
data_inputs = BytesIO(csv_data)
pd_obj_inputs = | pd.read_csv(data_inputs, index_col=0, engine='python') | pandas.read_csv |
from warnings import catch_warnings, simplefilter
import numpy as np
from numpy.random import randn
import pytest
import pandas as pd
from pandas import (
DataFrame, MultiIndex, Series, Timestamp, date_range, isna, notna)
from pandas.util import testing as tm
@pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")
class TestMultiIndexSetItem(object):
def test_setitem_multiindex(self):
with catch_warnings(record=True):
for index_fn in ('ix', 'loc'):
def assert_equal(a, b):
assert a == b
def check(target, indexers, value, compare_fn, expected=None):
fn = getattr(target, index_fn)
fn.__setitem__(indexers, value)
result = fn.__getitem__(indexers)
if expected is None:
expected = value
compare_fn(result, expected)
# GH7190
index = MultiIndex.from_product([np.arange(0, 100),
np.arange(0, 80)],
names=['time', 'firm'])
t, n = 0, 2
df = DataFrame(np.nan, columns=['A', 'w', 'l', 'a', 'x',
'X', 'd', 'profit'],
index=index)
check(target=df, indexers=((t, n), 'X'), value=0,
compare_fn=assert_equal)
df = DataFrame(-999, columns=['A', 'w', 'l', 'a', 'x',
'X', 'd', 'profit'],
index=index)
check(target=df, indexers=((t, n), 'X'), value=1,
compare_fn=assert_equal)
df = DataFrame(columns=['A', 'w', 'l', 'a', 'x',
'X', 'd', 'profit'],
index=index)
check(target=df, indexers=((t, n), 'X'), value=2,
compare_fn=assert_equal)
# gh-7218: assigning with 0-dim arrays
df = DataFrame(-999, columns=['A', 'w', 'l', 'a', 'x',
'X', 'd', 'profit'],
index=index)
check(target=df,
indexers=((t, n), 'X'),
value=np.array(3),
compare_fn=assert_equal,
expected=3, )
# GH5206
df = DataFrame(np.arange(25).reshape(5, 5),
columns='A,B,C,D,E'.split(','), dtype=float)
df['F'] = 99
row_selection = df['A'] % 2 == 0
col_selection = ['B', 'C']
with catch_warnings(record=True):
df.ix[row_selection, col_selection] = df['F']
output = DataFrame(99., index=[0, 2, 4], columns=['B', 'C'])
with catch_warnings(record=True):
tm.assert_frame_equal(df.ix[row_selection, col_selection],
output)
check(target=df,
indexers=(row_selection, col_selection),
value=df['F'],
compare_fn=tm.assert_frame_equal,
expected=output, )
# GH11372
idx = MultiIndex.from_product([
['A', 'B', 'C'],
date_range('2015-01-01', '2015-04-01', freq='MS')])
cols = MultiIndex.from_product([
['foo', 'bar'],
date_range('2016-01-01', '2016-02-01', freq='MS')])
df = DataFrame(np.random.random((12, 4)),
index=idx, columns=cols)
subidx = MultiIndex.from_tuples(
[('A', Timestamp('2015-01-01')),
('A', Timestamp('2015-02-01'))])
subcols = MultiIndex.from_tuples(
[('foo', Timestamp('2016-01-01')),
('foo', Timestamp('2016-02-01'))])
vals = DataFrame(np.random.random((2, 2)),
index=subidx, columns=subcols)
check(target=df,
indexers=(subidx, subcols),
value=vals,
compare_fn=tm.assert_frame_equal, )
# set all columns
vals = DataFrame(
np.random.random((2, 4)), index=subidx, columns=cols)
check(target=df,
indexers=(subidx, slice(None, None, None)),
value=vals,
compare_fn=tm.assert_frame_equal, )
# identity
copy = df.copy()
check(target=df, indexers=(df.index, df.columns), value=df,
compare_fn=tm.assert_frame_equal, expected=copy)
def test_multiindex_setitem(self):
# GH 3738
# setting with a multi-index right hand side
arrays = [np.array(['bar', 'bar', 'baz', 'qux', 'qux', 'bar']),
np.array(['one', 'two', 'one', 'one', 'two', 'one']),
np.arange(0, 6, 1)]
df_orig = DataFrame(np.random.randn(6, 3), index=arrays,
columns=['A', 'B', 'C']).sort_index()
expected = df_orig.loc[['bar']] * 2
df = df_orig.copy()
df.loc[['bar']] *= 2
tm.assert_frame_equal(df.loc[['bar']], expected)
# raise because these have differing levels
with pytest.raises(TypeError):
df.loc['bar'] *= 2
# from SO
# http://stackoverflow.com/questions/24572040/pandas-access-the-level-of-multiindex-for-inplace-operation
df_orig = DataFrame.from_dict({'price': {
('DE', 'Coal', 'Stock'): 2,
('DE', 'Gas', 'Stock'): 4,
('DE', 'Elec', 'Demand'): 1,
('FR', 'Gas', 'Stock'): 5,
('FR', 'Solar', 'SupIm'): 0,
('FR', 'Wind', 'SupIm'): 0
}})
df_orig.index = MultiIndex.from_tuples(df_orig.index,
names=['Sit', 'Com', 'Type'])
expected = df_orig.copy()
expected.iloc[[0, 2, 3]] *= 2
idx = pd.IndexSlice
df = df_orig.copy()
df.loc[idx[:, :, 'Stock'], :] *= 2
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[idx[:, :, 'Stock'], 'price'] *= 2
tm.assert_frame_equal(df, expected)
def test_multiindex_assignment(self):
# GH3777 part 2
# mixed dtype
df = DataFrame(np.random.randint(5, 10, size=9).reshape(3, 3),
columns=list('abc'),
index=[[4, 4, 8], [8, 10, 12]])
df['d'] = np.nan
arr = np.array([0., 1.])
with catch_warnings(record=True):
df.ix[4, 'd'] = arr
tm.assert_series_equal(df.ix[4, 'd'],
Series(arr, index=[8, 10], name='d'))
# single dtype
df = DataFrame(np.random.randint(5, 10, size=9).reshape(3, 3),
columns=list('abc'),
index=[[4, 4, 8], [8, 10, 12]])
with catch_warnings(record=True):
df.ix[4, 'c'] = arr
exp = Series(arr, index=[8, 10], name='c', dtype='float64')
tm.assert_series_equal(df.ix[4, 'c'], exp)
# scalar ok
with catch_warnings(record=True):
df.ix[4, 'c'] = 10
exp = Series(10, index=[8, 10], name='c', dtype='float64')
tm.assert_series_equal(df.ix[4, 'c'], exp)
# invalid assignments
with pytest.raises(ValueError):
with catch_warnings(record=True):
df.ix[4, 'c'] = [0, 1, 2, 3]
with pytest.raises(ValueError):
with catch_warnings(record=True):
df.ix[4, 'c'] = [0]
# groupby example
NUM_ROWS = 100
NUM_COLS = 10
col_names = ['A' + num for num in
map(str, np.arange(NUM_COLS).tolist())]
index_cols = col_names[:5]
df = DataFrame(np.random.randint(5, size=(NUM_ROWS, NUM_COLS)),
dtype=np.int64, columns=col_names)
df = df.set_index(index_cols).sort_index()
grp = df.groupby(level=index_cols[:4])
df['new_col'] = np.nan
f_index = np.arange(5)
def f(name, df2):
return Series(np.arange(df2.shape[0]),
name=df2.index.values[0]).reindex(f_index)
# TODO(wesm): unused?
# new_df = pd.concat([f(name, df2) for name, df2 in grp], axis=1).T
# we are actually operating on a copy here
# but in this case, that's ok
for name, df2 in grp:
new_vals = np.arange(df2.shape[0])
with catch_warnings(record=True):
df.ix[name, 'new_col'] = new_vals
def test_series_setitem(
self, multiindex_year_month_day_dataframe_random_data):
ymd = multiindex_year_month_day_dataframe_random_data
s = ymd['A']
s[2000, 3] = np.nan
assert isna(s.values[42:65]).all()
assert notna(s.values[:42]).all()
assert notna(s.values[65:]).all()
s[2000, 3, 10] = np.nan
assert isna(s[49])
def test_frame_getitem_setitem_boolean(
self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
df = frame.T.copy()
values = df.values
result = df[df > 0]
expected = df.where(df > 0)
tm.assert_frame_equal(result, expected)
df[df > 0] = 5
values[values > 0] = 5
tm.assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
tm.assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
| tm.assert_almost_equal(df.values, values) | pandas.util.testing.assert_almost_equal |
# -*- coding: utf-8 -*-
# Arithmetc tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.core import ops
from pandas.errors import NullFrequencyError
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = pd.timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = | TimedeltaIndex(['1 Day', '12 Hours']) | pandas.TimedeltaIndex |
import pandas as pd
from isitfit.utils import logger
class TagsImplierHelper:
def __init__(self, names_df):
self.names_df = names_df
self.names_original = names_df.Name.tolist()
def freq_list(self):
logger.info("Step 1: calculate word frequencies")
# lower-case
self.names_lower = [x.lower() for x in self.names_original]
# count single word frequencies
# https://programminghistorian.org/en/lessons/counting-frequencies
import re
#names_split = (' '.join(names_original)).split(' ')
#words = re.findall("\w+", "the quick person did not realize his speed and the quick person bumped")
names_split = re.findall("\w+", ' '.join(self.names_lower))
# Counting bigrams
# https://stackoverflow.com/a/12488794/4126114
from itertools import tee, islice
from collections import Counter
def ngrams(lst, n):
tlst = lst
while True:
a, b = tee(tlst)
l = tuple(islice(a, n))
if len(l) == n:
yield l
next(b)
tlst = b
else:
break
def get_freq(n):
# names_freq = dict(Counter(zip(names_split, islice(names_split, n-1, None))))
names_freq = dict(Counter(ngrams(names_split, n)))
names_freq = [(k,v) for k,v in names_freq.items()]
return names_freq
self.freq_1w = get_freq(1)
self.freq_2w = get_freq(2)
def freq_df(self):
logger.info("Step 2: convert word frequencies to pandas dataframe")
# convert to pandas dataframe
def min_is_2(df_in):
return df_in[df_in.n >= 2] # minimum occurence is 2
def freq2df(freq_in, l):
df_freq_in = pd.DataFrame(freq_in, columns=['word_tuple', 'n'])
df_freq_in = min_is_2(df_freq_in)
df_freq_in['l'] = l
return df_freq_in
df_freq_1w = freq2df(self.freq_1w, 1)
df_freq_1w['word_1'] = df_freq_1w.word_tuple.apply(lambda x: x[0])
df_freq_1w['word_2'] = None
df_freq_2w = freq2df(self.freq_2w, 2)
df_freq_2w['word_1'] = df_freq_2w.word_tuple.apply(lambda x: x[0])
df_freq_2w['word_2'] = df_freq_2w.word_tuple.apply(lambda x: x[1])
# print("##########")
# print("before filter")
# print("1w")
# print(df_freq_1w)
# print("")
# print("2w")
# print(df_freq_2w)
# print("##########")
# filter out 1-grams if their 2-gram counterpart is superior
df_freq_2w = df_freq_2w.merge(df_freq_1w[['word_1', 'n']], how='left', left_on='word_1', right_on='word_1', suffixes=['', '.1w=2w.word1'])
df_freq_2w = df_freq_2w.merge(df_freq_1w[['word_1', 'n']], how='left', left_on='word_2', right_on='word_1', suffixes=['', '.1w=2w.word2'])
df_freq_2w = df_freq_2w.drop(columns=['word_1.1w=2w.word2'])
df_freq_2w = df_freq_2w[(df_freq_2w.n >= df_freq_2w['n.1w=2w.word1']) & (df_freq_2w.n >= df_freq_2w['n.1w=2w.word2'])]
# print("")
# print("after filtering 2w")
# print(df_freq_2w)
# drop from 1w the components that were used in the 2w
df_freq_1w = df_freq_1w[~(df_freq_1w.word_1.isin(df_freq_2w.word_1) | df_freq_1w.word_1.isin(df_freq_2w.word_2))]
# drop columns
df_freq_2w = df_freq_2w.drop(columns=['n.1w=2w.word1', 'n.1w=2w.word2'])
# concatenate into 1 df
df_freq_all = | pd.concat([df_freq_1w,df_freq_2w], axis=0) | pandas.concat |
import pytest
import numpy as np
import pandas as pd
from pandas import Categorical, Series, CategoricalIndex
from pandas.core.dtypes.concat import union_categoricals
from pandas.util import testing as tm
class TestUnionCategoricals(object):
def test_union_categorical(self):
# GH 13361
data = [
(list('abc'), list('abd'), list('abcabd')),
([0, 1, 2], [2, 3, 4], [0, 1, 2, 2, 3, 4]),
([0, 1.2, 2], [2, 3.4, 4], [0, 1.2, 2, 2, 3.4, 4]),
(['b', 'b', np.nan, 'a'], ['a', np.nan, 'c'],
['b', 'b', np.nan, 'a', 'a', np.nan, 'c']),
(pd.date_range('2014-01-01', '2014-01-05'),
pd.date_range('2014-01-06', '2014-01-07'),
pd.date_range('2014-01-01', '2014-01-07')),
(pd.date_range('2014-01-01', '2014-01-05', tz='US/Central'),
pd.date_range('2014-01-06', '2014-01-07', tz='US/Central'),
pd.date_range('2014-01-01', '2014-01-07', tz='US/Central')),
(pd.period_range('2014-01-01', '2014-01-05'),
pd.period_range('2014-01-06', '2014-01-07'),
pd.period_range('2014-01-01', '2014-01-07')),
]
for a, b, combined in data:
for box in [Categorical, CategoricalIndex, Series]:
result = union_categoricals([box(Categorical(a)),
box(Categorical(b))])
expected = Categorical(combined)
tm.assert_categorical_equal(result, expected,
check_category_order=True)
# new categories ordered by appearance
s = Categorical(['x', 'y', 'z'])
s2 = Categorical(['a', 'b', 'c'])
result = union_categoricals([s, s2])
expected = Categorical(['x', 'y', 'z', 'a', 'b', 'c'],
categories=['x', 'y', 'z', 'a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
s = Categorical([0, 1.2, 2], ordered=True)
s2 = Categorical([0, 1.2, 2], ordered=True)
result = union_categoricals([s, s2])
expected = Categorical([0, 1.2, 2, 0, 1.2, 2], ordered=True)
tm.assert_categorical_equal(result, expected)
# must exactly match types
s = Categorical([0, 1.2, 2])
s2 = Categorical([2, 3, 4])
msg = 'dtype of categories must be the same'
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([s, s2])
msg = 'No Categoricals to union'
with tm.assert_raises_regex(ValueError, msg):
union_categoricals([])
def test_union_categoricals_nan(self):
# GH 13759
res = union_categoricals([pd.Categorical([1, 2, np.nan]),
pd.Categorical([3, 2, np.nan])])
exp = Categorical([1, 2, np.nan, 3, 2, np.nan])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical(['A', 'B']),
pd.Categorical(['B', 'B', np.nan])])
exp = Categorical(['A', 'B', 'B', 'B', np.nan])
tm.assert_categorical_equal(res, exp)
val1 = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-03-01'),
pd.NaT]
val2 = [pd.NaT, pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-02-01')]
res = union_categoricals([pd.Categorical(val1), pd.Categorical(val2)])
exp = Categorical(val1 + val2,
categories=[ | pd.Timestamp('2011-01-01') | pandas.Timestamp |
import pandas as pd
import numpy as np
import math
import cmath
import pickle
joints = ['Nose','Neck','Right_shoulder','Right_elbow','Right_wrist','Left_shoulder',
'Left_elbow','Left_wrist','Right_hip','Right_knee','Right_ankle','Left_hip',
'Left_knee','Left_ankle','Right_eye','Left_eye','Right_ear','Left_ear']
def calculateAngle2d(a, b, c):
x1, y1 = a
x2, y2 = b #midpoint
x3, y3 = c
ABx = x1 - x2
ABy = y1 - y2
BCx = x3 - x2
BCy = y3 - y2
dotProduct = ABx * BCx + ABy * BCy
# print(dotProduct)
magnitudeAB = math.sqrt(ABx * ABx + ABy * ABy)
# print(magnitudeAB)
magnitudeBC = math.sqrt(BCx * BCx + BCy * BCy)
# print(magnitudeBC)
angle = math.acos(dotProduct/(magnitudeAB*magnitudeBC))
angle = (angle * 180) / math.pi
# return(round(abs(angle), 4))
return angle
def calculateAngle3d(p1, p2, p3):
x1, y1, z1 = p1
x2, y2, z2 = p2
x3, y3, z3 = p3
ABx = x1 - x2
ABy = y1 - y2
ABz = z1 - z2
BCx = x3 - x2
BCy = y3 - y2
BCz = z3 - z2
dotProduct = ABx * BCx +ABy * BCy +ABz * BCz
magnitudeAB = ABx * ABx +ABy * ABy +ABz * ABz
magnitudeBC = BCx * BCx +BCy * BCy +BCz * BCz
angle = dotProduct
if (magnitudeAB == 0 or magnitudeBC == 0):
angle = 0.0
else:
angle = cmath.acos(angle/math.sqrt(magnitudeAB *magnitudeBC))
angle = (angle * 180) / math.pi
return(round(abs(angle), 4))
def calculateDistance(p1, p2):
squared_dist = np.sum((p1-p2)**2, axis=0)
dist = np.sqrt(squared_dist)
return dist
def get_init_pos_from_pkl():
with open('initial3d_by_mean.pkl', 'rb') as file:
init_pos3d = pickle.load(file)
with open('initial3d_by_median.pkl', 'rb') as file:
init_pos3d_median = pickle.load(file)
with open('initial2d_by_mean.pkl', 'rb') as file:
init_pos2d = pickle.load(file)
with open('initial2d_by_median.pkl', 'rb') as file:
init_pos2d_median = pickle.load(file)
with open('initial2d_dis_by_mean.pkl', 'rb') as file:
init_dis2d = pickle.load(file)
with open('initial2d_dis_by_median.pkl', 'rb') as file:
init_dis2d_median = pickle.load(file)
return init_dis2d, init_dis2d_median, init_pos2d, init_pos2d_median, init_pos3d, init_pos3d_median
def get_init_pos_from_csv():
df = pd.read_csv("C:\\Users\\Testing\\Downloads\\reachstepout_position3d_new.csv")
df2 = | pd.read_csv("C:\\Users\\Testing\\Downloads\\reachstepout_position2d_new.csv") | pandas.read_csv |
import numpy as np
import pandas as pd
import random
from rpy2.robjects.packages import importr
utils = importr('utils')
#utils.install_packages('prodlim')
prodlim = importr('prodlim')
eventglm = importr('eventglm')
#utils.install_packages('eventglm')
import rpy2.robjects as robjects
from rpy2.robjects import r
def get_clinical_data_po(df, cutoff):
data_converted = | pd.get_dummies(df,columns= ['race' ,'ethnicity' ,'pathologic_stage' ,'molecular_subtype'],dtype=float) | pandas.get_dummies |
"""Tests suite for Period handling.
Parts derived from scikits.timeseries code, original authors:
- <NAME> & <NAME>
- pierregm_at_uga_dot_edu - mattknow_ca_at_hotmail_dot_com
"""
from unittest import TestCase
from datetime import datetime, timedelta
from numpy.ma.testutils import assert_equal
from pandas.tseries.period import Period, PeriodIndex
from pandas.tseries.index import DatetimeIndex, date_range
from pandas.tseries.tools import to_datetime
import pandas.core.datetools as datetools
import numpy as np
from pandas import Series, TimeSeries
from pandas.util.testing import assert_series_equal
class TestPeriodProperties(TestCase):
"Test properties such as year, month, weekday, etc...."
#
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_interval_constructor(self):
i1 = Period('1/1/2005', freq='M')
i2 = Period('Jan 2005')
self.assertEquals(i1, i2)
i1 = Period('2005', freq='A')
i2 = Period('2005')
i3 = Period('2005', freq='a')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i4 = Period('2005', freq='M')
i5 = Period('2005', freq='m')
self.assert_(i1 != i4)
self.assertEquals(i4, i5)
i1 = Period.now('Q')
i2 = Period(datetime.now(), freq='Q')
i3 = Period.now('q')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
# Biz day construction, roll forward if non-weekday
i1 = Period('3/10/12', freq='B')
i2 = Period('3/12/12', freq='D')
self.assertEquals(i1, i2.asfreq('B'))
i3 = Period('3/10/12', freq='b')
self.assertEquals(i1, i3)
i1 = Period(year=2005, quarter=1, freq='Q')
i2 = Period('1/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, quarter=3, freq='Q')
i2 = | Period('9/1/2005', freq='Q') | pandas.tseries.period.Period |
import requests
from bs4 import BeautifulSoup
import multiprocessing as mp
import os
import pandas as pd
import time
folder = './adj_temp'
os.makedirs(folder, exist_ok=True)
def _try_request( url, params, max_tries = 10, pause = 0.01 ):
res = None
n_tries = 1
while True:
try:
res = requests.get( url, params=params )
break
except (KeyboardInterrupt, SystemExit):
raise
except:
time.sleep(pause)
if n_tries < max_tries:
# print( f"Trying {n_tries}-th time." )
n_tries += 1
else:
raise
return res
def get_fund_split_data(symbol):
"""Download fund split data from Tiantian Jijinwang
Args:
symbol (str): fund symbol
Returns:
pd.DataFrame: fund split data, indexed by date, with one column `amount`
"""
url = f'https://fundf10.eastmoney.com/fhsp_{symbol}.html'
# res = requests.get(url)
res = _try_request(url, None)
html = BeautifulSoup(res.text, features="lxml")
split_data = html.find('table', attrs={'class':'w782 comm fhxq'})
split = pd.read_html(str(split_data))[0]
if split.iloc[0,0] == '暂无拆分信息!':
return pd.DataFrame()
rename = {'年份' : 'year',
'拆分折算日' : 'date',
'拆分类型' : 'splitType',
'拆分折算比例' : 'amount'}
split = split.rename(columns=rename)
split['date'] = pd.to_datetime(split['date'])
split = split[split['amount']!="暂未披露"]
if len(split) == 0:
return pd.DataFrame()
split['amount'] = split['amount'].str.split(':', expand=True)[1].astype(float)
split = split.drop(['year', 'splitType'], axis=1).set_index('date').sort_index()
return split
def get_fund_div_data(symbol):
"""Download fund dividend data from Sina Finance
Args:
symbol (str): fund symbol
Returns:
pd.DataFrame: fund dividend data, indexed by date, with one column `amount`
"""
url = 'https://stock.finance.sina.com.cn/fundInfo/api/openapi.php/FundPageInfoService.tabfh'
data_input = {
'symbol' : symbol,
'format' : 'json',
}
# resp = requests.get(url, params=data_input)
resp = _try_request(url, data_input)
data = resp.json()
fhdata = data['result']['data']['fhdata']
if len(fhdata)==0:
return pd.DataFrame()
div = | pd.DataFrame(fhdata) | pandas.DataFrame |
'''
MIT License
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import csv
"""
Los productos que salen de la contribucion de la UDD son:
34
"""
import pandas as pd
import glob
from utils import *
import numpy as np
def prod33(fte, prod):
data = []
for file in glob.glob(fte + '/*IM.csv'):
print('Processing ' + file)
df = pd.read_csv(file, sep=",", encoding="utf-8", decimal=",")
# standardize column names
df.rename(columns={'date': 'Fecha', 'comuna': 'Comuna'}, inplace=True)
# hay 4 comunas perdidas 5502, 5703, 11302 12202
# 5502, 5703 listas
# 11302: O'Higgins no esta
# 122012: Antartica no esta
df = normalizaNombreCodigoRegionYComuna(df)
df = FechaAlFinal(df)
data.append(df)
df = | pd.concat(data) | pandas.concat |
"""This module contains the Model class in Pastas.
"""
from collections import OrderedDict
from logging import getLogger
from os import getlogin
import numpy as np
from pandas import date_range, Series, Timedelta, DataFrame, Timestamp
from .decorators import get_stressmodel
from .io.base import dump, _load_model
from .modelstats import Statistics
from .noisemodels import NoiseModel
from .plots import Plotting
from .solver import LeastSquares
from .stressmodels import Constant
from .timeseries import TimeSeries
from .utils import _get_dt, _get_time_offset, get_sample, \
frequency_is_supported, validate_name
from .version import __version__
class Model:
"""Class that initiates a Pastas time series model.
Parameters
----------
oseries: pandas.Series or pastas.TimeSeries
pandas Series object containing the dependent time series. The
observation can be non-equidistant.
constant: bool, optional
Add a constant to the model (Default=True).
noisemodel: bool, optional
Add the default noisemodel to the model. A custom noisemodel can be
added later in the modelling process as well.
name: str, optional
String with the name of the model, used in plotting and saving.
metadata: dict, optional
Dictionary containing metadata of the oseries, passed on the to
oseries when creating a pastas TimeSeries object. hence,
ml.oseries.metadata will give you the metadata.
Returns
-------
ml: pastas.model.Model
Pastas Model instance, the base object in Pastas.
Examples
--------
A minimal working example of the Model class is shown below:
>>> oseries = pd.Series([1,2,1], index=pd.to_datetime(range(3), unit="D"))
>>> ml = Model(oseries)
"""
def __init__(self, oseries, constant=True, noisemodel=True, name=None,
metadata=None):
self.logger = getLogger(__name__)
# Construct the different model components
self.oseries = TimeSeries(oseries, settings="oseries",
metadata=metadata)
if name is None:
name = self.oseries.name
if name is None:
name = 'Observations'
self.name = validate_name(name)
self.parameters = DataFrame(
columns=["initial", "name", "optimal", "pmin", "pmax", "vary",
"stderr"])
# Define the model components
self.stressmodels = OrderedDict()
self.constant = None
self.transform = None
self.noisemodel = None
# Default solve/simulation settings
self.settings = {
"tmin": None,
"tmax": None,
"freq": "D",
"warmup": Timedelta(3650, "D"),
"time_offset": Timedelta(0),
"noise": noisemodel,
"solver": None,
"fit_constant": True,
}
if constant:
constant = Constant(initial=self.oseries.series.mean(),
name="constant")
self.add_constant(constant)
if noisemodel:
self.add_noisemodel(NoiseModel())
# File Information
self.file_info = self._get_file_info()
# initialize some attributes for solving and simulation
self.sim_index = None
self.oseries_calib = None
self.interpolate_simulation = None
self.normalize_residuals = False
self.fit = None
# Load other modules
self.stats = Statistics(self)
self.plots = Plotting(self)
self.plot = self.plots.plot # because we are lazy
def __repr__(self):
"""Prints a simple string representation of the model.
"""
template = ('{cls}(oseries={os}, name={name}, constant={const}, '
'noisemodel={noise})')
return template.format(cls=self.__class__.__name__,
os=self.oseries.name,
name=self.name,
const=not self.constant is None,
noise=not self.noisemodel is None)
def add_stressmodel(self, stressmodel, replace=False):
"""Add a stressmodel to the main model.
Parameters
----------
stressmodel: pastas.stressmodel or list of pastas.stressmodel
instance of a pastas.stressmodel class. Multiple stress models
can be provided (e.g., ml.add_stressmodel([sm1, sm2]) in one call.
replace: bool, optional
force replace the stressmodel if a stressmodel with the same name
already exists. Not recommended but useful at times. Default is
False.
Notes
-----
To obtain a list of the stressmodel names, type:
>>> ml.get_stressmodel_names()
Examples
--------
>>> sm = ps.StressModel(stress, rfunc=ps.Gamma, name="stress")
>>> ml.add_stressmodel(sm)
To add multiple stress models at once you can do the following:
>>> sm1 = ps.StressModel(stress, rfunc=ps.Gamma, name="stress1")
>>> sm1 = ps.StressModel(stress, rfunc=ps.Gamma, name="stress2")
>>> ml.add_stressmodel([sm1, sm2])
See Also
--------
pastas.stressmodels
"""
# Method can take multiple stressmodels at once through args
if isinstance(stressmodel, list):
for sm in stressmodel:
self.add_stressmodel(sm)
elif (stressmodel.name in self.stressmodels.keys()) and not replace:
self.logger.error("The name for the stressmodel you are trying "
"to add already exists for this model. Select "
"another name.")
else:
self.stressmodels[stressmodel.name] = stressmodel
self.parameters = self.get_init_parameters(initial=False)
if self.settings["freq"] is None:
self._set_freq()
stressmodel.update_stress(freq=self.settings["freq"])
# Check if stress overlaps with oseries, if not give a warning
if (stressmodel.tmin > self.oseries.series.index.max()) or \
(stressmodel.tmax < self.oseries.series.index.min()):
self.logger.warning("The stress of the stressmodel has no "
"overlap with ml.oseries.")
self._check_stressmodel_compatibility()
def add_constant(self, constant):
"""Add a Constant to the time series Model.
Parameters
----------
constant: pastas.Constant
Pastas constant instance, possibly more things in the future.
Examples
--------
>>> d = ps.Constant()
>>> ml.add_constant(d)
"""
self.constant = constant
self.parameters = self.get_init_parameters(initial=False)
self._check_stressmodel_compatibility()
def add_transform(self, transform):
"""Add a Transform to the time series Model.
Parameters
----------
transform: pastas.transform
instance of a pastas.transform object.
Examples
--------
>>> tt = ps.ThresholdTransform()
>>> ml.add_transform(tt)
See Also
--------
pastas.transform
"""
transform.set_model(self)
self.transform = transform
self.parameters = self.get_init_parameters(initial=False)
self._check_stressmodel_compatibility()
def add_noisemodel(self, noisemodel):
"""Adds a noisemodel to the time series Model.
Parameters
----------
noisemodel: pastas.noisemodels.NoiseModelBase
Instance of NoiseModelBase
Examples
--------
>>> n = ps.NoiseModel()
>>> ml.add_noisemodel(n)
"""
self.noisemodel = noisemodel
self.noisemodel.set_init_parameters(oseries=self.oseries.series)
# check whether noise_alpha is not smaller than ml.settings["freq"]
freq_in_days = _get_dt(self.settings["freq"])
noise_alpha = self.noisemodel.parameters.initial.iloc[0]
if freq_in_days > noise_alpha:
self.noisemodel._set_initial("noise_alpha", freq_in_days)
self.parameters = self.get_init_parameters(initial=False)
@get_stressmodel
def del_stressmodel(self, name):
"""Method to safely delete a stress model from the Model.
Parameters
----------
name: str
string with the name of the stressmodel object.
Notes
-----
To obtain a list of the stressmodel names type:
>>> ml.get_stressmodel_names()
"""
self.stressmodels.pop(name, None)
self.parameters = self.get_init_parameters(initial=False)
def del_constant(self):
"""Method to safely delete the Constant from the Model.
"""
if self.constant is None:
self.logger.warning("No constant is present in this model.")
else:
self.constant = None
self.parameters = self.get_init_parameters(initial=False)
def del_transform(self):
"""Method to safely delete the transform from the Model.
"""
if self.transform is None:
self.logger.warning("No transform is present in this model.")
else:
self.transform = None
self.parameters = self.get_init_parameters(initial=False)
def del_noisemodel(self):
"""Method to safely delete the noise model from the Model.
"""
if self.noisemodel is None:
self.logger.warning("No noisemodel is present in this model.")
else:
self.noisemodel = None
self.parameters = self.get_init_parameters(initial=False)
def simulate(self, p=None, tmin=None, tmax=None, freq=None, warmup=None,
return_warmup=False):
"""Method to simulate the time series model.
Parameters
----------
p: array_like, optional
array_like object with the values as floats representing the
model parameters. See Model.get_parameters() for more info if
parameters is None.
tmin: str, optional
String with a start date for the simulation period (E.g. '1980').
If none is provided, the tmin from the oseries is used.
tmax: str, optional
String with an end date for the simulation period (E.g. '2010').
If none is provided, the tmax from the oseries is used.
freq: str, optional
String with the frequency the stressmodels are simulated. Must
be one of the following: (D, h, m, s, ms, us, ns) or a multiple of
that e.g. "7D".
warmup: float or int, optional
Warmup period (in Days).
return_warmup: bool, optional
Return the simulation including the the warmup period or not,
default is False.
Returns
-------
sim: pandas.Series
pandas.Series containing the simulated time series
Notes
-----
This method can be used without any parameters. When the model is
solved, the optimal parameters values are used and if not,
the initial parameter values are used. This allows the user to
get an idea of how the simulation looks with only the initial
parameters and no calibration.
"""
# Default options when tmin, tmax, freq and warmup are not provided.
if tmin is None and self.settings['tmin']:
tmin = self.settings['tmin']
else:
tmin = self.get_tmin(tmin, use_oseries=False, use_stresses=True)
if tmax is None and self.settings['tmax']:
tmax = self.settings['tmax']
else:
tmax = self.get_tmax(tmax, use_oseries=False, use_stresses=True)
if freq is None:
freq = self.settings["freq"]
if warmup is None:
warmup = self.settings["warmup"]
elif not isinstance(warmup, Timedelta):
warmup = Timedelta(warmup, "D")
# Get the simulation index and the time step
sim_index = self._get_sim_index(tmin, tmax, freq, warmup)
dt = _get_dt(freq)
# Get parameters if none are provided
if p is None:
p = self.get_parameters()
sim = Series(data=np.zeros(sim_index.size, dtype=float),
index=sim_index, fastpath=True)
istart = 0 # Track parameters index to pass to stressmodel object
for sm in self.stressmodels.values():
contrib = sm.simulate(p[istart: istart + sm.nparam],
sim_index.min(), tmax, freq, dt)
sim = sim.add(contrib)
istart += sm.nparam
if self.constant:
sim = sim + self.constant.simulate(p[istart])
istart += 1
if self.transform:
sim = self.transform.simulate(sim, p[istart:istart +
self.transform.nparam])
# Respect provided tmin/tmax at this point, since warmup matters for
# simulation but should not be returned, unless return_warmup=True.
if not return_warmup:
sim = sim.loc[tmin:tmax]
if sim.hasnans:
sim = sim.dropna()
self.logger.warning('Nan-values were removed from the simulation.')
sim.name = 'Simulation'
return sim
def residuals(self, p=None, tmin=None, tmax=None, freq=None, warmup=None):
"""Method to calculate the residual series.
Parameters
----------
p: array_like, optional
array_like object with the values as floats representing the
model parameters. See Model.get_parameters() for more info if
parameters is None.
tmin: str, optional
String with a start date for the simulation period (E.g. '1980').
If none is provided, the tmin from the oseries is used.
tmax: str, optional
String with an end date for the simulation period (E.g. '2010').
If none is provided, the tmax from the oseries is used.
freq: str, optional
String with the frequency the stressmodels are simulated. Must
be one of the following: (D, h, m, s, ms, us, ns) or a multiple of
that e.g. "7D".
warmup: float or int, optional
Warmup period (in Days).
Returns
-------
res: pandas.Series
pandas.Series with the residuals series.
"""
# Default options when tmin, tmax, freq and warmup are not provided.
if tmin is None:
tmin = self.settings['tmin']
if tmax is None:
tmax = self.settings['tmax']
if freq is None:
freq = self.settings["freq"]
# simulate model
sim = self.simulate(p, tmin, tmax, freq, warmup,
return_warmup=False)
# Get the oseries calibration series
oseries_calib = self.observations(tmin, tmax, freq)
# Get simulation at the correct indices
if self.interpolate_simulation is None:
if oseries_calib.index.difference(sim.index).size != 0:
self.interpolate_simulation = True
self.logger.info('There are observations between the '
'simulation timesteps. Linear interpolation '
'between simulated values is used.')
if self.interpolate_simulation:
# interpolate simulation to times of observations
sim_interpolated = np.interp(oseries_calib.index.asi8,
sim.index.asi8, sim.values)
else:
# all of the observation indexes are in the simulation
sim_interpolated = sim.reindex(oseries_calib.index)
# Calculate the actual residuals here
res = oseries_calib.subtract(sim_interpolated)
if res.hasnans:
res = res.dropna()
self.logger.warning('Nan-values were removed from the residuals.')
if self.normalize_residuals:
res = res.subtract(res.values.mean())
res.name = "Residuals"
return res
def noise(self, p=None, tmin=None, tmax=None, freq=None, warmup=None):
"""Method to simulate the noise when a noisemodel is present.
Parameters
----------
p: array_like, optional
array_like object with the values as floats representing the
model parameters. See Model.get_parameters() for more info if
parameters is None.
tmin: str, optional
String with a start date for the simulation period (E.g. '1980').
If none is provided, the tmin from the oseries is used.
tmax: str, optional
String with an end date for the simulation period (E.g. '2010').
If none is provided, the tmax from the oseries is used.
freq: str, optional
String with the frequency the stressmodels are simulated. Must
be one of the following: (D, h, m, s, ms, us, ns) or a multiple of
that e.g. "7D".
warmup: float or int, optional
Warmup period (in Days).
Returns
-------
noise : pandas.Series
Pandas series of the noise.
Notes
-----
The noise are the time series that result when applying a noise
model.
.. Note::
The noise is sometimes also referred to as the innovations.
Warnings
--------
This method returns None is no noise model is added to the model.
"""
if (self.noisemodel is None) or (self.settings["noise"] is False):
self.logger.error("Noise cannot be calculated if there is no "
"noisemodel present or is not used during "
"parameter estimation.")
return None
# Get parameters if none are provided
if p is None:
p = self.get_parameters()
# Calculate the residuals
res = self.residuals(p, tmin, tmax, freq, warmup)
p = p[-self.noisemodel.nparam:]
# Calculate the noise
noise = self.noisemodel.simulate(res, p)
return noise
def noise_weights(self, p=None, tmin=None, tmax=None, freq=None,
warmup=None):
""" Internal method to calculate the noise weights."""
# Get parameters if none are provided
if p is None:
p = self.get_parameters()
# Calculate the residuals
res = self.residuals(p, tmin, tmax, freq, warmup)
# Calculate the weights
weights = self.noisemodel.weights(res,
p[-self.noisemodel.nparam:])
return weights
def observations(self, tmin=None, tmax=None, freq=None,
update_observations=False):
"""Method that returns the observations series used for calibration.
Parameters
----------
tmin: str, optional
String with a start date for the simulation period (E.g. '1980').
If none is provided, the tmin from the oseries is used.
tmax: str, optional
String with an end date for the simulation period (E.g. '2010').
If none is provided, the tmax from the oseries is used.
freq: str, optional
String with the frequency the stressmodels are simulated. Must
be one of the following: (D, h, m, s, ms, us, ns) or a multiple of
that e.g. "7D".
update_observations: bool, optional
if True, force recalculation of the observations series, default
is False.
Returns
-------
oseries_calib: pandas.Series
pandas series of the oseries used for calibration of the model
Notes
-----
This method makes sure the simulation is compared to the nearest
observation. It finds the index closest to sim_index, and then returns
a selection of the oseries. in the residuals method, the simulation is
interpolated to the observation-timestamps.
"""
if tmin is None and self.settings['tmin']:
tmin = self.settings['tmin']
else:
tmin = self.get_tmin(tmin, use_oseries=False, use_stresses=True)
if tmax is None and self.settings['tmax']:
tmax = self.settings['tmax']
else:
tmax = self.get_tmax(tmax, use_oseries=False, use_stresses=True)
if freq is None:
freq = self.settings["freq"]
for key, setting in zip([tmin, tmax, freq], ["tmin", "tmax", "freq"]):
if key != self.settings[setting]:
update_observations = True
if self.oseries_calib is None or update_observations:
oseries_calib = self.oseries.series.loc[tmin:tmax]
# sample measurements, so that frequency is not higher than model
# keep the original timestamps, as they will be used during
# interpolation of the simulation
sim_index = self._get_sim_index(tmin, tmax, freq,
self.settings["warmup"])
if not oseries_calib.empty:
index = get_sample(oseries_calib.index, sim_index)
oseries_calib = oseries_calib.loc[index]
else:
oseries_calib = self.oseries_calib
return oseries_calib
def initialize(self, tmin=None, tmax=None, freq=None, warmup=None,
noise=None, weights=None, initial=True, fit_constant=True):
"""Method to initialize the model.
This method is called by the solve-method, but can also be triggered
manually. See the solve-method for a description of the arguments.
"""
if noise is None and self.noisemodel:
noise = True
elif noise is True and self.noisemodel is None:
self.logger.warning("Warning, solving with noise=True while no "
"noisemodel is present. noise set to False")
noise = False
self.settings["noise"] = noise
self.settings["weights"] = weights
self.settings["fit_constant"] = fit_constant
# Set the frequency & warmup
if freq:
self.settings["freq"] = frequency_is_supported(freq)
if warmup is not None:
self.settings["warmup"] = Timedelta(warmup, "D")
# Set time offset from the frequency and the series in the stressmodels
self.settings["time_offset"] = \
self._get_time_offset(self.settings["freq"])
# Set tmin and tmax
self.settings["tmin"] = self.get_tmin(tmin)
self.settings["tmax"] = self.get_tmax(tmax)
# make sure calibration data is renewed
self.sim_index = self._get_sim_index(self.settings["tmin"],
self.settings["tmax"],
self.settings["freq"],
self.settings["warmup"],
update_sim_index=True)
self.oseries_calib = self.observations(tmin=self.settings["tmin"],
tmax=self.settings["tmax"],
freq=self.settings["freq"],
update_observations=True)
self.interpolate_simulation = None
# Initialize parameters
self.parameters = self.get_init_parameters(noise, initial)
# Prepare model if not fitting the constant as a parameter
if self.settings["fit_constant"] is False:
self.parameters.loc["constant_d", "vary"] = False
self.parameters.loc["constant_d", "initial"] = 0.0
self.normalize_residuals = True
def solve(self, tmin=None, tmax=None, freq=None, warmup=None, noise=True,
solver=None, report=True, initial=True, weights=None,
fit_constant=True, **kwargs):
"""Method to solve the time series model.
Parameters
----------
tmin: str, optional
String with a start date for the simulation period (E.g. '1980').
If none is provided, the tmin from the oseries is used.
tmax: str, optional
String with an end date for the simulation period (E.g. '2010').
If none is provided, the tmax from the oseries is used.
freq: str, optional
String with the frequency the stressmodels are simulated. Must
be one of the following (D, h, m, s, ms, us, ns) or a multiple of
that e.g. "7D".
warmup: float or int, optional
Warmup period (in Days) for which the simulation is calculated,
but not used for the calibration period.
noise: bool, optional
Argument that determines if a noisemodel is used (only if
present). The default is noise=True.
solver: pastas.solver.BaseSolver class, optional
Class used to solve the model. Options are: ps.LeastSquares
(default) or ps.LmfitSolve. A class is needed, not an instance
of the class!
report: bool, optional
Print a report to the screen after optimization finished. This
can also be manually triggered after optimization by calling
print(ml.fit_report()) on the Pastas model instance.
initial: bool, optional
Reset initial parameters from the individual stress models.
Default is True. If False, the optimal values from an earlier
optimization are used.
weights: pandas.Series, optional
Pandas Series with values by which the residuals are multiplied,
index-based. Must have the same indices as the oseries.
fit_constant: bool, optional
Argument that determines if the constant is fitted as a parameter.
If it is set to False, the constant is set equal to the mean of
the residuals.
**kwargs: dict, optional
All keyword arguments will be passed onto minimization method
from the solver. It depends on the solver used which arguments
can be used.
Notes
-----
- The solver object including some results are stored as ml.fit.
From here one can access the covariance (ml.fit.pcov) and
correlation matrix (ml.fit.pcor).
- Each solver return a number of results after optimization. These
solver specific results are stored in ml.fit.result and can be
accessed from there.
See Also
--------
pastas.solver
Different solver objects are available to estimate parameters.
"""
# Initialize the model
self.initialize(tmin, tmax, freq, warmup, noise, weights, initial,
fit_constant)
if self.oseries_calib.empty:
raise ValueError("Calibration series 'oseries_calib' is empty! "
"Check 'tmin' or 'tmax'.")
# Store the solve instance
if solver is None:
if self.fit is None:
self.fit = LeastSquares(ml=self)
elif not issubclass(solver, self.fit.__class__):
self.fit = solver(ml=self)
self.settings["solver"] = self.fit._name
# Solve model
success, optimal, stderr = self.fit.solve(noise=self.settings["noise"],
weights=weights, **kwargs)
if not success:
self.logger.warning("Model parameters could not be estimated "
"well.")
if self.settings['fit_constant'] is False:
# Determine the residuals and set the constant to their mean
self.normalize_residuals = False
res = self.residuals(optimal).mean()
optimal[self.parameters.name == self.constant.name] = res
self.parameters.optimal = optimal
self.parameters.stderr = stderr
if report:
if isinstance(report, str):
output = report
else:
output = "full"
print(self.fit_report(output=output))
def set_initial(self, name, value, move_bounds=False):
"""Method to set the initial value of any parameter.
Parameters
----------
name: str
name of the parameter to update.
value: float
parameters value to use as initial estimate.
move_bounds: bool, optional
Reset pmin/pmax based on new initial value.
Examples
--------
>>> ml.set_initial("constant_d", 10)
"""
msg = "Deprecation warning: method is deprecated and will be removed" \
" in version 0.17.0. Use ml.set_parameter instead."
self.logger.warning(msg)
def set_vary(self, name, value):
"""Method to set if the parameter is allowed to vary.
Parameters
----------
name: str
name of the parameter to update.
value: bool
boolean to vary a parameter (True) or not (False).
Examples
--------
>>> ml.set_vary("constant_d", False)
"""
msg = "Deprecation warning: method is deprecated and will be removed" \
" in version 0.17.0. Use ml.set_parameter instead."
self.logger.error(msg)
def set_pmin(self, name, value):
"""Method to set the minimum value of a parameter.
Parameters
----------
name: str
name of the parameter to update.
value: float
minimum value for the parameter.
Examples
--------
>>> ml.set_pmin("constant_d", -10)
"""
msg = "Deprecation warning: method is deprecated and will be removed" \
" in version 0.17.0. Use ml.set_parameter instead."
self.logger.error(msg)
def set_pmax(self, name, value):
"""Method to set the maximum values of a parameter.
Parameters
----------
name: str
name of the parameter to update.
value: float
maximum value for the parameter.
Examples
--------
>>> ml.set_pmax("constant_d", 10)
"""
msg = "Deprecation warning: method is deprecated and will be removed" \
" in version 0.17.0. Use ml.set_parameter instead."
self.logger.error(msg)
def set_parameter(self, name, initial=None, vary=None, pmin=None,
pmax=None, move_bounds=False):
"""
Method to change the parameter properties.
Parameters
----------
name: str
name of the parameter to update. This has to be a single variable.
initial: float, optional
parameters value to use as initial estimate.
vary: bool, optional
boolean to vary a parameter (True) or not (False).
pmin: float, optional
minimum value for the parameter.
pmax: float, optional
maximum value for the parameter.
move_bounds: bool, optional
Reset pmin/pmax based on new initial value. Of move_bounds=True,
pmin and pmax must be None.
Examples
--------
>>> ml.set_parameter(name="constant_d", initial=10, vary=True,
>>> pmin=-10, pmax=20)
Note
----
It is highly recommended to use this method to set parameter
properties. Changing the parameter properties directly in the
parameter `DataFrame` may not work as expected.
"""
if name not in self.parameters.index:
msg = "parameter {} is not present in the model".format(name)
self.logger.error(msg)
raise KeyError(msg)
# Because either of the following is not necessarily present
noisemodel = self.noisemodel.name if self.noisemodel else "NotPresent"
constant = self.constant.name if self.constant else "NotPresent"
transform = self.transform.name if self.transform else "NotPresent"
# Get the model component for the parameter
cat = self.parameters.loc[name, "name"]
if cat in self.stressmodels.keys():
obj = self.stressmodels[cat]
elif cat == noisemodel:
obj = self.noisemodel
elif cat == constant:
obj = self.constant
elif cat == transform:
obj = self.transform
# Move pmin and pmax based on the initial
if move_bounds and initial:
if pmin or pmax:
raise KeyError("Either pmin/pmax or move_bounds must "
"be provided, but not both.")
factor = initial / self.parameters.loc[name, 'initial']
pmin = self.parameters.loc[name, 'pmin'] * factor
pmax = self.parameters.loc[name, 'pmax'] * factor
# Set the parameter properties
if initial is not None:
obj._set_initial(name, initial)
self.parameters.loc[name, "initial"] = initial
if vary is not None:
obj._set_vary(name, vary)
self.parameters.loc[name, "vary"] = bool(vary)
if pmin is not None:
obj._set_pmin(name, pmin)
self.parameters.loc[name, "pmin"] = pmin
if pmax is not None:
obj._set_pmax(name, pmax)
self.parameters.loc[name, "pmax"] = pmax
def _set_freq(self):
"""Internal method to set the frequency in the settings. This is
method is not yet applied and is for future development.
"""
freqs = set()
if self.oseries.freq:
# when the oseries has a constant frequency, us this
freqs.add(self.oseries.freq)
else:
# otherwise determine frequency from the stressmodels
for stressmodel in self.stressmodels.values():
if stressmodel.stress:
for stress in stressmodel.stress:
if stress.settings['freq']:
# first check the frequency, and use this
freqs.add(stress.settings['freq'])
elif stress.freq_original:
# if this is not available, and the original
# frequency is, take the original frequency
freqs.add(stress.freq_original)
if len(freqs) == 1:
# if there is only one frequency, use this frequency
self.settings["freq"] = next(iter(freqs))
elif len(freqs) > 1:
# if there are more frequencies, take the highest (lowest dt)
freqs = list(freqs)
dt = np.array([_get_dt(f) for f in freqs])
self.settings["freq"] = freqs[np.argmin(dt)]
else:
self.logger.info("Frequency of model cannot be determined. "
"Frequency is set to daily")
self.settings["freq"] = "D"
def _get_time_offset(self, freq):
"""Internal method to get the time offsets from the stressmodels.
Parameters
----------
freq: str
string with the frequency used for simulation.
Notes
-----
Method to check if the StressModel timestamps match
(e.g. similar hours)
"""
time_offsets = set()
for stressmodel in self.stressmodels.values():
for st in stressmodel.stress:
if st.freq_original:
# calculate the offset from the default frequency
t = st.series_original.index
base = t.min().ceil(freq)
mask = t >= base
if np.any(mask):
time_offsets.add(_get_time_offset(t[mask][0], freq))
if len(time_offsets) > 1:
msg = ("The time-offset with the frequency is not the same "
"for all stresses.")
self.logger.error(msg)
raise (Exception(msg))
if len(time_offsets) == 1:
return next(iter(time_offsets))
else:
return Timedelta(0)
def _get_sim_index(self, tmin, tmax, freq, warmup, update_sim_index=False):
"""Internal method to get the simulation index, including the warmup.
Parameters
----------
tmin: pandas.Timestamp
String with a start date for the simulation period (E.g. '1980').
If none is provided, the tmin from the oseries is used.
tmax: pandas.Timestamp
String with an end date for the simulation period (E.g. '2010').
If none is provided, the tmax from the oseries is used.
freq: str
String with the frequency the stressmodels are simulated. Must
be one of the following: (D, h, m, s, ms, us, ns) or a multiple of
that e.g. "7D".
warmup: pandas.Timedelta
Warmup period (in Days).
update_sim_index : bool, optional
if True, force recalculation of sim_index, default is False
Returns
-------
sim_index: pandas.DatetimeIndex
Pandas DatetimeIndex instance with the datetimes values for
which the model is simulated.
"""
# Check if any of the settings are updated
for key, setting in zip([tmin, tmax, freq, warmup],
["tmin", "tmax", "freq", "warmup"]):
if key != self.settings[setting]:
update_sim_index = True
break
if self.sim_index is None or update_sim_index:
tmin = (tmin - warmup).floor(freq) + self.settings["time_offset"]
sim_index = date_range(tmin, tmax, freq=freq)
else:
sim_index = self.sim_index
return sim_index
def get_tmin(self, tmin=None, use_oseries=True, use_stresses=False):
"""Method that checks and returns valid values for tmin.
Parameters
----------
tmin: str, optional
string with a year or date that can be turned into a pandas
Timestamp (e.g. pd.Timestamp(tmin)).
use_oseries: bool, optional
Obtain the tmin and tmax from the oseries. Default is True.
use_stresses: bool, optional
Obtain the tmin and tmax from the stresses. The minimum/maximum
time from all stresses is taken.
Returns
-------
tmin: pandas.Timestamp
returns pandas timestamps for tmin.
Notes
-----
The parameters tmin and tmax are leading, unless use_oseries is
True, then these are checked against the oseries index. The tmin and
tmax are checked and returned according to the following rules:
A. If no value for tmin is provided:
1. If use_oseries is True, tmin is based on the oseries
2. If use_stresses is True, tmin is based on the stressmodels.
B. If a values for tmin is provided:
1. A pandas timestamp is made from the string
2. if use_oseries is True, tmin is checked against oseries.
"""
# Get tmin from the oseries
if use_oseries:
ts_tmin = self.oseries.series.index.min()
# Get tmin from the stressmodels
elif use_stresses:
ts_tmin = Timestamp.max
for stressmodel in self.stressmodels.values():
if stressmodel.tmin < ts_tmin:
ts_tmin = stressmodel.tmin
# Get tmin and tmax from user provided values
else:
ts_tmin = | Timestamp(tmin) | pandas.Timestamp |
#!/usr/bin/env python
# coding: utf-8
# # >>>>>>>>>>>>>>>>>>>>Tarea número 3 <<<<<<<<<<<<<<<<<<<<<<<<
# # Estudiante: <NAME>
# # Ejercicio #1
# In[2]:
import os
import pandas as pd
import numpy as np
from math import pi
from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import dendrogram, ward, single, complete,average,linkage, fcluster
import scipy.cluster.hierarchy as sch
from scipy.spatial.distance import pdist
from sklearn.preprocessing import StandardScaler
# In[3]:
# Función para calcular los centroides de cada cluster¶
def centroide(num_cluster, datos, clusters):
ind = clusters == num_cluster
return(pd.DataFrame(datos[ind].mean()).T)
# In[4]:
# Función para graficar los gráficos de Barras para la interpretación de clústeres
def bar_plot(centros, labels, cluster = None, var = None):
from math import ceil, floor
from seaborn import color_palette
colores = color_palette()
minimo = floor(centros.min()) if floor(centros.min()) < 0 else 0
def inside_plot(valores, labels, titulo):
plt.barh(range(len(valores)), valores, 1/1.5, color = colores)
plt.xlim(minimo, ceil(centros.max()))
plt.title(titulo)
if var is not None:
centros = np.array([n[[x in var for x in labels]] for n in centros])
colores = [colores[x % len(colores)] for x, i in enumerate(labels) if i in var]
labels = labels[[x in var for x in labels]]
if cluster is None:
for i in range(centros.shape[0]):
plt.subplot(1, centros.shape[0], i + 1)
inside_plot(centros[i].tolist(), labels, ('Cluster ' + str(i)))
plt.yticks(range(len(labels)), labels) if i == 0 else plt.yticks([])
else:
pos = 1
for i in cluster:
plt.subplot(1, len(cluster), pos)
inside_plot(centros[i].tolist(), labels, ('Cluster ' + str(i)))
plt.yticks(range(len(labels)), labels) if pos == 1 else plt.yticks([])
pos += 1
# In[5]:
# Función para graficar los gráficos tipo Radar para la interpretación de clústeres
def radar_plot(centros, labels):
from math import pi
centros = np.array([((n - min(n)) / (max(n) - min(n)) * 100) if
max(n) != min(n) else (n/n * 50) for n in centros.T])
angulos = [n / float(len(labels)) * 2 * pi for n in range(len(labels))]
angulos += angulos[:1]
ax = plt.subplot(111, polar = True)
ax.set_theta_offset(pi / 2)
ax.set_theta_direction(-1)
plt.xticks(angulos[:-1], labels)
ax.set_rlabel_position(0)
plt.yticks([10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
["10%", "20%", "30%", "40%", "50%", "60%", "70%", "80%", "90%", "100%"],
color = "grey", size = 8)
plt.ylim(-10, 100)
for i in range(centros.shape[1]):
valores = centros[:, i].tolist()
valores += valores[:1]
ax.plot(angulos, valores, linewidth = 1, linestyle = 'solid',
label = 'Cluster ' + str(i))
ax.fill(angulos, valores, alpha = 0.3)
plt.legend(loc='upper right', bbox_to_anchor = (0.1, 0.1))
# ### a) Cargue la tabla de datos SpotifyTop2018 40 V2.csv
# In[7]:
os.chdir("/Users/heinerleivagmail.com")
print(os.getcwd())
data = pd.read_csv('SpotifyTop2018_40_V2.csv',delimiter=',',decimal=".")
print(data)
# In[8]:
# Normalizando y centrando la tabla
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaled_values = scaler.fit_transform(data)
data.loc[:,:] = scaled_values
print(data)
datos = data
# In[9]:
ward_res = ward(datos) #Ward
single_res = single(datos) #Salto mínimo
complete_res = complete(datos) #Salto Máxim
average_res = average(datos) #Promedio
# ### b) Ejecute un Clustering Jerarquico con la agregacion del Salto Maximo, Salto Mınimo, Promedio y Ward. Grafique el dendograma con cortes para dos y tres clusteres.
# In[10]:
dendrogram(average_res,labels= datos.index.tolist())
plt.figure(figsize=(13,10))
dendrogram(complete_res,labels= datos.index.tolist())
plt.figure(figsize=(13,10))
dendrogram(single_res,labels= datos.index.tolist())
plt.figure(figsize=(13,10))
dendrogram(ward_res,labels= datos.index.tolist())
# Agrega cortes con 2 y 3 clústeres con agregación de Ward
ax = plt.gca()
limites = ax.get_xbound()
ax.plot(limites, [11, 11], '--', c='k')
ax.plot(limites, [9.4, 9.4], '--', c='k')
ax.text(limites[1], 11, ' dos clústeres', va='center', fontdict={'size': 15})
ax.text(limites[1], 9.4, ' tres clústeres', va='center', fontdict={'size': 15})
plt.xlabel("Orden en el eje X./nPor hacer la normalizacion de los datos el cluster 3 quedo muy cerca del 2")
plt.ylabel("Distancia o Agregación")
# ### c) Usando tres clusteres interprete los resultados del ejercicio anterior para el caso de agregacion de Ward usando graficos de barras y graficos tipo Radar.
# In[11]:
grupos = fcluster(linkage(pdist(datos), method = 'ward', metric='euclidean'), 3, criterion = 'maxclust')
grupos = grupos-1 # Se resta 1 para que los clústeres se enumeren de 0 a (K-1), como usualmente lo hace Python
# El siguiente print es para ver en qué cluster quedó cada individuo
print(grupos)
centros = np.array(pd.concat([centroide(0, datos, grupos),
centroide(1, datos, grupos),
centroide(2, datos, grupos)]))
print(centros)
plt.figure(1, figsize = (20, 8))
bar_plot(centros, datos.columns)
# In[12]:
# Interpretación 3 Clústeres - Gráfico Radar plot con Ward
grupos = fcluster(linkage(pdist(datos), method = 'ward', metric='euclidean'), 3, criterion = 'maxclust')
grupos = grupos-1
print(grupos)
centros = np.array(pd.concat([centroide(0, datos, grupos),
centroide(1, datos, grupos),
centroide(2, datos, grupos)]))
print(centros)
plt.figure(1, figsize = (10, 10))
radar_plot(centros, datos.columns)
# ### Interpretacion
# In[31]:
# Analisis:
# Cluster 1 (azul), este cluster se caracteriza por tener los niveles mas altos (100) en accousticness, es decir, las
# canciones en este cluster son las mas acusticas, tambien, tiene el mayor speechiness, es decir, hay muchas palabras
# en las canciones que estan en este cluster, ademas cuenta con el mayor numero en liveness (es decir hay publico en
# la cancion), tambien tiene los niveles mas altos de valence (mucha postitividad en las canciones), el time_signature
# que representa la cantidad de beats que hay en cada barra de medida y por ultimo danceability, que son las canciones
# que tienen mayor potencial para ser bailable, a modo general en este cluster se agrupan las canciones quee son mas
# positivas, mas aptas para bailar, con mayor sonido, mayor presencia de publico, es decir, son las canciones mas "alegres",
# por otro lado este cluster se caracteriza por tener canciones que tienen niveles 0 de instrumentalidad, su duracion en
# milisegundos es baja, su energy es moderada baja al igual que su loudness, es decir su sonoridad en la pista es baja.
# Cluster 2 (naranja): este se representa por tener las canciones que tienen mayor duracion en milisegundos, asi como
# las canciones que se encuentran en este cluster cuentan con tempo son las que tienen mayores beats por minuto (variable
# tempo). Ademas su acousticness es moderado, es decir estas canciones presentan algo de acustica y su speechiness, que
# es la presencia de palabras en las canciones tiende a ser bajo. En las demas variables este cluster presenta bajos niveles
# entonces se puede decir que este cluster se caracteriza por tener las canciones con mayor duracion, con mas beats por
# minuto y son canciones que combinan acustica y letras en sus estrofas.
#Cluster 3 (verde): en este caso las canciones que pertenecen a este cluster se caracterizan por tener los mas altos
# beats por minuto, presentan mucha instrumentalidad, su time_signature es alto, lo que representa altos beats en cada
# barra o medida, su intensidad es bastante alta (energy) y su sonoridad en decibiles tambien es bastante alta. Las
# canciones en este grupo se caracterizan por altamente instrumentales con nula cantidad de voces en sus piezas, y son
# canciones bastante intensas y con los beats mas altos por minuto, son canciones que son relativamente bailables, y su
# positividad musical es moderada y no presenta publico en sus piezas. Son canciones por asi decirlo, meramente instrumen-
# tales con poco o nulo registro de voz por parte de tun cantante.
# ### d) Grafique usando colores sobre las dos primeras componentes del plano principal en el Analisis en Componentes Principales los clusteres obtenidos segun la clasificacion Jerarquica (usando tres clusteres).
# In[13]:
# Importando datos
campo = pd.read_csv('SpotifyTop2018_40_V2.csv',delimiter=',',decimal=".")
# Normalizando y centrando la tabla
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaled_values = scaler.fit_transform(campo)
campo.loc[:,:] = scaled_values
datosx = campo
#Asignando variables
cal = datosx.iloc[:,[0,1,2,3,4,5,6,7,8,9,10]].values
# In[14]:
# Definiendo parametros de dendrograma
clustering_jerarquico = linkage(cal, 'ward')
# In[15]:
# Ploteando dendrograma
dendrogram = sch.dendrogram(clustering_jerarquico)
# In[16]:
# Asignando cluster a cada variable
clusters = fcluster(clustering_jerarquico, t=9.4, criterion = 'distance') #t corresponde al corte para obtener los 3
# clusters
clusters
# In[17]:
# Creando clusters en cada fila
datosx['target'] = clusters
# In[18]:
# Guardando nueva variable generada
campo.to_csv("/Users/heinerleivagmail.com/SpotifyTop2018_40_V3.csv")
# In[19]:
# Llamando DF creado con la asignacion de cada cluster (tabla ya esta normalizada)
df = pd.read_csv('SpotifyTop2018_40_V3.csv',delimiter=',',decimal=".")
# Separando variables numericas
x = df.iloc[:,[0,1,2,3,4,5,6,7,8,9,10]].values
# Separando los clusters obtenidos
y = df.iloc[:,[11]].values
# In[20]:
# Definiendo parametros del nuevo PCA a partir del Dendrograma
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(datosx)
principalDf = pd.DataFrame(data = principalComponents
, columns = ['Componente 0', 'Componente 1'])
finalDf = pd.concat([principalDf, df.iloc[:,[12]]], axis = 1)
finalDf.head(10)
# In[21]:
# Definicion de la estructura del PCA con colores respectivos
fig = plt.figure(figsize = (8,8))
ax = fig.add_subplot(1,1,1)
ax.set_xlabel('Componente 0', fontsize = 15)
ax.set_ylabel('Componente 1', fontsize = 15)
ax.set_title('Plano Principal', fontsize = 20)
targets = [1, 2, 3]
colors = ['g', 'r', 'b']
for target, color in zip(targets,colors):
indicesToKeep = finalDf['target'] == target
ax.scatter(finalDf.loc[indicesToKeep, 'Componente 0']
, finalDf.loc[indicesToKeep, 'Componente 1']
, c = color
, s = 50)
ax.legend(targets)
ax.grid()
# # Ejercicio 2
# ### a) Efectue un Clustering Jerarquico usando solo las variables numericas y de una interpretacion usando 3 clusteres.
# In[6]:
os.chdir("/Users/heinerleivagmail.com")
print(os.getcwd())
corazon = pd.read_csv('SAheart.csv',delimiter=';',decimal=".")
print(corazon.head())
print(corazon.shape)
# In[7]:
corazon2 = pd.DataFrame(data=corazon, columns=['sbp', 'tobacco', 'ldl',
'adiposity','typea','obesity','alcohol','age'])
print(corazon2)
print(corazon2.shape)
corazon2.describe()
# In[8]:
# Normalizando y centrando la tabla
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaled_values = scaler.fit_transform(corazon2)
corazon2.loc[:,:] = scaled_values
print(corazon2)
datos = corazon2
# In[9]:
ward_res = ward(datos) #Ward
single_res = single(datos) #Salto mínimo
complete_res = complete(datos) #Salto Máximo
average_res = average(datos) #Promedio
# In[10]:
dendrogram(average_res,labels= datos.index.tolist())
plt.figure(figsize=(13,10))
dendrogram(complete_res,labels= datos.index.tolist())
plt.figure(figsize=(13,10))
dendrogram(single_res,labels= datos.index.tolist())
plt.figure(figsize=(13,10))
dendrogram(ward_res,labels= datos.index.tolist())
# Agrega cortes solo en 3 clústeres con agregación de Ward
ax = plt.gca()
limites = ax.get_xbound()
ax.plot(limites, [20.7, 20.7], '--', c='k')
ax.text(limites[1], 20.7, ' tres clústeres', va='center', fontdict={'size': 15})
plt.xlabel("Orden en el eje X")
plt.ylabel("Distancia o Agregación")
# In[11]:
# Graficos de barras con Ward
grupos = fcluster(linkage(pdist(datos), method = 'ward', metric='euclidean'), 3, criterion = 'maxclust')
grupos = grupos-1 # Se resta 1 para que los clústeres se enumeren de 0 a (K-1), como usualmente lo hace Python
# El siguiente print es para ver en qué cluster quedó cada individuo
print(grupos)
centros = np.array(pd.concat([centroide(0, datos, grupos),
centroide(1, datos, grupos),
centroide(2, datos, grupos)]))
print(centros)
plt.figure(1, figsize = (30, 10))
bar_plot(centros, datos.columns)
# In[12]:
grupos = fcluster(linkage(pdist(datos), method = 'ward', metric='euclidean'), 3, criterion = 'maxclust')
grupos = grupos-1 # Se resta 1 para que los clústeres se enumeren de 0 a (K-1), como usualmente lo hace Python
# El siguiente print es para ver en qué cluster quedó cada individuo
print(grupos)
centros = np.array(pd.concat([centroide(0, datos, grupos),
centroide(1, datos, grupos),
centroide(2, datos, grupos)]))
print(centros)
plt.figure(1, figsize = (10, 10))
radar_plot(centros, datos.columns)
# ### Interpretacion
# In[32]:
# Para este segundo caso se puede ver como el cluster 1 (azul): son los individuos que estan sanos, ya que solo presentan
# un comportamiento tipo A alto muy alto, que los hace mas competitivos, orientados al trabajo, etc., en lo demas
# no presentan ninguna otra caracteristica.
# Cluster 2 (naranja): se caracteriza por tener a los individuos que tienen las edades mas altas, asi como la presion
# cardiaca, adiposidad y obesidad mas altas, asi como el colesterol, mientras que en otros parametros como el comporta-
# miento del tipo A (menos de 40%) y los niveles de alcohol estan bajos, es decir, no son consumidores de alcohol.
# En este cluster se pueden agrupar a todas aquellas personas que ya son avanzadas de edad y que presentan altos
# grados de obesidad y con ello colesterol y una presion cardiaca mas alta, y que ademas tienen una ligera tendencia
# a ser del comportamiento tipo A.
# En el cluster 3 (verde) se puede ver como los individuos de este grupo son los que tienen mas vicios (consumen mayores
# indices de alcohol y fuman mucho) ademas, presentan las edades altas de igual forma y su adiposidad tambien alcanza
# casi el 90%, por otro lado, presentan mas de un 60% de obesidad, y mas de un 40% de colesterol, ademas su presion
# cardiaca tambien es muy alta, pero su comportamiento tipo A es muy bajo, al parecer en este grupo estan las personas
# que son mayores tienen vicios, y ademas cuentan con presiones sanguineas altas.
# ### b) Efectue un Clustering Jerarquico usando las variables numericas y las variables categoricas. Luego de una interpretacion usando 3 clusteres.
# In[13]:
os.chdir("/Users/heinerleivagmail.com")
print(os.getcwd())
datos2 = pd.read_csv('SAheart.csv',delimiter=';',decimal=".")
print(datos.head())
print(datos.shape)
# In[14]:
def recodificar(col, nuevo_codigo):
col_cod = | pd.Series(col, copy=True) | pandas.Series |
import pytest
import sys, os
import pandas as pd
import pyDSlib
def test_count_subgroups_in_group():
df = {}
df['subgroup'] = []
df['group'] = []
for color in ['R','G','B']:
slice_ = [i for i in range(3)]
df['subgroup'] = df['subgroup']+ slice_+slice_
df['group'] = df['group'] + [color for vale in slice_+slice_]
df = | pd.DataFrame.from_dict(df) | pandas.DataFrame.from_dict |
import numpy as np
import pandas as pd
import datetime as dt
import time
import matplotlib.pyplot as plt
import seaborn as sns
import vnpy.analyze.data.data_prepare as dp
from jqdatasdk import *
from vnpy.trader.database import database_manager
from mpl_toolkits.axisartist.parasite_axes import HostAxes, ParasiteAxes
import matplotlib.dates as dates
from matplotlib import ticker
import math
from vnpy.analyze.util.cal_returns import CalReturns
def step1_draw_close(data, show=False):
"""画出收盘价的价格曲线"""
df = data.copy()
if show:
fig, ax = plt.subplots(1, figsize=(16, 9))
df.close.plot(ax=ax, figsize=(16, 9), colormap='coolwarm')
plt.show()
def step2_pe_pb(data, show=False):
"""画出PE、PB之间的关系"""
df = data.copy()
print('PE\PB的相关系数如下:\n %s' % (df[['pe', 'pb']].corr()))
if show:
sns.jointplot(df['pe'], df['pe'], kind='reg', height=9)
fig, ax = plt.subplots(1, figsize=(16, 9))
df[['pe', 'pb']].plot(ax=ax, secondary_y=['pb'], figsize=(16, 9), colormap='coolwarm')
plt.show()
def step3_close_pe(data, pe_percentile_blow=0.4, pe_percentile_upper=0.6, show=False):
"""close与PE之间关系"""
df = data.copy()
print('CLOSE\PE的相关系数如下:\n %s' % (df[['close', 'pe']].corr()))
percentile_blow = df['pe'].quantile(pe_percentile_blow) # 4分位
percentile_upper = df['pe'].quantile(pe_percentile_upper) # 6分位
print('下分为使用%s,PE值:%s, 上分为使用%s,PE值:%s' % (
pe_percentile_blow, percentile_upper, pe_percentile_upper, percentile_upper))
if show:
sns.jointplot(df['close'], df['pe'], kind='reg', height=9)
fig, ax = plt.subplots(1, figsize=(16, 9))
df[['close', 'pe']].plot(ax=ax, secondary_y=['pe'], figsize=(16, 9), colormap='coolwarm')
plt.axhline(y=percentile_blow, color='g', linestyle='-')
plt.axhline(y=percentile_upper, color='r', linestyle='-')
plt.show()
def step4_close_percentile_pe(data, n=7, show=False, show_p_hist=False):
"""
close与PE百分位之间关系
不同时期之间的PE对比已经发生巨大变化,根据一个周期内百分位对比更有价值
"""
df = data.copy()
# 这里的计算按一年244个交易日计算
windows = int(n * 244) # 将时间取整数
if len(data) < windows:
print('当前数据小于滚动窗口设置,无法完成滚动分为计算')
return
column = 'percentile_' + str(n) + 'Y'
df[column] = df['pe'].rolling(windows).apply(lambda x: pd.Series(x).rank().iloc[-1] /
pd.Series(x).shape[0], raw=True)
if show:
fig, ax = plt.subplots(1, figsize=(16, 9))
df[['close', column]].plot(ax=ax, secondary_y=[column], figsize=(16, 9), colormap='coolwarm')
plt.show()
if show_p_hist:
"""动态百分位分布,直方图"""
fig, ax = plt.subplots(1, figsize=(16, 9))
df[column].hist(ax=ax, figsize=(16, 9))
plt.show()
return df
def first_trade_date_in_month(df):
"""找到每个月第一个交易日"""
month_first_date = set()
pre_year, pre_month = 0, 0
for index, row in df.iterrows():
if pre_year != index.year or pre_month != index.month:
month_first_date.add(index)
pre_year = index.year
pre_month = index.month
return month_first_date
def trade_model(data, column='percentile_7Y', show=False, show_annual_invest=True):
"""
交易模型:
1、低估:买入、适中:保持不变、高估:卖出
"""
df = data.copy()
# 去除无滚动百分位数据
df.dropna(inplace=True)
# 找每个月第一个交易日
month_first_date = first_trade_date_in_month(df)
# 假设每个月第一个交易日增加5000元可支配
month_invest_const = 5000
available_cash = 0 # 可用资金
stock_q = 0 # 股票数量(为计算方便,可以使用小数表示)
# 图形展示数据:累计投入、当前持有股票资产、变现回报
trade_date = []
invest_cash = []
stock_assets = []
return_cash = []
# 买入记录
trades = {}
df_return = pd.DataFrame(columns=('date', 'invest', 'stock', 'return'))
for index, row in df.iterrows():
# 首先还是遵守标准定投思想,投还是不投,不考虑投多少问题。卖出的资产直接入袋为安,不参与定投
trade_date.append(index)
if month_first_date.__contains__(index):
# available_cash = available_cash + month_invest_const
# 当月不投下月自动清空
available_cash = month_invest_const
if row[column] < 0.4 and available_cash > 0:
# 较低估值区间, 买入
afford_q = available_cash / row['close']
stock_q += afford_q
invest_cash.append(available_cash)
trades[index] = available_cash # 加入买入记录
available_cash = 0
return_cash.append(0)
elif row[column] > 0.6 and stock_q > 0:
# 过高估值区间, 卖出
selled_p = month_invest_const / row['close'] # 卖掉份数
stock_q = stock_q - selled_p
invest_cash.append(0)
return_cash.append(month_invest_const)
else:
# 不做任何操作
invest_cash.append(0)
return_cash.append(0)
stock_assets.append(stock_q * row['close'])
df_return['date'] = trade_date
df_return['invest'] = invest_cash
df_return['stock'] = stock_assets
df_return['return'] = return_cash
df_return['invest_cumsum'] = df_return['invest'].cumsum()
df_return['return_cumsum'] = df_return['return'].cumsum()
df_return['hold'] = df_return['return_cumsum'] + df_return['stock']
# 设置data为index
df_return['date'] = pd.to_datetime(df_return['date']) # 转换时间类型
df_return.set_index(['date'], inplace=True)
df_return.index.name = None # 去掉索引列名
df_return['close'] = df['close']
print(df_return.head())
# 计算年化收益
earings = CalReturns.annual_returns(trades, df_return.index[-1], df_return['hold'][-1])
print('年化收益率:%s' % earings)
if show:
fig, ax = plt.subplots(1, figsize=(16, 9))
df_return[['invest_cumsum', 'hold', 'close']].plot(ax=ax, secondary_y=['close'], figsize=(16, 9),
colormap='coolwarm')
plt.show()
if show_annual_invest:
"""展示年度投入与收益, 📊柱状图 (年度投入、年度剩余))"""
trade_year = [date.year for date in trade_date]
df_g = | pd.DataFrame(columns=('date', 'invest')) | pandas.DataFrame |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import FinanceDataReader as fdr
from pykrx import stock
import datetime
import requests
# from datetime import timedelta # 마이크로초 전, 마이크로초 후 를 구하고 싶다면 timedelta
from dateutil.relativedelta import relativedelta # 몇달 전, 몇달 후, 몇년 전, 몇년 후 를 구하고 싶다면 relativedelta
from pypfopt.efficient_frontier import EfficientFrontier
from pypfopt import risk_models
from pypfopt import expected_returns
from pypfopt.discrete_allocation import DiscreteAllocation, get_latest_prices
from pypfopt import plotting
import warnings
warnings.filterwarnings(action='ignore')
from Class_Strategies import Strategies
import csv
stock_dual = Strategies.getHoldingsList('KOSPI')
prices = Strategies.getCloseDatafromList(stock_dual, '2021-01-01')
dualmomentumlist = Strategies.DualMomentum(prices, lookback_period = 20, n_selection = len(stock_dual)//2)
# # print(dualmomentumlist)
#
# with open('dualmomentumlist.csv','w') as file:
# write = csv.writer(file)
# write.writerow(dualmomentumlist)
def Dual_sharpe():
# 종목 이름 및 코드
kospi_temp = fdr.StockListing('KOSPI')[['Symbol', 'Name']]
kosdaq_temp = fdr.StockListing('KOSDAQ')[['Symbol', 'Name']]
code_name_dict = pd.concat([kospi_temp, kosdaq_temp])
code_name_dict = code_name_dict.set_index('Symbol').to_dict().get('Name') # {'095570': 'AJ네트웍스',
# assets = pd.read_csv('dualmomentumlist.csv') #np.array(dualmomentumlist)
# print(assets)
start_date = datetime.datetime.today() - relativedelta(years=3)
start_date = start_date.strftime('%Y%m%d')
today = datetime.datetime.today().strftime("%Y%m%d")
end_date = today
df = pd.DataFrame()
##################### 여기 추가됐습니다 : 관리종목 제거 ######################
# 관리종목 제거하기 위한 코드
# temp_assets = pd.read_csv('dualmomentumlist.csv')
temp_assets = np.array(dualmomentumlist)
# print(temp_assets)
krx_adm = fdr.StockListing('KRX-ADMINISTRATIVE')
# print(krx_adm)
# KRX 관리종목의 종목코드
under_ctrl = krx_adm['Symbol'].values
# print(under_ctrl)
assets = np.setdiff1d(temp_assets, under_ctrl)
# print(assets)
for s in assets:
df[s] = fdr.DataReader(s, start_date, end_date)['Close']
# drop null
dfnull = df.dropna(axis=1)
# 수익률의 공분산
mu = expected_returns.mean_historical_return(dfnull)
S = risk_models.sample_cov(dfnull)
# print(plotting.plot_covariance(S))
# 포폴 최적화 (Max sharp ratio) - 급등주
ef = EfficientFrontier(mu, S, solver="SCS")
weights = ef.max_sharpe()
cleaned_weights = ef.clean_weights()
print(ef.portfolio_performance(verbose=True))
one_million = 1000000
portfolio_val = 15 * one_million
latest_prices = get_latest_prices(dfnull)
weights = cleaned_weights
da = DiscreteAllocation(weights, latest_prices, total_portfolio_value=portfolio_val)
allocation, leftover = da.lp_portfolio(verbose=False)
rmse = da._allocation_rmse_error(verbose=False)
# 각 종목별 실제 투자 금액
inv_total_price = {}
for i in allocation.keys():
inv_total_price[i] = latest_prices.loc[i] * allocation[i]
inv_total_price
# 총 투자금액
investment = 0
for i in inv_total_price.values():
investment += i
print(investment)
# 각 종목별 실제 투자 비중
inv_total_weight = {}
for i in allocation.keys():
inv_total_weight[i] = inv_total_price[i] / investment
inv_total_weight
# 투자비중의 합계
investment_w = 0
for i in inv_total_weight.values():
investment_w += i
print(investment_w)
# 결과값으로 불러올 값을 리스트로 저장
name_list = [] # 종목명(회사이름)
total_price_stock = [] # 각 종목별 실제 투자 금액
total_weight_stock = [] # 각 종목별 실제 투자 비중
for i in allocation.keys(): # i = 포트폴리오에 할당된 종목의 종목코드
name_list.append(code_name_dict.get(i))
total_price_stock.append(inv_total_price.get(i))
total_weight_stock.append(inv_total_weight.get(i))
# Get the discrete allocation values
discrete_allocation_list = []
for symbol in allocation:
discrete_allocation_list.append(allocation.get(symbol))
print(discrete_allocation_list)
portfolio_df = | pd.DataFrame(columns=['종목명', '종목코드', '수량(주)', '투자금액(원)', '투자비중']) | pandas.DataFrame |
import pandas as pd
import os
os.chdir('db')
from pathlib import Path
import sys
#Package to pre process
import gensim
from gensim.utils import simple_preprocess
from gensim.models import ldamodel
from gensim.test.utils import datapath
import numpy as np
from gensim.models import Word2Vec
from shorttext.utils import standard_text_preprocessor_1
pre = standard_text_preprocessor_1()
train = pd.read_csv('train_set.csv')
test = pd.read_csv('test_set.csv')
# df_test = pd.DataFrame('test_set.csv')
def get_process_data_frame(data_frame):
# Preprocessing
data_frame['processed'] = data_frame['response_text'].apply(pre)
corpus = data_frame['processed'].apply(lambda x: x.split(' '))
return corpus
train_corpus = get_process_data_frame(train)
test_corpus = get_process_data_frame(test)
def get_word_2_vec_df(corpus, dim = 100):
# train word2vec model
# Default dimensions is 100
# sg: (default 0 or CBOW) The training algorithm, either CBOW (0) or skip gram (1).
model = Word2Vec(corpus, min_count=1)
# summarize the loaded model
# print(model)
# summarize vocabulary
# words = list(model.wv.vocab)
# print(words)
# access vector for one word
# print(model['think'])
mean_corpus = pd.DataFrame(corpus.apply(lambda x: np.mean(model[x], axis=0)))
return pd.DataFrame(mean_corpus['processed'].values.tolist(), index = mean_corpus.index)
dir = os.getcwd()
for dim in range(100, 501, 50):
train_set = get_word_2_vec_df(train_corpus, dim)
test_set = get_word_2_vec_df(test_corpus, dim)
train_set = pd.concat([train_set, train['response_round_score']], axis=1)
test_set = pd.concat([test_set, test['response_round_score']], axis=1)
train_file = "word_2_vec_train{}.csv".format(dim)
test_file = "word_2_vec_test{}.csv".format(dim)
train_set.to_csv(os.path.join(dir, 'word2vecmodels', train_file))
test_set.to_csv(os.path.join(dir, 'word2vecmodels', test_file))
for dim in range(10, 91, 20):
train_set = get_word_2_vec_df(train_corpus, dim)
test_set = get_word_2_vec_df(test_corpus, dim)
train_set = pd.concat([train_set, train['response_round_score']], axis=1)
test_set = | pd.concat([test_set, test['response_round_score']], axis=1) | pandas.concat |
# The script is used to perform analysis of XRF spectra measured by
# Olympus Delta XRF (https://www.olympus-ims.com/en/xrf-xrd/delta-handheld/delta-prof/).
# The measurement is done for powder samples which are fixed on the XRF
# device using a custom 3D printed plastic holder(s). Several holders can be used in one
# series of measurements, which should be specified in the command line arguments.
# The analysis is based on calculating calibration for a certain element,
# and calculating the amount of element in the samples with unknown amount.
import argparse
import chardet
import json
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
import sys
from datetime import datetime
from glob import glob
from scipy import stats
from scipy.optimize import curve_fit
from scipy.signal import savgol_filter
from element_data import get_elements
##########
### Section with common vairables related to spectra file and measurements
##########
# CSV contains first column which has titles of rows
TITLE_COL = 1
# number of beams in the XRF measurement. 3 for Olympus Delta XRF
NUM_BEAMS = 3
# number of measurement repeats. Usually 3 is done
NUM_REPEATS = 3
# row for number of beams = ExposureNum
ROW_NUM_BEAMS = 0
# row for number of data points in spectrum
ROW_NUM_DATA = 4
# row for time of measurement, to calculate cps instead of cumulative counts
ROW_NUM_TIME = 7 # seconds
def get_spectrum(spectra: pd.DataFrame, # dataframe with all spectra (loaded CSV file)
spectrum_num: int, # zero based index of sample spectrum to take
repeat_num: int, # zero based measurement repeat number for the sample spectrum
beam_num: int, # zero based measurent beam number for the sample spectrum
num_repeats=NUM_REPEATS, # total number of repeats for each sample
num_beams=NUM_BEAMS, # total number of beams for each sample
title_col=TITLE_COL, # indicated if title column (first one) is present in CSV
skip_XRF_calibration=True) -> np.ndarray: # to skip first spectrum in CSV which is usually mandatory calibration for device
# calculate column index which is for spectrum to get
spectrum_num = title_col + int(skip_XRF_calibration) + num_repeats * spectrum_num * num_beams + repeat_num * num_repeats + beam_num
# print('Selected spectrum number:', spectrum_num)
# get number of data points in spectrum measured
num_points = int(spectra.iloc[ROW_NUM_DATA, spectrum_num])
# get measurement time to caluclate cps
meas_time = float(spectra.iloc[ROW_NUM_TIME, spectrum_num])
y_spectrum = spectra.iloc[-num_points:, spectrum_num].to_numpy() / meas_time
return y_spectrum
def fit_gauss(peak_spectrum: np.array) -> np.array:
'''Fit XRF peak with gaussian.'''
def gauss(x: np.array, *params) -> np.array:
# Gaussian function with params = [baseline, A, mu, sigma] parameters
baseline, A, mu, sigma = params
return baseline + A * np.exp(-(x - mu)**2 / (2. * sigma**2))
# inital params guess
p0 = [0., 1, 0., 1]
x = peak_spectrum[0] / np.max(peak_spectrum[0])
y = peak_spectrum[1] / np.max(peak_spectrum[1])
params, cov = curve_fit(gauss, x, y, p0)
peak_fit = gauss(x, *params) * np.max(peak_spectrum[1])
return np.array([peak_spectrum[0], peak_fit])
def calc_peak_ints(args: argparse.Namespace,
element: str,
spectrum_num: int) -> np.ndarray:
'''Calculate peak integrals for element for certain spetrum number.'''
# select beam number from the element data
element = args.elements_data[element]
repeat_ints = []
for rep_num in range(args.repeats):
spectrum = get_spectrum(args.spectra, spectrum_num, rep_num, element.beam,
num_repeats=args.repeats, num_beams=args.num_beams,
title_col=TITLE_COL, skip_XRF_calibration=args.skip_XRF_calibration)
spectrum = savgol_filter(spectrum, element.filter_window, 2)
# integrals for each peak
peak_ints = []
for peak_coords in element.int_limits:
# get indices from x coordinate
peak_mask = np.logical_and(args.x_keV >= peak_coords[0], args.x_keV <= peak_coords[1])
peak = np.array([args.x_keV[peak_mask], spectrum[peak_mask]])
# print(peak)
try:
fit = fit_gauss(peak)
peak_ints.append(np.sum(fit[1]))
'''if spectrum_num == 6 and rep_num == 1:
plt.plot(args.x_keV, spectrum)
plt.plot(peak[0], peak[1])
plt.plot(fit[0], fit[1])
plt.show()'''
except RuntimeError:
print('Gauss fit failed for spectrum', spectrum_num)
peak_ints.append(np.sum(peak[1]))
# print(peak_ints)
repeat_ints.append(peak_ints)
# calculate average and std for each peak for all repeats
repeat_ints = np.array(repeat_ints)
avgs = np.mean(repeat_ints, axis=0) # / weight, not used, see python element_content.py --help
stds = np.std(repeat_ints, axis=0) # / weight
# print('averages for', element.name, 'for spectrum', spectrum_num, avgs)
return avgs, stds
def calc_background(args: argparse.Namespace,
element: str) -> np.ndarray:
'''Calculates background for holders which are at the beginning of
spectra csv file.'''
if args.skip_background:
return np.array([]), np.array([])
else:
bg_avs = []
bg_stds = []
for i in range(args.num_holders):
av, std = calc_peak_ints(args, element, i)
bg_avs.append(av)
bg_stds.append(std)
# print('bg averages', np.array(bg_avs), 'bg stds', np.array(bg_stds))
return np.array(bg_avs), np.array(bg_stds)
def analyze_element(args: argparse.Namespace,
element: str) -> np.ndarray:
'''Analyze one element to get integrals of peaks.'''
bg_avs, bg_stds = calc_background(args, element)
# element = args.elements_data[element]
int_avs = []
int_stds = []
for sp_num in range(args.num_holders, args.num_spectra):
# weight = args.powder_weights[sp_num - args.num_holders]
holder = args.holders[sp_num]
avs, stds = calc_peak_ints(args, element, sp_num)
# print('averages for sample', sp_num, 'for element', element, avs)
if not args.skip_background:
avs = avs - bg_avs[holder]
stds = np.sqrt(stds**2 + bg_stds[holder]**2)
# print('averages after bg for sample', sp_num, 'for element', element, avs)
# print('stds after bg for sample', sp_num, 'for element', element, stds)
int_avs.append(avs)
int_stds.append(stds)
return np.array(int_avs), np.array(int_stds)
def lin_int(x, a, b):
# linear function with intercept to fit
return a * x + b
def lin(x, a):
# linear function without intercept to fit
return a * x
def calibrate(args: argparse.Namespace) -> dict:
# first deal with the powder element
# powder_avs, powder_stds = analyze_element(args, args.powder_element)
# get mask for samples that are meant for calibration
cal_samples_mask = np.array(args.element_amounts) != ''
# get x axis for samples that are meant for calibration
args.powder_weights = args.powder_weights[cal_samples_mask]
# calcualte the total number of Au (in umol) that are placed
# on holder for measurement
x_umol = np.array([float(x) for x in args.element_amounts if x != '']) # umol from agrs
x_umol = x_umol / args.calib_weight * args.powder_weights
x_umol = np.reshape(x_umol, (-1, ))
x_perc = x_umol # to be converted to percent for each element as each element has molar mass
# Figure
# plots integrals with errors foreach element in row
# if there is more than one peak for each element, then plots those peaks separately
peak_nums = [(args.elements_data[el].int_limits.shape[0]) for el in [args.powder_element] + args.elements]
# print('number of peaks for each element', peak_nums)
plt.rcParams["figure.figsize"] = [args.fig_size * i for i in plt.rcParams["figure.figsize"]]
fig, axs = plt.subplots(len([args.powder_element] + args.elements), max(peak_nums))
# fitting results to be saved as JSON
fitting_results = {}
# peak integrals for powder element
powder_avs, powder_stds = analyze_element(args, args.powder_element)
powder_avs = powder_avs[cal_samples_mask, :]
powder_stds = powder_stds[cal_samples_mask, :]
for j, el in enumerate([args.powder_element] + args.elements):
fitting_results[el] = []
# umol to percent conversion coefficient
umol_to_perc = args.elements_data[el].molar_weight / (args.calib_weight) * 1e3 / 1e4
# x_ppm = x_umol * args.elements_data[el].molar_weight / (args.calib_weight) * 1e3
el_avs, el_stds = analyze_element(args, el)
# get rid of samples that are not meant for calibration
el_avs = el_avs[cal_samples_mask, :]
el_stds = el_stds[cal_samples_mask, :]
# fitting for each peak on an element
for i in range(el_avs.shape[1]):
el_avs[:, i] = el_avs[:, i]
el_stds[:, i] = el_stds[:, i]
if j > 0:
# j == 0 is powder element, needed to calculate calibrations
# divide by the first peak integral for the powder element
# because it is usually the case for soils and powders to analyze
el_avs[:, i] = el_avs[:, i] / powder_avs[:, 0]
el_stds[:, i] = np.sqrt((el_stds[:, i] / powder_avs[:, 0])**2 + \
(el_avs[:, i] / powder_avs[:, 0] ** 2 * powder_stds[:, 0])**2)
x_perc = x_umol * umol_to_perc
# perform linear fitting for element (i.e. skipping args.powder_element)
# res = stats.linregress(x_perc, el_avs[:, i].T)
slope = 0; slope_err = 0; intercept = 0; intercept_err = 0
y_ampl = el_avs[:, i].T
fit_func = lin_int
if args.skip_intercept:
fit_func = lin
params, cov = curve_fit(fit_func, x_perc, y_ampl)
# calculate errors
errs = np.sqrt(np.diag(cov))
# get values and errors
slope = params[0]
slope_err = errs[0]
if not args.skip_intercept:
intercept = params[1]
intercept_err = errs[1]
# calculate r2
ss_res = np.sum((y_ampl - fit_func(x_perc, *params)) ** 2)
ss_tot = np.sum((y_ampl - np.mean(y_ampl)) ** 2)
r2 = 1 - (ss_res / ss_tot)
fitting_results[el].append({
'peak': args.elements_data[el].int_limits[i].tolist(),
'intercept': intercept,
'intercept err': intercept_err,
'slope': slope,
'slope err': slope_err,
# 'r2': res.rvalue**2,
'r2': r2,
'x perc': x_perc.tolist(),
'umol to perc': umol_to_perc,
'y peak area': el_avs[:, i].tolist(),
'y peak area err': el_stds[:, i].tolist(),
'calib weights': args.powder_weights.tolist()
})
# plotting
axs[j, i].errorbar(x_perc, el_avs[:, i], yerr=el_stds[:, i], fmt='o')
# axs[j, i].plot(x_perc, res.intercept + res.slope * x_perc)
axs[j, i].plot(x_perc, fit_func(x_perc, *params))
axs[j, i].set_title(el + ' peak [' + \
', '.join(map(str, args.elements_data[el].int_limits[i])) + \
'] keV; r2 = ' + f'{r2:.4f}')
axs[j, i].set_ylabel(el + ' peak area (a.u.)')
axs[j, i].set_xlabel(el + ' amount (percent)')
axs[j, i].legend([el, f'({slope:.3f}+/-{slope_err:.3f})*x + ({intercept:.3f}+/-{intercept_err:.3f})'])
fig.tight_layout()
# print(fitting_results)
plt.show()
# save calibrations
for el in fitting_results.keys():
if el != args.powder_element:
# no need to save calibration for powder element as there is no such
with open(os.path.join(args.calib_path, el + '_calib_' + args.calib_label + '.json'), 'w') as calib_file:
json.dump({el: fitting_results[el]}, calib_file)
return fitting_results
def analyze_content(args: argparse.Namespace) -> pd.DataFrame:
# peak integrals for powder element
powder_avs, powder_stds = analyze_element(args, args.powder_element)
res_df = pd.DataFrame(columns=['Element'] + args.labels)
for j, el in enumerate(args.elements):
print(f'{el} calibration: {args.calib_files[el]}')
el_avs, el_stds = analyze_element(args, el)
with open(args.calib_files[el], 'r') as cfile:
calib = json.load(cfile)
el_res = []
el_res_std = []
for i in range(el_avs.shape[1]):
# loop through peaks for each element
# calibration for peak i
calib_peak = calib[el][i]
# calculate peaks
el_avs[:, i] = el_avs[:, i] / powder_avs[:, 0]
el_stds[:, i] = np.sqrt((el_stds[:, i] / powder_avs[:, 0])**2 + \
(el_avs[:, i] / powder_avs[:, 0] ** 2 * powder_stds[:, 0])**2)
# calculate element percentage for each peak
intercept = calib_peak['intercept']
intercept_err = calib_peak['intercept err']
if args.skip_intercept:
# Note: it is much better to use different calibration where fit
# was done without intercept at all
intercept = 0
intercept_err = 0
el_perc = (el_avs[:, i] - intercept) / calib_peak['slope']
el_perc_std = np.sqrt((el_stds[:, i] / calib_peak['slope'])**2 + \
(intercept_err / calib_peak['slope'])**2 + \
((el_avs[:, i] - intercept) / calib_peak['slope']**2 * calib_peak['slope err'])**2)
el_res.append(el_perc)
el_res_std.append(el_perc_std)
el_res = np.array(el_res)
el_res_std = np.array(el_res_std)
el_res = np.mean(el_res, axis=0)
el_res_std = np.mean(el_res_std, axis=0)
res_df.loc[4*j] = [el + ' perc'] + el_res.tolist()
res_df.loc[4*j + 1] = [el + ' perc err'] + el_res_std.tolist()
# el_res = el_res / calib[el][0]['umol to perc']
# el_res_std = el_res_std / calib[el][0]['umol to perc']
el_res = el_res * args.powder_weights * 10 / args.elements_data[el].molar_weight
el_res_std = el_res_std * args.powder_weights * 10 / args.elements_data[el].molar_weight
res_df.loc[4*j + 2] = [el + ' umol'] + el_res.tolist()
res_df.loc[4*j + 3] = [el + ' umol err'] + el_res_std.tolist()
print(res_df.head())
res_df.to_csv(os.path.join(args.results_path, 'Result_' + \
os.path.splitext(os.path.basename(args.spectra_path))[0] + '.csv'),
index=False)
return res_df
class MetalContentParser(argparse.ArgumentParser):
'''Class to perform parsing the input arguments and do additional checks of the input data.'''
def error(self, message):
super().error(message)
def parse_args(self) -> argparse.Namespace:
args = super().parse_args()
# load spectra file
args.spectra_path = args.spectra
# get file encoding
if args.encoding == '':
with open(args.spectra_path, 'rb') as raw:
encoding = chardet.detect(raw.read())
args.encoding = encoding['encoding']
args.spectra = pd.read_csv(args.spectra_path, encoding=args.encoding, delimiter='\t')
if args.spectra.shape[1] == 1:
# something is wrong with delimiter
args.spectra = | pd.read_csv(args.spectra_path, encoding=args.encoding, delimiter=',') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""CICID1.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1q-T0VLplhSabpHZXApgXDZsoW7aG3Hnw
"""
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import time
from sklearn.metrics import accuracy_score
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
#for dirname, _, filenames in os.walk('/content/drive/My Drive/Colab Notebooks/kshield_project/dataset'):
# for filename in filenames:
# print(filename)
#print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
#/content/drive/My Drive/Colab Notebooks/kshield_project/dataset/cicids2017/MachineLearningCSV/MachineLearningCVE/Friday-WorkingHours-Afternoon-DDos.pcap_ISCX.csv
pd.set_option('display.float_format', '{:.5f}'.format)
df1=pd.read_csv("/content/drive/My Drive/Colab Notebooks/kshield_project/dataset/cicids2017/MachineLearningCSV/MachineLearningCVE/Friday-WorkingHours-Afternoon-DDos.pcap_ISCX.csv")
df2=pd.read_csv("/content/drive/My Drive/Colab Notebooks/kshield_project/dataset/cicids2017/MachineLearningCSV/MachineLearningCVE/Friday-WorkingHours-Afternoon-PortScan.pcap_ISCX.csv")
df3=pd.read_csv("/content/drive/My Drive/Colab Notebooks/kshield_project/dataset/cicids2017/MachineLearningCSV/MachineLearningCVE/Friday-WorkingHours-Morning.pcap_ISCX.csv")
df4=pd.read_csv("/content/drive/My Drive/Colab Notebooks/kshield_project/dataset/cicids2017/MachineLearningCSV/MachineLearningCVE/Monday-WorkingHours.pcap_ISCX.csv")
df5=pd.read_csv("/content/drive/My Drive/Colab Notebooks/kshield_project/dataset/cicids2017/MachineLearningCSV/MachineLearningCVE/Thursday-WorkingHours-Afternoon-Infilteration.pcap_ISCX.csv")
df6=pd.read_csv("/content/drive/My Drive/Colab Notebooks/kshield_project/dataset/cicids2017/MachineLearningCSV/MachineLearningCVE/Thursday-WorkingHours-Morning-WebAttacks.pcap_ISCX.csv")
df7= | pd.read_csv("/content/drive/My Drive/Colab Notebooks/kshield_project/dataset/cicids2017/MachineLearningCSV/MachineLearningCVE/Tuesday-WorkingHours.pcap_ISCX.csv") | pandas.read_csv |
#!/usr/bin/env python3
# See the README.md file
import json
import logging
import sys
import os
from datetime import datetime
import numpy
from PIL import Image
import pandas as pd
import requests
from skimage import exposure
# The site file, of the format: site_tag,latitude,longitude,start_date,end_date,kmAboveBelow,kmLeftRight
CSV = "sites.csv"
URL = "https://modis.ornl.gov/rst/api/v1/"
ORDURL = "https://modis.ornl.gov/subsetdata"
HEADER = {'Accept': 'application/json'}
BANDS = ['sur_refl_b01', 'sur_refl_b04', 'sur_refl_b03', 'sur_refl_qc_500m']
PROD = ['MYD09A1', 'MOD09A1']
# MXD09A1 QC, this runs parallel to BANDS, defs at https://lpdaac.usgs.gov/documents/925/MOD09_User_Guide_V61.pdf
BANDS_QC = [
60, # 111100, band 1
245760, # 111100000000000000, band 4
15360 # 11110000000000, band 3
]
SR_MAX = 16000
SR_MIN = -100
GAMMA = 0.4
IMG_DIR= 'site-imgs'
# The % of the image that should be of "high quality"
PRCNT_GOOD = .90 # Needs to be ~90% good
def json_sr_2_channel(band_name, band_idx, data, qc_band):
nsubs = len(data[band_name]['subset'])
if nsubs > 1:
logging.warning("using the first subset for %s ...", band_name)
scale = float(data[band_name]['scale'])
arr = numpy.array(data[band_name]['subset'][0]['data']).astype('f4')
arr = numpy.ma.masked_where(arr > SR_MAX, arr, copy=False)
arr = numpy.ma.masked_where(arr < SR_MIN, arr, copy=False)
arr *= scale
sr_min, sr_max = SR_MIN * scale, SR_MAX * scale
dat = ((arr - sr_min) * (1/(sr_max - sr_min) * 255)).astype('uint8')
qc_bnd = qc_band & BANDS_QC[band_idx]
dat = numpy.ma.masked_where(qc_bnd != 0, dat, copy=False)
return dat
#json_sr_2_channel
def post_m09a1(data):
logging.info("Building rgb image for MODIS Terra/Aqua Surface Reflectance (SREF) 8-Day L3 Global 500 %s", data['name'])
qc = numpy.array(data['sur_refl_qc_500m']['subset'][0]['data'], dtype='u4')
redish = json_sr_2_channel('sur_refl_b01', 0, data, qc)
greenish = json_sr_2_channel('sur_refl_b04', 1, data, qc)
blueish = json_sr_2_channel('sur_refl_b03', 2, data, qc)
msk = redish.mask | greenish.mask | blueish.mask
pgood = numpy.count_nonzero(msk == 0) / msk.shape[0]
logging.debug("Quality pixels ~ %f %%", pgood)
if pgood <= PRCNT_GOOD:
logging.warning("Skipping %s due to poor quality image", data['name'])
return
alpha = numpy.where(msk == True, 0, 255).astype('uint8')
shp = data['sur_refl_b01']['nrows'], data['sur_refl_b01']['ncols'], 4
try:
rgba = numpy.dstack((redish, greenish, blueish, alpha)).reshape(shp)
gcorrect = exposure.adjust_gamma(rgba, GAMMA)
img = Image.fromarray(gcorrect,'RGBA')
img.save(data['name'])
except ValueError as valerr:
logging.error("%s for %s, image not created", str(valerr), data['name'])
#post_m09a1
def subset_site_data(csv, prod):
coordinates = | pd.read_csv(csv) | pandas.read_csv |
import pandas as pd
import mdtraj as md
__all__ = ["load_dataframe", "load_trajectory", "plumed_to_pandas"]
def is_plumed_file(filename):
"""
Check if given file is in PLUMED format.
Parameters
----------
filename : string, optional
PLUMED output file
Returns
-------
bool
wheter is a plumed output file
"""
headers = pd.read_csv(filename, sep=" ", skipinitialspace=True, nrows=0)
is_plumed = True if " ".join(headers.columns[:2]) == "#! FIELDS" else False
return is_plumed
def plumed_to_pandas(filename="./COLVAR"):
"""
Load a PLUMED file and save it to a dataframe.
Parameters
----------
filename : string, optional
PLUMED output file
Returns
-------
df : DataFrame
Collective variables dataframe
"""
skip_rows = 1
# Read header
headers = | pd.read_csv(filename, sep=" ", skipinitialspace=True, nrows=0) | pandas.read_csv |
import matplotlib.pyplot as plt
import matplotlib
import scipy.stats as stats
from statsmodels.graphics.mosaicplot import mosaic
import statsmodels.api as sm
from statsmodels.formula.api import ols
import pandas as pd
import numpy as np
import scipy
class InteractionAnalytics():
@staticmethod
def rank_associations(df, conf_dict, col1, col2, col3):
try:
col2 = int(col2)
col3 = int(col3)
except:
pass
# Passed Variable is Numerical
if (col1 in conf_dict['NumericalColumns']) :
if len(conf_dict['NumericalColumns'])>1:
# Interaction with numerical variables
df2 = df[conf_dict['NumericalColumns']]
corrdf = df2.corr()
corrdf = abs(corrdf) # get the absolute values of correlations since negative correlations also matter
corrdf2 = corrdf[corrdf.index==col1].reset_index()[[each for each in corrdf.columns \
if col1 not in each]].unstack().sort_values(kind="quicksort",
ascending=False).head(col2)
corrdf2 = corrdf2.reset_index()
corrdf2.columns = ['Variable','level1', 'Correlation']
metric_num = "R-squared"
# Interaction with categorical variables
etasquared_dict = {}
if len(conf_dict['CategoricalColumns']) >= 1:
for each in conf_dict['CategoricalColumns']:
mod = ols('{} ~ C({})'.format(col1, each),data=df[[col1,each]],missing='drop').fit()
aov_table = sm.stats.anova_lm(mod, typ=1)
esq_sm = aov_table['sum_sq'][0]/(aov_table['sum_sq'][0]+aov_table['sum_sq'][1])
etasquared_dict[each] = esq_sm
topk_esq = pd.DataFrame.from_dict(etasquared_dict, orient='index').unstack().sort_values(\
kind = 'quicksort', ascending=False).head(col3).reset_index()
topk_esq.columns = ['level_0', 'Variable', 'Correlation']
metric_cat = 'Eta-Squared'
return corrdf2, metric_num, topk_esq, metric_cat
# Passed Variable is Categorical
else:
#Interaction with numerical variables
if len(conf_dict['NumericalColumns']) >= 1:
etasquared_dict = {}
for each in conf_dict['NumericalColumns']:
mod = ols('{} ~ C({})'.format(each, col1), data = df[[col1,each]]).fit()
aov_table = sm.stats.anova_lm(mod, typ=1)
esq_sm = aov_table['sum_sq'][0]/(aov_table['sum_sq'][0]+aov_table['sum_sq'][1])
etasquared_dict[each] = esq_sm
topk_esq = pd.DataFrame.from_dict(etasquared_dict, orient='index').unstack().sort_values(\
kind = 'quicksort', ascending=False).head(col2).reset_index()
topk_esq.columns = ['level_0', 'Variable', 'Correlation']
metric_num = 'Eta-Squared'
# Interaction with categorical variables
cramer_dict = {}
if len(conf_dict['CategoricalColumns'])>1:
for each in conf_dict['CategoricalColumns']:
if each !=col1:
tbl = pd.crosstab(df[col1], df[each])
chisq = stats.chi2_contingency(tbl, correction=False)[0]
try:
cramer = np.sqrt(chisq/sum(tbl))
except:
cramer = np.sqrt(chisq/tbl.values.sum())
pass
cramer_dict[each] = cramer
topk_cramer = | pd.DataFrame.from_dict(cramer_dict, orient='index') | pandas.DataFrame.from_dict |
#%%
import os
from pyteomics import mzid, mzml
import pandas as pd
import numpy as np
import glob
"""
Files are downloaded and manually randomly divided into different folders
the following code is repeated but has the same effect, it is applied to various folders to
generate pandas data frames and to store all the data in a single hdf5 file
"""
#%%
os.chdir('./files/train')
mzid_files=glob.glob('*.mzid')
indexed_mzid = mzid.chain.from_iterable(mzid_files, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid = []
for entry in(indexed_mzid):
all_mzid.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid)
mzid_df = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra)
spectra_df = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df = pd.merge(mzid_df,spectra_df,how='left',on=['file','id'])
merged_df = merged_df[['id','seq','mz','intensities']]
#%%
hdf = pd.HDFStore('/home/ubuntu/data/jiahao/files/train.hdf5', mode="w")
hdf.put(value=merged_df, key="df")
#%%
os.chdir('./train_1')
mzid_files_1=glob.glob('*.mzid')
indexed_mzid_1 = mzid.chain.from_iterable(mzid_files_1, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_1 = []
for entry in(indexed_mzid_1):
all_mzid_1.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_1)
mzid_df_1 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_1 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_1.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_1)
spectra_df_1 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_1 = pd.merge(mzid_df_1,spectra_df_1,how='left',on=['file','id'])
merged_df_1 = merged_df_1[['id','seq','mz','intensities']]
#%%
hdf.put(value=merged_df_1, key="df1")
# %%
os.chdir('./train_2')
mzid_files_2=glob.glob('*.mzid')
indexed_mzid_2 = mzid.chain.from_iterable(mzid_files_2, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_2 = []
for entry in(indexed_mzid_2):
all_mzid_2.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_2)
mzid_df_2 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_2 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_2.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_2)
spectra_df_2 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_2 = pd.merge(mzid_df_2,spectra_df_2,how='left',on=['file','id'])
merged_df_2 = merged_df_2[['id','seq','mz','intensities']]
#%%
hdf.put(value=merged_df_2, key="df2")
#%%
os.chdir('./train_3')
mzid_files_3 = glob.glob('*.mzid')
indexed_mzid_3 = mzid.chain.from_iterable(mzid_files_3, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_3 = []
for entry in(indexed_mzid_3):
all_mzid_3.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_3)
mzid_df_3 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_3 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_3.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_3)
spectra_df_3 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_3 = pd.merge(mzid_df_3,spectra_df_3,how='left',on=['file','id'])
merged_df_3 = merged_df_3[['id','seq','mz','intensities']]
#%%
hdf.put(value=merged_df_3, key="df3")
#%%
os.chdir('./train_4')
mzid_files_4 = glob.glob('*.mzid')
indexed_mzid_4 = mzid.chain.from_iterable(mzid_files_4, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_4 = []
for entry in(indexed_mzid_4):
all_mzid_4.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_4)
mzid_df_4 = | pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq}) | pandas.DataFrame |
# load
import pandas as pd
# import lightgbm
data = pd.read_csv("X_train.csv", index_col=0)
data["mark"] = | pd.read_csv("y_train.csv", index_col=0) | pandas.read_csv |
import pandas as pd
import numpy as np
import re
pd.options.mode.chained_assignment = None
class Validate:
def Member(self, data, current_org, missing_output, output_ID = None, personIDs = None):
key = 'Member'
bad_data_count = 0
bad_data_locations= []
last_row = 0
for row in data[key].values:
val = row[0]
# exit when the last row has been reached
if val == '' or pd.isna(val):
break
else:
# check for missing birthdates and log if True
if data[key]['Fødselsdato'][last_row] == '':
print(f'{current_org}: Bad Birthdate at {last_row}, {data[key]["Fødselsdato"][last_row]}')
bad_data_count += 1
bad_data_locations.append('Fødselsdato')
# Check if the member is missing membership onboarding date, or if this is set as an invalid value
reg_date = data[key]['Medlemskap registreringsdato'][last_row]
if type(reg_date) != pd._libs.tslibs.timestamps.Timestamp:
print(f'{current_org}: Missing membership onboarding date, or invalid data type at {last_row} for {str(reg_date)} with type {type(reg_date)}!')
bad_data_count += 1
bad_data_locations.append('Medlemskap registreringsdato')
# check if the member is missing training fee onboarding date
reg_date_tf = data[key]['Treningsavgift registreringsdato'][last_row]
if type(reg_date) != pd._libs.tslibs.timestamps.Timestamp:
print(f'{current_org}: Missing Training Fee onboarding date, or invalid data type at {last_row} for {str(reg_date_tf)} with type {type(reg_date_tf)}!')
bad_data_count += 1
bad_data_locations.append('Treningsavgift registreringsdato')
# check if new membership and Training fee has valid product names
mem = data[key]['Kontingent navn'][last_row]
mem_list = list(data['Membership']['Navn på kontigent'].values)
if mem not in mem_list:
found_mem_match = False
for m_product in mem_list:
if mem.lower().replace(' ', '') == m_product.lower().replace(' ', ''):
data[key]['Kontingent navn'][last_row] = m_product
found_mem_match = True
#print(f'{current_org}: Bad Membership at {last_row}, old product: "{mem}", new product: "{data[key]["Kontingent navn"][last_row]}"')
bad_data_count += 1
bad_data_locations.append('Kontingent navn - OK')
break
if not found_mem_match:
print(f'{current_org}: Non existent Membership at {last_row}! Product: "{mem}"')
bad_data_count += 1
bad_data_locations.append('Kontingent navn - BAD')
tf = data[key]['Treningsavgift navn'][last_row]
tf_list = list(data['Training fee']['Navn på Treningsvgift'].values)
if tf not in tf_list:
found_tf_match = False
for t_product in tf_list:
if tf.lower().replace(' ', '') == t_product.lower().replace(' ', ''):
data[key]['Treningsavgift navn'][last_row] = t_product
#print(f'{current_org}: Bad Training Fee at {last_row}, old product: "{tf}", new product: "{data[key]["Treningsavgift navn"][last_row]}"')
bad_data_count += 1
bad_data_locations.append('Treningsavgift - OK')
found_tf_match = True
break
if not found_tf_match:
print(f'{current_org}: Non existent Training fee at {last_row}! Product "{tf}"')
bad_data_count += 1
bad_data_locations.append('Treningsavgift - BAD')
# check for missing address data and copy for street if so
if data[key]['Adresse 1'][last_row] == "":
data[key]['Adresse 1'][last_row] = data[key]['Gatenavn'][last_row]
if output_ID != None:
# check for existance of output data for current org
if current_org in list(output_ID.keys()):
oid = output_ID[current_org]
if oid in list(personIDs.keys()):
org_data = personIDs[oid]
num_pid= len(org_data)
# set PersonID for member if available in output file
if last_row < num_pid:
data[key]['NIF ID'][last_row] = org_data[last_row][0]
#check for erronious last names, and attempt correction (based upon rules for last name in Folkereg.)
lastname = data[key].Etternavn.values[last_row]
lastname_count = len(lastname.split())
real_lastname = org_data[last_row][2]
real_firstname = org_data[last_row][1]
if lastname_count > 1 and lastname != real_lastname:
print(f'{current_org}: Bad name at {last_row}, old: {lastname}, new: {real_lastname}')
data[key]['Fornavn- og middelnavn'].values[last_row] = real_firstname
data[key].Etternavn.values[last_row] = real_lastname
bad_data_count += 1
bad_data_locations.append('Navn')
# log clubs without indentifiable output from KA
else:
missing_output.update({current_org: last_row})
last_row += 1
if last_row == 0:
print(f'{current_org}: NO MEMBERSHIPS CONFIGURED')
return data, last_row, bad_data_count, bad_data_locations, missing_output
def Training_Fee(self, data, current_org):
key = 'Training fee'
bad_data_count = 0
bad_data_locations= []
last_row = 0
for row in data[key].values:
val = row[0]
# exit when the last row has been reached
if val == '' or | pd.isna(val) | pandas.isna |
# python 2/3 compatibility
from __future__ import division, print_function
import sys
import os.path
import numpy
import pandas
import copy
import json
import jxmlease
import xml.etree.ElementTree as ET
import csv
from sbtab import SBtab
# package imports
import rba
from .data_block import DataBlock
class RBA_SimulationData(object):
"""
Class holding information on simulations with the model.
Attributes
----------
StructuralInformation : rbatools.rba_ModelStructure RBA_ModelStructure object.
Model description
SessionName : str
Name of simulation session
ProteinData : rbatools.data_block DataBlock object.
Simulated protein levels
ReactionData : rbatools.data_block DataBlock object.
Simulated reaction data
EnzymeData : rbatools.data_block DataBlock object.
Simulated enzyme data
ProcessData : rbatools.data_block DataBlock object.
Simulated process machinery data
MetaboliteConstraintData : rbatools.data_block DataBlock object.
Simulation information on mass-balance constraints
DensityConstraintData : rbatools.data_block DataBlock object.
Simulation information on compartment-density constraints
ProcessConstraintData : rbatools.data_block DataBlock object.
Simulation information on process-capacity constraints
EnzymeConstraintData : rbatools.data_block DataBlock object.
Simulation information on enzyme-efficiency constraints
Methods
----------
__init__(StaticData)
Initiates Simulation data object
fromSimulationResults(Controller, session_name)
Imports data from rbatools.RBA_Controler object
fromJSON(inputString)
Imports data from JSON string object
toXML()
Exports xml-file
toJSON()
Returns simulation data as JSON string
exportSBtab()
Exports SBtab files
exportSBtab_OneFile()
Exports simulation data in one single sbtab file
exportCSV(deleteZerosRows=True)
Exports simulation data as csv files
exportEscherMap(type='fluxes')
Exports input file for generation of Escher maps.
exportProteoMap()
Exports input file for the generation of Proteo maps from
simulation data.
"""
def __init__(self, StaticData):
"""
Initiates Simulation data object
Parameters
----------
StaticData : rbatools.RBA_ModelStructure
"""
self.StructuralInformation = copy.deepcopy(StaticData)
self.ProteinData = DataBlock()
self.ProtoProteinData = DataBlock()
self.ReactionData = DataBlock()
self.ExchangeData = DataBlock()
self.uniqueReactionData = DataBlock()
self.EnzymeData = DataBlock()
self.ProcessData = DataBlock()
self.MetaboliteConstraintData = DataBlock()
self.DensityConstraintData = DataBlock()
self.EnzymeConstraintData = DataBlock()
self.ProcessConstraintData = DataBlock()
self.GeneralRunInfo = DataBlock()
self.ObjectiveFunctionInfo = DataBlock()
self.SessionName = ''
self.ProteinData.fromDict({})
self.ProtoProteinData.fromDict({})
self.ReactionData.fromDict({})
self.ExchangeData.fromDict({})
self.uniqueReactionData.fromDict({})
self.EnzymeData.fromDict({})
self.ProcessData.fromDict({})
self.MetaboliteConstraintData.fromDict({})
self.DensityConstraintData.fromDict({})
self.EnzymeConstraintData.fromDict({})
self.ProcessConstraintData.fromDict({})
self.GeneralRunInfo.fromDict({})
self.ObjectiveFunctionInfo.fromDict({})
def fromSimulationResults(self, Controller, session_name=''):
"""
Imports data from rbatools.RBA_Controler object
Parameters
----------
Controller : rbatools.RBA_newControler
Defines from which object to extract the data from
session_name: str
Defines the name of the session
Is appended to the filenames generated by export methods.
"""
self.SessionName = session_name
self.run_names = list(Controller.Results['ObjectiveValue'])
ObjDict = {}
for run in list(Controller.Results['ObjectiveValue']):
ObjDict.update({run: Controller.Results['ObjectiveValue'].loc['ObjectiveValue', run]})
SolutionType_Dict = {}
for run in list(Controller.Results['SolutionType']):
SolutionType_Dict.update(
{run: Controller.Results['SolutionType'].loc['SolutionType', run]})
MuDict = {}
for run in list(Controller.Results['Mu']):
MuDict.update({run: Controller.Results['Mu'].loc['Mu', run]})
self.GeneralRunInfo.addEntries({'ProblemType': SolutionType_Dict})
self.GeneralRunInfo.addEntries({'Mu': MuDict})
self.GeneralRunInfo.addEntries({'Obj_Val': ObjDict})
for var in list(Controller.Results['ObjectiveFunction'].index):
if var not in self.ObjectiveFunctionInfo.Elements:
self.ObjectiveFunctionInfo.Elements.update({var: {}})
for run in list(Controller.Results['ObjectiveFunction']):
self.ObjectiveFunctionInfo.Elements[var].update(
{run: Controller.Results['ObjectiveFunction'].loc[var, run]})
for exchange in list(Controller.Results['ExchangeFluxes'].index):
if exchange not in self.ExchangeData.Elements:
self.ExchangeData.Elements.update({exchange: {}})
# self.ExchangeData.Elements[exchange].update({'ID': exchange})
for run in list(Controller.Results['ExchangeFluxes']):
self.ExchangeData.Elements[exchange].update(
{run: Controller.Results['ExchangeFluxes'].loc[exchange, run]})
for reaction in list(Controller.Results['Reactions'].index):
if reaction not in self.ReactionData.Elements:
self.ReactionData.Elements.update({reaction: {}})
# self.ReactionData.Elements[reaction].update({'ID': reaction})
for run in list(Controller.Results['Reactions']):
self.ReactionData.Elements[reaction].update(
{run: Controller.Results['Reactions'].loc[reaction, run]})
for reaction in list(Controller.Results['uniqueReactions'].index):
if reaction not in self.uniqueReactionData.Elements:
self.uniqueReactionData.Elements.update({reaction: {}})
# self.uniqueReactionData.Elements[reaction].update({'ID': reaction})
for run in list(Controller.Results['uniqueReactions']):
self.uniqueReactionData.Elements[reaction].update(
{run: Controller.Results['uniqueReactions'].loc[reaction, run]})
for enzyme in list(Controller.Results['Enzymes'].index):
if enzyme not in self.EnzymeData.Elements:
self.EnzymeData.Elements.update({enzyme: {}})
# self.EnzymeData.Elements[enzyme].update({'ID': enzyme})
for run in list(Controller.Results['Enzymes']):
self.EnzymeData.Elements[enzyme].update(
{run: Controller.Results['Enzymes'].loc[enzyme, run]})
for process in list(Controller.Results['Processes'].index):
if process not in self.ProcessData.Elements:
self.ProcessData.Elements.update({process: {}})
# self.ProcessData.Elements[process].update({'ID': process})
for run in list(Controller.Results['Processes']):
self.ProcessData.Elements[process].update(
{run: Controller.Results['Processes'].loc[process, run]})
for protein in list(Controller.Results['Proteins'].index):
if protein not in self.ProteinData.Elements:
self.ProteinData.Elements.update({protein: {}})
# self.ProteinData.Elements[protein].update({'ID': protein})
for run in list(Controller.Results['Proteins']):
self.ProteinData.Elements[protein].update(
{run: Controller.Results['Proteins'].loc[protein, run]})
for protoprotein in list(Controller.Results['ProtoProteins'].index):
if protoprotein not in self.ProtoProteinData.Elements:
self.ProtoProteinData.Elements.update({protoprotein: {}})
# self.ProtoProteinData.Elements[protoprotein].update({'ID': protoprotein})
for run in list(Controller.Results['ProtoProteins']):
self.ProtoProteinData.Elements[protoprotein].update(
{run: Controller.Results['ProtoProteins'].loc[protoprotein, run]})
for constr in list(Controller.Results['Constraints'].index):
if constr in list(self.StructuralInformation.MetaboliteConstraintsInfo.Elements.keys()):
if constr not in self.MetaboliteConstraintData.Elements:
self.MetaboliteConstraintData.Elements.update({constr: {}})
# self.MetaboliteConstraintData.Elements[constr].update({'ID': constr})
for run in list(Controller.Results['Constraints']):
self.MetaboliteConstraintData.Elements[constr].update(
{run: Controller.Results['Constraints'].loc[constr, run]})
if constr in list(self.StructuralInformation.DensityConstraintsInfo.Elements.keys()):
if constr not in self.DensityConstraintData.Elements:
self.DensityConstraintData.Elements.update({constr: {}})
# self.DensityConstraintData.Elements[constr].update({'ID': constr})
for run in list(Controller.Results['Constraints']):
self.DensityConstraintData.Elements[constr].update(
{run: Controller.Results['Constraints'].loc[constr, run]})
if constr in list(self.StructuralInformation.EnzymeConstraintsInfo.Elements.keys()):
if constr not in self.EnzymeConstraintData.Elements:
self.EnzymeConstraintData.Elements.update({constr: {}})
# self.EnzymeConstraintData.Elements[constr].update({'ID': constr})
for run in list(Controller.Results['Constraints']):
self.EnzymeConstraintData.Elements[constr].update(
{run: Controller.Results['Constraints'].loc[constr, run]})
if constr in list(self.StructuralInformation.ProcessConstraintsInfo.Elements.keys()):
if constr not in self.ProcessConstraintData.Elements:
self.ProcessConstraintData.Elements.update({constr: {}})
# self.ProcessConstraintData.Elements[constr].update({'ID': constr})
for run in list(Controller.Results['Constraints']):
self.ProcessConstraintData.Elements[constr].update(
{run: Controller.Results['Constraints'].loc[constr, run]})
def fromJSON(self, inputString):
"""
Imports data from JSON string object
Parameters
----------
inputString: json-string
"""
Block = json.loads(inputString)
self.ReactionData = DataBlock()
self.ProteinData = DataBlock()
self.EnzymeData = DataBlock()
self.ProcessData = DataBlock()
self.MetaboliteConstraintData = DataBlock()
self.DensityConstraintData = DataBlock()
self.EnzymeConstraintData = DataBlock()
self.ProcessConstraintData = DataBlock()
self.ReactionData.fromDict(Block['ReactionData'])
self.ProteinData.fromDict(Block['ProteinData'])
self.EnzymeData.fromDict(Block['EnzymeData'])
self.ProcessData.fromDict(Block['ProcessData'])
self.MetaboliteConstraintData.fromDict(Block['MetaboliteConstraintData'])
self.DensityConstraintData.fromDict(Block['DensityConstraintData'])
self.EnzymeConstraintData.fromDict(Block['EnzymeConstraintData'])
self.ProcessConstraintData.fromDict(Block['ProcessConstraintData'])
def exportXML(self):
"""
Exports xml-file
"""
x = htmlStyle(self)
root = ET.fromstring(jxmlease.emit_xml(x, encoding='utf-8'))
m = ET.tostring(root, 'utf-8')
return(m)
def exportSBtab(self, filename=None, add_links=False, rba=False):
"""
Exports simulation data in one single sbtab file
"""
GeneralRunInfoTable = self.GeneralRunInfo.toSBtab(
table_id='run_information', table_type='QuantityMatrix', table_name='Run information')
GeneralRunInfoTable.filename = 'RunInfo.tsv'
GeneralRunInfoTable.change_attribute(
'Text', 'Growth rates mu and cellular objective values (by default: minimisation of total enzyme concentration).')
#GeneralRunInfoTable.unset_attribute('Date')
GeneralRunInfoTable.unset_attribute('SBtabVersion')
ObjectiveFunctionDataTable = self.ObjectiveFunctionInfo.toSBtab(
table_id='objective_coefficients', table_type='QuantityMatrix', table_name='Linear objective')
ObjectiveFunctionDataTable.filename = 'ObjectiveFunctionData.tsv'
ObjectiveFunctionDataTable.change_attribute('Unit', '')
ObjectiveFunctionDataTable.change_attribute('QuantityType', 'objective_coefficient')
ObjectiveFunctionDataTable.change_attribute(
'Text', 'Coefficients in objective function (<0 : maximisation , >0 : minimisation)')
#ObjectiveFunctionDataTable.unset_attribute('Date')
ObjectiveFunctionDataTable.unset_attribute('SBtabVersion')
ReactionDataTable = self.ReactionData.toSBtab(
table_id='reaction_flux', table_type='QuantityMatrix', table_name='Reaction fluxes')
ReactionDataTable.filename = 'ReactionData.tsv'
ReactionDataTable.change_attribute('Unit', 'mmol/(h*gDW)')
ReactionDataTable.change_attribute('QuantityType', 'reaction_flux')
ReactionDataTable.change_attribute(
'Text', 'Reaction fluxes obtained in the simulation runs (table columns).')
#ReactionDataTable.unset_attribute('Date')
ReactionDataTable.unset_attribute('SBtabVersion')
EnzymeDataTable = self.EnzymeData.toSBtab(
table_id='enzyme_concentration', table_type='QuantityMatrix', table_name='Enzyme concentrations')
EnzymeDataTable.filename = 'EnzymeData.tsv'
EnzymeDataTable.change_attribute('Unit', 'mmol/gDW')
EnzymeDataTable.change_attribute('QuantityType', 'enzyme_concentration')
EnzymeDataTable.change_attribute(
'Text', 'Enzyme concentrations obtained in the simulation runs (table columns).')
#EnzymeDataTable.unset_attribute('Date')
EnzymeDataTable.unset_attribute('SBtabVersion')
ProcessDataTable = self.ProcessData.toSBtab(
table_id='machine_concentration', table_type='QuantityMatrix', table_name='Machine concentrations')
ProcessDataTable.filename = 'ProcessData.tsv'
ProcessDataTable.change_attribute('Unit', 'mmol/gDW')
ProcessDataTable.change_attribute('QuantityType', 'machine_concentration')
ProcessDataTable.change_attribute(
'Text', 'Macromolecular machine concentrations obtained in the simulation runs (table columns).')
#ProcessDataTable.unset_attribute('Date')
ProcessDataTable.unset_attribute('SBtabVersion')
ProteinDataTable = self.ProteinData.toSBtab(
table_id='protein_concentration', table_type='QuantityMatrix', table_name='Protein concentrations')
ProteinDataTable.filename = 'ProteinData.tsv'
ProteinDataTable.change_attribute('Unit', 'mmol/gDW')
ProteinDataTable.change_attribute('QuantityType', 'protein_concentration')
ProteinDataTable.change_attribute(
'Text', 'Protein concentrations obtained in the simulation runs (table columns).')
#ProteinDataTable.unset_attribute('Date')
ProteinDataTable.unset_attribute('SBtabVersion')
MetaboliteConstraintDataTable = self.MetaboliteConstraintData.toSBtab(
table_id='metabolite_mass_balance_dual', table_type='QuantityMatrix', table_name='Metabolite mass-balance dual values')
MetaboliteConstraintDataTable.filename = 'MetaboliteConstraintData.tsv'
MetaboliteConstraintDataTable.change_attribute('Unit', '')
MetaboliteConstraintDataTable.change_attribute('QuantityType', 'lagrange_multiplier')
MetaboliteConstraintDataTable.change_attribute(
'Text', 'Shadow prices of the metabolite mass-balance constraints obtained in the simulation runs (table columns). The measurement units of shadow prices are given by the measurement unit of objective function, divided by the measurement units of the respective constraints')
#MetaboliteConstraintDataTable.unset_attribute('Date')
MetaboliteConstraintDataTable.unset_attribute('SBtabVersion')
DensityConstraintDataTable = self.DensityConstraintData.toSBtab(
table_id='density_constraint_dual', table_type='QuantityMatrix', table_name='Compartment density dual values')
DensityConstraintDataTable.filename = 'DensityConstraintData.tsv'
DensityConstraintDataTable.change_attribute('QuantityType', 'lagrange_multiplier')
DensityConstraintDataTable.change_attribute('Unit', '')
DensityConstraintDataTable.change_attribute(
'Text', 'Shadow prices of the density constraints obtained in the simulation runs (table columns). The measurement units of shadow prices are given by the measurement unit of objective function, divided by the measurement units of the respective constraints')
#DensityConstraintDataTable.unset_attribute('Date')
DensityConstraintDataTable.unset_attribute('SBtabVersion')
EnzymeConstraintDataTable = self.EnzymeConstraintData.toSBtab(
table_id='enzyme_capacity_dual', table_type='QuantityMatrix', table_name='Enzyme capacity dual values')
EnzymeConstraintDataTable.filename = 'EnzymeConstraintData.tsv'
EnzymeConstraintDataTable.change_attribute('QuantityType', 'lagrange_multiplier')
EnzymeConstraintDataTable.change_attribute('Unit', '')
EnzymeConstraintDataTable.change_attribute(
'Text', 'Shadow prices of the enzyme-capacity constraints obtained in the simulation runs (table columns). The measurement units of shadow prices are given by the measurement unit of objective function, divided by the measurement units of the respective constraints')
#EnzymeConstraintDataTable.unset_attribute('Date')
EnzymeConstraintDataTable.unset_attribute('SBtabVersion')
ProcessConstraintDataTable = self.ProcessConstraintData.toSBtab(
table_id='machine_capacity_dual', table_type='QuantityMatrix', table_name='Machine capacity dual values')
ProcessConstraintDataTable.filename = 'ProcessConstraintData.tsv'
ProcessConstraintDataTable.change_attribute('Unit', '')
ProcessConstraintDataTable.change_attribute('QuantityType', 'lagrange_multiplier')
ProcessConstraintDataTable.change_attribute(
'Text', 'Shadow prices of the machine-capacity constraints obtained in the simulation runs (table columns). The measurement units of shadow prices are given by the measurement unit of objective function, divided by the measurement units of the respective constraints')
#ProcessConstraintDataTable.unset_attribute('Date')
ProcessConstraintDataTable.unset_attribute('SBtabVersion')
if filename is not None:
filename_SBtab = filename
else:
filename_SBtab = 'RBA_results'
if add_links:
ReactionDataTable.add_column(column_list=['!ElementID']+[str('(!'+'Reaction/'+entry+'!)')
for entry in list(ReactionDataTable.to_data_frame()['ID'])], position=1)
ProcessDataTable.add_column(column_list=['!ElementID']+[str('(!'+'Process/'+entry+'!)')
for entry in list(ProcessDataTable.to_data_frame()['ID'])], position=1)
EnzymeDataTable.add_column(column_list=['!ElementID']+[str('(!'+'Enzyme/'+entry+'!)')
for entry in list(EnzymeDataTable.to_data_frame()['ID'])], position=1)
ProteinDataTable.add_column(column_list=['!ElementID']+[str('(!'+'Protein/'+entry+'!)')
for entry in list(ProteinDataTable.to_data_frame()['ID'])], position=1)
MetaboliteConstraintDataTable.add_column(column_list=['!ElementID']+[str(
'(!'+'Compound/'+entry+'!)') for entry in list(MetaboliteConstraintDataTable.to_data_frame()['ID'])], position=1)
DensityConstraintDataTable.add_column(column_list=['!ElementID']+[str(
'(!'+'Compartment/'+entry+'!)') for entry in list(DensityConstraintDataTable.to_data_frame()['ID'])], position=1)
ProcessConstraintDataTable.add_column(column_list=['!ElementID']+[str(
'(!'+'Process/'+entry+'!)') for entry in list(ProcessConstraintDataTable.to_data_frame()['ID'])], position=1)
EnzymeConstraintDataTable.add_column(column_list=['!ElementID']+[str(
'(!'+'Enzyme/'+entry+'!)') for entry in list(EnzymeConstraintDataTable.to_data_frame()['ID'])], position=1)
filename_SBtab += '_HTML'
else:
ReactionDataTable.add_column(
column_list=['!ElementID']+list(ReactionDataTable.to_data_frame(rba=rba)['ID']), position=1)
ProcessDataTable.add_column(
column_list=['!ElementID']+list(ProcessDataTable.to_data_frame(rba=rba)['ID']), position=1)
EnzymeDataTable.add_column(
column_list=['!ElementID']+list(EnzymeDataTable.to_data_frame(rba=rba)['ID']), position=1)
ProteinDataTable.add_column(
column_list=['!ElementID']+list(ProteinDataTable.to_data_frame(rba=rba)['ID']), position=1)
MetaboliteConstraintDataTable.add_column(
column_list=['!ElementID']+list(MetaboliteConstraintDataTable.to_data_frame(rba=rba)['ID']), position=1)
DensityConstraintDataTable.add_column(
column_list=['!ElementID']+list(DensityConstraintDataTable.to_data_frame(rba=rba)['ID']), position=1)
ProcessConstraintDataTable.add_column(
column_list=['!ElementID']+list(ProcessConstraintDataTable.to_data_frame(rba=rba)['ID']), position=1)
EnzymeConstraintDataTable.add_column(
column_list=['!ElementID']+list(EnzymeConstraintDataTable.to_data_frame(rba=rba)['ID']), position=1)
ReactionDataTable.remove_column(position=2)
ProcessDataTable.remove_column(position=2)
EnzymeDataTable.remove_column(position=2)
ProteinDataTable.remove_column(position=2)
MetaboliteConstraintDataTable.remove_column(position=2)
DensityConstraintDataTable.remove_column(position=2)
ProcessConstraintDataTable.remove_column(position=2)
EnzymeConstraintDataTable.remove_column(position=2)
self.Out = SBtab.SBtabDocument(name='rbatools_SimulationData_withLinks',
sbtab_init=None, filename=str(filename_SBtab+'.tsv'))
self.Out.add_sbtab(GeneralRunInfoTable)
self.Out.add_sbtab(ObjectiveFunctionDataTable)
self.Out.add_sbtab(ReactionDataTable)
self.Out.add_sbtab(EnzymeDataTable)
self.Out.add_sbtab(ProcessDataTable)
self.Out.add_sbtab(ProteinDataTable)
self.Out.add_sbtab(MetaboliteConstraintDataTable)
self.Out.add_sbtab(DensityConstraintDataTable)
self.Out.add_sbtab(EnzymeConstraintDataTable)
self.Out.add_sbtab(ProcessConstraintDataTable)
self.Out.change_attribute('DocumentName', 'RBA data')
self.Out.name = filename
self.Out.change_attribute('DocumentType', 'rba-simulation-data')
self.Out.write()
def getSBtabDoc(self):
return self.Out
def exportJSON(self):
"""
Returns simulation data as JSON string
Returns
-------
JSON string
"""
Block = {'ReactionData': self.ReactionData.Elements,
'ProteinData': self.ProteinData.Elements,
'EnzymeData': self.EnzymeData.Elements,
'ProcessData': self.ProcessData.Elements,
'MetaboliteConstraintData': self.MetaboliteConstraintData.Elements,
'DensityConstraintData': self.DensityConstraintData.Elements,
'EnzymeConstraintData': self.EnzymeConstraintData.Elements,
'ProcessConstraintData': self.ProcessConstraintData.Elements}
return(json.dumps(Block))
def exportCSV(self, deleteZerosRows=True):
"""
Exports simulation data as csv files
Parameters
----------
deleteZerosRows: bool
Boolean wheter to remove rows which have only zero entries.
(e.g. reactions which never carry flux in all runs)
Default: True
"""
self.csvs = {}
exchange_data_csv = ''
if len(self.SessionName) > 0:
filename = self.SessionName+'_ExchangeData.csv'
else:
filename = 'ExchangeData.csv'
with open(filename, "w", newline='') as fp:
wr = csv.writer(fp, dialect='excel')
IDs = list(self.ExchangeData.Elements.keys())
exchange_data_csv += ','.join(['']+list(self.ExchangeData.Elements[IDs[0]].keys()))+'\n'
wr.writerow([None]+list(self.ExchangeData.Elements[IDs[0]].keys()))
for i in list(IDs):
valuerow = list(self.ExchangeData.Elements[i].values())
valuerow2 = []
for j in valuerow:
if | pandas.isna(j) | pandas.isna |
"""
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import ast
import pandas as pd
import numpy as np
import scipy
from brightics.common.utils import get_default_from_parameters_if_required
from brightics.common.utils import check_required_parameters
from brightics.common.exception import BrighticsFunctionException as BFE
from dtaidistance import dtw
def timeseries_distance(table, **params):
params = get_default_from_parameters_if_required(params, _timeseries_distance)
check_required_parameters(_timeseries_distance, params, ['table'])
return _timeseries_distance(table, **params)
def _timeseries_distance(table, input_col_1, input_col_2, distance_type, alphabet=26, hold_cols=[]):
temp_table = table.copy()
if len(hold_cols) > 0:
out_table = temp_table[hold_cols]
else:
out_table = table.copy()
if table[input_col_1].dtype != table[input_col_2].dtype:
raise BFE.from_errors([{'0100': 'Data types of two input timeseries must be the same.'}])
if distance_type == 'Sax':
if alphabet < 3 or alphabet > 26:
raise BFE.from_errors(
[{'0100': 'Alphabet must be between 3 and 26 if distance_type is Sax.'}])
if not isinstance(table[input_col_1].loc[0], str):
raise BFE.from_errors(
[{'0100': 'Data types of input timeseries must be String if distance_type is Sax.'}])
sax_obj = SAX(alphabetSize=alphabet)
else:
sax_obj = None
if isinstance(table[input_col_1].loc[0], str):
raise BFE.from_errors(
[{'0100': 'Data types of input timeseries must be Array (Double) if distance_type is NOT Sax.'}])
func = lambda x: ast.literal_eval(x)
try:
temp_table[input_col_1] = temp_table[input_col_1].apply(func)
temp_table[input_col_2] = temp_table[input_col_2].apply(func)
except:
pass
arr_1 = temp_table[input_col_1].values
arr_2 = temp_table[input_col_2].values
distance_list = compute_distance(arr_1, arr_2, distance_type, sax_obj)
out_table['distance'] = distance_list
return {'out_table':out_table}
def compute_distance(arr_1, arr_2, distance_type, sax_obj):
if distance_type == 'Sax':
distance_func = sax_obj.compare_strings
elif distance_type == 'Dtw':
distance_func = _dtw
elif distance_type == 'Euclidean' or distance_type == 'EuclideanWithInterpolation':
distance_func = _euclidean
elif distance_type == 'Correlation':
distance_func = _corr
elif distance_type == 'L1Distance' or distance_type == 'L1DistanceWithInterpolation':
distance_func = _l1
distance_list = []
for ind in range(len(arr_1)):
temp_1 = np.array(arr_1[ind])
temp_2 = np.array(arr_2[ind])
if 'Interpolation' in distance_type:
temp_1 = _interpolate(temp_1)
temp_2 = _interpolate(temp_2)
try:
distance_list.append(distance_func(temp_1, temp_2))
except:
distance_list.append(np.nan)
return distance_list
def _dtw(in_1, in_2):
sim = dtw.distance(in_1, in_2)
return sim
def _euclidean(in_1, in_2):
return np.linalg.norm((in_1 - in_2), ord=2)
def _corr(in_1, in_2):
return np.corrcoef(in_1, in_2)[0, 1]
def _interpolate(arr):
return | pd.Series(arr) | pandas.Series |
import glob
from datetime import datetime, timezone
import pytz
from tzlocal import get_localzone
import pandas as pd
import streamlit as st
from google.oauth2 import service_account
from gspread_pandas import Spread, Client
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from lightgbm import LGBMClassifier
from joblib import dump, load
class DataMerger():
def __init__(self):
scopes=[
"https://www.googleapis.com/auth/spreadsheets",
]
credentials = service_account.Credentials.from_service_account_info(
st.secrets["gcp_service_account"],
scopes=scopes,
)
client = Client(scope=scopes, creds=credentials)
self.spread = Spread(st.secrets["private_gsheets_url"], client=client, sheet='book')
self.columns = ['Transaction date', 'Description', 'Amount', 'Booked balance', 'Account', 'User', 'Category']
def cast(self, df):
df[self.columns[0]] = pd.to_datetime(df[self.columns[0]], format='%Y-%m-%d').astype(str)
df[self.columns[1]] = df[self.columns[1]].astype(str)
df[self.columns[2:4]] = df[self.columns[2:4]].astype(float)
df[self.columns[4:]] = df[self.columns[4:]].astype(str)
return df
def merge(self, tmp_df):
master_df = self.spread.sheet_to_df(header_rows=1, index=None)
master_df = self.cast(master_df)
merged_df = | pd.concat([master_df, tmp_df], ignore_index=True) | pandas.concat |
from __future__ import division
from datetime import datetime
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
import pandas as pd
import numpy as np
import random
from nose.tools import assert_almost_equal as aae
import bt
import bt.algos as algos
def test_algo_name():
class TestAlgo(algos.Algo):
pass
actual = TestAlgo()
assert actual.name == 'TestAlgo'
class DummyAlgo(algos.Algo):
def __init__(self, return_value=True):
self.return_value = return_value
self.called = False
def __call__(self, target):
self.called = True
return self.return_value
def test_algo_stack():
algo1 = DummyAlgo(return_value=True)
algo2 = DummyAlgo(return_value=False)
algo3 = DummyAlgo(return_value=True)
target = mock.MagicMock()
stack = bt.AlgoStack(algo1, algo2, algo3)
actual = stack(target)
assert not actual
assert algo1.called
assert algo2.called
assert not algo3.called
def test_print_temp_data():
target = mock.MagicMock()
target.temp={}
target.temp['selected'] = ['c1','c2']
target.temp['weights'] = [0.5,0.5]
algo = algos.PrintTempData()
assert algo( target )
algo = algos.PrintTempData( 'Selected: {selected}')
assert algo( target )
def test_print_info():
target = bt.Strategy('s', [])
target.temp={}
algo = algos.PrintInfo()
assert algo( target )
algo = algos.PrintInfo( '{now}: {name}')
assert algo( target )
def test_run_once():
algo = algos.RunOnce()
assert algo(None)
assert not algo(None)
assert not algo(None)
def test_run_period():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunPeriod()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
target.now = None
assert not algo(target)
# run on first date
target.now = dts[0]
assert not algo(target)
# run on first supplied date
target.now = dts[1]
assert algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert not algo(target)
algo = algos.RunPeriod(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
# run on first date
target.now = dts[0]
assert not algo(target)
# first supplied date
target.now = dts[1]
assert not algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert algo(target)
# date not in index
target.now = datetime(2009, 2, 15)
assert not algo(target)
def test_run_daily():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunDaily()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('',[algo]),
data
)
target.data = backtest.data
target.now = dts[1]
assert algo(target)
def test_run_weekly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunWeekly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert not algo(target)
# new week
target.now = dts[3]
assert algo(target)
algo = algos.RunWeekly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert algo(target)
# new week
target.now = dts[3]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8),datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_monthly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunMonthly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert not algo(target)
# new month
target.now = dts[31]
assert algo(target)
algo = algos.RunMonthly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert algo(target)
# new month
target.now = dts[31]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_quarterly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunQuarterly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert not algo(target)
# new quarter
target.now = dts[90]
assert algo(target)
algo = algos.RunQuarterly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert algo(target)
# new quarter
target.now = dts[90]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_yearly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunYearly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert not algo(target)
# new year
target.now = dts[365]
assert algo(target)
algo = algos.RunYearly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert algo(target)
# new year
target.now = dts[365]
assert not algo(target)
def test_run_on_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunOnDate('2010-01-01', '2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
def test_run_if_out_of_bounds():
algo = algos.RunIfOutOfBounds(0.5)
dts = pd.date_range('2010-01-01', periods=3)
s = bt.Strategy('s')
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.temp['selected'] = ['c1', 'c2']
s.temp['weights'] = {'c1': .5, 'c2':.5}
s.update(dts[0])
s.children['c1'] = bt.core.SecurityBase('c1')
s.children['c2'] = bt.core.SecurityBase('c2')
s.children['c1']._weight = 0.5
s.children['c2']._weight = 0.5
assert not algo(s)
s.children['c1']._weight = 0.25
s.children['c2']._weight = 0.75
assert not algo(s)
s.children['c1']._weight = 0.24
s.children['c2']._weight = 0.76
assert algo(s)
s.children['c1']._weight = 0.75
s.children['c2']._weight = 0.25
assert not algo(s)
s.children['c1']._weight = 0.76
s.children['c2']._weight = 0.24
assert algo(s)
def test_run_after_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunAfterDate('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-03')
assert algo(target)
def test_run_after_days():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunAfterDays(3)
assert not algo(target)
assert not algo(target)
assert not algo(target)
assert algo(target)
def test_set_notional():
algo = algos.SetNotional('notional')
s = bt.FixedIncomeStrategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
notional = pd.Series(index=dts[:2], data=[1e6, 5e6])
s.setup( data, notional = notional )
s.update(dts[0])
assert algo(s)
assert s.temp['notional_value'] == 1e6
s.update(dts[1])
assert algo(s)
assert s.temp['notional_value'] == 5e6
s.update(dts[2])
assert not algo(s)
def test_rebalance():
algo = algos.Rebalance()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c1 = s['c1']
assert c1.value == 1000
assert c1.position == 10
assert c1.weight == 1.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 1000
assert c2.position == 10
assert c2.weight == 1.
def test_rebalance_with_commissions():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 999
assert s.capital == 99
c1 = s['c1']
assert c1.value == 900
assert c1.position == 9
assert c1.weight == 900 / 999.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 997
assert s.capital == 97
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 900
assert c2.position == 9
assert c2.weight == 900. / 997
def test_rebalance_with_cash():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
# set cash amount
s.temp['cash'] = 0.5
assert algo(s)
assert s.value == 999
assert s.capital == 599
c1 = s['c1']
assert c1.value == 400
assert c1.position == 4
assert c1.weight == 400.0 / 999
s.temp['weights'] = {'c2': 1}
# change cash amount
s.temp['cash'] = 0.25
assert algo(s)
assert s.value == 997
assert s.capital == 297
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 700
assert c2.position == 7
assert c2.weight == 700.0 / 997
def test_rebalance_updatecount():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.use_integer_positions(False)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3', 'c4','c5'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 0.25, 'c2':0.25, 'c3':0.25, 'c4':0.25}
update = bt.core.SecurityBase.update
bt.core.SecurityBase._update_call_count = 0
def side_effect(self, *args, **kwargs):
bt.core.SecurityBase._update_call_count += 1
return update(self, *args, **kwargs)
with mock.patch.object(bt.core.SecurityBase, 'update', side_effect) as mock_update:
assert algo(s)
assert s.value == 1000
assert s.capital == 0
# Update is called once when each weighted security is created (4)
# and once for each security after all allocations are made (4)
assert bt.core.SecurityBase._update_call_count == 8
s.update(dts[1])
s.temp['weights'] = {'c1': 0.5, 'c2':0.5}
update = bt.core.SecurityBase.update
bt.core.SecurityBase._update_call_count = 0
def side_effect(self, *args, **kwargs):
bt.core.SecurityBase._update_call_count += 1
return update(self, *args, **kwargs)
with mock.patch.object(bt.core.SecurityBase, 'update', side_effect) as mock_update:
assert algo(s)
# Update is called once for each weighted security before allocation (4)
# and once for each security after all allocations are made (4)
assert bt.core.SecurityBase._update_call_count == 8
s.update(dts[2])
s.temp['weights'] = {'c1': 0.25, 'c2':0.25, 'c3':0.25, 'c4':0.25}
update = bt.core.SecurityBase.update
bt.core.SecurityBase._update_call_count = 0
def side_effect(self, *args, **kwargs):
bt.core.SecurityBase._update_call_count += 1
return update(self, *args, **kwargs)
with mock.patch.object(bt.core.SecurityBase, 'update', side_effect) as mock_update:
assert algo(s)
# Update is called once for each weighted security before allocation (2)
# and once for each security after all allocations are made (4)
assert bt.core.SecurityBase._update_call_count == 6
def test_rebalance_fixedincome():
algo = algos.Rebalance()
c1 = bt.Security('c1')
c2 = bt.CouponPayingSecurity('c2')
s = bt.FixedIncomeStrategy('s', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
coupons = pd.DataFrame(index=dts, columns=['c2'], data=0)
s.setup(data, coupons=coupons)
s.update(dts[0])
s.temp['notional_value'] = 1000
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 0.
assert s.notional_value == 1000
assert s.capital == -1000
c1 = s['c1']
assert c1.value == 1000
assert c1.notional_value == 1000
assert c1.position == 10
assert c1.weight == 1.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 0.
assert s.notional_value == 1000
assert s.capital == -1000*100
c2 = s['c2']
assert c1.value == 0
assert c1.notional_value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 1000*100
assert c2.notional_value == 1000
assert c2.position == 1000
assert c2.weight == 1.
def test_select_all():
algo = algos.SelectAll()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# make sure don't keep nan
s.update(dts[1])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo2 = algos.SelectAll(include_no_data=True)
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectAll(include_negative=True)
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_randomly_n_none():
algo = algos.SelectRandomly(n=None) # Behaves like SelectAll
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp.pop('selected')
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# make sure don't keep nan
s.update(dts[1])
assert algo(s)
selected = s.temp.pop('selected')
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo2 = algos.SelectRandomly(n=None, include_no_data=True)
assert algo2(s)
selected = s.temp.pop('selected')
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp.pop('selected')
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectRandomly(n=None, include_negative=True)
assert algo3(s)
selected = s.temp.pop('selected')
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_randomly():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100.)
data['c1'][dts[0]] = np.nan
data['c2'][dts[0]] = 95
data['c3'][dts[0]] = -5
s.setup(data)
s.update(dts[0])
algo = algos.SelectRandomly(n=1)
assert algo(s)
assert s.temp.pop('selected') == ['c2']
random.seed(1000)
algo = algos.SelectRandomly(n=1, include_negative=True)
assert algo(s)
assert s.temp.pop('selected') == ['c3']
random.seed(1009)
algo = algos.SelectRandomly(n=1, include_no_data=True)
assert algo(s)
assert s.temp.pop('selected') == ['c1']
random.seed(1009)
# If selected already set, it will further filter it
s.temp['selected'] = ['c2']
algo = algos.SelectRandomly(n=1, include_no_data=True)
assert algo(s)
assert s.temp.pop('selected') == ['c2']
def test_select_these():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
s.setup(data)
s.update(dts[0])
algo = algos.SelectThese( ['c1', 'c2'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
algo = algos.SelectThese( ['c1'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c1' in selected
# make sure don't keep nan
s.update(dts[1])
algo = algos.SelectThese( ['c1', 'c2'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo2 = algos.SelectThese( ['c1', 'c2'], include_no_data=True)
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectThese(['c1', 'c2'], include_negative=True)
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_where_all():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
where = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=True)
s.setup(data, where = where)
s.update(dts[0])
algo = algos.SelectWhere('where')
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# make sure don't keep nan
s.update(dts[1])
algo = algos.SelectThese( ['c1', 'c2'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo2 = algos.SelectWhere('where', include_no_data=True)
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectWhere('where', include_negative=True)
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_where():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
where = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=True)
where.loc[ dts[1] ] = False
where['c1'].loc[ dts[2] ] = False
algo = algos.SelectWhere('where')
s.setup(data, where=where)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
s.update(dts[1])
assert algo(s)
assert s.temp['selected'] == []
s.update(dts[2])
assert algo(s)
assert s.temp['selected'] == ['c2']
def test_select_where_legacy():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
where = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=True)
where.loc[ dts[1] ] = False
where['c1'].loc[ dts[2] ] = False
algo = algos.SelectWhere(where)
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
s.update(dts[1])
assert algo(s)
assert s.temp['selected'] == []
s.update(dts[2])
assert algo(s)
assert s.temp['selected'] == ['c2']
def test_select_regex():
s = bt.Strategy('s')
algo = algos.SelectRegex( 'c1' )
s.temp['selected'] = ['a1', 'c1', 'c2', 'c11', 'cc1']
assert algo( s )
assert s.temp['selected'] == ['c1', 'c11', 'cc1']
algo = algos.SelectRegex( '^c1$' )
assert algo( s )
assert s.temp['selected'] == ['c1']
def test_resolve_on_the_run():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'b1'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c2'][dts[2]] = -5
on_the_run = pd.DataFrame(index=dts, columns=['c'], data='c1')
on_the_run.loc[dts[2], 'c'] = 'c2'
s.setup(data, on_the_run = on_the_run)
s.update(dts[0])
s.temp['selected'] = ['c', 'b1']
algo = algos.ResolveOnTheRun( 'on_the_run' )
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'b1' in selected
# make sure don't keep nan
s.update(dts[1])
s.temp['selected'] = ['c', 'b1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'b1' in selected
# if specify include_no_data then 2
algo2 = algos.ResolveOnTheRun('on_the_run', include_no_data=True)
s.temp['selected'] = ['c', 'b1']
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'b1' in selected
# behavior on negative prices
s.update(dts[2])
s.temp['selected'] = ['c', 'b1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'b1' in selected
algo3 = algos.ResolveOnTheRun('on_the_run', include_negative=True)
s.temp['selected'] = ['c', 'b1']
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c2' in selected
assert 'b1' in selected
def test_select_types():
c1 = bt.Security('c1')
c2 = bt.CouponPayingSecurity('c2')
c3 = bt.HedgeSecurity('c3')
c4 = bt.CouponPayingHedgeSecurity('c4')
c5 = bt.FixedIncomeSecurity('c5')
s = bt.Strategy('p', children = [c1, c2, c3, c4, c5])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3', 'c4', 'c5'], data=100.)
coupons = pd.DataFrame(index=dts, columns=['c2', 'c4'], data=0.)
s.setup(data, coupons = coupons)
i = 0
s.update(dts[i])
algo = algos.SelectTypes(include_types=(bt.Security, bt.HedgeSecurity), exclude_types=())
assert algo(s)
assert set(s.temp.pop('selected')) == set(['c1', 'c3'])
algo = algos.SelectTypes(include_types=(bt.core.SecurityBase,), exclude_types=(bt.CouponPayingSecurity,))
assert algo(s)
assert set(s.temp.pop('selected')) == set(['c1', 'c3', 'c5'])
s.temp['selected'] = ['c1', 'c2', 'c3']
algo = algos.SelectTypes(include_types=(bt.core.SecurityBase,))
assert algo(s)
assert set(s.temp.pop('selected')) == set(['c1', 'c2', 'c3'])
def test_weight_equally():
algo = algos.WeighEqually()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.update(dts[0])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.5
assert 'c2' in weights
assert weights['c2'] == 0.5
def test_weight_specified():
algo = algos.WeighSpecified(c1=0.6, c2=0.4)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.update(dts[0])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.6
assert 'c2' in weights
assert weights['c2'] == 0.4
def test_scale_weights():
s = bt.Strategy('s')
algo = algos.ScaleWeights( -0.5 )
s.temp['weights'] = {'c1': 0.5, 'c2': -0.4, 'c3':0 }
assert algo( s )
assert s.temp['weights'] == {'c1':-0.25, 'c2':0.2, 'c3':0}
def test_select_has_data():
algo = algos.SelectHasData(min_count=3, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=10)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[0]] = np.nan
data['c1'].loc[dts[1]] = np.nan
s.setup(data)
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
def test_select_has_data_preselected():
algo = algos.SelectHasData(min_count=3, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[0]] = np.nan
data['c1'].loc[dts[1]] = np.nan
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 0
@mock.patch('ffn.calc_erc_weights')
def test_weigh_erc(mock_erc):
algo = algos.WeighERC(lookback=pd.DateOffset(days=5))
mock_erc.return_value = pd.Series({'c1': 0.3, 'c2': 0.7})
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
assert mock_erc.called
rets = mock_erc.call_args[0][0]
assert len(rets) == 4
assert 'c1' in rets
assert 'c2' in rets
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.3
assert weights['c2'] == 0.7
def test_weigh_target():
algo = algos.WeighTarget('target')
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
target = pd.DataFrame(index=dts[:2], columns=['c1', 'c2'], data=0.5)
target['c1'].loc[dts[1]] = 1.0
target['c2'].loc[dts[1]] = 0.0
s.setup( data, target = target )
s.update(dts[0])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.5
assert weights['c2'] == 0.5
s.update(dts[1])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 1.0
assert weights['c2'] == 0.0
s.update(dts[2])
assert not algo(s)
def test_weigh_inv_vol():
algo = algos.WeighInvVol(lookback=pd.DateOffset(days=5))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
# high vol c1
data['c1'].loc[dts[1]] = 105
data['c1'].loc[dts[2]] = 95
data['c1'].loc[dts[3]] = 105
data['c1'].loc[dts[4]] = 95
# low vol c2
data['c2'].loc[dts[1]] = 100.1
data['c2'].loc[dts[2]] = 99.9
data['c2'].loc[dts[3]] = 100.1
data['c2'].loc[dts[4]] = 99.9
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c2'] > weights['c1']
aae(weights['c1'], 0.020, 3)
aae(weights['c2'], 0.980, 3)
@mock.patch('ffn.calc_mean_var_weights')
def test_weigh_mean_var(mock_mv):
algo = algos.WeighMeanVar(lookback=pd.DateOffset(days=5))
mock_mv.return_value = pd.Series({'c1': 0.3, 'c2': 0.7})
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
assert mock_mv.called
rets = mock_mv.call_args[0][0]
assert len(rets) == 4
assert 'c1' in rets
assert 'c2' in rets
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.3
assert weights['c2'] == 0.7
def test_weigh_randomly():
s = bt.Strategy('s')
s.temp['selected'] = ['c1', 'c2', 'c3']
algo = algos.WeighRandomly()
assert algo(s)
weights = s.temp['weights']
assert len( weights ) == 3
assert sum( weights.values() ) == 1.
algo = algos.WeighRandomly( (0.3,0.5), 0.95)
assert algo(s)
weights = s.temp['weights']
assert len( weights ) == 3
aae( sum( weights.values() ), 0.95 )
for c in s.temp['selected']:
assert weights[c] <= 0.5
assert weights[c] >= 0.3
def test_set_stat():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[1]] = 105
data['c2'].loc[dts[1]] = 95
stat = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=4.)
stat['c1'].loc[dts[1]] = 5.
stat['c2'].loc[dts[1]] = 6.
algo = algos.SetStat( 'test_stat' )
s.setup(data, test_stat = stat)
s.update(dts[0])
print()
print(s.get_data('test_stat'))
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 4.
assert stat['c2'] == 4.
s.update(dts[1])
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 5.
assert stat['c2'] == 6.
def test_set_stat_legacy():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[1]] = 105
data['c2'].loc[dts[1]] = 95
stat = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=4.)
stat['c1'].loc[dts[1]] = 5.
stat['c2'].loc[dts[1]] = 6.
algo = algos.SetStat( stat )
s.setup(data)
s.update(dts[0])
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 4.
assert stat['c2'] == 4.
s.update(dts[1])
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 5.
assert stat['c2'] == 6.
def test_stat_total_return():
algo = algos.StatTotalReturn(lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
stat = s.temp['stat']
assert len(stat) == 2
assert stat['c1'] == 105.0 / 100 - 1
assert stat['c2'] == 95.0 / 100 - 1
def test_select_n():
algo = algos.SelectN(n=1, sort_descending=True)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['stat'] = data.calc_total_return()
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c1' in selected
algo = algos.SelectN(n=1, sort_descending=False)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# return 2 we have if all_or_none false
algo = algos.SelectN(n=3, sort_descending=False)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# return 0 we have if all_or_none true
algo = algos.SelectN(n=3, sort_descending=False, all_or_none=True)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 0
def test_select_n_perc():
algo = algos.SelectN(n=0.5, sort_descending=True)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['stat'] = data.calc_total_return()
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c1' in selected
def test_select_momentum():
algo = algos.SelectMomentum(n=1, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
actual = s.temp['selected']
assert len(actual) == 1
assert 'c1' in actual
def test_limit_weights():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.temp['weights'] = {'c1': 0.6, 'c2':0.2, 'c3':0.2}
algo = algos.LimitWeights(0.5)
assert algo(s)
w = s.temp['weights']
assert w['c1'] == 0.5
assert w['c2'] == 0.25
assert w['c3'] == 0.25
algo = algos.LimitWeights(0.3)
assert algo(s)
w = s.temp['weights']
assert w == {}
s.temp['weights'] = {'c1': 0.4, 'c2':0.3, 'c3':0.3}
algo = algos.LimitWeights(0.5)
assert algo(s)
w = s.temp['weights']
assert w['c1'] == 0.4
assert w['c2'] == 0.3
assert w['c3'] == 0.3
def test_limit_deltas():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.temp['weights'] = {'c1': 1}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert w['c1'] == 0.1
s.temp['weights'] = {'c1': 0.05}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert w['c1'] == 0.05
s.temp['weights'] = {'c1': 0.5, 'c2': 0.5}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.1
assert w['c2'] == 0.1
s.temp['weights'] = {'c1': 0.5, 'c2': -0.5}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.1
assert w['c2'] == -0.1
s.temp['weights'] = {'c1': 0.5, 'c2': -0.5}
algo = algos.LimitDeltas({'c1': 0.1})
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.1
assert w['c2'] == -0.5
s.temp['weights'] = {'c1': 0.5, 'c2': -0.5}
algo = algos.LimitDeltas({'c1': 0.1, 'c2': 0.3})
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.1
assert w['c2'] == -0.3
# set exisitng weight
s.children['c1'] = bt.core.SecurityBase('c1')
s.children['c1']._weight = 0.3
s.children['c2'] = bt.core.SecurityBase('c2')
s.children['c2']._weight = -0.7
s.temp['weights'] = {'c1': 0.5, 'c2': -0.5}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.4
assert w['c2'] == -0.6
def test_rebalance_over_time():
target = mock.MagicMock()
rb = mock.MagicMock()
algo = algos.RebalanceOverTime(n=2)
# patch in rb function
algo._rb = rb
target.temp = {}
target.temp['weights'] = {'a': 1, 'b': 0}
a = mock.MagicMock()
a.weight = 0.
b = mock.MagicMock()
b.weight = 1.
target.children = {'a': a, 'b': b}
assert algo(target)
w = target.temp['weights']
assert len(w) == 2
assert w['a'] == 0.5
assert w['b'] == 0.5
assert rb.called
called_tgt = rb.call_args[0][0]
called_tgt_w = called_tgt.temp['weights']
assert len(called_tgt_w) == 2
assert called_tgt_w['a'] == 0.5
assert called_tgt_w['b'] == 0.5
# update weights for next call
a.weight = 0.5
b.weight = 0.5
# clear out temp - same as would Strategy
target.temp = {}
assert algo(target)
w = target.temp['weights']
assert len(w) == 2
assert w['a'] == 1.
assert w['b'] == 0.
assert rb.call_count == 2
# update weights for next call
# should do nothing now
a.weight = 1
b.weight = 0
# clear out temp - same as would Strategy
target.temp = {}
assert algo(target)
# no diff in call_count since last time
assert rb.call_count == 2
def test_require():
target = mock.MagicMock()
target.temp = {}
algo = algos.Require(lambda x: len(x) > 0, 'selected')
assert not algo(target)
target.temp['selected'] = []
assert not algo(target)
target.temp['selected'] = ['a', 'b']
assert algo(target)
def test_run_every_n_periods():
target = mock.MagicMock()
target.temp = {}
algo = algos.RunEveryNPeriods(n=3, offset=0)
target.now = pd.to_datetime('2010-01-01')
assert algo(target)
# run again w/ no date change should not trigger
assert not algo(target)
target.now = pd.to_datetime('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
target.now = pd.to_datetime('2010-01-04')
assert algo(target)
target.now = pd.to_datetime('2010-01-05')
assert not algo(target)
def test_run_every_n_periods_offset():
target = mock.MagicMock()
target.temp = {}
algo = algos.RunEveryNPeriods(n=3, offset=1)
target.now = pd.to_datetime('2010-01-01')
assert not algo(target)
# run again w/ no date change should not trigger
assert not algo(target)
target.now = pd.to_datetime('2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
target.now = pd.to_datetime('2010-01-04')
assert not algo(target)
target.now = pd.to_datetime('2010-01-05')
assert algo(target)
def test_not():
target = mock.MagicMock()
target.temp = {}
#run except on the 1/2/18
runOnDateAlgo = algos.RunOnDate(pd.to_datetime('2018-01-02'))
notAlgo = algos.Not(runOnDateAlgo)
target.now = pd.to_datetime('2018-01-01')
assert notAlgo(target)
target.now = pd.to_datetime('2018-01-02')
assert not notAlgo(target)
def test_or():
target = mock.MagicMock()
target.temp = {}
#run on the 1/2/18
runOnDateAlgo = algos.RunOnDate(pd.to_datetime('2018-01-02'))
runOnDateAlgo2 = algos.RunOnDate(pd.to_datetime('2018-01-03'))
runOnDateAlgo3 = algos.RunOnDate(pd.to_datetime('2018-01-04'))
runOnDateAlgo4 = algos.RunOnDate(pd.to_datetime('2018-01-04'))
orAlgo = algos.Or([runOnDateAlgo, runOnDateAlgo2, runOnDateAlgo3, runOnDateAlgo4])
#verify it returns false when neither is true
target.now = pd.to_datetime('2018-01-01')
assert not orAlgo(target)
# verify it returns true when the first is true
target.now = pd.to_datetime('2018-01-02')
assert orAlgo(target)
# verify it returns true when the second is true
target.now = pd.to_datetime('2018-01-03')
assert orAlgo(target)
# verify it returns true when both algos return true
target.now = pd.to_datetime('2018-01-04')
assert orAlgo(target)
def test_TargetVol():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=7)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
# high vol c1
data.loc[dts[0],'c1'] = 95
data.loc[dts[1],'c1'] = 105
data.loc[dts[2],'c1'] = 95
data.loc[dts[3],'c1'] = 105
data.loc[dts[4],'c1'] = 95
data.loc[dts[5],'c1'] = 105
data.loc[dts[6],'c1'] = 95
# low vol c2
data.loc[dts[0], 'c2'] = 99
data.loc[dts[1], 'c2'] = 101
data.loc[dts[2], 'c2'] = 99
data.loc[dts[3], 'c2'] = 101
data.loc[dts[4], 'c2'] = 99
data.loc[dts[5], 'c2'] = 101
data.loc[dts[6], 'c2'] = 99
targetVolAlgo = algos.TargetVol(
0.1,
lookback=pd.DateOffset(days=5),
lag=pd.DateOffset(days=1),
covar_method='standard',
annualization_factor=1
)
s.setup(data)
s.update(dts[6])
s.temp['weights'] = {'c1':0.5, 'c2':0.5}
assert targetVolAlgo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert np.isclose(weights['c2'],weights['c1'])
unannualized_c2_weight = weights['c1']
targetVolAlgo = algos.TargetVol(
0.1*np.sqrt(252),
lookback=pd.DateOffset(days=5),
lag=pd.DateOffset(days=1),
covar_method='standard',
annualization_factor=252
)
s.setup(data)
s.update(dts[6])
s.temp['weights'] = {'c1': 0.5, 'c2': 0.5}
assert targetVolAlgo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert np.isclose(weights['c2'], weights['c1'])
assert np.isclose(unannualized_c2_weight, weights['c2'])
def test_PTE_Rebalance():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=30*4)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
# high vol c1
# low vol c2
for i,dt in enumerate(dts[:-2]):
if i % 2 == 0:
data.loc[dt,'c1'] = 95
data.loc[dt,'c2'] = 101
else:
data.loc[dt, 'c1'] = 105
data.loc[dt, 'c2'] = 99
dt = dts[-2]
data.loc[dt,'c1'] = 115
data.loc[dt,'c2'] = 97
s.setup(data)
s.update(dts[-2])
s.adjust(1000000)
s.rebalance(0.4,'c1')
s.rebalance(0.6,'c2')
wdf = pd.DataFrame(
np.zeros(data.shape),
columns=data.columns,
index=data.index
)
wdf['c1'] = 0.5
wdf['c2'] = 0.5
PTE_rebalance_Algo = bt.algos.PTE_Rebalance(
0.01,
wdf,
lookback=pd.DateOffset(months=3),
lag=pd.DateOffset(days=1),
covar_method='standard',
annualization_factor=252
)
assert PTE_rebalance_Algo(s)
s.rebalance(0.5, 'c1')
s.rebalance(0.5, 'c2')
assert not PTE_rebalance_Algo(s)
def test_close_positions_after_date():
c1 = bt.Security('c1')
c2 = bt.Security('c2')
c3 = bt.Security('c3')
s = bt.Strategy('s', children = [c1, c2, c3])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100)
c1 = s['c1']
c2 = s['c2']
c3 = s['c3']
cutoffs= pd.DataFrame( { 'date' : [ dts[1], dts[2] ] }, index = ['c1','c2'] )
algo = algos.ClosePositionsAfterDates( 'cutoffs' )
s.setup(data, cutoffs=cutoffs)
s.update(dts[0])
s.transact( 100, 'c1')
s.transact( 100, 'c2')
s.transact( 100, 'c3')
algo(s)
assert c1.position == 100
assert c2.position == 100
assert c3.position == 100
# Don't run anything on dts[1], even though that's when c1 closes
s.update( dts[2])
algo(s)
assert c1.position == 0
assert c2.position == 0
assert c3.position == 100
assert s.perm['closed'] == set(['c1', 'c2'])
def test_roll_positions_after_date():
c1 = bt.Security('c1')
c2 = bt.Security('c2')
c3 = bt.Security('c3')
s = bt.Strategy('s', children = [c1, c2, c3])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100)
c1 = s['c1']
c2 = s['c2']
c3 = s['c3']
roll = pd.DataFrame( { 'date' : [ dts[1], dts[2] ], 'target' : [ 'c3', 'c1' ], 'factor' : [ 0.5, 2.0 ] }, index = ['c1','c2'] )
algo = algos.RollPositionsAfterDates( 'roll' )
s.setup(data, roll=roll)
s.update(dts[0])
s.transact( 100, 'c1')
s.transact( 100, 'c2')
s.transact( 100, 'c3')
algo(s)
assert c1.position == 100
assert c2.position == 100
assert c3.position == 100
# Don't run anything on dts[1], even though that's when c1 closes
s.update( dts[2])
algo(s)
assert c1.position == 200 # From c2
assert c2.position == 0
assert c3.position == 100 + 50
assert s.perm['rolled'] == set(['c1', 'c2'])
def test_replay_transactions():
c1 = bt.Security('c1')
c2 = bt.Security('c2')
s = bt.Strategy('s', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100)
c1 = s['c1']
c2 = s['c2']
transactions = pd.DataFrame( [ ( pd.Timestamp( '2009-12-01 00'), 'c1', 100, 99.5),
( pd.Timestamp( '2010-01-01 10'), 'c1', -100, 101),
( pd.Timestamp( '2010-01-02 00'), 'c2', 50, 103)
],
columns = ['Date', 'Security', 'quantity', 'price'])
transactions = transactions.set_index( ['Date','Security'])
algo = algos.ReplayTransactions( 'transactions' )
s.setup(data, bidoffer={}, transactions=transactions) # Pass bidoffer so it will track bidoffer paid
s.adjust(1000)
s.update(dts[0])
algo(s)
assert c1.position == 100
assert c2.position == 0
assert c1.bidoffer_paid == -50
s.update(dts[1])
algo(s)
assert c1.position == 0
assert c2.position == 50
assert c1.bidoffer_paid == -100
assert c2.bidoffer_paid == 150
def test_replay_transactions_consistency():
c1 = bt.Security('c1')
c2 = bt.Security('c2')
s = bt.Strategy('s', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100)
transactions = pd.DataFrame( [ ( pd.Timestamp( '2010-01-01 00'), 'c1', -100., 101.),
( pd.Timestamp( '2010-01-02 00'), 'c2', 50., 103.)
],
columns = ['Date', 'Security', 'quantity', 'price'])
transactions = transactions.set_index( ['Date','Security'])
algo = algos.ReplayTransactions( 'transactions' )
strategy = bt.Strategy('strategy', algos = [ algo ], children = [c1, c2])
backtest = bt.backtest.Backtest(strategy, data, name='Test',
additional_data={'bidoffer':{}, 'transactions':transactions})
out = bt.run(backtest)
t1 = transactions.sort_index(axis=1)
t2 = out.get_transactions().sort_index(axis=1)
assert t1.equals( t2 )
def test_simulate_rfq_transactions():
c1 = bt.Security('c1')
c2 = bt.Security('c2')
s = bt.Strategy('s', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100)
c1 = s['c1']
c2 = s['c2']
rfqs = pd.DataFrame( [ ( 'A', pd.Timestamp( '2009-12-01 00'), 'c1', 100),
( 'B', pd.Timestamp( '2010-01-01 10'), 'c1', -100),
( 'C', pd.Timestamp( '2010-01-01 12'), 'c1', 75),
( 'D', pd.Timestamp( '2010-01-02 00'), 'c2', 50)
],
columns = ['id', 'Date', 'Security', 'quantity'])
rfqs = rfqs.set_index(['Date','Security'])
def model( rfqs, target ):
# Dummy model - in practice this model would rely on positions and values in target
transactions = rfqs[ ['quantity']]
prices = {'A' : 99.5, 'B' : 101, 'D':103}
transactions[ 'price' ] = rfqs.id.apply( lambda x : prices.get(x) )
return transactions.dropna()
algo = algos.SimulateRFQTransactions( 'rfqs', model )
s.setup(data, bidoffer={}, rfqs=rfqs) # Pass bidoffer so it will track bidoffer paid
s.adjust(1000)
s.update(dts[0])
algo(s)
assert c1.position == 100
assert c2.position == 0
assert c1.bidoffer_paid == -50
s.update(dts[1])
algo(s)
assert c1.position == 0
assert c2.position == 50
assert c1.bidoffer_paid == -100
assert c2.bidoffer_paid == 150
def test_update_risk():
c1 = bt.Security('c1')
c2 = bt.Security('c2')
s = bt.Strategy('s', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'].loc[dts[1]] = 105
data['c2'].loc[dts[1]] = 95
c1 = s['c1']
c2 = s['c2']
algo = algos.UpdateRisk('Test', history=False)
s.setup(data, unit_risk={'Test':data})
s.adjust(1000)
s.update(dts[0])
assert algo( s )
assert s.risk['Test'] == 0
assert c1.risk['Test'] == 0
assert c2.risk['Test'] == 0
s.transact( 1, 'c1')
s.transact( 5, 'c2')
assert algo( s )
assert s.risk['Test'] == 600
assert c1.risk['Test'] == 100
assert c2.risk['Test'] == 500
s.update(dts[1])
assert algo( s )
assert s.risk['Test'] == 105 + 5*95
assert c1.risk['Test'] == 105
assert c2.risk['Test'] == 5*95
assert not hasattr( s, 'risks' )
assert not hasattr( c1, 'risks' )
assert not hasattr( c2, 'risks' )
def test_update_risk_history_1():
c1 = bt.Security('c1')
c2 = bt.Security('c2')
s = bt.Strategy('s', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'].loc[dts[1]] = 105
data['c2'].loc[dts[1]] = 95
c1 = s['c1']
c2 = s['c2']
algo = algos.UpdateRisk('Test', history=1)
s.setup(data, unit_risk={'Test':data})
s.adjust(1000)
s.update(dts[0])
assert algo( s )
assert s.risks['Test'][0] == 0
s.transact( 1, 'c1')
s.transact( 5, 'c2')
assert algo( s )
assert s.risks['Test'][0] == 600
s.update(dts[1])
assert algo( s )
assert s.risks['Test'][0] == 600
assert s.risks['Test'][1] == 105 + 5*95
assert not hasattr( c1, 'risks' )
assert not hasattr( c2, 'risks' )
def test_update_risk_history_2():
c1 = bt.Security('c1')
c2 = bt.Security('c2')
s = bt.Strategy('s', children = [c1, c2])
dts = | pd.date_range('2010-01-01', periods=3) | pandas.date_range |
import pandas as pd
from loguru import logger
import arrow
import time
import json
import requests
import tqdm
from retrying import retry
headers = {'Accept': 'application/json', 'Content-Type': 'application/json', 'Cookie': 'G_zj_gsid=08890c40500a4a8ab21e0b2b9e9e47b1-gsid-', 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36'}
index = 1
df = pd.DataFrame()
while True:
reqBody = {"pageNum": index, "pageSize": 20, "createTimeFrom": 1609430400000, "createTimeTo": 1623227340000, "channelList": [], "areaList": ["ff8080815d551320015d589cc1da0014", "ff8080815d551320015d589cc1da0014$0", "ff8080815df350d4015df3d64603057a", "ff8080815df350d4015df4333f0006cb", "ff8080815df350d4015df44090fd06f5", "ff8080815df350d4015df44e7d100706", "ff8080815de36663015de3e0d0380000", "ff8080815de36663015de3ef492c0015", "ff8080815df45323015df461a0e6000b", "ff8080815df482ba015df7dceec40326", "ff8080815df45323015df46e7610001d", "ff8080815df482ba015df7eb3a8c0367", "<KEY>6", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "ff8080816f12372d016f1d5959bb7545"], "statusList": []}
res = json.loads(requests.post(url = 'https://opt.zjzwfw.gov.cn/rest/api/evaluation/case/historyForHandle/list?ctoken=<PASSWORD>', json=reqBody, headers = headers).text)
# print(res)
if not res['data']['list']:
break
df = df.append( | pd.DataFrame(res['data']['list']) | pandas.DataFrame |
'''
This sample shows how to set Column Format with DataFrame and from_df, to_df functions.
Make sure you've installed pandas. To install the module,
open the Script Window (Shift+Alt+3), type the following and press Enter:
pip install pandas
The following will check and install:
pip -chk pandas
'''
import originpro as op
import pandas as pd
# Create a dataframe to fill the sheet
df = pd.DataFrame({
'Date': ['10/25/2018','02/21/2019','04/01/2020'],
'Gender':['Male','Male','Female'],
'Score': [75.5, 86.7, 91],
})
df['Date'] = | pd.to_datetime(df['Date']) | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 5 23:37:07 2019
@author: manuel
"""
# Exercise 12
# Implement k-means soft clustering with online update, adopting the Euclidean
# distance as dissimilarity metric. Given the dataset data3.csv, apply the
# algorithm using $k = 3$ and $\eta = 0.1$ until the algorithm converges. As
# initial centroids use $x_{32}, x_{33}, x_{123}$, where the subscripts define
# the indexes of the points in the dataset. After convergence of the algorithm,
# compute the membership of $x_{10}$ with respect to each centroid and also
# plot the dataset using different colors for $x_{10}, x_{32}, x_{33}, x_{123}$.
import pandas as pd
import matplotlib.pyplot as plt
import math
def euclidean_distance(s1,s2):
"""
Compute the Euclidean distance between two n-dimensional objects.
"""
tmpsum = 0
for index,value in enumerate(s1):
tmpsum += (s1[index]-s2[index])**2
return math.sqrt(tmpsum)
def points_difference(point1,point2):
"""
Compute the difference between two n-dimensional objects and save it in
a pandas Series.
"""
tmplist = []
for index,value in enumerate(point1):
tmplist.append(point1[index] - point2[index])
return | pd.Series(tmplist) | pandas.Series |
#!/usr/bin/env python3
# Author:: <NAME> (mailto:<EMAIL>)
"""
Python Class Check Yellowstone Campground Booking API for Availability
"""
from datetime import datetime, timedelta
from json import loads
import logging
from random import choice
from typing import List, Optional
from urllib import parse
from pandas import DataFrame, to_datetime
from pytz import timezone
import requests
import tenacity
from camply.config import STANDARD_HEADERS, USER_AGENTS, YellowstoneConfig
from camply.containers import AvailableCampsite
from camply.providers.base_provider import BaseProvider
from camply.utils import logging_utils
logger = logging.getLogger(__name__)
class YellowstoneLodging(BaseProvider):
"""
Scanner for Lodging in Yellowstone
"""
def __repr__(self):
"""
String Representation
Returns
-------
str
"""
return "<YellowstoneLodging>"
def _get_monthly_availability(self, month: datetime, nights: int = None) -> dict:
"""
Check All Lodging in Yellowstone for Campground Data
Returns
-------
data_availability: dict
Data Availability Dictionary
"""
query_dict = dict(date=self._ensure_current_month(month=month),
limit=31,
rate_code=YellowstoneConfig.RATE_CODE)
if nights is not None:
query_dict.update(dict(nights=nights))
api_endpoint = self._get_api_endpoint(url_path=YellowstoneConfig.YELLOWSTONE_LODGING_PATH,
query=None)
logger.info(f"Searching for Yellowstone Lodging Availability: {month.strftime('%B, %Y')}")
all_resort_availability_data = self.make_yellowstone_request(endpoint=api_endpoint,
params=query_dict)
return all_resort_availability_data
@staticmethod
@tenacity.retry(wait=tenacity.wait_random_exponential(multiplier=3, max=1800),
stop=tenacity.stop.stop_after_delay(6000))
def _try_retry_get_data(endpoint: str, params: Optional[dict] = None):
"""
Try and Retry Fetching Data from the Yellowstone API. Unfortunately this is a required
method to request the data since the Yellowstone API doesn't always return data.
Parameters
----------
endpoint: str
API Endpoint
params
Returns
-------
"""
yellowstone_headers = choice(USER_AGENTS)
yellowstone_headers.update(STANDARD_HEADERS)
yellowstone_headers.update(YellowstoneConfig.API_REFERRERS)
response = requests.get(url=endpoint,
headers=yellowstone_headers,
params=params, timeout=30)
if response.status_code == 200 and response.text.strip() != "":
return loads(response.content)
else:
error_message = ("Something went wrong with checking the "
"Yellowstone Booking API. Will continue retrying.")
logger.warning(error_message)
raise RuntimeError(error_message)
@staticmethod
def make_yellowstone_request(endpoint: str, params: Optional[dict] = None):
"""
Try and Retry Fetching Data from the Yellowstone API. Unfortunately this is a required
method to request the data since the Yellowstone API doesn't always return data.
Parameters
----------
endpoint: str
API Endpoint
params
Returns
-------
"""
try:
content = YellowstoneLodging._try_retry_get_data(endpoint=endpoint, params=params)
except RuntimeError as re:
raise RuntimeError(f"error_message: {re}")
return content
@classmethod
def _get_api_endpoint(cls, url_path: str, query: Optional[dict] = None) -> str:
"""
Build the API Endpoint for All Yellowstone Lodging
"""
if query is not None:
query_string = parse.urlencode(query=query)
else:
query_string = ""
url_components = dict(scheme=YellowstoneConfig.API_SCHEME,
netloc=YellowstoneConfig.API_BASE_ENDPOINT,
url=url_path,
params="", query=query_string, fragment="")
api_endpoint = parse.urlunparse(tuple(url_components.values()))
return api_endpoint
@classmethod
def _return_lodging_url(cls, lodging_code: str, month: datetime,
params: Optional[dict] = "") -> str:
"""
Return a Browser Loadable URL to book from
Parameters
----------
lodging_code: str
Lodging Code from API
month: datetime
Month to return bookings filtered to
params: Optional[dict]
Optional URL Parameters
Returns
-------
str
URL String
"""
query = dict(dateFrom=month.strftime("%m-%d-%Y"))
if params is not None:
query.update(params)
query_string = parse.urlencode(query=query)
url_components = dict(scheme=YellowstoneConfig.API_SCHEME,
netloc=YellowstoneConfig.WEBUI_BASE_ENDPOINT,
url=f"{YellowstoneConfig.WEBUI_BOOKING_PATH}/{lodging_code}",
params="", query=query_string, fragment="")
webui_endpoint = parse.urlunparse(tuple(url_components.values()))
return webui_endpoint
@classmethod
def _compile_campground_availabilities(cls, availability: dict) -> List[dict]:
"""
Gather Data about campground availabilities within a JSON Availability Objet
Parameters
----------
availability: dict
JSON Availability Object
Returns
-------
available_campsites: List[dict]
List of Availabilities as JSON
"""
available_campsites = list()
for date_string in availability.keys():
booking_date = datetime.strptime(date_string, "%m/%d/%Y")
daily_data = availability[date_string]
camping_keys = [key for key in daily_data.keys() if
YellowstoneConfig.LODGING_CAMPGROUND_QUALIFIER in key]
for hotel_code in camping_keys:
hotel_data = daily_data[hotel_code]
try:
hotel_title = hotel_data[YellowstoneConfig.LODGING_RATES][
YellowstoneConfig.RATE_CODE][
YellowstoneConfig.LODGING_TITLE]
hotel_rate_mins = hotel_data[YellowstoneConfig.LODGING_RATES][
YellowstoneConfig.RATE_CODE][
YellowstoneConfig.LODGING_BASE_PRICES]
if hotel_rate_mins != {"1": 0}:
min_capacity = int(min(hotel_rate_mins.keys()))
max_capacity = int(max(hotel_rate_mins.keys()))
capacity = (min_capacity, max_capacity)
campsite = dict(
campsite_id=None,
booking_date=booking_date,
campsite_occupancy=capacity,
recreation_area=YellowstoneConfig.YELLOWSTONE_RECREATION_AREA_NAME,
recreation_area_id=YellowstoneConfig.YELLOWSTONE_RECREATION_AREA_ID,
facility_name=hotel_title.replace(
*YellowstoneConfig.YELLOWSTONE_CAMPGROUND_NAME_REPLACE),
facility_id=hotel_code)
available_campsites.append(campsite)
except (KeyError, TypeError):
_ = hotel_data[YellowstoneConfig.LODGING_ERROR_MESSAGE]
logger.info(f"\t{logging_utils.get_emoji(available_campsites)}\t"
f"{len(available_campsites)} sites found.")
return available_campsites
def _gather_campsite_specific_availability(self, available_campsites: List[dict],
month: datetime,
nights: Optional[int] = None) -> List[dict]:
"""
Given a DataFrame of campsite availability, return updated Data with details about the
actual campsites that are available (i.e Tent Size, RV Length, Etc)
Parameters
----------
available_campsites: List[dict]
List of Available Campsites as JSON objects
month: datetime
Month object
Returns
-------
List[dict]
"""
available_room_array = list()
availability_df = DataFrame(data=available_campsites)
if availability_df.empty is True:
return available_room_array
for facility_id, facility_df in availability_df.groupby(YellowstoneConfig.FACILITY_ID):
api_endpoint = self._get_api_endpoint(
url_path=YellowstoneConfig.YELLOWSTONE_CAMPSITE_AVAILABILITY,
query=None)
params = dict(date=self._ensure_current_month(month=month), limit=31)
if nights is not None:
params.update(dict(nights=nights))
campsite_data = self.make_yellowstone_request(endpoint=f"{api_endpoint}/{facility_id}",
params=params)
campsite_availability = campsite_data[YellowstoneConfig.BOOKING_AVAILABILITY]
booking_dates = campsite_availability.keys()
availabilities = self._process_daily_availability(
booking_dates=booking_dates,
campsite_availability=campsite_availability,
facility_id=facility_id)
available_room_array += availabilities
return available_room_array
@classmethod
def _process_daily_availability(cls, booking_dates: List[str],
campsite_availability: dict, facility_id: str) -> List[dict]:
"""
Process Monthly Availability
Parameters
----------
booking_dates: List[str]
List of booking dates to process
campsite_availability: dict
Campsite availability dict
facility_id: str
Identification of the Facility
Returns
-------
List[dict]
"""
daily_availabilities = list()
for booking_date_str in booking_dates:
daily_availability = campsite_availability[booking_date_str]
if daily_availability[YellowstoneConfig.FACILITY_STATUS] == \
YellowstoneConfig.FACILITY_STATUS_QUALIFIER:
available_rooms = daily_availability[YellowstoneConfig.FACILITY_ROOMS]
for room in available_rooms:
if room[YellowstoneConfig.FACILITY_AVAILABLE_QUALIFIER] > 0:
daily_availabilities.append(dict(
booking_date=booking_date_str,
facility_id=facility_id,
campsite_code=room[YellowstoneConfig.FACILITY_ROOM_CODE],
available=room[YellowstoneConfig.FACILITY_AVAILABLE_QUALIFIER],
price=room[YellowstoneConfig.FACILITY_PRICE]))
return daily_availabilities
def _get_property_information(self, available_rooms: List[dict]) -> List[dict]:
"""
Gather Information About All Campgrounds / Hotels within Yellowstone
Parameters
----------
available_rooms: List[dict]
Returns
-------
List[dict]
"""
property_info_array = list()
availability_df = | DataFrame(data=available_rooms) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 11 13:38:13 2020
@author: zhanghai
"""
'''
Input parameters: ticker,interval, test start date, test end date, model name
Output : dataframe: initial deposit, gross profit,gross loss, total net profit,profit factor,
expected payoff, absolute drawdown, maximal drawdown, relative drawdown,
profit trades%, loss trades
Entry strategy: -1: close long position and wait;
0: keep current position;
1: create
Exit strategy: reverse signal appears;
reach stop loss entry-ATR
position size control: ATR based,ATR period = 20; ATR multiplier = 1; 2% risk tolerance, nomal value is average of ATR
'''
import pandas as pd
pd.set_option('mode.chained_assignment', None)
import numpy as np
import datetime
import matplotlib.pyplot as plt
import stockstats
import sys
sys.path.append('../')
from uis.calculate_ama import calculate_ama
from data_processing.load_data import load_rawdata
class AMABackTest():
def __init__(self, etf, start_date, end_date, model_name='AMA',initial_deposit=100000, price_type='open',er_window = 9, slow_window = 20, fast_window = 4):
self.ticker = etf
self.start_date = start_date
self.end_date = end_date
self.model_name = model_name
self.init_deposit = initial_deposit
self.price_type = price_type
self.er_window = er_window
self.slow_window = slow_window
self.fast_window = fast_window
self.raw_data = load_rawdata(etf, 'weekly')
print("Self.raw",self.raw_data)
self.indicators = stockstats.StockDataFrame.retype(self.raw_data.copy())
ama, _, _ = calculate_ama(self.raw_data, self.indicators, self.price_type, self.er_window, self.slow_window, self.fast_window)
self.raw_data['ama'] = ama
self.report = pd.DataFrame(columns=['position size','total','profit'])
def predict(self,cur_date):
if self.raw_data.loc[cur_date]['open'] > self.raw_data.loc[cur_date]['ama'] and self.raw_data.loc[cur_date]['close'] > \
self.raw_data.loc[cur_date]['ama']:
signal = 1
elif self.raw_data.loc[cur_date]['open'] < self.raw_data.loc[cur_date]['ama'] and self.raw_data.loc[cur_date]['close'] < \
self.raw_data.loc[cur_date]['ama']:
signal = -1
else:
signal = 0
return signal
def up_action(self,cur_date):
if self.position == 'empty':
self.entry_price = self.raw_data['close'][cur_date]
self.entyr_atr = self.indicators['atr'].loc[cur_date]
self.position_size = int(self.total_value*0.05/self.entyr_atr)
if self.position_size*self.entry_price > self.total_value:
self.position_size = int(self.total_value/self.entry_price)
df = pd.DataFrame({'position size':self.position_size,'total':self.total_value,'profit':0},index=[cur_date])
self.report = self.report.append(df)
self.position = 'long'
else:
stop_price = self.entry_price - 0.1*self.entyr_atr
target_price = self.entry_price * 1.3
if self.raw_data['low'][cur_date] < stop_price < self.raw_data['high'][cur_date]:
#sell at stop price
profit = round((self.raw_data['high'][cur_date] - self.entry_price)*self.position_size, 2)
self.total_value += profit
df = pd.DataFrame({'position size':0,'total':self.total_value,'profit':profit},index=[cur_date])
self.report = self.report.append(df)
self.position = 'empty'
elif self.raw_data['low'][cur_date] < target_price < self.raw_data['high'][cur_date]:
#sell at target price
profit = round((self.raw_data['low'][cur_date]-self.entry_price)*self.position_size, 2)
self.total_value += profit
df = pd.DataFrame({'position size':0,'total':self.total_value,'profit':profit},index=[cur_date])
self.report = self.report.append(df)
self.position = 'empty'
else:
#hold the position
profit = round((self.raw_data['close'][cur_date]-self.entry_price)*self.position_size, 2)
current_value = self.total_value + profit
df = pd.DataFrame({'position size':self.position_size,'total':current_value,'profit':0},index=[cur_date])
self.report = self.report.append(df)
def down_action(self,cur_date):
if self.position == 'long':
#sell long position
sell_price = self.raw_data['close'][cur_date]
profit = round((sell_price-self.entry_price)*self.position_size, 2)
self.total_value += profit
df = | pd.DataFrame({'position size':0,'total':self.total_value,'profit':profit},index=[cur_date]) | pandas.DataFrame |
#!/usr/bin/env python3
from logFormat import C
from util import Cred
from typing import List
import lxml.html, lxml.cssselect, os, pandas, requests
csssel = lxml.cssselect.CSSSelector
listText = lxml.etree.XPath('text()') # [Using XPath to find text](https://lxml.de/tutorial.html#using-xpath-to-find-text)
headers = {
# [https://httpbin.org/headers]
# [How to fake and rotate User Agents using Python 3](https://www.scrapehero.com/how-to-fake-and-rotate-user-agents-using-python-3/)
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "en-US,en;q=0.9,es;q=0.8,es-419;q=0.7,fr;q=0.6,ru;q=0.5",
"Dnt": "1",
"Referer": "https://www.google.com/",
"User-Agent":os.environ['USERAGENT']
}
def eTreeGet(url:str) -> lxml.html.HtmlElement:
'''Get HTML request and return as eTree element.'''
resp = requests.get(url=url, headers=headers).content
return lxml.html.fromstring(resp)
def lastfmNeighbors(user:str=Cred.user) -> pandas.DataFrame:
'''Parse lastFM neighbors page for {user}.'''
eTree = eTreeGet(f'https://www.last.fm/user/{user}/neighbours')
users = [user.text for user in csssel('a.user-list-link')(eTree)]
artists = [[artist.text for artist in csssel('a')(user)] for user in csssel('p.user-list-shared-artists')(eTree)]
return | pandas.DataFrame({'user':users,'artistsInCommon':artists}) | pandas.DataFrame |
""" function to read data from dwd server """
from itertools import zip_longest, groupby
from pathlib import Path
from typing import List, Tuple, Optional, Union
import re
from io import BytesIO
import pandas as pd
from python_dwd.additionals.functions import retrieve_parameter_from_filename, retrieve_period_type_from_filename, \
retrieve_time_resolution_from_filename, cast_to_list
from python_dwd.additionals.helpers import create_stationdata_dtype_mapping
from python_dwd.constants.column_name_mapping import GERMAN_TO_ENGLISH_COLUMNS_MAPPING
from python_dwd.constants.metadata import NA_STRING, STATIONDATA_SEP, STATID_REGEX, H5_FORMAT, STATIONDATA_NAME
from python_dwd.constants.access_credentials import DWD_FOLDER_MAIN
from python_dwd.file_path_handling.path_handling import create_folder
def parse_dwd_data(filenames_and_files: Optional[Union[List[str], List[Tuple[str, BytesIO]]]] = None,
write_file: bool = False,
prefer_local: bool = False,
folder: str = DWD_FOLDER_MAIN,
**kwargs) -> pd.DataFrame:
"""
This function is used to read the stationdata for which the local zip link is
provided by the 'download_dwd' function. It checks the zipfile from the link
for its parameters, opens every zipfile in the list of files and reads in the
containing product file, and if there's an error or it's wanted the zipfile is
removed afterwards.
Args:
filenames_and_files: list of local stored files that should be read
write_file: if true, the raw zip file will not be deleted, default is false.
prefer_local: define if preferably data is loaded from local file
folder: the folder where data either should be written to or loaded from
**kwargs: used for alternative input, as a set of station ids, parameter, timeresolution, periodtype
Returns:
pandas.DataFrame with requested data, for different station ids the data is still put into one DataFrame
"""
# Unpack values
try:
filenames = []
files = []
for filename, file in filenames_and_files:
filenames.append(filename)
files.append(file)
except ValueError:
filenames = filenames_and_files
files = None
except TypeError:
filenames = None
files = None
finally:
filenames = cast_to_list(filenames)
files = cast_to_list(files)
try:
sample_file = filenames[0]
station_ids = [str(int(re.findall(STATID_REGEX, filename).pop(0))) for filename in filenames]
time_res = retrieve_time_resolution_from_filename(sample_file)
parameter = retrieve_parameter_from_filename(sample_file, time_resolution=time_res)
period = retrieve_period_type_from_filename(sample_file)
except (IndexError, TypeError):
try:
station_ids = cast_to_list(kwargs["station_ids"])
time_res = kwargs["time_resolution"]
parameter = kwargs["parameter"]
period = kwargs["period_type"]
except (KeyError, ValueError):
raise ValueError(f"Error: Could neither parse parameters from filename nor from kwargs (station_ids, "
f"parameter, time_resolution, period_type).")
finally:
station_ids = cast_to_list(station_ids)
data = []
for statid, group in groupby(zip_longest(station_ids, filenames, files), key=lambda x: x[0]):
request_string = f"{parameter.value}/{time_res.value}/{period.value}/{statid}"
data.append(
_parse_dwd_data(group, prefer_local, folder, write_file, request_string)
)
try:
data = pd.concat(data).reset_index(drop=True)
except ValueError:
data = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/1/26 13:10
Desc: 申万指数-申万一级、二级和三级
http://www.swsindex.com/IdxMain.aspx
https://legulegu.com/stockdata/index-composition?industryCode=851921.SI
"""
import time
import json
import pandas as pd
from akshare.utils import demjson
import requests
from bs4 import BeautifulSoup
from akshare.index.cons import sw_headers, sw_payload, sw_url
def sw_index_representation_spot() -> pd.DataFrame:
"""
申万-市场表征实时行情数据
http://www.swsindex.com/idx0120.aspx?columnid=8831
:return: 市场表征实时行情数据
:rtype: pandas.DataFrame
"""
url = "http://www.swsindex.com/handler.aspx"
params = {
"tablename": "swzs",
"key": "L1",
"p": "1",
"where": "L1 in('801001','801002','801003','801005','801300','801901','801903','801905','801250','801260','801270','801280','802613')",
"orderby": "",
"fieldlist": "L1,L2,L3,L4,L5,L6,L7,L8,L11",
"pagecount": "9",
"timed": "1632300641756",
}
r = requests.get(url, params=params)
data_json = demjson.decode(r.text)
temp_df = pd.DataFrame(data_json["root"])
temp_df.columns = ["指数代码", "指数名称", "昨收盘", "今开盘", "成交额", "最高价", "最低价", "最新价", "成交量"]
temp_df["昨收盘"] = pd.to_numeric(temp_df["昨收盘"])
temp_df["今开盘"] = pd.to_numeric(temp_df["今开盘"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"])
temp_df["最低价"] = pd.to_numeric(temp_df["最低价"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
return temp_df
def sw_index_spot() -> pd.DataFrame:
"""
申万一级行业-实时行情数据
http://www.swsindex.com/idx0120.aspx?columnid=8832
:return: 申万一级行业实时行情数据
:rtype: pandas.DataFrame
"""
url = "http://www.swsindex.com/handler.aspx"
result = []
for i in range(1, 3):
payload = sw_payload.copy()
payload.update({"p": i})
payload.update({"timed": int(time.time() * 1000)})
r = requests.post(url, headers=sw_headers, data=payload)
data = r.content.decode()
data = data.replace("'", '"')
data = json.loads(data)
result.extend(data["root"])
temp_df = pd.DataFrame(result)
temp_df["L2"] = temp_df["L2"].str.strip()
temp_df.columns = ["指数代码", "指数名称", "昨收盘", "今开盘", "成交额", "最高价", "最低价", "最新价", "成交量"]
temp_df["昨收盘"] = pd.to_numeric(temp_df["昨收盘"])
temp_df["今开盘"] = pd.to_numeric(temp_df["今开盘"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"])
temp_df["最低价"] = pd.to_numeric(temp_df["最低价"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
return temp_df
def sw_index_second_spot() -> pd.DataFrame:
"""
申万二级行业-实时行情数据
http://www.swsindex.com/idx0120.aspx?columnId=8833
:return: 申万二级行业-实时行情数据
:rtype: pandas.DataFrame
"""
result = []
for i in range(1, 6):
payload = {
"tablename": "swzs",
"key": "L1",
"p": "1",
"where": "L1 in('801011','801012','801013','801014','801015','801016','801021','801022','801023','801032','801033','801034','801035','801036','801037','801041','801051','801072','801073','801074','801075','801081','801082','801083','801084','801092','801093','801094','801101','801102','801111','801112','801123','801131','801132','801141','801142','801143','801151','801152','801153','801154','801155','801156','801161','801162','801163','801164','801171','801172','801173','801174','801175','801176','801177','801178','801181','801182','801191','801192','801193','801194','801202','801211','801212','801213','801214','801222','801223','801053','801054','801055','801076','801203','801204','801205','801711','801712','801713','801721','801722','801723','801724','801725','801731','801732','801733','801734','801741','801742','801743','801744','801751','801752','801761','801881','801017','801018')",
"orderby": "",
"fieldlist": "L1,L2,L3,L4,L5,L6,L7,L8,L11",
"pagecount": "98",
"timed": "",
}
payload.update({"p": i})
payload.update({"timed": int(time.time() * 1000)})
r = requests.post(sw_url, headers=sw_headers, data=payload)
data = r.content.decode()
data = data.replace("'", '"')
data = json.loads(data)
result.extend(data["root"])
temp_df = pd.DataFrame(result)
temp_df["L2"] = temp_df["L2"].str.strip()
temp_df.columns = ["指数代码", "指数名称", "昨收盘", "今开盘", "成交额", "最高价", "最低价", "最新价", "成交量"]
temp_df["昨收盘"] = pd.to_numeric(temp_df["昨收盘"])
temp_df["今开盘"] = pd.to_numeric(temp_df["今开盘"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"])
temp_df["最低价"] = pd.to_numeric(temp_df["最低价"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
return temp_df
def sw_index_cons(symbol: str = "801011") -> pd.DataFrame:
"""
申万指数成份信息-包括一级和二级行业都可以查询
http://www.swsindex.com/idx0210.aspx?swindexcode=801010
:param symbol: 指数代码
:type symbol: str
:return: 申万指数成份信息
:rtype: pandas.DataFrame
"""
url = f"http://www.swsindex.com/downfile.aspx?code={symbol}"
r = requests.get(url)
soup = BeautifulSoup(r.text, "html5lib")
data = []
table = soup.findAll("table")[0]
rows = table.findAll("tr")
for row in rows:
cols = row.findAll("td")
if len(cols) >= 4:
stock_code = cols[0].text
stock_name = cols[1].text
weight = cols[2].text
start_date = cols[3].text
data.append(
{
"stock_code": stock_code,
"stock_name": stock_name,
"start_date": start_date,
"weight": weight,
}
)
temp_df = pd.DataFrame(data)
temp_df["start_date"] = pd.to_datetime(temp_df["start_date"]).dt.date
temp_df["weight"] = pd.to_numeric(temp_df["weight"])
return temp_df
def sw_index_daily(
symbol: str = "801011",
start_date: str = "20191201",
end_date: str = "20201207",
) -> pd.DataFrame:
"""
申万指数一级和二级日频率行情数据
http://www.swsindex.com/idx0200.aspx?columnid=8838&type=Day
:param symbol: 申万指数
:type symbol: str
:param start_date: 开始日期
:type start_date: str
:param end_date: 结束日期
:type end_date: str
:return: 申万指数日频率行情数据
:rtype: pandas.DataFrame
"""
start_date = "-".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date = "-".join([end_date[:4], end_date[4:6], end_date[6:]])
url = "http://www.swsindex.com/excel2.aspx"
params = {
"ctable": "swindexhistory",
"where": f" swindexcode in ('{symbol}') and BargainDate >= '{start_date}' and BargainDate <= '{end_date}'",
}
r = requests.get(url, params=params)
soup = BeautifulSoup(r.text, "html5lib")
data = []
table = soup.findAll("table")[0]
rows = table.findAll("tr")
for row in rows:
cols = row.findAll("td")
if len(cols) >= 10:
symbol = cols[0].text
index_name = cols[1].text
date = cols[2].text
open_ = cols[3].text
high = cols[4].text
low = cols[5].text
close = cols[6].text
vol = cols[7].text
amount = cols[8].text
change_pct = cols[9].text
data.append(
{
"index_code": symbol.replace(",", ""),
"index_name": index_name.replace(",", ""),
"date": date.replace(",", ""),
"open": open_.replace(",", ""),
"high": high.replace(",", ""),
"low": low.replace(",", ""),
"close": close.replace(",", ""),
"vol": vol.replace(",", ""),
"amount": amount.replace(",", ""),
"change_pct": change_pct.replace(",", ""),
}
)
temp_df = pd.DataFrame(data)
temp_df["date"] = pd.to_datetime(temp_df["date"]).dt.date
temp_df["open"] = pd.to_numeric(temp_df["open"])
temp_df["high"] = pd.to_numeric(temp_df["high"])
temp_df["low"] = pd.to_numeric(temp_df["low"])
temp_df["close"] = pd.to_numeric(temp_df["close"])
temp_df["vol"] = pd.to_numeric(temp_df["vol"])
temp_df["amount"] = pd.to_numeric(temp_df["amount"])
temp_df["change_pct"] = pd.to_numeric(temp_df["change_pct"])
return temp_df
def sw_index_daily_indicator(
symbol: str = "801011",
start_date: str = "20191201",
end_date: str = "20210907",
data_type: str = "Day",
) -> pd.DataFrame:
"""
申万一级和二级行业历史行情指标
http://www.swsindex.com/idx0200.aspx?columnid=8838&type=Day
:param symbol: 申万指数
:type symbol: str
:param start_date: 开始时间
:type start_date: str
:param end_date: 结束时间
:type end_date: str
:param data_type: choice of {"Day": 日报表, "Week": 周报表}
:type data_type: str
:return: 申万指数不同频率数据
:rtype: pandas.DataFrame
"""
start_date = "-".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date = "-".join([end_date[:4], end_date[4:6], end_date[6:]])
url = "http://www.swsindex.com/excel.aspx"
params = {
"ctable": "V_Report",
"where": f" swindexcode in ('{symbol}') and BargainDate >= '{start_date}' and BargainDate <= '{end_date}' and type='{data_type}'",
}
r = requests.get(url, params=params)
soup = BeautifulSoup(r.text, "html5lib")
data = []
table = soup.findAll("table")[0]
rows = table.findAll("tr")
for row in rows:
cols = row.findAll("td")
if len(cols) >= 14:
symbol = cols[0].text
index_name = cols[1].text
date = cols[2].text
close = cols[3].text
volume = cols[4].text
chg_pct = cols[5].text
turn_rate = cols[6].text
pe = cols[7].text
pb = cols[8].text
v_wap = cols[9].text
turnover_pct = cols[10].text
float_mv = cols[11].text
avg_float_mv = cols[12].text
dividend_yield_ratio = cols[13].text
data.append(
{
"index_code": symbol,
"index_name": index_name,
"date": date,
"close": close,
"volume": volume,
"chg_pct": chg_pct,
"turn_rate": turn_rate,
"pe": pe,
"pb": pb,
"vwap": v_wap,
"float_mv": float_mv,
"avg_float_mv": avg_float_mv,
"dividend_yield_ratio": dividend_yield_ratio,
"turnover_pct": turnover_pct,
}
)
temp_df = pd.DataFrame(data)
temp_df["date"] = pd.to_datetime(temp_df["date"]).dt.date
temp_df["close"] = pd.to_numeric(temp_df["close"])
temp_df["volume"] = temp_df["volume"].apply(lambda x: x.replace(",", ""))
temp_df["volume"] = pd.to_numeric(temp_df["volume"])
temp_df["chg_pct"] = pd.to_numeric(temp_df["chg_pct"])
temp_df["turn_rate"] = pd.to_numeric(temp_df["turn_rate"])
temp_df["pe"] = pd.to_numeric(temp_df["pe"])
temp_df["pb"] = pd.to_numeric(temp_df["pb"])
temp_df["vwap"] = pd.to_numeric(temp_df["vwap"])
temp_df["float_mv"] = temp_df["float_mv"].apply(lambda x: x.replace(",", ""))
temp_df["float_mv"] = pd.to_numeric(
temp_df["float_mv"],
)
temp_df["avg_float_mv"] = temp_df["avg_float_mv"].apply(
lambda x: x.replace(",", "")
)
temp_df["avg_float_mv"] = pd.to_numeric(temp_df["avg_float_mv"])
temp_df["dividend_yield_ratio"] = pd.to_numeric(temp_df["dividend_yield_ratio"])
temp_df["turnover_pct"] = pd.to_numeric(temp_df["turnover_pct"])
return temp_df
def sw_index_third_info() -> pd.DataFrame:
"""
乐咕乐股-申万三级-分类
https://legulegu.com/stockdata/sw-industry-overview#level1
:return: 分类
:rtype: pandas.DataFrame
"""
url = "https://legulegu.com/stockdata/sw-industry-overview"
r = requests.get(url)
soup = BeautifulSoup(r.text, "lxml")
code_raw = soup.find("div", attrs={"id": "level3Items"}).find_all(
"div", attrs={"class": "lg-industries-item-chinese-title"}
)
name_raw = soup.find("div", attrs={"id": "level3Items"}).find_all(
"div", attrs={"class": "lg-industries-item-number"}
)
value_raw = soup.find("div", attrs={"id": "level3Items"}).find_all(
"div", attrs={"class": "lg-sw-industries-item-value"}
)
code = [item.get_text() for item in code_raw]
name = [item.get_text().split("(")[0] for item in name_raw]
num = [item.get_text().split("(")[1].split(")")[0] for item in name_raw]
num_1 = [
item.find_all("span", attrs={"class": "value"})[0].get_text().strip()
for item in value_raw
]
num_2 = [
item.find_all("span", attrs={"class": "value"})[1].get_text().strip()
for item in value_raw
]
num_3 = [
item.find_all("span", attrs={"class": "value"})[2].get_text().strip()
for item in value_raw
]
num_4 = [
item.find_all("span", attrs={"class": "value"})[3].get_text().strip()
for item in value_raw
]
temp_df = pd.DataFrame([code, name, num, num_1, num_2, num_3, num_4]).T
temp_df.columns = [
"行业代码",
"行业名称",
"成份个数",
"静态市盈率",
"TTM(滚动)市盈率",
"市净率",
"静态股息率",
]
temp_df["成份个数"] = pd.to_numeric(temp_df["成份个数"])
temp_df["静态市盈率"] = pd.to_numeric(temp_df["静态市盈率"])
temp_df["TTM(滚动)市盈率"] = pd.to_numeric(temp_df["TTM(滚动)市盈率"])
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"])
temp_df["静态股息率"] = pd.to_numeric(temp_df["静态股息率"])
return temp_df
def sw_index_third_cons(symbol: str = "851921.SI") -> pd.DataFrame:
"""
乐咕乐股-申万三级-行业成份
https://legulegu.com/stockdata/index-composition?industryCode=851921.SI
:param symbol: 三级行业的行业代码
:type symbol: str
:return: 行业成份
:rtype: pandas.DataFrame
"""
url = f"https://legulegu.com/stockdata/index-composition?industryCode={symbol}"
temp_df = pd.read_html(url)[0]
temp_df.columns = [
"序号",
"股票代码",
"股票简称",
"纳入时间",
"申万1级",
"申万2级",
"申万3级",
"价格",
"市盈率",
"市盈率ttm",
"市净率",
"股息率",
"市值",
]
temp_df["价格"] = pd.to_numeric(temp_df["价格"], errors="coerce")
temp_df["市盈率"] = pd.to_numeric(temp_df["市盈率"], errors="coerce")
temp_df["市盈率ttm"] = pd.to_numeric(temp_df["市盈率ttm"], errors="coerce")
temp_df["市净率"] = pd.to_ | numeric(temp_df["市净率"], errors="coerce") | pandas.to_numeric |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import csv
import pandas as pd
# In[2]:
df = | pd.read_csv("C:\\Users\\user\\Downloads\\moreno_highschool\\out.moreno_highschool_highschool", sep=" ", header=None, skiprows=2, names=["ndidfr", "ndidto", "weight"]) | pandas.read_csv |
########
# LocusExtractor
# Created by <NAME> for the Meningitis lab in the CDC, under contract with IHRC. Inc.
# Version 0.8, 20 Mar 2015
#
# The organization of this script is horrible. Sorry. - ACR
#pylint: disable=global-statement, broad-except
script_version = 1.5
script_subversion = 7 ##Added notes about composite coverage
import os
import sys
import re
import pandas as pd
import numpy as np
import utilities
import seq_utilities
import genomeOrganizer ##Note. This should be replaced with NGS_data_utilities at some point.
from BLASThelpers import BLASTheaders as bh
import BLASThelpers
from SequenceExporter import SequenceExporter
import AmpliconExtractor
# import SRST2_controller
from AlleleReferenceManager import AlleleReferenceManager, referenceSubdirectory
from LookupTableManager import LookupTableManager
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
from Bio.Blast.Applications import NcbiblastnCommandline, NcbitblastnCommandline
from Bio import SeqIO
from shlex import quote as sq
import shutil
import getpass
import time
import traceback
_debug = False
# import traceback
import warnings
# import sys
def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
print("Start Warning:")
traceback.print_stack()
# log = file if hasattr(file,'write') else sys.stderr
print(warnings.formatwarning(message, category, filename, lineno, line))
print("End warning.")
warnings.showwarning = warn_with_traceback
pep_messages = {
'yes_locus': 'Peptide not found even though DNA locus was identified',
'no_locus' : 'Peptide not found; locus not present',
'stop' : '-AfterStopCodon',
'out' : '-OutOfFrame'
}
notFound = 'Not found'
warn_edge = '-AtEdge'
warn_frameshift = '-Frameshift'
warn_insertion = '-Insertion'
warn_disruption = '-Disruption'
key_additional = 'Additional'
multiple = 'multiple'
def vprint(text):
if _debug:
print(text)
### Similarity requirement for identifying locus by imperfect match to known allele: don't want to hit homologs. I have no good way of setting this
####abcZ has two alleles in the reference file with < 75% identity
initial_table_columns = ['Filename','Unique_ID','Analysis_Version','Analysis_User','Analysis_Time','Lab_ID']
_min_identity = 0.7
_min_coverage = 0.8
_warning_distance_edge = 1000
## set format for BLAST tabular output
_BLASTheaderList = ['qseqid','sseqid','length','qcovhsp','qstart','qend','qlen','sstart','send','slen','qcovs',
'evalue','bitscore','pident','nident','mismatch','gapopen','gaps','qframe','sframe','sstrand','positive']
# _outfmt_str = '\'{}\''.format(' '.join(['6']+_BLASTheaderList))
# _outfmt_head = [bh[h] for h in _BLASTheaderList]
_outfmt_str, _outfmt_head = BLASThelpers.BLASTtableCommandAndHeaders(_BLASTheaderList)
LE_version='%(prog)s {}.{}'.format(script_version,script_subversion)
### other global variables
# _genes_to_query = set()
####Start organizing the files and reading in all the settings
class PrimaryExporter:
MLST_headers = ['ST','clonal_complex']
##Setup export file; define header that are independent of the allele reference files
headers = {}
headers['ID'] = 'Lab_ID'
headers['Nm_ST'] = 'Nm_MLST_ST'
headers['Nm_clonal_complex'] = 'Nm_MLST_cc'
headers['Hi_ST'] = 'Hi_MLST_ST'
headers['PorA_type'] = 'PorA_type'
def __init__(self,parent,export_template_file,export_reference_file,lookupDir):
##TODO: these other variables can be drawn from the parent
##Location for writing output and intermediate files
self.reference_manager = parent.reference_manager ## the LocusExtractor object
self.lookupDir = lookupDir
#Get headers
if os.path.isfile(export_template_file):
self.export_template = pd.read_csv(export_template_file,header=0)
else:
raise IOError("Need to supply an export table file: {}".format(export_template_file))
#Merge with list of files in genome_frame -- use filename as index (unique identifier)
if parent.genomeFrame is not None:
self.allele_table = parent.genomeFrame[initial_table_columns].copy()
# self.export_table = pd.merge(self.allele_table,export_template,how='left') #inclusive of all columns and rows
else:
##Record all alleles, separate from standardized output
self.allele_table = pd.DataFrame()
# self.export_table = export_template
#Map genes to headers
if os.path.isfile(export_reference_file):
with open(export_reference_file) as headers_in:
for line in headers_in: #should be two items; tab delimited
if not re.match(r'\s*#',line): #comment line
fields = line.split()
self.headers[fields[0]] = fields[1]
if len(fields) > 2:
print("Warning: A line in {} has more than two items in it: {}".format(export_reference_file,line))
else:
raise IOError("Need to supply a key linking genes to export table fields: {}".format(export_reference_file))
#Confirm consistancy of headers and the reference file: There may be fields in the table that are not connected to anything
for (value, header) in self.headers.items():
if header not in self.export_template.columns:
print("Warning: cannot find header for {}. Should be {}".format(value,header)) ## ToDo: guess at header and change to warning
#MLST
self.MLST_schemes = dict()
def save_lookup(self,filename,text_dir):
##Make a dict of data frames -- one for each gene with lookup table. Return them so they are accessible to the mol sheet
## Load lookup tables
if os.path.isdir(self.lookupDir):
lookupTables = LookupTableManager(self.lookupDir,set(self.reference_manager.getLoci()))
###Create blank tables
lookup_tables_export = {x : pd.DataFrame() for x in lookupTables.lookupGeneList()}
##Iterate over each genome
for _, row in self.allele_table.iterrows():
for gene in lookupTables.lookupGeneList():
assert gene in lookup_tables_export, 'lookup tables and export must be synchronized'
##First, establish identifier
export_series = pd.Series({'Filename':row['Filename'],'Unique_ID':row['Unique_ID'],'Lab_ID':row['Lab_ID']})
allele = row[gene]
export_series = export_series.append(lookupTables.lookupRow(gene,allele))
lookup_tables_export[gene] = lookup_tables_export[gene].append(export_series,ignore_index=True)
##save to directory of csv
csv_dir = os.path.join(text_dir,os.path.splitext(filename)[0])
utilities.safeMakeDir(csv_dir)
for gene, table in lookup_tables_export.items():
csv_file = os.path.join(csv_dir,'{}_lookup.csv'.format(gene))
try:
utilities.safeOverwriteCSV(csv_file,table,index=False)
except IOError:
print("Failed to export lookup data to "+csv_file)
##save to multi-tab excel
xlsx_file = utilities.setExt(filename,'.xlsx')
try:
writer = pd.ExcelWriter(xlsx_file)
for gene, table in lookup_tables_export.items():
table.to_excel(writer,gene,index=False)
writer.save()
except Exception as e:
print("Unable to save Excel file {}. Use the CSV version. Warning: Excel will try to convert some values to dates unless you tell it not to during import.".format(filename))
print(format(e))
print("at line:")
traceback.print_tb(sys.exc_info()[2])
return lookup_tables_export
##This makes changes to the allele table that will not be saved if save_allele happens first
def save_mol(self,filename,text_dir):
## Load lookup tables
if os.path.isdir(self.lookupDir):
lookupTables = LookupTableManager(self.lookupDir,set(self.reference_manager.getLoci()))
##Get some identifiers for writing to table
ST_head = self.headers['Nm_ST']
CC_head = self.headers['Nm_clonal_complex']
###Create blank table
export_table = pd.DataFrame()
##Iterate over each genome
for i, row in self.allele_table.iterrows():
try:
##First, establish identifier
export_series = row[initial_table_columns].copy()
# export_series = pd.Series({'Filename':row['Filename'],'Unique_ID':row['Unique_ID'],'Lab_ID':row['Lab_ID']})
##Then transfer all the relevant genes, translating the header column
for header, value in row.iteritems():
if header in self.headers:
export_series[self.headers[header]] = value
##Record MLST information
ST_ID, CC_ID = self.findST('Nm',row)
self.allele_table.loc[i,ST_head] = ST_ID
if ST_ID.startswith('New'):
ST_ID = 'New'
export_series.loc[ST_head] = ST_ID
export_series.loc[CC_head] = CC_ID
##Record Hi MLST
Hi_ST, _ = self.findST('Hi',row)
self.allele_table.loc[i,'Hi_ST'] = Hi_ST
if Hi_ST.startswith('New'):
Hi_ST = 'New'
export_series.loc[self.headers['Hi_ST']] = Hi_ST
## Make PorA nomenclature
PorA_ID = self.exportPorAType(row)
export_series['PorA_type'] = PorA_ID
##Fill in antigen nomenclature from lookup table
# simple transfers
for gene in lookupTables.lookupGeneList():
allele = row[gene]
#For export table
for dest, source in lookupTables.transferHeaders(gene):
if pd.notnull(allele) and allele.isdigit():
value = lookupTables.lookup(gene,allele,source)
else:
value = 'Allele not identified for {}. Cannot lookup {}'.format(gene,source)
export_series[dest] = value
# #For the massive dump file
# for header in lookupTables.transferRawData(gene):
# if header.lower().startswith(gene.lower()):
# allele_header = header
# else:
# allele_header = gene + '_' + header
# if pd.notnull(allele) and allele.isdigit():
# value = lookupTables.lookup(gene,allele,header)
# if value == '':
# value = 'No lookup value'
# else:
# value = "Allele not identified"
# self.allele_table.loc[idx,allele_header] = value
#### Do validations and recodings for each gene sucessively
fHbp = 'fHbp'
fHbp_allele = row[fHbp]
try:
if str(fHbp_allele).isdigit():
##Validation of peptide
pep_lookup = lookupTables.lookup(fHbp,fHbp_allele,'peptide_id')
if pep_lookup.startswith('no peptide_id value'):#blank ##See LookupTableManager.lookup
pep_lookup = lookupTables.lookup(fHbp,fHbp_allele,'flags')
if pep_lookup.startswith('no flags value'): ##See LookupTableManager.lookup
pep_lookup = 'no information'
if not isinstance(pep_lookup,str): ##This preceeds an exception
print("Error (not fatal): pep_lookup is a {} valued at {}".format(type(pep_lookup),pep_lookup))
FHbp = 'FHbp_pep'
pep_BLAST = row[FHbp]
if pd.isnull(pep_BLAST) or not pep_BLAST.isdigit():
if not pep_lookup.isdigit(): #Lookup table provided information that BLAST had no access to
pep_BLAST += ';' + pep_lookup
export_series[self.headers[FHbp]] = pep_BLAST
pep_lookup = pep_BLAST ##Just so the next step doesn't fail; already added lookup to BLAST
if pep_lookup != pep_BLAST:
print("Warning: Conflicting fHbp peptide assignments...")
print("Sequence search: {}".format(pep_BLAST))
print("Allele lookup: {}".format(pep_lookup))
print("Lookup tables will be queried using the sequence search result")
except AttributeError:
print(str(fHbp_allele))
## Add to table
export_series.loc['NhbA_Protein_subvariant_Novartis'] = Nov_format(row['NhbA_pep'])
## nadA
nadA_allele = row['nadA']
NadA_pep = row['NadA_pep']
Nov_nadA = 'Error. See alleles output for details. Contact developer.'
if pd.notnull(NadA_pep) and NadA_pep.isdigit():
NadA_var = lookupTables.lookup('NadA_pep',NadA_pep,'NadA_variant')
if not (NadA_var.startswith('no ') or NadA_var.startswith('allele ')): ##See LookupTableManager.lookup
Nov_nadA = '{}.{}'.format(NadA_var,NadA_pep)
else:
Nov_nadA = 'peptide {}; family unknown'.format(NadA_pep)
if pd.notnull(nadA_allele) and nadA_allele.isdigit(): ##Just check for consistancy
NadA_pep_lookup = lookupTables.lookup('nadA',nadA_allele,'NadA_peptide')
if (NadA_pep_lookup.isdigit()) and (int(NadA_pep) != int(NadA_pep_lookup)):
print("Warning: Conflicting nadA peptide assignments...")
print("Sequence search: {}".format(NadA_pep))
print("Allele lookup: {}".format(NadA_pep_lookup))
print("Lookup tables will be queried using the peptide sequence search result, rather than the DNA allele")
elif pd.notnull(nadA_allele) and nadA_allele.isdigit(): #This really should not happen
print("NadA peptide is not available for {}, but DNA allele is. The peptide variant value will be queried using the nucleotide sequence search result".format(row['Unique_ID']))
NadA_var = lookupTables.lookup('nadA',nadA_allele,'NadA_variant')
NadA_flag = lookupTables.lookup('nadA',nadA_allele,'flags')
if NadA_var.startswith('no '): ##See LookupTableManager.lookup
if not NadA_flag.startswith('no '): ##See LookupTableManager.lookup
Nov_nadA = '{}'.format(NadA_flag)
else:
if NadA_flag.startswith('no '):##See LookupTableManager.lookup
Nov_nadA = '{} (peptide ID unavailable)'.format(NadA_var)
else:
Nov_nadA = '{} {}'.format(NadA_var,NadA_flag)
elif NadA_pep in [pep_messages['yes_locus'],pep_messages['no_locus']]: ##Presence of DNA locus not really relevant, since there is often amplification of the deletion
Nov_nadA = notFound
elif warn_edge in NadA_pep:
Nov_nadA = 'Incomplete ORF. Possible transposon insertion.'
###TODO: see warn_edge source for how to provide more info
elif pd.notnull(NadA_pep):
Nov_nadA = NadA_pep
export_series.loc['NadA_Protein_subvariant_Novartis'] = Nov_nadA
# export_table = export_table.append(export_series,ignore_index=True)
except Exception as e:
print("Failed to format molecular data for {}".format(row['Unique_ID']))
print(str(e))
print("at line:")
traceback.print_tb(sys.exc_info()[2])
export_table = export_table.append(export_series,ignore_index=True)
##Add all columns from the template, and re-order according to template while avoiding any duplications
export_table = export_table.append(self.export_template)
export_table_headers = [x for x in initial_table_columns if x not in self.export_template.columns.tolist()] + self.export_template.columns.tolist()
export_table = export_table[export_table_headers]
assert self.allele_table['Unique_ID'].tolist() == export_table['Unique_ID'].tolist(),"Export tables are out of order"
csv_base = filename if not isinstance(text_dir,str) else os.path.join(text_dir,os.path.basename(filename))
csv_file = utilities.setExt(csv_base,'.csv')
utilities.safeOverwriteCSV(csv_file,export_table,index=False)
xlsx_file = utilities.setExt(filename,'.xlsx')
if not utilities.safeOverwriteTable(xlsx_file,export_table,'excel',index=False):
print("Unable to save Excel file {}. Use the CSV version. Warning: Excel will try to convert some values to dates unless you tell it not to during import.".format(filename))
json_file = utilities.setExt(filename,'.json')
utilities.safeOverwriteTable(json_file,export_table,'json')
def save_alleles(self,filename,column_order=None,text_dir=None):
if column_order is None:
column_order = []
new_column_order = [x for x in column_order if x in self.allele_table.columns]
if len(new_column_order) != len(column_order):
new_set = set(new_column_order)
old_set = set(column_order)
print("#################")
print("Warning: allele data table is missing the following columns:")
print("\t"+",".join(old_set.difference(new_set)))
### Report if any of the alleles have an internal stop codon
for idx, row in self.allele_table.iterrows(): #for each genome
stop_list = [] # record internal stops
for item in row.index:#evaluate each gene,
allele_frame = self.reference_manager.getAlleleDataFrame(item) #Returns None if not existant
if allele_frame is not None:
allele_ID = row.loc[item]
if isinstance(allele_ID, str) and allele_ID.isdigit():
try:
if allele_frame.loc[allele_ID,'InternalStop']: # == 'True'
stop_list.append(item)
except KeyError:
print("Allele {} is not in lookup list. Contact developer.".format(allele_ID))
self.allele_table.loc[idx,'InternalStopsInKnownAlleles'] = ','.join(stop_list) if len(stop_list) > 0 else 'No internal stops'
### Need to be careful to do save_mol first since it updates the allele_table by adding information from lookup tables
self.allele_table['nhbA_Novartis_format'] = self.allele_table['NhbA_pep'].apply(Nov_format)
## Arrange columns according to parameter -- any unspecified columns get sorted alphabetically
cols = [x for x in initial_table_columns if x not in column_order] + [c.strip() for c in column_order if c in self.allele_table.columns]
remainder = [c.strip() for c in self.allele_table.columns.tolist() if c not in cols]
remainder.sort(key=lambda s: s.lower())
cols += remainder
try:
if text_dir is None:
csv_file = filename
else:
csv_file = os.path.join(text_dir,os.path.basename(filename))
utilities.safeOverwriteCSV(csv_file,self.allele_table[cols],index=False)
except IOError:
print("Unable to save final allele table to CSV.")
xlsx_file = utilities.setExt(filename,'.xlsx')
try:
utilities.safeOverwriteTable(xlsx_file,self.allele_table[cols],'excel',index=False)
except IOError:
print("Unable to save Excel file {}. Use the CSV version. Warning: Excel will try to convert some values to dates unless you tell it not to during import.".format(filename))
# def save_temp_alleles(self,filename,column_order=None):
# try:
# utilities.safeOverwriteCSV(filename,self.allele_table[cols],index=False)
# except IOError:
# print("Unable to save temporary allele table to CSV.")
#~ ##Designate an index for this isolate. The only unique idetifier is the filename itself
#~ def establishIndex(filename):
#~
#~
def loadMLSTscheme(self,Identifier,MLST_profile_file):
assert os.path.isfile(MLST_profile_file)
profile_table = pd.read_table(MLST_profile_file,header=0,dtype=str)
cc_list = profile_table['clonal_complex'].unique()
vprint("Loading profile table for {}. Has {} profiles in {} clonal complexes".format(Identifier,len(profile_table),len(cc_list)))
MLST_genes = []
locus_list = self.reference_manager.getLoci()
for header in profile_table.columns:
SpeciesLocus = '{}_{}'.format(Identifier,header)
if (SpeciesLocus in locus_list) or (header in locus_list):
MLST_genes.append(header)
elif header not in self.MLST_headers:
print("Notice: MLST header \"{}\" is not in the list of genes to search for.".format(header))
this_scheme = dict()
this_scheme['genes'] = MLST_genes
this_scheme['profile_table'] = profile_table
self.MLST_schemes[Identifier] = this_scheme
def exportAlleleID(self,genomeName,genomeFile, gene,allele_ID):
if allele_ID is None:
allele_ID = notFound
self.addItemToAlleles(genomeName,genomeFile,gene,allele_ID)
def addItemToAlleles(self,genomeName,genomeFile,header,value):
i = self.getIndex(genomeName,genomeFile)
self.allele_table.loc[i,header] = value
def noteDuplicateRegion(self,genomeName,genomeFile,gene):
dups = 'Duplicates'
if dups not in self.allele_table:
self.allele_table[dups] = ''
i = self.getIndex(genomeName,genomeFile)
n = self.allele_table.loc[i,dups]
if isinstance(n,str) and len(n) > 0:
n += ' {}'.format(gene)
else:
n = gene
self.allele_table.loc[i,dups] = n
##Pass * to match all
def getIndex(self,genomeName,genomeFile):
at = self.allele_table
if genomeName == '*':
nameMatch = at['Unique_ID'] != ''
else:
nameMatch = at['Unique_ID'] == genomeName
if genomeFile == '*':
fileMatch = at['Filename'] != ''
else:
fileMatch = at['Filename'] == genomeFile
match = at[ nameMatch & fileMatch]
idx = match.index.tolist()
if len(idx) != 1:
print("Error finding genome: {}".format(genomeName))
print("\tMatching names: {}".format(sum(nameMatch)))
print("\tMatching files: {}".format(sum(fileMatch)))
raise IOError("Failure to find index of genome")
result = idx[0]
# assert self.export_table.ix[result,'Lab_ID'] == genomeName, 'Genome name: {}'.format(genomeName)
# assert self.export_table.ix[result,'Filename'] == genomeFile, 'Genome file: {}'.format(genomeFile)
return result
## Finds ST if it is the profile table and genes have already been identified
## Identifier is species
## allele_series is the row of allele data
### New STs will be added to the profile table with the "uniqueID" from this batch.
def findST(self,Identifier,allele_series):
##get index for genome
# file_index = self.getIndex(genomeInfo['name'],genomeInfo['original'])
this_scheme = self.MLST_schemes[Identifier]
MLST_genes = this_scheme['genes']
## Filter profiles based on results for each gene
profile_table = this_scheme['profile_table']
profile_filtered = profile_table.copy()
#Search for each locus
thisST = {'clonal_complex':None} ##Failure to set this results in a spurious match between thisSt and table STs with no Clonal Complex. I don't know why
###TODO: figure out why omitting "clonal_complex" results in matches
CC_ID = 'None identified'
try:
for gene in MLST_genes:
SpeciesLocus = '{}_{}'.format(Identifier,gene) ##Need this because both Hi and Nm use the same gene name
if SpeciesLocus in allele_series:
allele_ID = allele_series.loc[SpeciesLocus]
elif gene in allele_series:
allele_ID = allele_series.loc[gene]
else:
allele_ID = None
print("Error: MLST scheme for {} does not include locus {}".format(Identifier,gene))
##Narrow down potential profiles
if isinstance(allele_ID,int):
filter_ID = str(allele_ID)
elif isinstance(allele_ID,str):
if allele_ID.isdigit():
filter_ID = allele_ID
elif allele_ID == notFound:
filter_ID = '0'
elif allele_ID.startswith('New'):
filter_ID = '-1'
else:
raise ValueError('Illegal MLST allele: {}'.format(allele_ID))
else:
raise ValueError('Illegal MLST allele: {}'.format(allele_ID))
# if not pd.isnull(allele_ID) and allele_ID not in ['New','Not found']:
# filter_ID = str(allele_ID)
# else:
# filter_ID = '0' # Value of 0 should eliminate all STs
##Assure that there is no confusion from assigning "None" to 0
# if (filter_ID < 1 and allele_ID is not None):
# print("Warning: Invalid allele value for gene {} in genome {}: {}".format(gene,genomeInfo['name'],allele_ID))
# profile_filtered = profile_filtered[profile_filtered[gene] == filter_ID]
thisST[gene] = filter_ID
except ValueError:
ST_ID = 'Error: Illegal MLST allele'
else:
##Check for valid ST
result_set = set()
for g in MLST_genes:
result_set.add(thisST[g])
null_set = set('0')
if result_set == null_set:
ST_ID = 'Not applicable'
elif result_set > null_set:
ST_ID = 'Error: Not all loci present'
else:
ST_ID = 'New'
profile_filtered['matches'] = np.sum(profile_filtered == pd.Series(thisST),axis=1) # pylint: disable=no-member
profile_filtered = profile_filtered.sort_values(by=['matches'],ascending=False)
best_profile = profile_filtered.iloc[0]
most_matches = best_profile['matches']
profile_filtered = profile_filtered[profile_filtered['matches'] >= most_matches]
count = len(profile_filtered.index)
if most_matches == len(MLST_genes):
ST_ID = best_profile.loc['ST']
CC_ID = best_profile.loc['clonal_complex']
if count > 1:
raise Exception("Corruption of MLST profile table -- multiple ST with same alleles")
elif ST_ID == 'New': ##No match
print('Unable to find {} MLST for {}. Best match is {} alleles'.format(Identifier,allele_series['Unique_ID'],most_matches))
max_mismatch = 3
max_in_list = 10
mismatch = len(MLST_genes) - most_matches
in_list = len(profile_filtered)
if (mismatch <= max_mismatch) or (in_list < max_in_list):
for _, row in profile_filtered.iterrows():
try:
this_ST = row.loc['ST']
this_CC = row.loc['clonal_complex']
except Exception:
this_ST = None
this_CC = None
if pd.isnull(this_CC):
this_CC = 'unassigned'
print('\tClose match to {} ST {} from CC {}'.format(Identifier,this_ST,this_CC))
else:
print("There are {} {} STs with {} mismatches. Not printing list".format(in_list,Identifier,mismatch))
unique_CC = profile_filtered['clonal_complex'].unique()
if most_matches >= len(MLST_genes) - max_mismatch:
unique_goodCC = [x for x in unique_CC if pd.notnull(x)]
if len(unique_goodCC) == 1:
if pd.notnull(unique_goodCC[0]): ##TODO: find better way to deal with MLST schemes lacking clonal complexes
print('Single CC among best matches: {}'.format(unique_goodCC[0]))
#CC_ID = "Most similar to {} ({}/{} matches)".format(unique_goodCC[0],most_matches,len(MLST_genes))
CC_ID = 'N/A'
##Record this ST in the table if all alleles are defined
defined = True
for gene in MLST_genes:
if not str(thisST[gene]).isdigit(): ##CC and ST are givn non-numeric values
defined = False
if defined:
ST_ID = "New_ST-"+allele_series['Unique_ID']
thisST['ST'] = ST_ID
thisST['clonal_complex'] = CC_ID
this_scheme['profile_table'] = profile_table.append(thisST,ignore_index=True)
elif ST_ID.startswith('Error'):
print("Error identifying {} ST. Some loci are present, others absent".format(Identifier))
return ST_ID, CC_ID
# ST_head = self.headers['ST']
# CC_head = self.headers['clonal_complex']
# self.export_table.loc[file_index,ST_head] = ST_ID
# self.export_table.loc[file_index,CC_head] = CC_ID
def exportPorAType(self,allele_series):
PorA_ID = None
if 'porA' in allele_series.index:
#find index - allele_table and export_table share indicies
# i = self.getIndex(genomeName,genomeFile)
allele_ID = allele_series.loc['porA']
##Note: whether PorA is out of frame is not relevant for typing, but it is for vaccine interpretation
##TODO: confirm if this "is null" test is still relevant.
VR1_ID = allele_series.loc['PorA_VR1_pep']
VR2_ID = allele_series.loc['PorA_VR2_pep']
if (VR1_ID == notFound) or (VR2_ID == notFound) or VR1_ID.startswith('New') or VR2_ID.startswith('New'):
if (allele_ID == notFound):
PorA_ID = notFound
elif allele_ID.startswith('New'):
PorA_ID = 'New'
print("Need to extract variable regions from PorA allele")
else:
PorA_ID = 'New'
print("Error: PorA allele was found in reference file but VR sequences were not. Contact developer.")
elif (VR1_ID == VR2_ID) and VR1_ID in pep_messages.values():
PorA_ID = VR1_ID ##This is only if both VRs are missing
else:
PorA_ID = "P1.{},{}".format(VR1_ID,VR2_ID)
else:
print("Error:PorA has not been analyzed. Notify developer")
return PorA_ID
#Takes a string representation of an integer, and gives it the Novartis format for nhbA allels
def Nov_format(allele):
if str(allele).isdigit():
out = "p{:04}".format(int(allele))
else:
out = allele
return out
def ConditionalInsert(pid,tempname):
return utilities.appendToFilename(tempname, "_"+pid) if pid else tempname
class LocusExtractor:
def __init__(self,gd,args,ref_man,setting_dir,output_dir):
self.outDir = output_dir
self.textDir = os.path.join(self.outDir,'Results_text')
utilities.safeMakeDir(self.textDir)
self.settingDir = setting_dir
self.blast_tempDir = os.path.join(self.outDir,'blast_temp_files/')## clean this up at the end
utilities.safeMakeDir(self.blast_tempDir)
self.DNA_dump_file = ConditionalInsert(args.projectID,os.path.join(self.outDir,'mismatched_DNA.fasta'))
self.pep_dump_file = ConditionalInsert(args.projectID,os.path.join(self.outDir,'mismatched_peptides.fasta'))
self.primary_export_file = ConditionalInsert(args.projectID,os.path.join(self.outDir,'molecular_data.csv'))
self.lookup_export_file = ConditionalInsert(args.projectID,os.path.join(self.outDir,'lookup_data.csv'))
self.allele_export_file = ConditionalInsert(args.projectID,os.path.join(self.outDir,'allele_data.csv'))
self.sequence_export_file = ConditionalInsert(args.projectID,os.path.join(self.outDir,'sequence_data.csv'))
self.blast_summary_file = ConditionalInsert(args.projectID,os.path.join(self.outDir,'blast_summaries.tab'))
self.export_table_file = os.path.join(self.settingDir,'molecular_export_template.csv')
self.export_reference_file = os.path.join(self.settingDir,"locus2molecular_export_headers.txt")
self.export_sequences_file = os.path.join(self.settingDir,'sequence_export_template.csv')
self.sequence_export_list = os.path.join(self.settingDir,"locus4sequence_export.txt")
self.primer_file = os.path.join(self.settingDir,"locus2primers.csv")
self.genomeFrame = gd
self.genomeFrame['Analysis_Version'] = 'LocusExtractor.py {}.{}'.format(script_version,script_subversion)
self.genomeFrame['Analysis_User'] = getpass.getuser()
self.genomeFrame['Analysis_Time'] = time.ctime()#strftime("%c")
self.reference_manager = ref_man
self.export_best_match = args.export_best_match
self.preserve_BLAST_hits = args.preserve_BLAST_hits
# if args.is_reads:
# print("Not implemented. Not planning on it...")
# # # gd = convertPairedReadsToSingles(gd)
# # for _,row in gd.iterrows():
# # queryOptions, _ = SRST2_controller.constructQueryOptions(row,self.outDir)
# # ref_list = self.reference_manager.getAllRefFiles()
# # full_command = SRST2_controller.constructFullCommand(queryOptions, ref_list)
# # SRST2_controller.run_SRST2(full_command)
# # # gd = S RST2_controller.convertPairedReadsToSingles(readFrame)
# else:
self.loadSettings(args)
for _,row in gd.iterrows():
try:
filename = row.loc['Filename']
uniqueid = row.loc['Unique_ID']
except KeyError:
print("Error")
else:
if (not isinstance(filename,str)) or (not os.path.exists(filename)):
print("Error: cannot locate file for {}: {}".format(uniqueid,filename))
else:
try:
(file_format,compressed) = utilities.guessFileFormat(filename)
self.evaluateGenome(uniqueid,filename,file_format,compressed)
except Exception as e:
print("Failed to evaluate genome file {}. Contact developer".format(filename))
print(format(e))
print("at line:")
traceback.print_tb(sys.exc_info()[2])
# if _debug:
# raise
try:
self.primary_exporter.allele_table.to_csv(os.path.join(self.outDir,'incremental_alleles.tab'),sep='\t')
except IOError:
print("Failed to save incremental allele table.")
self.finish()
def loadSettings(self,args):
##This is the main setting manager (reference files for alleles)
print('## Begin Loading Settings ##')
self.reference_manager.loadReferences(utilities.safeMakeOutputFolder(os.path.join(self.outDir,'References')))
#Check some files
self.DNA_dump_file = utilities.checkForOverwrite(self.DNA_dump_file)
self.pep_dump_file = utilities.checkForOverwrite(self.pep_dump_file)
## Load template file, bind alleles to fields, and load MLST information
lookupDir = os.path.join(self.settingDir,'lookupTables/')
self.primary_exporter = PrimaryExporter(self,self.export_table_file,self.export_reference_file,lookupDir)
for identifier, filename in self.reference_manager.getMLSTschemes().items():
assert os.path.isfile(filename), "MLST scheme {} does not have a file: {}".format(identifier,filename)
self.primary_exporter.loadMLSTscheme(identifier,filename)
##Load the other export file template, along with gene bindings
self.sequence_exporter = SequenceExporter(self.export_sequences_file,self.sequence_export_list,self.genomeFrame)
##Read in primer file
self.amp_extractor = AmpliconExtractor.AmpliconExtractor(self.primer_file,working_dir=self.outDir,generate_output=self.preserve_BLAST_hits)
self.blast_summary_frame = pd.DataFrame()
print('## Finished Loading Settings ##')
print('')
#####Start processing the genome
#####Functions#######3
##Returns a Pandas Series object representing the allele with the best hit from BLAST
#Gene: name
#Query_file: FASTA file with named alleles of locus being searched for
#genome_info: dict from AmpliconExtractor.setupGenomeForBlastBasedExtraction; includes 'db' for BLAST db to search against
##DNA searches try to do full length match of a similar, long sequence (i.e. megablast, with high reward for matches)
##Protein searches try to do exact match of short sequence (i.e. tblastn)
###Issue: this has no way to report multiple hits to the allele table. Should it return None? A list?
def bestAlleleByBlast(self,gene,query_file,genome_info,is_DNA=True, is_subregion=False):
if not isinstance(query_file,str) and not os.path.isfile(query_file):
raise ValueError("Invalid query filename: {}".format(query_file))
db_name = genome_info['db']
if not isinstance(db_name,str):
raise ValueError("Invalid BLAST database name: {}".format(db_name))
db_base = os.path.basename(db_name)
(db_base,_) = os.path.splitext(db_base)
outfile = os.path.join(self.blast_tempDir,gene+'.'+db_base+'.blast.txt')
if is_DNA:
##Try to force full-length alignment by increasing relative value of rewards
blast_cline = NcbiblastnCommandline(query=sq(query_file),db=sq(db_name),outfmt=_outfmt_str,out=sq(outfile),evalue=0.1,reward=2,penalty=-2,gapopen=4,gapextend=2)
else:
blast_cline = NcbitblastnCommandline(query=sq(query_file),db=sq(db_name),outfmt=_outfmt_str,out=sq(outfile),evalue=0.1,window_size=15,matrix='BLOSUM80',seg='no')
#A very strict matirx (PAM30) may be best...but I'm not seeing any performance problem even with default BLOSUM65. Seg was a major problem
stdout = stderr = None
try:
stdout, stderr = blast_cline()
except Exception as e:
print("Blast failed on {} with {}...output below...".format(gene,query_file))
print("\t{}".format(stdout))
print("\t{}".format(stderr))
print(format(e))
print("at line:")
traceback.print_tb(sys.exc_info()[2])
results = | pd.read_table(outfile,names=_outfmt_head) | pandas.read_table |
from datetime import datetime
import warnings
import numpy as np
from numpy.random import randn
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Index, Series
import pandas._testing as tm
from pandas.core.window.common import flex_binary_moment
from pandas.tests.window.common import (
moments_consistency_cov_data,
moments_consistency_is_constant,
moments_consistency_mock_mean,
moments_consistency_series_data,
moments_consistency_std_data,
moments_consistency_var_data,
moments_consistency_var_debiasing_factors,
)
def _rolling_consistency_cases():
for window in [1, 2, 3, 10, 20]:
for min_periods in {0, 1, 2, 3, 4, window}:
if min_periods and (min_periods > window):
continue
for center in [False, True]:
yield window, min_periods, center
# binary moments
def test_rolling_cov(series):
A = series
B = A + randn(len(A))
result = A.rolling(window=50, min_periods=25).cov(B)
tm.assert_almost_equal(result[-1], np.cov(A[-50:], B[-50:])[0, 1])
def test_rolling_corr(series):
A = series
B = A + randn(len(A))
result = A.rolling(window=50, min_periods=25).corr(B)
tm.assert_almost_equal(result[-1], np.corrcoef(A[-50:], B[-50:])[0, 1])
# test for correct bias correction
a = tm.makeTimeSeries()
b = tm.makeTimeSeries()
a[:5] = np.nan
b[:10] = np.nan
result = a.rolling(window=len(a), min_periods=1).corr(b)
tm.assert_almost_equal(result[-1], a.corr(b))
@pytest.mark.parametrize("func", ["cov", "corr"])
def test_rolling_pairwise_cov_corr(func, frame):
result = getattr(frame.rolling(window=10, min_periods=5), func)()
result = result.loc[(slice(None), 1), 5]
result.index = result.index.droplevel(1)
expected = getattr(frame[1].rolling(window=10, min_periods=5), func)(frame[5])
expected.index = expected.index._with_freq(None)
tm.assert_series_equal(result, expected, check_names=False)
@pytest.mark.parametrize("method", ["corr", "cov"])
def test_flex_binary_frame(method, frame):
series = frame[1]
res = getattr(series.rolling(window=10), method)(frame)
res2 = getattr(frame.rolling(window=10), method)(series)
exp = frame.apply(lambda x: getattr(series.rolling(window=10), method)(x))
tm.assert_frame_equal(res, exp)
tm.assert_frame_equal(res2, exp)
frame2 = frame.copy()
frame2.values[:] = np.random.randn(*frame2.shape)
res3 = getattr(frame.rolling(window=10), method)(frame2)
exp = DataFrame(
{k: getattr(frame[k].rolling(window=10), method)(frame2[k]) for k in frame}
)
tm.assert_frame_equal(res3, exp)
@pytest.mark.slow
@pytest.mark.parametrize(
"window,min_periods,center", list(_rolling_consistency_cases())
)
def test_rolling_apply_consistency(
consistency_data, base_functions, no_nan_functions, window, min_periods, center
):
x, is_constant, no_nans = consistency_data
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning
)
# test consistency between rolling_xyz() and either (a)
# rolling_apply of Series.xyz(), or (b) rolling_apply of
# np.nanxyz()
functions = base_functions
# GH 8269
if no_nans:
functions = no_nan_functions + base_functions
for (f, require_min_periods, name) in functions:
rolling_f = getattr(
x.rolling(window=window, center=center, min_periods=min_periods), name
)
if (
require_min_periods
and (min_periods is not None)
and (min_periods < require_min_periods)
):
continue
if name == "count":
rolling_f_result = rolling_f()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
else:
if name in ["cov", "corr"]:
rolling_f_result = rolling_f(pairwise=False)
else:
rolling_f_result = rolling_f()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
# GH 9422
if name in ["sum", "prod"]:
tm.assert_equal(rolling_f_result, rolling_apply_f_result)
@pytest.mark.parametrize("window", range(7))
def test_rolling_corr_with_zero_variance(window):
# GH 18430
s = Series(np.zeros(20))
other = Series(np.arange(20))
assert s.rolling(window=window).corr(other=other).isna().all()
def test_flex_binary_moment():
# GH3155
# don't blow the stack
msg = "arguments to moment function must be of type np.ndarray/Series/DataFrame"
with pytest.raises(TypeError, match=msg):
flex_binary_moment(5, 6, None)
def test_corr_sanity():
# GH 3155
df = DataFrame(
np.array(
[
[0.87024726, 0.18505595],
[0.64355431, 0.3091617],
[0.92372966, 0.50552513],
[0.00203756, 0.04520709],
[0.84780328, 0.33394331],
[0.78369152, 0.63919667],
]
)
)
res = df[0].rolling(5, center=True).corr(df[1])
assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
# and some fuzzing
for _ in range(10):
df = DataFrame(np.random.rand(30, 2))
res = df[0].rolling(5, center=True).corr(df[1])
try:
assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
except AssertionError:
print(res)
def test_rolling_cov_diff_length():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2)
expected = Series([None, None, 2.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2a)
tm.assert_series_equal(result, expected)
def test_rolling_corr_diff_length():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2)
expected = Series([None, None, 1.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2a)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"f",
[
lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).max(),
lambda x: x.rolling(window=10, min_periods=5).min(),
lambda x: x.rolling(window=10, min_periods=5).sum(),
lambda x: x.rolling(window=10, min_periods=5).mean(),
lambda x: x.rolling(window=10, min_periods=5).std(),
lambda x: x.rolling(window=10, min_periods=5).var(),
lambda x: x.rolling(window=10, min_periods=5).skew(),
lambda x: x.rolling(window=10, min_periods=5).kurt(),
lambda x: x.rolling(window=10, min_periods=5).quantile(quantile=0.5),
lambda x: x.rolling(window=10, min_periods=5).median(),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True),
lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(),
],
)
@td.skip_if_no_scipy
def test_rolling_functions_window_non_shrinkage(f):
# GH 7764
s = Series(range(4))
s_expected = Series(np.nan, index=s.index)
df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]], columns=["A", "B"])
df_expected = DataFrame(np.nan, index=df.index, columns=df.columns)
s_result = f(s)
tm.assert_series_equal(s_result, s_expected)
df_result = f(df)
tm.assert_frame_equal(df_result, df_expected)
def test_rolling_functions_window_non_shrinkage_binary():
# corr/cov return a MI DataFrame
df = DataFrame(
[[1, 5], [3, 2], [3, 9], [-1, 0]],
columns=Index(["A", "B"], name="foo"),
index=Index(range(4), name="bar"),
)
df_expected = DataFrame(
columns=Index(["A", "B"], name="foo"),
index=pd.MultiIndex.from_product([df.index, df.columns], names=["bar", "foo"]),
dtype="float64",
)
functions = [
lambda x: (x.rolling(window=10, min_periods=5).cov(x, pairwise=True)),
lambda x: (x.rolling(window=10, min_periods=5).corr(x, pairwise=True)),
]
for f in functions:
df_result = f(df)
tm.assert_frame_equal(df_result, df_expected)
def test_rolling_skew_edge_cases():
all_nan = Series([np.NaN] * 5)
# yields all NaN (0 variance)
d = Series([1] * 5)
x = d.rolling(window=5).skew()
tm.assert_series_equal(all_nan, x)
# yields all NaN (window too small)
d = Series(np.random.randn(5))
x = d.rolling(window=2).skew()
tm.assert_series_equal(all_nan, x)
# yields [NaN, NaN, NaN, 0.177994, 1.548824]
d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401])
expected = Series([np.NaN, np.NaN, np.NaN, 0.177994, 1.548824])
x = d.rolling(window=4).skew()
tm.assert_series_equal(expected, x)
def test_rolling_kurt_edge_cases():
all_nan = Series([np.NaN] * 5)
# yields all NaN (0 variance)
d = Series([1] * 5)
x = d.rolling(window=5).kurt()
tm.assert_series_equal(all_nan, x)
# yields all NaN (window too small)
d = Series(np.random.randn(5))
x = d.rolling(window=3).kurt()
tm.assert_series_equal(all_nan, x)
# yields [NaN, NaN, NaN, 1.224307, 2.671499]
d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401])
expected = Series([np.NaN, np.NaN, np.NaN, 1.224307, 2.671499])
x = d.rolling(window=4).kurt()
tm.assert_series_equal(expected, x)
def test_rolling_skew_eq_value_fperr():
# #18804 all rolling skew for all equal values should return Nan
a = Series([1.1] * 15).rolling(window=10).skew()
assert np.isnan(a).all()
def test_rolling_kurt_eq_value_fperr():
# #18804 all rolling kurt for all equal values should return Nan
a = Series([1.1] * 15).rolling(window=10).kurt()
assert np.isnan(a).all()
def test_rolling_max_gh6297():
"""Replicate result expected in GH #6297"""
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 2 datapoints on one of the days
indices.append(datetime(1975, 1, 3, 6, 0))
series = Series(range(1, 7), index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
expected = Series(
[1.0, 2.0, 6.0, 4.0, 5.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").max().rolling(window=1).max()
tm.assert_series_equal(expected, x)
def test_rolling_max_resample():
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be max
expected = Series(
[0.0, 1.0, 2.0, 3.0, 20.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").max().rolling(window=1).max()
tm.assert_series_equal(expected, x)
# Now specify median (10.0)
expected = Series(
[0.0, 1.0, 2.0, 3.0, 10.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").median().rolling(window=1).max()
tm.assert_series_equal(expected, x)
# Now specify mean (4+10+20)/3
v = (4.0 + 10.0 + 20.0) / 3.0
expected = Series(
[0.0, 1.0, 2.0, 3.0, v],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").mean().rolling(window=1).max()
tm.assert_series_equal(expected, x)
def test_rolling_min_resample():
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be min
expected = Series(
[0.0, 1.0, 2.0, 3.0, 4.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
r = series.resample("D").min().rolling(window=1)
tm.assert_series_equal(expected, r.min())
def test_rolling_median_resample():
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be median
expected = Series(
[0.0, 1.0, 2.0, 3.0, 10],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").median().rolling(window=1).median()
tm.assert_series_equal(expected, x)
def test_rolling_median_memory_error():
# GH11722
n = 20000
Series(np.random.randn(n)).rolling(window=2, center=False).median()
Series(np.random.randn(n)).rolling(window=2, center=False).median()
def test_rolling_min_max_numeric_types():
# GH12373
types_test = [np.dtype(f"f{width}") for width in [4, 8]]
types_test.extend(
[np.dtype(f"{sign}{width}") for width in [1, 2, 4, 8] for sign in "ui"]
)
for data_type in types_test:
# Just testing that these don't throw exceptions and that
# the return type is float64. Other tests will cover quantitative
# correctness
result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).max()
assert result.dtypes[0] == np.dtype("f8")
result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).min()
assert result.dtypes[0] == np.dtype("f8")
def test_moment_functions_zero_length():
# GH 8056
s = Series(dtype=np.float64)
s_expected = s
df1 = DataFrame()
df1_expected = df1
df2 = DataFrame(columns=["a"])
df2["a"] = df2["a"].astype("float64")
df2_expected = df2
functions = [
lambda x: x.rolling(window=10, min_periods=0).count(),
lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).max(),
lambda x: x.rolling(window=10, min_periods=5).min(),
lambda x: x.rolling(window=10, min_periods=5).sum(),
lambda x: x.rolling(window=10, min_periods=5).mean(),
lambda x: x.rolling(window=10, min_periods=5).std(),
lambda x: x.rolling(window=10, min_periods=5).var(),
lambda x: x.rolling(window=10, min_periods=5).skew(),
lambda x: x.rolling(window=10, min_periods=5).kurt(),
lambda x: x.rolling(window=10, min_periods=5).quantile(0.5),
lambda x: x.rolling(window=10, min_periods=5).median(),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True),
lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(),
]
for f in functions:
try:
s_result = f(s)
tm.assert_series_equal(s_result, s_expected)
df1_result = f(df1)
tm.assert_frame_equal(df1_result, df1_expected)
df2_result = f(df2)
tm.assert_frame_equal(df2_result, df2_expected)
except (ImportError):
# scipy needed for rolling_window
continue
def test_moment_functions_zero_length_pairwise():
df1 = | DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
'''
FILE: nav_manager.py
DESCRIPTION: Contains the various classes used by the r2rNavManagerPy programs.
BUGS:
NOTES:
AUTHOR: <NAME>
COMPANY: OceanDataTools
VERSION: 0.3
CREATED: 2021-04-15
REVISION: 2021-05-11
LICENSE INFO: This code is licensed under MIT license (see LICENSE.txt for details)
Copyright (C) OceanDataTools 2021
'''
import sys
import json
import logging
from io import StringIO
from datetime import datetime, timedelta
from os.path import dirname, realpath, basename
sys.path.append(dirname(dirname(realpath(__file__))))
import numpy as np
import pandas as pd
from rdp import rdp
from geopy import Point
from geopy.distance import great_circle
from lib.utils import calculate_bearing, read_r2rnavfile
from lib.geocsv_templates import bestres_header, onemin_header, control_header
R2RNAV_COLS = ['iso_time','ship_longitude','ship_latitude','nmea_quality','nsv','hdop','antenna_height','valid_cksum','valid_parse','sensor_time','deltaT','sensor_deltaT','valid_order','distance','speed_made_good','course_made_good','acceleration']
parse_cols = ['iso_time','ship_longitude','ship_latitude','nmea_quality','nsv','hdop','antenna_height','valid_cksum','valid_parse','sensor_time']
bestres_cols = ['iso_time','ship_longitude','ship_latitude','nmea_quality','nsv','hdop','antenna_height','speed_made_good','course_made_good']
onemin_cols = ['iso_time','ship_longitude','ship_latitude','speed_made_good','course_made_good']
control_cols = ['iso_time','ship_longitude','ship_latitude']
MAX_SPEED = 8.7 # m/s
MAX_ACCEL = 1 # m/s^2
MAX_DELTA_T = 300 # seconds
RDP_EPSILON = 0.001
rounding = {
'ship_longitude': 8,
'ship_latitude': 8,
'speed_made_good': 2,
'course_made_good': 3
}
class NpEncoder(json.JSONEncoder):
"""
Custom JSON string encoder used to deal with NumPy arrays
"""
def default(self, obj): # pylint: disable=arguments-differ
if isinstance(obj, np.integer):
return int(obj)
if isinstance(obj, np.floating):
return float(obj)
if isinstance(obj, np.ndarray):
return obj.tolist()
if isinstance(obj, datetime):
return obj.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
return super().default(obj)
class NavInfoReport():
"""
Class for building navinfo reports
"""
def __init__(self, filename):
self._filename = filename
self._start_ts = None
self._end_ts = None
self._start_coord = [None, None]
self._end_coord = [None, None]
self._bbox = [ None, None, None, None]
self._parse_errors = None
self._total_lines = None
@property
def filename(self):
'''
Getter function for self._filename
'''
return self._filename
@property
def start_ts(self):
'''
Getter function for self._start_ts
'''
return self._start_ts
@property
def end_ts(self):
'''
Getter function for self._end_ts
'''
return self._end_ts
@property
def start_coord(self):
'''
Getter function for self._start_coord
'''
return self._start_coord
@property
def end_coord(self):
'''
Getter function for self._end_coord
'''
return self._end_coord
@property
def bbox(self):
'''
Getter function for self._bbox
'''
return self._bbox
@property
def parse_errors(self):
'''
Getter function for self._parse_errors
'''
return self._parse_errors
@property
def total_lines(self):
'''
Getter function for self._total_lines
'''
return self._total_lines
def build_report(self, dataframe):
"""
Build the NavInfo report
"""
self._parse_errors = len(dataframe[(dataframe['valid_parse'] == 0)])
self._total_lines = len(dataframe.index)
first_valid_row = dataframe[dataframe['valid_parse'] == 1].iloc[0]
idx = dataframe.query('valid_parse.eq(1)').index.max()
last_valid_row = dataframe.iloc[idx]
self._start_ts = first_valid_row['iso_time']
self._end_ts = last_valid_row['iso_time']
self._start_coord = [first_valid_row['ship_longitude'],first_valid_row['ship_latitude']]
self._end_coord = [last_valid_row['ship_longitude'],last_valid_row['ship_latitude']]
self._bbox = [dataframe['ship_longitude'].max(),dataframe['ship_latitude'].max(),dataframe['ship_longitude'].min(),dataframe['ship_latitude'].min()]
def __str__(self):
return "NavInfo Report: %s\n\
Navigation Start/End Info:\n\
\tStart Date: %s\n\
\tEnd Date: %s\n\
\tStart Lat/Lon: [%f,%f]\n\
\tEnd Lat/Lon: [%f,%f]\n\
Navigation Bounding Box Info:\n\
\tMinimum Longitude: %f\n\
\tMaximum Longitude: %f\n\
\tMinimum Latitude: %f\n\
\tMaximum Latitude: %f\n\
Parsing Errors: %d\n\
Total Lines of Data: %s\
" % (basename(self._filename), self._start_ts.strftime("%Y-%m-%dT%H:%M:%S.%fZ"), self._end_ts.strftime("%Y-%m-%dT%H:%M:%S.%fZ"), self._start_coord[1], self._start_coord[0], self._end_coord[1], self._end_coord[0], self._bbox[2], self._bbox[0], self._bbox[3], self._bbox[1], self._parse_errors, self._total_lines)
def to_json(self):
"""
Return test data as json object
"""
return {"filename": self._filename, "startTS": self._start_ts.strftime("%Y-%m-%dT%H:%M:%S.%fZ"), "endTS": self._end_ts.strftime("%Y-%m-%dT%H:%M:%S.%fZ"), "startCoord": self._start_coord, "endCoord": self._end_coord, "bbox": self._bbox, "parseErrors": self._parse_errors, "totalLines": self._total_lines}
class NavQAReport(): # pylint: disable=too-many-instance-attributes
"""
Class for a navqa reports
"""
def __init__(self, filename, delta_t_threshold = MAX_DELTA_T, speed_threshold = MAX_SPEED, acceleration_threshold = MAX_ACCEL):
# The filename
self._filename = filename
# Total data rows
self._total_lines = None
# The QA thresholds
self._delta_t_threshold = pd.Timedelta("{} seconds".format(delta_t_threshold))
self._horizontal_speed_threshold = speed_threshold
self._horzontal_acceleration_threshold = acceleration_threshold
# The QA min/max
self._antenna_altitude = [ None, None ]
self._horizontal_speed = [ None, None ]
self._horizontal_acceleration = [ None, None ]
self._horizontal_speed = [ None, None ]
self._distance_from_port = [ None, None ]
self._timestamps = [ None, None ]
self._nsv = [ None, None ]
self._hdop = [ None, None ]
self._delta_t = [ None, None ]
# The QA errors
self._delta_t_errors = None
self._out_of_sequence_errors = None
self._nmea_qualty_errors = None
self._horizontal_speed_errors = None
self._horizontal_acceleration_errors = None
self._parse_errors = None
self._cksum_errors = None
def build_report(self, dataframe):
'''
Build the navqa report.
'''
self._total_lines = len(dataframe.index)
self._antenna_altitude = [ dataframe['antenna_height'].min(), dataframe['antenna_height'].max() ]
self._horizontal_speed = [ dataframe['speed_made_good'].min(), dataframe['speed_made_good'].max() ]
self._horizontal_acceleration = [ dataframe['acceleration'].min(), dataframe['acceleration'].max() ]
self._distance_from_port = [ 0, 0 ]
self._timestamps = [ dataframe['iso_time'].iloc[0], dataframe['iso_time'].iloc[-1] ]
self._nsv = [ int(dataframe['nsv'].min()), int(dataframe['nsv'].max()) ]
self._hdop = [ dataframe['hdop'].min(), dataframe['hdop'].max() ]
self._delta_t = [ dataframe['deltaT'].min(), dataframe['deltaT'].max() ]
self._delta_t_errors = len(dataframe[(dataframe['deltaT'] > self._delta_t_threshold)])
self._out_of_sequence_errors = len(dataframe[(dataframe['valid_order'] == 0)])
self._nmea_qualty_errors = self._total_lines - len(dataframe[dataframe['nmea_quality'].between(1,3)])
self._horizontal_speed_errors = len(dataframe[(dataframe['speed_made_good'] > self._horizontal_speed_threshold)])
self._horizontal_acceleration_errors = len(dataframe[(dataframe['acceleration'] > self._horzontal_acceleration_threshold)])
self._parse_errors = len(dataframe[(dataframe['valid_parse'] == 0)])
self._cksum_errors = len(dataframe[(dataframe['valid_cksum'] == 0)])
def __str__(self):
return "NavQA Report: %s\n\
Duration and range of values:\n\
Maximum Antenna Altitude: %0.3f m\n\
Minimum Antenna Altitude: %0.3f m\n\
Maximum Horizontal Speed: %0.3f m/s\n\
Minimum Horizontal Speed: %0.3f m/s\n\
Maximum Horizontal Acceleration: %0.3f m/s^2\n\
Minimum Horizontal Acceleration: %0.3f m/s^2\n\
Distance from Port Start: %0.2f m\n\
Distance from Port End: %0.2f m\n\
First epoch: %s\n\
Last epoch: %s\n\
Possible Number of Epochs with Observations:\n\
Actual Number of Epochs with Observations:\n\
Actual Countable Number of Epoch with Observations:\n\
Absent Number of Epochs with Observations:\n\
Flagged Number of Epochs with Observations:\n\
Number of satellites:\n\
Maximum Number of Satellites: %d\n\
Minimum Number of Satellites: %d\n\
Maximum HDOP: %0.1f\n\
Minimum HDOP: %0.1f\n\n\
Qualtiy Assessment:\n\
Longest epoch gap: %s\n\
Number of Gaps Longer than Threshold: %d\n\
Percentage of Gaps Longer than Threshold: %0.3f %%\n\
Number of Epochs Out of Sequence: %d\n\
Percent records out of sequence: %0.3f %%\n\
Number of Epochs with Bad GPS Quality Indicator: %d\n\
Percent records with Bad GPS Quality Indicator: %0.3f %%\n\
Number of Horizontal Speeds Exceeding Threshold: %d\n\
Percent Unreasonable Horizontal Speeds: %0.3f %%\n\
Number of Horizontal Accelerations Exceeding Threshold: %d\n\
Percent Unreasonable Horizontal Accelerations: %0.3f %%\n\
" % (basename(self._filename),
self._antenna_altitude[1],
self._antenna_altitude[0],
self._horizontal_speed[1],
self._horizontal_speed[0],
self._horizontal_acceleration[1],
self._horizontal_acceleration[0],
self._distance_from_port[0],
self._distance_from_port[1],
self._timestamps[0].strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
self._timestamps[1].strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
self._nsv[1],
self._nsv[0],
self._hdop[1],
self._hdop[0],
str(self._delta_t[1]),
self._delta_t_errors,
100 * self._delta_t_errors/self._total_lines,
self._out_of_sequence_errors,
100 * self._out_of_sequence_errors/self._total_lines,
self._nmea_qualty_errors,
100 * self._nmea_qualty_errors/self._total_lines,
self._horizontal_speed_errors,
100 * self._horizontal_speed_errors/self._total_lines,
self._horizontal_acceleration_errors,
100 * self._horizontal_acceleration_errors/self._total_lines,
)
def to_json(self):
"""
Return test data json object
"""
return {
"filename": self._filename,
"antennaAltitudeMax": self._antenna_altitude[1],
"antennaAltitudeMin": self._antenna_altitude[0],
"horizontalSpeedMax": self._horizontal_speed[1],
"horizontalSpeedMin": self._horizontal_speed[0],
"horizontalAccelerationMax": self._horizontal_acceleration[1],
"horizontalAccelerationMin": self._horizontal_acceleration[0],
"distanceFromStartPort": self._distance_from_port[0],
"distanceFromEndPort": self._distance_from_port[1],
"firstEpoch": self._timestamps[0].strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"lastEpoch": self._timestamps[1].strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"satellitesMax": self._nsv[1],
"satellitesMin": self._nsv[0],
"hdopMax": self._hdop[1],
"hdopMin": self._hdop[0],
"deltaTMax": str(self._delta_t[1]),
"deltaTErrorPercentage": round(self._delta_t_errors/self._total_lines, 2) * 100,
"outOfSequenceErrors": self._out_of_sequence_errors,
"outOfSequenceErrorPercentage": round(self._out_of_sequence_errors/self._total_lines, 2) * 100,
"nmeaQualtyErrors": self._nmea_qualty_errors,
"nmeaQualtyErrorPercentage": round(self._nmea_qualty_errors/self._total_lines, 2) * 100,
"horizontalSpeedErrors": self._horizontal_speed_errors,
"horizontalSpeedErrorPercentage": round(self._horizontal_speed_errors/self._total_lines, 2) * 100,
"horizontalAccelerationErrors": self._horizontal_acceleration_errors,
"horizontalAccelerationErrorPercentage": round(self._horizontal_acceleration_errors/self._total_lines, 2) * 100
}
class NavExport():
"""
Class for build navexport products
"""
def __init__(self, filename, delta_t_threshold = MAX_DELTA_T, speed_threshold = MAX_SPEED, acceleration_threshold = MAX_ACCEL):
# The filename
self._filename = filename
# The QA thresholds
self._delta_t_threshold = pd.Timedelta("{} seconds".format(delta_t_threshold))
self._horizontal_speed_threshold = speed_threshold
self._horzontal_acceleration_threshold = acceleration_threshold
self._geocsv_header = None
self._data = None
@property
def data(self):
'''
Getter function for self.
'''
return self._data
@staticmethod
def _round_data(data_frame, precision=None):
"""
Round the data in the data_frame to the specified precision
"""
if precision is None or bool(precision):
try:
decimals = pd.Series(precision.values(), index=precision.keys())
return data_frame.round(decimals)
except Exception as err:
logging.error("Could not round data")
logging.error(str(err))
raise err
return data_frame
def read_r2rnavfile(self, file_format='csv'):
"""
Build the NavExport dataframe from the NavExport filename
"""
self._data = read_r2rnavfile(self._filename, file_format)
# remove bad parse rows
logging.debug("Culling bad parses")
self._data = self._data[self._data['valid_parse'] == 1]
def crop_data(self, start_ts=None, end_ts=None):
"""
Crop the NavExport dataframe to the start/end timestamps specified.
"""
try:
if start_ts is not None:
logging.debug(" start_dt: %s", start_ts)
self._data = self._data[(self._data['iso_time'] >= start_ts)]
if end_ts is not None:
logging.debug(" stop_dt: %s", end_ts)
self._data = self._data[(self._data['iso_time'] <= end_ts)]
except Exception as err:
logging.error("Could not crop data")
logging.error(str(err))
raise err
def apply_qc(self):
"""
Apply the QC rules to the NavExport dataframe
"""
# remove bad gps fixes
logging.debug("Culling bad NMEA fixes")
self._data = self._data[self._data['nmea_quality'].between(1,3)]
# remove bad cksums
logging.debug("Culling bad cksums")
self._data = self._data[self._data['valid_cksum'] == 1]
# remove bad sequence data points
logging.debug("Culling out-of-sequence data")
self._data = self._data[self._data['valid_order'] == 1]
# remove bad speeds
logging.debug("Culling data exceeding speed threshold")
self._data = self._data[self._data['speed_made_good'] <= self._horizontal_speed_threshold]
# remove bad accelerations
logging.debug("Culling data exceeding acceleration threshold")
self._data = self._data[self._data['acceleration'] <= self._horzontal_acceleration_threshold]
def build_bestres(self):
"""
Build the bestres dataset from the NavExport dataframe
"""
drop_columns = [x for x in list(self._data.columns) if x not in bestres_cols]
logging.debug("Dropping columns: %s", drop_columns)
self._data = self._data.drop(drop_columns, axis = 1)
logging.debug("Rounding data: %s", rounding)
self._data = self._round_data(self._data, rounding)
# Update geocsv header
self._geocsv_header = bestres_header
def build_1min(self):
"""
Build the 1min dataset from the NavExport dataframe
"""
drop_columns = [x for x in list(self._data.columns) if x not in onemin_cols]
logging.debug("Dropping columns: %s", drop_columns)
self._data = self._data.drop(drop_columns, axis = 1)
logging.debug('Subsampling data...')
self._data.set_index('iso_time',inplace=True)
self._data = self._data.resample('1T', label='left', closed='left').first()
self._data.reset_index(inplace=True)
self._data.dropna(inplace=True, thresh=4)
# Calculate deltaT column
logging.debug('Building deltaT column...')
self._data = self._data.join(self._data['iso_time'].diff().to_frame(name='deltaT'))
# Calculate distance column
logging.debug("Building distance column...")
self._data['point'] = self._data.apply(lambda row: Point(latitude=row['ship_latitude'], longitude=row['ship_longitude']), axis=1)
self._data['point_next'] = self._data['point'].shift(1)
self._data.loc[self._data['point_next'].isna(), 'point_next'] = None
self._data['distance'] = self._data.apply(lambda row: great_circle(row['point'], row['point_next']).km if row['point_next'] is not None else float('nan'), axis=1)
# Calculate speed_made_good column
logging.debug("Building speed_made_good column...")
self._data['speed_made_good'] = (self._data['distance'] * 1000) / self._data.deltaT.dt.total_seconds()
# Calculate course_made_good column
logging.debug("Building course_made_good column...")
self._data['course_made_good'] = self._data.apply(lambda row: calculate_bearing(tuple(row['point']), tuple(row['point_next'])) if row['point_next'] is not None else float('nan'), axis=1)
self._data = self._data.drop('point_next', axis=1)
self._data = self._data.drop('point', axis=1)
self._data = self._data.drop('distance', axis=1)
self._data = self._data.drop('deltaT', axis=1)
logging.debug("Rounding data: %s", rounding)
self._data = self._round_data(self._data, rounding)
# Update geocsv header
self._geocsv_header = onemin_header
def build_control(self):
"""
Build the control dataset from the NavExport dataframe
"""
drop_columns = [x for x in list(self._data.columns) if x not in control_cols]
logging.debug("Dropping columns: %s", drop_columns)
self._data = self._data.drop(drop_columns, axis = 1)
# run rdp algorithim
logging.debug("Building control coordinates using RDP algorithim")
coords = self._data.filter(['ship_longitude','ship_latitude'], axis=1).to_numpy()
control = rdp(coords, epsilon=RDP_EPSILON)
logging.debug("Length of full-res coordinates: %d", len(self._data.index))
logging.debug("Length of control coordinates: %d", control.shape[0])
control_df = pd.DataFrame(control, columns = ['ship_longitude','ship_latitude'])
self._data = pd.merge(control_df, self._data, on=['ship_longitude','ship_latitude'], how='left')
self._data = self._data[control_cols]
logging.debug("Rounding data: %s", rounding)
self._data = self._round_data(self._data, rounding)
# Update geocsv header
self._geocsv_header = control_header
def geocsv_header(self, custom_meta = None):
"""
Build the geocsv header, apply any custom metadata and return it as a
string.
"""
geocsv_header = ""
for key, _ in self._geocsv_header.items():
if custom_meta and key in custom_meta:
geocsv_header += "#{}: {}\n".format(key, custom_meta[key])
else:
geocsv_header += "#{}: {}\n".format(key, self._geocsv_header[key])
return geocsv_header
def to_csv(self):
'''
Output self._data in csv format.
'''
output = StringIO()
self._data.to_csv(output, index=False, na_rep='NAN')
output.seek(0)
print(output.read())
class NavParser():
"""
Root Class for a nav parsers
"""
def __init__(self, name, description=None, example_data=None):
self._name = name
self._description = description
self._example_data = example_data
self._parse_cols = parse_cols
self._file_report = []
self._df_proc = pd.DataFrame()
@property
def name(self):
'''
Getter function for self._name
'''
return self._name
@property
def description(self):
'''
Getter function for self._description
'''
return self._description
@property
def example_data(self):
'''
Getter function for self._example_data
'''
return self._example_data
@property
def parse_cols(self):
'''
Getter function for self._parse_cols
'''
return self._parse_cols
@property
def file_report(self):
'''
Getter function for self._file_report
'''
return self._file_report
@property
def dataframe(self):
'''
Getter function for self._df_proc
'''
return self._df_proc
def parse_file(self, filepath):
"""
Process the given file. This function must be overrided by subclasses
"""
raise NotImplementedError('process_file must be implemented by subclass')
def add_file_report(self, file_report):
"""
Append the file_report to the NavParser's array of file reports.
"""
self._file_report.append(file_report)
def add_dateframe(self, data):
"""
Add the dataframe data to the NavParser's _df_proc dataframe
"""
if self._df_proc.empty:
self._df_proc = data
else:
self._df_proc = pd.concat([self._df_proc, data], ignore_index=True)
def proc_dataframe(self):
"""
Process the dataframe to calculate deltaT, distance, bearing, velocity,
and acceleration
"""
# Calculate deltaT column
logging.debug('Building deltaT column...')
self._df_proc = self._df_proc.join(self._df_proc['iso_time'].diff().to_frame(name='deltaT'))
# Calculate sensor deltaT column
logging.debug('Building sensor deltaT column...')
self._df_proc = self._df_proc.join(self._df_proc['sensor_time'].diff().to_frame(name='sensor_deltaT'))
# If sensor_time does not have a date (i.e. GGA/GLL), set day offset to
# 0, this gets around files spanning multiple days
self._df_proc['sensor_deltaT'] = self._df_proc.apply(lambda row: row['sensor_deltaT'] + timedelta(days=1) if row['sensor_time'].year == 1900 and row['sensor_deltaT'].days == -1 else row['sensor_deltaT'], axis=1)
# If iso_time or sensor_time is negative flag as bad
logging.debug("Flagging rows that may be out-of-sequence...")
self._df_proc['valid_order'] = self._df_proc.apply(lambda row: 1 if | pd.isnull(row['deltaT']) | pandas.isnull |
from sklearn.svm import SVR
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.externals import joblib
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
'''
this file is to use 10 fold cross-validation to select best kernal function
that give best performence in SVR model.
after establishing our SVR model, we will use cross-validation to evaluate our
model by get average MSE, RSS, R^2
'''
# this functon is used to read data set into Dataframe.
def svr_data():
X1 = pd.read_csv('kratosbat/Data/DataForSVR/GC_PCA.csv')
y1 = pd.read_csv('kratosbat/Data/NEWTrainingData_StandardScaler.csv').loc[:, ['Gravimetric Capacity (units)']]
X2 = | pd.read_csv('kratosbat/Data/DataForSVR/VC_PCA.csv') | pandas.read_csv |
import pandas as pd
import variables
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
import sys
import matplotlib.ticker as mtick
import numpy as np
from matplotlib.patches import Patch
from matplotlib.lines import Line2D
from matplotlib import collections as matcoll
from textwrap import wrap
def create_long_df(
df, pivot_list, section_name="_section", subsection_name="_subsection"
):
columns = list(df.columns)
drop_columns = [
col for col in columns if (section_name in col) or (subsection_name in col)
]
question_sections, question_subsections = generate_question_list(
columns, section_name, subsection_name
)
df = df.drop(columns=drop_columns)
long_df = pd.melt(df, id_vars=pivot_list, var_name="question", value_name="answer")
long_df["section"] = long_df["question"].map(question_sections)
long_df["sub_section"] = long_df["question"].map(question_subsections)
long_df["sub_section"] = long_df["sub_section"].fillna(long_df["section"])
return long_df
def generate_question_list(columns, section_name, subsection_name):
question_sections = {}
question_subsections = {}
for index, value in enumerate(columns):
if section_name in value:
section = value
i = index + 1
while i < len(columns) and section_name not in columns[i]:
if subsection_name in columns[i]:
subsection = columns[i]
y = i + 1
while y < len(columns) and subsection_name not in columns[y]:
if section_name in columns[y]:
break
else:
question_subsections[columns[y]] = subsection
y += 1
else:
question_sections[columns[i]] = section
i += 1
return question_sections, question_subsections
def determine_notable_questions_list(df, groupings, perent_threshold, n_threshold):
notable_divergencies = []
for question in variables.question_columns:
for grouping in groupings:
_group = pd.crosstab(
df[grouping], df[question], normalize="index", margins=True
)
_group_all = _group[_group.index == "All"]
_group.drop("All", inplace=True)
_group_count = pd.crosstab(df[grouping], df[question])
# for i in range(2):
# if i == 1:
# kind = "promoter"
# else:
# kind = "detractor"
amount_above = _group[1].max() - _group_all[1].values[0]
amount_below = _group_all[1].values[0] - _group[1].min()
max_index = _group[_group[1] == _group[1].max()].index.values[0]
max_count = _group_count[_group_count.index == max_index][1].values[0]
min_index = _group[_group[1] == _group[1].min()].index.values[0]
min_count = _group_count[_group_count.index == min_index][1].values[0]
if (amount_above >= perent_threshold) & (max_count >= n_threshold):
notable_divergencies.append(
{
"grouping": grouping,
"question": question,
# "kind": kind,
"type": "above_average",
"delta": amount_above,
"value": _group[1].max(),
"count": max_count,
"index": max_index,
}
)
elif (amount_below >= perent_threshold) & (min_count >= n_threshold):
notable_divergencies.append(
{
"grouping": grouping,
"question": question,
# "kind": kind,
"type": "below_average",
"delta": amount_below,
"value": _group[1].min(),
"count": min_count,
"index": min_index,
}
)
notable_divergencies_df = | pd.DataFrame(notable_divergencies) | pandas.DataFrame |
'''
Created on Jul 5, 2018
@author: cef
'''
import os, sys, copy, logging, time
#weakref
from collections import OrderedDict
from weakref import WeakValueDictionary as wdict
import pandas as pd
import numpy as np
import model.sofda.hp.basic as hp_basic
import model.sofda.hp.pd as hp_pd
#import hp.plot
import model.sofda.hp.oop as hp_oop
import model.sofda.hp.sim as hp_sim
import model.sofda.hp.sel as hp_sel
from model.sofda.hp.pd import view
mod_logger = logging.getLogger(__name__)
mod_logger.debug('initilized')
"""doesnt seem worth it
class Outputme_wrap(object): #wrapper for object on which Outputrs are applied
pass """
class Outputr(hp_sel.Sel_usr_wrap,
hp_sim.Sim_o,
hp_oop.Child): #Standalone outputr worker
#===========================================================================
# program pars
#===========================================================================
'changed this so outputrs run every time step'
upd_sim_lvl = 2 #control when this is run
# object handling overrides
"""
raise_kids_f = False
load_data_f = False"""
db_f = True #override to perform db_f on writer (False could improve speed)
#===========================================================================
# user provided pars
#===========================================================================
#sel_n = None #selector name
custom_exe = None #custom outputter string
sim_stats_exe = None #numpy stats to perform for each siulation
dt_n = None #Tstep name for time slice outputrs
out_attn = None #attribute name selected for outputting
#pclass_n = None #oject class name selected for outputting
post_exe = None
ses_plot = None #type of plot to generate for simulation summary data
#out_attn_sfx = None
desc = None
#===========================================================================
# calculation pars
#===========================================================================
#pick_d = None
otype = None #type of outputer
data = None #holds the data for this outputr
dtype = None #data type for outputr data detected from out_attn suffix
def __init__(self, *args, **kwargs):
logger = mod_logger.getChild('Outputr') #have to use this as our own logger hasnt loaded yet
logger.debug('start __init__')
super(Outputr, self).__init__(*args, **kwargs)
#=======================================================================
#unique setup
#=======================================================================
self.codename = self.get_codename()
if not self.dt_n is None:
self.dt_n = self.get_dt_n(self.dt_n)
logger.debug('\n')
logger.debug('set_dtype \n')
self.set_dtype()
logger.debug('make_pick \n')
self.pick_d = self.make_pick()
logger.debug('set_otype()')
self.set_otype()
logger.debug('set_dimensions \n')
self.set_dimensions()
logger.debug('set_outf \n')
self.set_outf()
#=======================================================================
# checking
#=======================================================================
if self.db_f:
logger.debug('db_f=TRUE \n')
self.check_outputr()
logger.debug('__init__ finished with pclass_n = \'%s\' \n'%self.pclass_n)
return
def get_codename(self): #generate a codename from the attribute values
logger = self.logger.getChild('get_codename')
codename = '%s'%self.name
for attn in ['pclass_n', 'out_attn', 'sel_n', 'dt_n']:
v = getattr(self, attn) #should have all of these
if not v is None:
codename = codename +'.%s'%v
logger.debug('got codename \'%s\''%codename)
return codename
def check_outputr(self):
logger = self.logger.getChild('check_outputr')
timeline_df = self.session.pars_df_d['timeline']
if not self.dt_n is None:
if not self.dt_n in timeline_df.loc[:,'name'].values.tolist():
logger.error('my dt_n \'%s\' is not in the timeline'%self.dt_n)
raise IOError
if len(timeline_df) < 2:
logger.error('can not provide time sliced data when there is only 1 time step')
raise IOError
#=======================================================================
# selector checks
#=======================================================================
if not self.sel_n is None:
#check the validity of this Selecto
if not self.sel_o.pclass_n == self.pclass_n:
logger.error('the Selelctors (%s) pclass_n (%s) must match the Outputrs pclass_n (%s)'
%(self.sel_o.name, self.sel_o.pclass_n, self.pclass_n ))
raise IOError
"""
if not self.sel_o.upd_sim_lvl == 0:
'see note in headers'
logger.error('passed Selector \'%s\' has mid-Session updates (%s)'
%(self.sel_o.name, self.sel_o.upd_sim_lvl))
raise IOError"""
if not self.picko_p_f:
raise IOError #not allowing this... too complicated with data structure
def set_dtype(self): #detect teh data type by the out_attn suffix
if not self.out_attn is None:
if self.out_attn.endswith('_f'):
self.dtype = np.bool
else:
self.dtype = np.dtype(object)
def set_otype(self): #determine the outputer type
logger = self.logger.getChild('set_otype')
if (self.custom_exe is None) & (self.post_exe is None):
self.otype = 'simple'
elif (self.custom_exe is None) & (not self.post_exe is None):
self.otype = 'post'
elif (not self.custom_exe is None) & (self.post_exe is None):
self.otype = 'obj'
else:
raise IOError
logger.debug('set as \'%s\''%self.otype)
return
def set_dimensions(self): #set the outputtr function
logger = self.logger.getChild('set_dimensions')
#=======================================================================
# logic by outputter type
#=======================================================================
#post exes
if self.otype == 'post_exe':
'todo: allow multiple dimensions'
time_d, space_d = 0, 0
else:
#=======================================================================
#set time dimension
#=======================================================================
#time slices
'todo: allow lists'
if not self.dt_n is None:
time_d = 0
else:
time_d = 1
#===================================================================
# set space dimension
#===================================================================
space_d = 1
#=======================================================================
# wrap up
#=======================================================================
if (time_d + space_d) >3: raise IOError
self.total_d = space_d + time_d
self.space_d, self.time_d = space_d, time_d
logger.debug('got space_d = %s, time_d = %s'%(space_d, time_d))
def set_outf(self): #assign the approriate writer function
"""
TODO: add pandas functionality so the user can returns tats on the childmeta_df
"""
logger = self.logger.getChild('set_outf')
#===================================================================
# get shortcuts
#===================================================================
ce_str = self.custom_exe
pick_d = self.pick_d
if self.db_f:
if pick_d is None: raise IOError
"""allowign this now
if len(pick_d) == 0:
raise IOError"""
#===================================================================
# data based writers
#===================================================================
if self.otype == 'simple':
#===================================================================
# snap shots
#===================================================================
if self.time_d == 0:
if self.space_d == 0: #one value for the simulation
data = np.nan
outf = self.time0_space0
elif self.space_d == 1: #a list of attributes constant for the simulation (or at th eend)
data = pd.Series(name = self.codename, index = list(pick_d.keys()), dtype = np.object).sort_index()
outf = self.time0_space1
else:
raise IOError
#=======================================================================
# time series
#=======================================================================
elif self.time_d == 1:
#get the time series
dt_l = self.session.pars_df_d['timeline'].loc[:,'name'].tolist()
if self.space_d == 0: #one object attribute with a time series
data = pd.Series(name = self.codename, index = dt_l, dtype = np.object).sort_index()
outf = self.time1_space0
elif self.space_d == 1: #an array of attributes recorded at each time
"""just add new entries?
data = pd.DataFrame(columns = dt_l, index = pick_d.keys(), dtype = np.object)"""
data = pd.DataFrame(columns = dt_l, dtype = np.object).sort_index()
outf = self.time1_space1
else: raise IOError
#===================================================================
# custom outputr commands
#===================================================================
elif self.otype == 'obj':
'todo: allow a list of commands'
#===============================================================
# pre checks
#===============================================================
"""no?
if not self.out_attn is None:
logger.error('out_attn must be blank for custom_exes (got \'%s\''%self.out_attn)
raise IOError"""
"""no! allow some stats for custom funcs
if not self.sim_stats_exe is None:
logger.error('expected something for ')
raise IOError"""
""" OK with this now
if not ce_str.endswith(')'):
logger.error('for custom_exe calls, the string must end with \')\'')
raise IOError"""
logger.debug('user provided custom output functions \'%s\''%(ce_str))
outf = self.custom_exe_call #attach this as the writer func
#data = np.nan
#===================================================================
# set data container
#===================================================================
if not self.time_d == 1: raise IOError
dt_l = self.session.pars_df_d['timeline'].loc[:,'name'].tolist()
if self.space_d == 0: #one object attribute with a time series
data = pd.Series(name = self.codename, index = dt_l, dtype = np.object).sort_index()
elif self.space_d == 1: #an array of attributes recorded at each time
data = pd.DataFrame(columns = dt_l, dtype = np.object).sort_index()
else: raise IOError
#=======================================================================
# post_exe s
#=======================================================================
elif self.otype == 'post':
outf = self.post_exe_call
data = np.nan
logger.debug('post_exe provided: \'%s\''%self.post_exe)
else: raise IOError
self.outf = outf
self.data = data
self.reset_d['data'] = copy.copy(data) #set blank data container for reseting
logger.debug('finished with outf: \'%s\' and data: \'%s\''%(outf, data))
#=======================================================================
# post checks
#=======================================================================
if self.db_f:
if not self.session.state == 'init':
raise IOError
if data is None:
raise IOError
if not callable(outf):
raise IOError
return
def time0_space0(self, obj, att_value):
logger = self.logger.getChild('time0_space0')
self.data = att_value
logger.debug('for %s updated with \'%s\' = %s'%(obj.name, self.out_attn, att_value))
return
def time0_space1(self, obj, att_value):
"""
This type could be a series from a single object
or a string of object scould be passed
"""
logger = self.logger.getChild('time0_space1')
if not hp_pd.isser(self.data): raise IOError
#=======================================================================
# for data series
#=======================================================================
if hp_pd.isser(att_value):
'just make a reference ot this series'
self.data = att_value.copy()
#=======================================================================
# for a gorup of objects, passed one at a time
#=======================================================================
else:
if not obj.gid in self.data.index: raise IOError
self.data[obj.gid] = att_value
logger.debug('for %s updated ser %i with \'%s\' = %s'%
(obj.name, len(self.data), self.out_attn, att_value))
return
def time1_space0(self, obj, att_value):
logger = self.logger.getChild('time1_space0')
ser = self.data
time = self.session.tstep_o.name
logger.debug('for obj.name \'%s\' time \'%s\' out_attn \'%s\''%(obj.name, time, self.out_attn))
#=======================================================================
# checsk
#=======================================================================
if self.db_f:
if not hp_pd.isser(ser):raise IOError
if not time in ser.index: raise IOError
if hasattr(att_value, 'values'):
"""
type(att_value)
att_value.shape
"""
raise IOError
if not self.chk_old_val(ser[time], att_value):
raise IOError
self.data[time] = att_value
logger.debug('for %s updated ser %i with \'%s\' = %s'%
(obj.name, len(self.data), self.out_attn, att_value))
return
def time1_space1(self, obj, att_value):
'TODO: evaluate the boolean again'
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('time1_space1')
time = self.session.tstep_o.name
logger.debug('for obj.name \'%s\' time \'%s\' out_attn \'%s\''%(obj.name, time, self.out_attn))
#=======================================================================
# check that this entry hasnt been written yet
#=======================================================================
if self.db_f:
df = self.data
if not hp_pd.isdf(df): raise IOError
if not time in df.columns: raise IOError
"""allowing d ynamic selection
if not obj.gid in df.index: raise IOError
if not self.chk_old_val(df.loc[obj.gid, time], att_value):
raise IOError"""
#if this object is in already, is the value blank?
if obj.gid in df.index:
if not self.chk_old_val(df.loc[obj.gid, time], att_value):
raise IOError
#=======================================================================
# do the updating
#=======================================================================
#write this value by name and time
self.data.loc[obj.gid, time] = att_value
logger.debug('for %s updated df %s with \'%s\' = %s'%
(obj.gid, str(self.data.shape), self.out_attn, att_value))
return
def custom_exe_call(self, obj, att_value): #object method call
logger = self.logger.getChild('custom_exe_call')
logger.debug("for eval_str: \'%s\'"%self.custom_exe)
try:
result = eval(self.custom_exe)
except:
raise IOError
#=======================================================================
# store the result
#=======================================================================
if not result is None:
time = self.session.tstep_o.name
if self.space_d == 0:
self.data[time] = result
elif self.space_d == 1:
self.data.loc[obj.gid, time] = result
else: raise IOError
return
def post_exe_call(self, obj, att_value):
logger = self.logger.getChild('post_exe_call')
#=======================================================================
# make variables local
#=======================================================================
outs_od = self.session.outs_od
'only setup for uni-dimensional data types'
try:
data = eval(self.post_exe)
except:
logger.error('failed to evaluate \'%s\''%self.post_exe)
raise IOError
try:
self.data = float(data)
except:
logger.error('got unexpected type on data: \'%s\': %s'%(type(data), data))
raise IOError
logger.debug('got post_exe result: %.2f from cmd \'%s\''%(self.data, self.post_exe))
if self.db_f:
if np.any( | pd.isnull(self.data) | pandas.isnull |
# importing numpy, pandas, and matplotlib
import numpy as np
import pandas as pd
import matplotlib
import multiprocessing
matplotlib.use('agg')
import matplotlib.pyplot as plt
# importing sklearn
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from sklearn.decomposition import PCA
from sklearn.random_projection import GaussianRandomProjection
from sklearn import cluster
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
# importing keras
import keras
import keras.backend as K
from keras.wrappers.scikit_learn import KerasClassifier
from keras.callbacks import EarlyStopping, ModelCheckpoint, LambdaCallback
from keras.models import Model, load_model
# importing util libraries
import datetime
import time
import math
import os
import importlib
# importing custom library
import DNN_models
import exception_handle
#fix np.random.seed for reproducibility in numpy processing
np.random.seed(7)
class DeepMicrobiome(object):
def __init__(self, data, seed, data_dir):
self.t_start = time.time()
self.filename = str(data)
self.data = self.filename.split('.')[0]
self.seed = seed
self.data_dir = data_dir
self.prefix = ''
self.representation_only = False
def loadData(self, feature_string, label_string, label_dict, dtype=None):
# read file
filename = self.data_dir + "data/" + self.filename
if os.path.isfile(filename):
raw = pd.read_csv(filename, sep='\t', index_col=0, header=None)
else:
print("FileNotFoundError: File {} does not exist".format(filename))
exit()
# select rows having feature index identifier string
X = raw.loc[raw.index.str.contains(feature_string, regex=False)].T
self.X = X
# get class labels
Y = raw.loc[label_string] #'disease'
self.Y = Y.replace(label_dict)
# indices
self.sample_ids = raw.iloc[1]
# train and test split
self.X_train, self.X_test, self.y_train, self.y_test, self.train_indices, self.test_indices = train_test_split(self.X.values.astype(dtype), self.Y.values.astype('int'), self.sample_ids, test_size=0.2, random_state=self.seed, stratify=Y.values)
self.printDataShapes()
def setIndices(self, train_indices, test_indices):
self.X_train = self.X.iloc[train_indices]
self.X_test = self.X.iloc[test_indices]
self.y_train = self.Y.iloc[train_indices]
self.y_test = self.Y.iloc[test_indices]
self.train_indices = self.sample_ids.iloc[train_indices]
self.test_indices = self.sample_ids.iloc[test_indices]
def loadCustomData(self, dtype=None):
# read file
filename = self.data_dir + "data/" + self.filename
if os.path.isfile(filename):
raw = pd.read_csv(filename, sep=',', index_col=False, header=None)
else:
print("FileNotFoundError: File {} does not exist".format(filename))
exit()
# load data
self.X_train = raw.values.astype(dtype)
# put nothing or zeros for y_train, y_test, and X_test
self.y_train = np.zeros(shape=(self.X_train.shape[0])).astype(dtype)
self.X_test = np.zeros(shape=(1,self.X_train.shape[1])).astype(dtype)
self.y_test = np.zeros(shape=(1,)).astype(dtype)
self.printDataShapes(train_only=True)
def loadCustomDataWithLabels(self, label_data, dtype=None):
# read file
filename = self.data_dir + "data/" + self.filename
label_filename = self.data_dir + "data/" + label_data
if os.path.isfile(filename) and os.path.isfile(label_filename):
raw = pd.read_csv(filename, sep=',', index_col=0, header=None)
label = pd.read_csv(label_filename, sep=',', index_col=0, header=None)
assert (raw.index == label.index).all()
else:
if not os.path.isfile(filename):
print("FileNotFoundError: File {} does not exist".format(filename))
if not os.path.isfile(label_filename):
print("FileNotFoundError: File {} does not exist".format(label_filename))
exit()
# label data validity check
if not label.values.shape[1] > 1:
pass
#label = label.values.reshape((label.values.shape[0]))
else:
print('FileSpecificationError: The label file contains more than 1 column.')
exit()
# train and test split
self.X = raw.astype(dtype)
self.sample_ids = raw.index.to_series()
self.Y = label.astype(int)
self.X_train, self.X_test, self.y_train, self.y_test, self.train_indices, self.test_indices = train_test_split(raw.values.astype(dtype),
label.astype('int'), self.sample_ids, test_size=0.2,
random_state=self.seed,
stratify=label)
self.printDataShapes()
#Principal Component Analysis
def pca(self, ratio=0.99):
# manipulating an experiment identifier in the output file
self.prefix = self.prefix + 'PCA_'
# PCA
pca = PCA()
pca.fit(self.X_train)
n_comp = 0
ratio_sum = 0.0
for comp in pca.explained_variance_ratio_:
ratio_sum += comp
n_comp += 1
if ratio_sum >= ratio: # Selecting components explaining 99% of variance
break
pca = PCA(n_components=n_comp)
pca.fit(self.X_train)
X_train = pca.transform(self.X_train)
X_test = pca.transform(self.X_test)
# applying the eigenvectors to the whole training and the test set.
self.X_train = X_train
self.X_test = X_test
self.printDataShapes()
#Gausian Random Projection
def rp(self):
# manipulating an experiment identifier in the output file
self.prefix = self.prefix + 'RandP_'
# GRP
rf = GaussianRandomProjection(eps=0.5)
rf.fit(self.X_train)
# applying GRP to the whole training and the test set.
self.X_train = rf.transform(self.X_train)
self.X_test = rf.transform(self.X_test)
self.printDataShapes()
#Shallow Autoencoder & Deep Autoencoder
def ae(self, dims = [50], epochs= 2000, batch_size=100, verbose=2, loss='mean_squared_error', latent_act=False, output_act=False, act='relu', patience=20, val_rate=0.2, no_trn=False):
# manipulating an experiment identifier in the output file
if patience != 20:
self.prefix += 'p' + str(patience) + '_'
if len(dims) == 1:
self.prefix += 'AE'
else:
self.prefix += 'DAE'
if loss == 'binary_crossentropy':
self.prefix += 'b'
if latent_act:
self.prefix += 't'
if output_act:
self.prefix += 'T'
self.prefix += str(dims).replace(", ", "-") + '_'
if act == 'sigmoid':
self.prefix = self.prefix + 's'
# filename for temporary model checkpoint
modelName = self.prefix + self.data + '.h5'
# clean up model checkpoint before use
if os.path.isfile(modelName):
os.remove(modelName)
# callbacks for each epoch
callbacks = [EarlyStopping(monitor='val_loss', patience=patience, mode='min', verbose=1),
ModelCheckpoint(modelName, monitor='val_loss', mode='min', verbose=1, save_best_only=True)]
# spliting the training set into the inner-train and the inner-test set (validation set)
X_inner_train, X_inner_test, y_inner_train, y_inner_test = train_test_split(self.X_train, self.y_train, test_size=val_rate, random_state=self.seed, stratify=self.y_train)
# insert input shape into dimension list
dims.insert(0, X_inner_train.shape[1])
# create autoencoder model
self.autoencoder, self.encoder = DNN_models.autoencoder(dims, act=act, latent_act=latent_act, output_act=output_act)
self.autoencoder.summary()
if no_trn:
return
# compile model
self.autoencoder.compile(optimizer='adam', loss=loss)
# fit model
self.history = self.autoencoder.fit(X_inner_train, X_inner_train, epochs=epochs, batch_size=batch_size, callbacks=callbacks,
verbose=verbose, validation_data=(X_inner_test, X_inner_test))
# save loss progress
self.saveLossProgress()
# load best model
self.autoencoder = load_model(modelName)
layer_idx = int((len(self.autoencoder.layers) - 1) / 2)
self.encoder = Model(self.autoencoder.layers[0].input, self.autoencoder.layers[layer_idx].output)
# applying the learned encoder into the whole training and the test set.
self.X_train = self.encoder.predict(self.X_train)
self.X_test = self.encoder.predict(self.X_test)
# Variational Autoencoder
def vae(self, dims = [10], epochs=2000, batch_size=100, verbose=2, loss='mse', output_act=False, act='relu', patience=25, beta=1.0, warmup=True, warmup_rate=0.01, val_rate=0.2, no_trn=False):
# manipulating an experiment identifier in the output file
if patience != 25:
self.prefix += 'p' + str(patience) + '_'
if warmup:
self.prefix += 'w' + str(warmup_rate) + '_'
self.prefix += 'VAE'
if loss == 'binary_crossentropy':
self.prefix += 'b'
if output_act:
self.prefix += 'T'
if beta != 1:
self.prefix += 'B' + str(beta)
self.prefix += str(dims).replace(", ", "-") + '_'
if act == 'sigmoid':
self.prefix += 'sig_'
# filename for temporary model checkpoint
modelName = self.prefix + self.data + '.h5'
# clean up model checkpoint before use
if os.path.isfile(modelName):
os.remove(modelName)
# callbacks for each epoch
callbacks = [EarlyStopping(monitor='val_loss', patience=patience, mode='min', verbose=1),
ModelCheckpoint(modelName, monitor='val_loss', mode='min', verbose=1, save_best_only=True,save_weights_only=True)]
# warm-up callback
warm_up_cb = LambdaCallback(on_epoch_end=lambda epoch, logs: [warm_up(epoch)]) # , print(epoch), print(K.get_value(beta))])
# warm-up implementation
def warm_up(epoch):
val = epoch * warmup_rate
if val <= 1.0:
K.set_value(beta, val)
# add warm-up callback if requested
if warmup:
beta = K.variable(value=0.0)
callbacks.append(warm_up_cb)
# spliting the training set into the inner-train and the inner-test set (validation set)
X_inner_train, X_inner_test, y_inner_train, y_inner_test = train_test_split(self.X_train, self.y_train,
test_size=val_rate,
random_state=self.seed,
stratify=self.y_train)
# insert input shape into dimension list
dims.insert(0, X_inner_train.shape[1])
# create vae model
self.vae, self.encoder, self.decoder = DNN_models.variational_AE(dims, act=act, recon_loss=loss, output_act=output_act, beta=beta)
self.vae.summary()
if no_trn:
return
# fit
self.history = self.vae.fit(X_inner_train, epochs=epochs, batch_size=batch_size, callbacks=callbacks, verbose=verbose, validation_data=(X_inner_test, None))
# save loss progress
self.saveLossProgress()
# load best model
self.vae.load_weights(modelName)
self.encoder = self.vae.layers[1]
# applying the learned encoder into the whole training and the test set.
_, _, self.X_train = self.encoder.predict(self.X_train)
_, _, self.X_test = self.encoder.predict(self.X_test)
# Convolutional Autoencoder
def cae(self, dims = [32], epochs=2000, batch_size=100, verbose=2, loss='mse', output_act=False, act='relu', patience=25, val_rate=0.2, rf_rate = 0.1, st_rate = 0.25, no_trn=False):
# manipulating an experiment identifier in the output file
self.prefix += 'CAE'
if loss == 'binary_crossentropy':
self.prefix += 'b'
if output_act:
self.prefix += 'T'
self.prefix += str(dims).replace(", ", "-") + '_'
if act == 'sigmoid':
self.prefix += 'sig_'
# filename for temporary model checkpoint
modelName = self.prefix + self.data + '.h5'
# clean up model checkpoint before use
if os.path.isfile(modelName):
os.remove(modelName)
# callbacks for each epoch
callbacks = [EarlyStopping(monitor='val_loss', patience=patience, mode='min', verbose=1),
ModelCheckpoint(modelName, monitor='val_loss', mode='min', verbose=1, save_best_only=True,save_weights_only=True)]
# fill out blank
onesideDim = int(math.sqrt(self.X_train.shape[1])) + 1
enlargedDim = onesideDim ** 2
self.X_train = np.column_stack((self.X_train, np.zeros((self.X_train.shape[0], enlargedDim - self.X_train.shape[1]))))
self.X_test = np.column_stack((self.X_test, np.zeros((self.X_test.shape[0], enlargedDim - self.X_test.shape[1]))))
# reshape
self.X_train = np.reshape(self.X_train, (len(self.X_train), onesideDim, onesideDim, 1))
self.X_test = np.reshape(self.X_test, (len(self.X_test), onesideDim, onesideDim, 1))
self.printDataShapes()
# spliting the training set into the inner-train and the inner-test set (validation set)
X_inner_train, X_inner_test, y_inner_train, y_inner_test = train_test_split(self.X_train, self.y_train,
test_size=val_rate,
random_state=self.seed,
stratify=self.y_train)
# insert input shape into dimension list
dims.insert(0, (onesideDim, onesideDim, 1))
# create cae model
self.cae, self.encoder = DNN_models.conv_autoencoder(dims, act=act, output_act=output_act, rf_rate = rf_rate, st_rate = st_rate)
self.cae.summary()
if no_trn:
return
# compile
self.cae.compile(optimizer='adam', loss=loss)
# fit
self.history = self.cae.fit(X_inner_train, X_inner_train, epochs=epochs, batch_size=batch_size, callbacks=callbacks, verbose=verbose, validation_data=(X_inner_test, X_inner_test, None))
# save loss progress
self.saveLossProgress()
# load best model
self.cae.load_weights(modelName)
if len(self.cae.layers) % 2 == 0:
layer_idx = int((len(self.cae.layers) - 2) / 2)
else:
layer_idx = int((len(self.cae.layers) - 1) / 2)
self.encoder = Model(self.cae.layers[0].input, self.cae.layers[layer_idx].output)
# applying the learned encoder into the whole training and the test set.
self.X_train = self.encoder.predict(self.X_train)
self.X_test = self.encoder.predict(self.X_test)
self.printDataShapes()
# Classification
def classification(self, hyper_parameters, method='svm', cv=5, scoring='roc_auc', n_jobs=1, cache_size=10000):
clf_start_time = time.time()
print("# Tuning hyper-parameters")
print(self.X_train.shape, self.y_train.shape)
# Support Vector Machine
if method == 'svm':
clf = GridSearchCV(SVC(probability=True, cache_size=cache_size), hyper_parameters, cv=StratifiedKFold(cv, shuffle=True), scoring=scoring, n_jobs=n_jobs, verbose=100, )
clf.fit(self.X_train, self.y_train)
# Random Forest
if method == 'rf':
clf = GridSearchCV(RandomForestClassifier(n_jobs=-1, random_state=0), hyper_parameters, cv=StratifiedKFold(cv, shuffle=True), scoring=scoring, n_jobs=n_jobs, verbose=100)
clf.fit(self.X_train, self.y_train)
# Multi-layer Perceptron
if method == 'mlp':
model = KerasClassifier(build_fn=DNN_models.mlp_model, input_dim=self.X_train.shape[1], verbose=0, )
clf = GridSearchCV(estimator=model, param_grid=hyper_parameters, cv=StratifiedKFold(cv, shuffle=True), scoring=scoring, n_jobs=n_jobs, verbose=100)
clf.fit(self.X_train, self.y_train, batch_size=32)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
# Evaluate performance of the best model on test set
y_true, y_pred = self.y_test, clf.predict(self.X_test)
y_prob = clf.predict_proba(self.X_test)
# Performance Metrics: AUC, ACC, Recall, Precision, F1_score
metrics = [round(roc_auc_score(y_true, y_prob[:, 1]), 4),
round(accuracy_score(y_true, y_pred), 4),
round(recall_score(y_true, y_pred), 4),
round(precision_score(y_true, y_pred), 4),
round(f1_score(y_true, y_pred), 4), ]
# time stamp
metrics.append(str(datetime.datetime.now()))
# running time
metrics.append(round( (time.time() - self.t_start), 2))
# classification time
metrics.append(round( (time.time() - clf_start_time), 2))
# best hyper-parameter append
metrics.append(str(clf.best_params_))
# Write performance metrics as a file
res = pd.DataFrame([metrics], index=[self.prefix + method])
with open(self.data_dir + "results/" + self.data + "_result.txt", 'a') as f:
res.to_csv(f, header=None)
print('Accuracy metrics')
print('AUC, ACC, Recall, Precision, F1_score, time-end, runtime(sec), classfication time(sec), best hyper-parameter')
print(metrics)
def printDataShapes(self, train_only=False):
print("X_train.shape: ", self.X_train.shape)
if not train_only:
print("y_train.shape: ", self.y_train.shape)
print("X_test.shape: ", self.X_test.shape)
print("y_test.shape: ", self.y_test.shape)
# ploting loss progress over epochs
def saveLossProgress(self):
#print(self.history.history.keys())
#print(type(self.history.history['loss']))
#print(min(self.history.history['loss']))
loss_collector, loss_max_atTheEnd = self.saveLossProgress_ylim()
# save loss progress - train and val loss only
figureName = self.prefix + self.data + '_' + str(self.seed)
plt.ylim(min(loss_collector)*0.9, loss_max_atTheEnd * 2.0)
plt.plot(self.history.history['loss'])
plt.plot(self.history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train loss', 'val loss'],
loc='upper right')
plt.savefig(self.data_dir + "results/" + figureName + '.png')
plt.close()
if 'recon_loss' in self.history.history:
figureName = self.prefix + self.data + '_' + str(self.seed) + '_detailed'
plt.ylim(min(loss_collector) * 0.9, loss_max_atTheEnd * 2.0)
plt.plot(self.history.history['loss'])
plt.plot(self.history.history['val_loss'])
plt.plot(self.history.history['recon_loss'])
plt.plot(self.history.history['val_recon_loss'])
plt.plot(self.history.history['kl_loss'])
plt.plot(self.history.history['val_kl_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train loss', 'val loss', 'recon_loss', 'val recon_loss', 'kl_loss', 'val kl_loss'], loc='upper right')
plt.savefig(self.data_dir + "results/" + figureName + '.png')
plt.close()
# supporting loss plot
def saveLossProgress_ylim(self):
loss_collector = []
loss_max_atTheEnd = 0.0
for hist in self.history.history:
current = self.history.history[hist]
loss_collector += current
if current[-1] >= loss_max_atTheEnd:
loss_max_atTheEnd = current[-1]
return loss_collector, loss_max_atTheEnd
if __name__ == '__main__':
# argparse
import argparse
parser = argparse.ArgumentParser()
parser._action_groups.pop()
# load data
load_data = parser.add_argument_group('Loading data')
load_data.add_argument("-d", "--data", help="prefix of dataset to open (e.g. abundance_Cirrhosis)", type=str,
choices=["abundance_Cirrhosis", "abundance_Colorectal", "abundance_IBD",
"abundance_Obesity", "abundance_T2D", "abundance_WT2D",
"marker_Cirrhosis", "marker_Colorectal", "marker_IBD",
"marker_Obesity", "marker_T2D", "marker_WT2D",
])
load_data.add_argument("-cd", "--custom_data", help="filename for custom input data under the 'data' folder", type=str,)
load_data.add_argument("-cl", "--custom_data_labels", help="filename for custom input labels under the 'data' folder", type=str,)
load_data.add_argument("-p", "--data_dir", help="custom path for both '/data' and '/results' folders", default="")
load_data.add_argument("-dt", "--dataType", help="Specify data type for numerical values (float16, float32, float64)",
default="float64", type=str, choices=["float16", "float32", "float64"])
dtypeDict = {"float16": np.float16, "float32": np.float32, "float64": np.float64}
# experiment design
exp_design = parser.add_argument_group('Experiment design')
exp_design.add_argument("-s", "--seed", help="random seed for train and test split", type=int, default=0)
exp_design.add_argument("-k", "--kfold", help="Number of stratified folds to perform", type=int, default=5)
exp_design.add_argument("-r", "--repeat", help="repeat experiment x times by changing random seed for splitting data",
default=5, type=int)
# classification
classification = parser.add_argument_group('Classification')
classification.add_argument("-f", "--numFolds", help="The number of folds for cross-validation in the tranining set",
default=5, type=int)
classification.add_argument("-m", "--method", help="classifier(s) to use", type=str, default="all",
choices=["all", "svm", "rf", "mlp", "svm_rf"])
classification.add_argument("-sc", "--svm_cache", help="cache size for svm run", type=int, default=1000)
classification.add_argument("-t", "--numJobs",
help="The number of jobs used in parallel GridSearch. (-1: utilize all possible cores; -2: utilize all possible cores except one.)",
default=-2, type=int)
parser.add_argument("--scoring", help="Metrics used to optimize method", type=str, default='roc_auc',
choices=['roc_auc', 'accuracy', 'f1', 'recall', 'precision'])
# representation learning & dimensionality reduction algorithms
rl = parser.add_argument_group('Representation learning')
rl.add_argument("--pca", help="run PCA", action='store_true')
rl.add_argument("--rp", help="run Random Projection", action='store_true')
rl.add_argument("--ae", help="run Autoencoder or Deep Autoencoder", action='store_true')
rl.add_argument("--vae", help="run Variational Autoencoder", action='store_true')
rl.add_argument("--cae", help="run Convolutional Autoencoder", action='store_true')
rl.add_argument("--save_rep", help="write the learned representation of the training set as a file", action='store_true')
# detailed options for representation learning
## common options
common = parser.add_argument_group('Common options for representation learning (SAE,DAE,VAE,CAE)')
common.add_argument("--aeloss", help="set autoencoder reconstruction loss function", type=str,
choices=['mse', 'binary_crossentropy'], default='mse')
common.add_argument("--ae_oact", help="output layer sigmoid activation function on/off", action='store_true')
common.add_argument("-a", "--act", help="activation function for hidden layers", type=str, default='relu',
choices=['relu', 'sigmoid'])
common.add_argument("-dm", "--dims",
help="Comma-separated dimensions for deep representation learning e.g. (-dm 50,30,20)",
type=str, default='50')
common.add_argument("-e", "--max_epochs", help="Maximum epochs when training autoencoder", type=int, default=2000)
common.add_argument("-pt", "--patience",
help="The number of epochs which can be executed without the improvement in validation loss, right after the last improvement.",
type=int, default=20)
## AE & DAE only
AE = parser.add_argument_group('SAE & DAE-specific arguments')
AE.add_argument("--ae_lact", help="latent layer activation function on/off", action='store_true')
## VAE only
VAE = parser.add_argument_group('VAE-specific arguments')
VAE.add_argument("--vae_beta", help="weight of KL term", type=float, default=1.0)
VAE.add_argument("--vae_warmup", help="turn on warm up", action='store_true')
VAE.add_argument("--vae_warmup_rate", help="warm-up rate which will be multiplied by current epoch to calculate current beta", default=0.01, type=float)
## CAE only
CAE = parser.add_argument_group('CAE-specific arguments')
CAE.add_argument("--rf_rate", help="What percentage of input size will be the receptive field (kernel) size? [0,1]", type=float, default=0.1)
CAE.add_argument("--st_rate", help="What percentage of receptive field (kernel) size will be the stride size? [0,1]", type=float, default=0.25)
# other options
others = parser.add_argument_group('other optional arguments')
others.add_argument("--no_trn", help="stop before learning representation to see specified autoencoder structure", action='store_true')
others.add_argument("--no_clf", help="skip classification tasks", action='store_true')
args = parser.parse_args()
print(args)
# set labels for diseases and controls
label_dict = {
# Controls
'n': 0,
# Chirrhosis
'cirrhosis': 1,
# Colorectal Cancer
'cancer': 1, 'small_adenoma': 0,
# IBD
'ibd_ulcerative_colitis': 1, 'ibd_crohn_disease': 1,
# T2D and WT2D
't2d': 1,
# Obesity
'leaness': 0, 'obesity': 1,
}
# hyper-parameter grids for classifiers
rf_hyper_parameters = [{'n_estimators': [s for s in range(100, 1001, 200)],
'max_features': ['sqrt', 'log2'],
'min_samples_leaf': [1, 2, 3, 4, 5],
'criterion': ['gini', 'entropy']
}, ]
#svm_hyper_parameters_pasolli = [{'C': [2 ** s for s in range(-5, 16, 2)], 'kernel': ['linear']},
# {'C': [2 ** s for s in range(-5, 16, 2)], 'gamma': [2 ** s for s in range(3, -15, -2)],
# 'kernel': ['rbf']}]
svm_hyper_parameters = [{'C': [2 ** s for s in range(-5, 6, 2)], 'kernel': ['linear']},
{'C': [2 ** s for s in range(-5, 6, 2)], 'gamma': [2 ** s for s in range(3, -15, -2)],'kernel': ['rbf']}]
mlp_hyper_parameters = [{'numHiddenLayers': [1, 2, 3],
'epochs': [30, 50, 100, 200, 300],
'numUnits': [10, 30, 50, 100],
'dropout_rate': [0.1, 0.3],
},]
def loadData():
dm = None
if args.data == None and args.custom_data == None:
print("[Error] Please specify an input file. (use -h option for help)")
exit()
## provided data
elif args.data != None:
dm = DeepMicrobiome(data=args.data + '.txt', seed=args.seed, data_dir=args.data_dir)
## specify feature string
feature_string = ''
data_string = str(args.data)
if data_string.split('_')[0] == 'abundance':
feature_string = "k__"
if data_string.split('_')[0] == 'marker':
feature_string = "gi|"
## load data into the object
dm.loadData(feature_string=feature_string, label_string='disease', label_dict=label_dict,
dtype=dtypeDict[args.dataType])
## user data
elif args.custom_data != None:
### without labels - only conducting representation learning
if args.custom_data_labels == None:
dm = DeepMicrobiome(data=args.custom_data, seed=args.seed, data_dir=args.data_dir)
dm.loadCustomData(dtype=dtypeDict[args.dataType])
### with labels - conducting representation learning + classification
else:
dm = DeepMicrobiome(data=args.custom_data, seed=args.seed, data_dir=args.data_dir)
dm.loadCustomDataWithLabels(label_data=args.custom_data_labels, dtype=dtypeDict[args.dataType])
else:
exit()
return dm
# run exp function
def run_exp(seed):
# create an object and load data
## no argument founded
def run_fold(train_indices, test_indices, k: int = 1):
dm = loadData()
numRLrequired = args.pca + args.ae + args.rp + args.vae + args.cae
if numRLrequired > 1:
raise ValueError('No multiple dimensionality Reduction')
# time check after data has been loaded
dm.t_start = time.time()
# Representation learning (Dimensionality reduction)
dm.setIndices(train_indices, test_indices)
if args.pca:
dm.pca()
if args.ae:
dm.ae(dims=[int(i) for i in args.dims.split(',')], act=args.act, epochs=args.max_epochs,
loss=args.aeloss,
latent_act=args.ae_lact, output_act=args.ae_oact, patience=args.patience, no_trn=args.no_trn)
if args.vae:
dm.vae(dims=[int(i) for i in args.dims.split(',')], act=args.act, epochs=args.max_epochs,
loss=args.aeloss, output_act=args.ae_oact,
patience=25 if args.patience == 20 else args.patience, beta=args.vae_beta,
warmup=args.vae_warmup, warmup_rate=args.vae_warmup_rate, no_trn=args.no_trn)
if args.cae:
dm.cae(dims=[int(i) for i in args.dims.split(',')], act=args.act, epochs=args.max_epochs,
loss=args.aeloss, output_act=args.ae_oact,
patience=args.patience, rf_rate=args.rf_rate, st_rate=args.st_rate, no_trn=args.no_trn)
if args.rp:
dm.rp()
# write the learned representation of the training set as a file
if args.save_rep:
if numRLrequired == 1:
fold_dir = os.path.join(dm.data_dir, dm.data, "results", str(k))
if not os.path.isdir(fold_dir):
os.makedirs(fold_dir)
rep_file = os.path.join(fold_dir, dm.prefix + dm.data + f".train.csv")
| pd.DataFrame(dm.X_train, index=dm.train_indices) | pandas.DataFrame |
"""Loader of raw data into deepchem dataset after featurization.
Qest loader creates datasets of featurized molecules.
QesTS loader creates datasets of featurized reactions.
Double loader creates dataset of featurized reactants and products,
makes a prediction with Qest, and uses this to produce a dataset of
featurized reactions.
"""
from typing import Union, Type, Iterable, List, Iterator
from pathlib import Path
import logging
import time
import os
import ase
import deepchem.data
import numpy
import pandas as pd
import quickq.structure
import quickq.featurizers
logger = logging.getLogger(__name__)
class QestLoader:
"""Loads molecules and their Q values from raw data files.
Data must be stored in a folder alone as:
-XXX.extxyz
-XXX.csv
For each molecule XXX. csv must contain column "T".
Data is featurized and saved as a deepchem dataset.
Parameters
----------
featurizer : quickq.featurizers.MolFeaturizer
Featurizer to apply to each molecule
"""
def __init__(
self,
featurizer: quickq.featurizers.MolFeaturizer = None
):
self.featurizer = featurizer
return
def _get_shards(
self,
files_dir: str,
shard_size: int,
num_shards: int
) -> Iterator:
"""Shardize the files_dir directory and return a generator for the shards.
Parameters
----------
files_dir : str
directory containing the data. See class docs for details.
shard_size : int
Number of structures to load per shard
num_shards : int
number of shards from total to load.
Returns
-------
generator of shards
"""
# iterate through shards
shard_num = 1
# get a big list of the reactions
data_paths = [files_dir+str(path) for path in os.listdir(files_dir) if path.endswith('.extxyz')]
logger.info(f'Total shards: {int(len(data_paths)/shard_size)}')
for shard_indexes in range(0, len(data_paths), shard_size):
# if we haven't reached out shard limit, open the shard
if num_shards is None or shard_num <= num_shards:
shardpaths = data_paths[shard_indexes:shard_indexes+shard_size]
logger.info(f'Loading shard {shard_num}')
shard_num += 1
yield self._open_shard(shardpaths)
else:
break
def _open_shard(self, shardpaths: List[str]):
"""Open a single list of files into structures.
Parameters
----------
shardpaths : list of str
The paths to structures in this shard
Returns
-------
structures : list of Structure objects
ind : list of structure indexes
"""
structures = []
ind = []
for path in shardpaths:
no_extension = path[:-7]
idx = path.split('/')[-1][:-7]
struc = quickq.structure.Structure.load_properties(
path, csv_filename = no_extension+'.csv'
)
structures.append(struc)
ind.append(idx)
return structures, ind
def load_data(self,
files_dir: str,
shard_size: int = 500,
num_shards: int = None
):
"""Load the data into pandas dataframes.
Parameters
----------
files_dir : str
directory containing the data. See class docs for details.
shard_size : int
Number of structures to load per shard
num_shards : int,
number of shards from total to load.
Returns
-------
generator of dataframes
"""
logger.info("Loading raw samples now.")
logger.info("shard_size: %s" % str(shard_size))
if not files_dir.endswith('/'):
files_dir += '/'
def shard_generator():
for shard_num, shard in enumerate(self._get_shards(files_dir, shard_size, num_shards)):
time1 = time.time()
structures, ind = shard
# featurize the molprops
if self.featurizer is not None:
feats = self.featurizer.featurize(structures)
dfs = []
for i, struc in enumerate(structures):
# we need to expand each mol on the temperature and
# Q vector
df = pd.DataFrame({'T':list(struc.T.flatten())})
if struc.log_qpart is not None:
df['logQ'] = list(struc.log_qpart.flatten())
# try featurization if present
if self.featurizer is not None:
df[self.featurizer.name] = list(numpy.tile(feats[i], (len(df), 1)))
df['ids'] = df.apply(lambda row: ind[i]+'_'+str(int(row.name)), axis=1)
dfs.append(df)
df = pd.concat(dfs)
time2 = time.time()
logger.info("TIMING: featurizing shard %d took %0.3f s" %
(shard_num, time2 - time1))
yield df
return shard_generator()
def create_dataset(self,
files_dir: str,
data_dir: str,
shard_size: int = 500,
num_shards: int = None
) -> deepchem.data.DiskDataset:
"""Featurize raw data into deepchem dataset.
Parameters
----------
files_dir : str
directory containing the data. See class docs for details.
data_dir : str
directory name to store deepchem disk dataset
shard_size : int
Number of structures to load per shard
num_shards : int
number of shards from total to load.
"""
def shard_generator():
for df in self.load_data(files_dir=files_dir, shard_size=shard_size, num_shards=num_shards):
# add temperature to whatever feature vector was computed
feats = numpy.vstack(df[self.featurizer.name].values)
T = df['T'].values.reshape(-1,1)
X = numpy.append(feats, 1/T, axis=1)
if 'logQ' in df.columns:
y = df['logQ'].values.reshape(-1,1)
else:
y= numpy.empty(len(X))
w = numpy.ones(len(X))
ids = numpy.array(df['ids']).reshape(-1,1)
yield X, y, w, ids
return deepchem.data.DiskDataset.create_dataset(shard_generator(), data_dir, ['logQ'])
class QesTSLoader:
"""Loads structures from reactions from raw data files.
Data for each reaction must be stored in a folder as:
rxnXXX/
-rXXX.extxyz
-rXXX.csv
-pXXX.extxyz
-pXXX.csv
For each reaction XXX. csvs must contain column temperature "T" as first columns
and "log_qpart" as the reactants/products logged Q values. T values must match.
Data is featurized and saved as a deepchem dataset.
Parameters
----------
featurizer : quickq.featurizers.MolFeaturizer
Featurizer to apply to each molecule
"""
def __init__(
self,
featurizer: quickq.featurizers.MolFeaturizer = None
):
self.featurizer = featurizer
return
def _get_shards(
self,
files_dir: str,
shard_size: int,
num_shards: int
) -> Iterator:
"""Shardize the files_dir directory and return a generator for the shards.
Parameters
----------
files_dir : str
directory containing the data. See class docs for details.
shard_size : int
Number of reactions to load per shard
num_shards : int
number of shards from total to load.
Returns
-------
generator of shards
"""
# iterate through shards
shard_num = 1
# get a big list of the reactions
rxn_paths = [files_dir+str(path) for path in os.listdir(files_dir)]
logger.info(f'Total shards: {int(len(rxn_paths)/shard_size)}')
for shard_indexes in range(0, len(rxn_paths), shard_size):
# if we haven't reached out shard limit, open the shard
if num_shards is None or shard_num <= num_shards:
shardpaths = rxn_paths[shard_indexes:shard_indexes+shard_size]
logger.info(f'Loading shard {shard_num}')
shard_num += 1
yield self._open_shard(shardpaths)
else:
break
def _open_shard(self, shardpaths: List[str]):
"""Open a single list of reaction directories into structures.
Parameters
----------
shardpaths : list of str
The paths to reactions in this shard
Returns
-------
structures : list of list of Structure objects
rxns : list of reaction indexes
"""
rxns = []
structures = []
for rxn_path in shardpaths:
rxn = rxn_path.split('/')[-1][3:]
# reactant, product, ts
r = quickq.structure.Structure.load_properties(
rxn_path+'/r'+rxn+'.extxyz',
csv_filename = rxn_path+'/r'+rxn+'.csv'
)
p = quickq.structure.Structure.load_properties(
rxn_path+'/p'+rxn+'.extxyz',
csv_filename = rxn_path+'/p'+rxn+'.csv'
)
try:
ts = quickq.structure.Structure.load_properties(
rxn_path+'/ts'+rxn+'.extxyz',
csv_filename = rxn_path+'/ts'+rxn+'.csv'
)
except:
ts = None
structures.append([r, p, ts])
rxns.append(rxn)
# if we cannot produce a scaffold just continue
return structures, rxns
def load_data(self,
files_dir: str,
shard_size: int = 500,
num_shards: int = None
):
"""Load the data into pandas dataframes.
Parameters
----------
files_dir : str
directory containing the data. See class docs for details.
shard_size : int
Number of reactions to load per shard
num_shards : int
number of shards from total to load.
Returns
-------
generator of dataframes
"""
logger.info("Loading raw samples now.")
logger.info("shard_size: %s" % str(shard_size))
def shard_generator():
for shard_num, shard in enumerate(self._get_shards(files_dir, shard_size, num_shards)):
time1 = time.time()
structures, rxns = shard
# loop through each reaction, NOT each molecule
dfs=[]
for i, rxn in enumerate(rxns):
structure_set = structures[i]
# check we haev the expected sizes
assert len(structure_set) == 3, 'rxn should have 3 systems'
assert len(numpy.unique([len(mp.atoms) for mp in structure_set if mp is not None])) == 1, 'all systems not the same size'
# create dataframe of T dependant quantities
if structure_set[0].log_qpart is None or structure_set[1].log_qpart is None:
raise ValueError('Cannot use QesTS predictor without R and P partition function')
df = pd.DataFrame({'T':list(structure_set[0].T.flatten()),
'logQr':list(structure_set[0].log_qpart.flatten()),
'logQp':list(structure_set[1].log_qpart.flatten()),
})
if structure_set[2] is not None and structure_set[2].log_qpart is not None:
df['logQts'] = list(structure_set[2].log_qpart.flatten())
# get the features difference
if self.featurizer is not None:
rfeats = self.featurizer.featurize(structure_set[0])
pfeats = self.featurizer.featurize(structure_set[1])
feats = pfeats - rfeats
# add it the the df, all rows have the same value because these features on not
# temperature dependant
df[self.featurizer.name] = list(numpy.tile(feats, (len(df), 1)))
# set a row of ids
df['ids'] = df.apply(lambda row: rxns[i]+'_'+str(int(row.name)), axis=1)
dfs.append(df)
# combine all reactions in this shard
df = pd.concat(dfs)
time2 = time.time()
logger.info("TIMING: featurizing shard %d took %0.3f s" %
(shard_num, time2 - time1))
yield df
return shard_generator()
def create_dataset(self,
files_dir: str,
data_dir: str,
shard_size: int = 500,
num_shards: int = None
) -> deepchem.data.DiskDataset:
"""Featurize raw data into deepchem dataset.
Parameters
----------
files_dir : str
directory containing the data. See class docs for details.
data_dir : str
directory name to store deepchem disk dataset
shard_size : int
Number of reactions to load per shard
num_shards : int
number of shards from total to load.
"""
if not files_dir.endswith('/'):
files_dir +='/'
def shard_generator():
for df in self.load_data(files_dir=files_dir, shard_size=shard_size, num_shards=num_shards):
# add temperature to whatever feature vector was computed
feats = numpy.vstack(df[self.featurizer.name].values)
qr = df['logQr'].values.reshape(-1,1)
qp = df['logQp'].values.reshape(-1,1)
Tinv = 1/df['T'].values.reshape(-1,1)
X = numpy.concatenate([feats, qr, qp, Tinv], axis=1)
if 'logQts' in df.columns:
y = df['logQts'].values.reshape(-1,1)
else:
y= numpy.empty(len(X))
w = numpy.ones(len(X))
ids = numpy.array(df['ids']).reshape(-1,1)
yield X, y, w, ids
return deepchem.data.DiskDataset.create_dataset(shard_generator(), data_dir, ['logQts'])
class DoubleLoader:
"""Loads structures from reactions from raw data files.
Data for each reaction must be stored in a folder as:
rxnXXX/
-rXXX.extxyz
-rXXX.csv
-pXXX.extxyz
-pXXX.csv
For each reaction XXX. csvs must contain column temperature "T" as first columns.
T values must match.
Data is featurized and saved as a deepchem dataset.
Parameters
----------
featurizer : quickq.featurizers.MolFeaturizer
Featurizer to apply to each molecule
"""
def __init__(
self,
featurizer: quickq.featurizers.MolFeaturizer = None
):
self.featurizer = featurizer
import quickq.pipeline
return
def _get_shards(
self,
files_dir: str,
shard_size: int,
num_shards: int
) -> Iterator:
"""Shardize the files_dir directory and return a generator for the shards.
Parameters
----------
files_dir : str
directory containing the data. See class docs for details.
shard_size : int
Number of reactions to load per shard
num_shards : int
number of shards from total to load.
Returns
-------
generator of shards
"""
# iterate through shards
shard_num = 1
# get a big list of the reactions
rxn_paths = [files_dir+str(path) for path in os.listdir(files_dir)]
logger.info(f'Total shards: {int(len(rxn_paths)/shard_size)}')
for shard_indexes in range(0, len(rxn_paths), shard_size):
# if we haven't reached out shard limit, open the shard
if num_shards is None or shard_num <= num_shards:
shardpaths = rxn_paths[shard_indexes:shard_indexes+shard_size]
logger.info(f'Loading shard {shard_num}')
shard_num += 1
yield self._open_shard(shardpaths)
else:
break
def _open_shard(self, shardpaths: List[str]):
"""Open a single list of reaction directories into structures.
Parameters
----------
shardpaths : list of str
The paths to reactions in this shard
Returns
-------
structures : list of list of Structure objects
rxns : list of reaction indexes
"""
rxns = []
structures = []
for rxn_path in shardpaths:
rxn = rxn_path.split('/')[-1][3:]
# reactant, product, ts
r = quickq.structure.Structure.load_properties(
rxn_path+'/r'+rxn+'.extxyz',
csv_filename = rxn_path+'/r'+rxn+'.csv'
)
p = quickq.structure.Structure.load_properties(
rxn_path+'/p'+rxn+'.extxyz',
csv_filename = rxn_path+'/p'+rxn+'.csv'
)
try:
ts = quickq.structure.Structure.load_properties(
rxn_path+'/ts'+rxn+'.extxyz',
csv_filename = rxn_path+'/ts'+rxn+'.csv'
)
except:
ts = None
structures.append([r, p, ts])
rxns.append(rxn)
# if we cannot produce a scaffold just continue
return structures, rxns
def load_data(self,
files_dir: str,
shard_size: int = 500,
num_shards: int = None
):
"""Load the reactant and product data, make predictions, then give dataframes
Parameters
----------
files_dir : str
directory containing the data. See class docs for details.
shard_size : int
Number of reactions to load per shard
num_shards : int
number of shards from total to load.
Returns
-------
generator of dataframes
"""
logger.info("Loading raw samples now.")
logger.info("shard_size: %s" % str(shard_size))
def shard_generator():
for shard_num, shard in enumerate(self._get_shards(files_dir, shard_size, num_shards)):
time1 = time.time()
structures, rxns = shard
# loop through each reaction, NOT each molecule
dfs=[]
for i, rxn in enumerate(rxns):
structure_set = structures[i]
# check we haev the expected sizes
assert len(structure_set) == 3, 'rxn should have 3 systems'
assert len(numpy.unique([len(mp.atoms) for mp in structure_set if mp is not None])) == 1, 'all systems not the same size'
# create dataframe of T dependant quantities
df = pd.DataFrame({'T':list(structure_set[0].T.flatten()),
})
if structure_set[2] is not None and structure_set[2].log_qpart is not None:
df['logQts'] = list(structure_set[2].log_qpart.flatten())
# get the features difference
if self.featurizer is not None:
rfeats = self.featurizer.featurize(structure_set[0])
pfeats = self.featurizer.featurize(structure_set[1])
feats = pfeats - rfeats
# add it the the df, all rows have the same value because these features on not
# temperature dependant
df[self.featurizer.name] = list(numpy.tile(feats, (len(df), 1)))
# predict the Qs with qest
rfeats = numpy.concatenate([numpy.tile(rfeats, (len(df), 1)), (1/df['T'].values).reshape(-1,1)], axis=1)
r_dataset = deepchem.data.NumpyDataset(rfeats)
logQr = quickq.pipeline.predict_qest(r_dataset)
pfeats = numpy.concatenate([numpy.tile(pfeats, (len(df), 1)), (1/df['T'].values).reshape(-1,1)], axis=1)
p_dataset = deepchem.data.NumpyDataset(pfeats)
logQp = quickq.pipeline.predict_qest(p_dataset)
df['logQr'] = logQr
df['logQp'] = logQp
# set a row of ids
df['ids'] = df.apply(lambda row: rxns[i]+'_'+str(int(row.name)), axis=1)
dfs.append(df)
# combine all reactions in this shard
df = | pd.concat(dfs) | pandas.concat |
#!/usr/bin/env python3
# author : <NAME>
# date : 10.01.2019
# license : BSD-3
# ==============================================================================
import os.path
import sys
import time
import argparse
import numpy as np
import pandas as pd
import sqlite3 as sql
from collections import defaultdict
from pmapper.pharmacophore import Pharmacophore
def create_parser():
parser = argparse.ArgumentParser(
description='Iteratively create ligand-based pharmacophore models.')
parser.add_argument('-adb', '--in_active_database', metavar='active.db', required=True,
help='input SQL database file with active compounds')
parser.add_argument('-idb', '--in_inactive_database', metavar='inactive.db', required=True,
help='input SQL database file with active compounds')
parser.add_argument('-ats', '--in_active_trainset', metavar='active_training_set.txt', required=True,
help='txt file with information about active models: '
'model, hash, stereo, nact, ninact, nact/ninact, conf_id, feature_ids')
parser.add_argument('-its', '--in_inactive_trainset', metavar='inactive_training_set.txt', required=True,
help='txt file with information about active models: '
'model, hash, stereo, nact, ninact, nact/ninact, conf_id, feature_ids')
parser.add_argument('-o', '--output_path', metavar='output/path', required=False, default=None,
help='output path to the models of pharmacophores. '
'If None, the path will be generated automatically.')
parser.add_argument('-tol', '--tolerance', default=0,
help='tolerance volume for the calculation of the stereo sign. If the volume of the '
'tetrahedron created by four points less than tolerance then those points are considered '
'lying on the same plane (flat; stereo sign is 0).')
parser.add_argument('-l', '--lower', default=4,
help='number of features of input models')
return parser
def _keep_best_models(df, df_sub_act, df_sub_inact, df_ph_act, df_ph_inact, save_files):
df_sub_act = pd.merge(df_sub_act, df[['hash']], on='hash', how='inner').reset_index(drop=True)
df_ph_act = pd.merge(df_ph_act, df_sub_act[['conf_id']].drop_duplicates(subset=['conf_id']), on='conf_id',
how='inner').reset_index(drop=True)
if not df_ph_inact.empty:
df_sub_inact = pd.merge(df_sub_inact, df[['hash']], on='hash', how='inner').reset_index(drop=True)
df_ph_inact = pd.merge(df_ph_inact, df_sub_inact[['conf_id']].drop_duplicates(subset=['conf_id']),
on='conf_id', how='inner').reset_index(drop=True)
if save_files:
path_internal = os.path.join(save_files[0], 'internal_statistics_{}_pharm{}.txt'.format(save_files[1], save_files[2]))
path_sub_act = os.path.join(save_files[0], 'ph_active_{}_pharm{}.txt'.format(save_files[1], save_files[2]))
df.to_csv(path_internal, index=None, sep='\t')
df_sub_act.to_csv(path_sub_act, index=None, sep='\t')
if not df_sub_inact.empty:
path_sub_inact = os.path.join(save_files[0], 'ph_inactive_{}_pharm{}.txt'.format(save_files[1], save_files[2]))
df_sub_inact.to_csv(path_sub_inact, index=None, sep='\t')
return df_sub_act, df_sub_inact, df_ph_act, df_ph_inact
# generator return mol_name, conf_id, hash, labels
def _gen_quadruplets(df_ph, lower, tol):
for mol_name, conf_id, pharm in zip(df_ph['mol_name'], df_ph['conf_id'], df_ph['pharm']):
if pharm:
for hash, labels in pharm.iterate_pharm(lower, lower, tol):
yield mol_name, conf_id, hash, labels
# generator return mol_name, conf_id, hash, labels
def _plus_one_feature(df_ph, df_sub):
for mol_name, conf_id, pharm in zip(df_ph['mol_name'], df_ph['conf_id'], df_ph['pharm']):
list_ids = df_sub[df_sub['conf_id'] == conf_id]
list_ids = [tuple(map(int, l.split(','))) for l in list_ids['feature_ids']]
if pharm:
for hash, labels in pharm.iterate_pharm1(list_ids):
yield mol_name, conf_id, hash, labels
# return type DataFrame: columns=['hash', 'count', 'mol_name', 'conf_id', 'feature_ids']
def gen_models(def_generator, df_0):
dct = defaultdict(list)
for mol_name, conf_id, hash, labels in def_generator:
dct['hash'].append(hash)
dct['mol_name'].append(mol_name)
dct['conf_id'].append(conf_id)
dct['feature_ids'].append(','.join(map(str, labels)))
df = pd.DataFrame(dct)
if df.empty:
return df_0, df
count_df = df.drop_duplicates(subset=['mol_name', 'hash'])
count_df = count_df.groupby(['hash'], sort=True).size().reset_index(name='count')
df = pd.merge(df, count_df, on='hash', how='right')
df = df.sort_values(by=['count', 'hash'], ascending=False)
return df_0, df[['hash', 'count', 'mol_name', 'conf_id', 'feature_ids']]
# return DataFrame of pharmacophore representation molecules: columns=['mol_name', 'conf_id', 'pharm']
def load_pharmacophores(in_db, in_training_set):
mol_names = [name.strip().split('\t')[1] for name in open(in_training_set).readlines()]
confs_pharm = defaultdict(list)
with sql.connect(in_db) as con:
cur = con.cursor()
cur.execute("SELECT bin_step FROM settings")
db_bin_step = cur.fetchone()[0]
for mol_name in mol_names:
cur.execute("SELECT conf_id, feature_label, x, y, z FROM feature_coords WHERE conf_id IN "
"(SELECT conf_id from conformers WHERE mol_name = ?)", (mol_name,))
res = cur.fetchall()
confs = defaultdict(list)
for r in res:
confs[r[0]].append((r[1], tuple(r[2:]))) # dict(conf_id: (feature_label, x, y, z))
for conf_id, coord in confs.items():
p = Pharmacophore(bin_step=db_bin_step, cached=True)
p.load_from_feature_coords(coord)
confs_pharm['mol_name'].append(mol_name)
confs_pharm['conf_id'].append(conf_id)
confs_pharm['pharm'].append(p)
return pd.DataFrame(confs_pharm)
# return type DataFrame
def strategy_extract_trainset(df, clust_strategy):
if clust_strategy == 2:
df = df.sort_values(by=['recall', 'F2', 'F05'], ascending=False)
df = df.reset_index(drop=True)
if df['F2'].iloc[0] == 1.0:
df = df[(df['recall'] == 1.0) & (df['F2'] == 1.0)]
elif df[df['F2'] >= 0.8].shape[0] <= 100:
df = df[(df['recall'] == 1) & (df['F2'] >= 0.8)]
else:
df = df[(df['recall'] == 1) & (df['F2'] >= df['F2'].loc[100])]
elif clust_strategy == 1:
df = df.sort_values(by=['recall', 'F05', 'F2'], ascending=False)
df = df.reset_index(drop=True)
if df[df['F05'] >= 0.8].shape[0] <= 100:
df = df[df['F05'] >= 0.8]
else:
df = df[df['F05'] >= df['F05'].loc[100]]
return df
# return type DataFrame: columns=['hash', 'TP', 'FP', 'precision', 'recall', 'F2', 'F05']
def calc_internal_stat(df_act, df_inact, act_trainset, clust_strategy):
df_act = df_act.rename(columns={'count': 'TP'})
if not df_inact.empty:
df_inact = df_inact[['hash', 'count']].drop_duplicates(subset=['hash'])
df_inact = df_inact.rename(columns={'count': 'FP'})
df = pd.merge(df_act, df_inact, on=['hash'], how='left')
df.loc[df['FP'].isnull(), 'FP'] = 0
else:
df = df_act
df['FP'] = [0 for x in range(df.shape[0])]
df['precision'] = round(df['TP'] / (df['TP'] + df['FP']), 3)
df['recall'] = round(df['TP'] / (len(open(act_trainset).readlines())), 3)
df['F2'] = round(5 * ((df['precision'] * df['recall']) / (4 * df['precision'] + df['recall'])), 3)
df['F05'] = round(1.25 * ((df['precision'] * df['recall']) / (0.25 * df['precision'] + df['recall'])), 3)
df['FP'] = df['FP'].astype(np.int64)
df = df.sort_values(by=['recall', 'F2', 'F05'], ascending=False)
df = df[['hash', 'TP', 'FP', 'precision', 'recall', 'F2', 'F05']]
# difference ways to check out the best models
df = strategy_extract_trainset(df, clust_strategy)
return df
# return None
def save_models_pma(df_ph, df_sub_act, path_pma, cluster_num, num_ids):
time_start = time.time()
df_sub_act = df_sub_act.drop_duplicates(subset=['hash'])
df_ph = | pd.merge(df_sub_act, df_ph, on=['mol_name', 'conf_id'], how='inner') | pandas.merge |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 18 13:54:04 2019
@author: Tobias
"""
import os
import pickle
import numpy as np
import datetime as dt
import pandas as pd
import pandas_datareader as web
import sys
import pdb
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
#Import scikit-learn metrics module for accuracy calculation
from sklearn import metrics
# Import train_test_split function
from sklearn.model_selection import train_test_split
#Import Random Forest Model
from sklearn.ensemble import RandomForestClassifier
class predictor:
'''
'''
# Class attributes
# Return data
data = []
# Publishing dates
dates = []
# Publishing dates, 10-K
dates_k = []
# Publishing dates, 10-Q
dates_q = []
# Company name
name = ''
# Date directory
date_dir = ''
# Company ticker
ticker = ''
# Start date 10-K
start_k = []
# Start date 10-Q
start_q = []
# Similarity data
sim_data = []
# Path for LDA models
mod_path = ''
# Years
years = []
# Years quarterly
years_q = []
# Years annualy
years_k = []
# First valid period with a previous 10-K to use as a benchmark
first_valid = []
# Number of topics
num_top = []
# Data matrix
X = []
# Response matrix
y = pd.DataFrame()
# Days delayed for returns
days = []
# Accuracy
accuarcy = []
# Score
score = []
# Return data matrix
X_ret = []
def __init__(self,name=None,ticker=None,date_dir=None,f_type=None,
algorithm=None, mod_path=None, num_top=None,days=None):
'''
Class constructor.
Optional inputs:
- name : The EDGAR name of the company. Default is empty.
- ticker : The company ticker. Default is empty
- date_dir : The directory for the publishing dates. Default is
empty.
- algorithm : The type of model algorithm. Default is empty.
* Supported algorithms: random forest {'rf'}.
- f_type : The filing type. Default is 10-K
- num_top : The number of topics for the LDA model
Output:
- obj: An object of class predictor.
Examples:
name = '<NAME>'
ticker = 'BRK-A'
date_dir = 'C:\\Users\\Tobias\\Dropbox\\Master\\U.S. Data\\Dates Reports U.S'
mod_path = 'C:\\Users\\Tobias\\Dropbox\\Master\\U.S. Data\\Model U.S'
num_top = 40
days = 7
'''
if name:
self.name = name
if f_type:
self.f_type = f_type
if date_dir:
self.date_dir = date_dir
if ticker:
self.ticker = ticker
if mod_path:
self.mod_path = mod_path
if num_top:
self.num_top = num_top
if days:
self.days = days
def get_return_data(self):
'''
A method for fetching return data from yahoo
Input:
- self : An object of class predictor
Output:
- self : -"-
'''
start ='1996-01-01'
end ='2019-31-05'
mes=('Checking for existing return data...')
sys.stdout.write('\r'+mes)
print('Fetching return data')
if not self.ticker or not self.name:
print('Object cannot be empty')
return
if self.days: delta = self.days
else: delta=0
ticker = self.ticker
name = self.name
directory_q = self.date_dir + '\\10-Q\\'+ name
os.chdir(directory_q)
start = dt.datetime(1994,1, 1)
end = dt.datetime(2019,4,16)
prices = web.get_data_yahoo(ticker,start=start,end=end)[['Open','Close']]
with open(os.listdir()[0],'rb') as file:
dates_q = pickle.load(file)
directory_k = self.date_dir + '\\10-K\\' + name
os.chdir(directory_k)
with open(os.listdir()[0],'rb') as file:
dates_a = pickle.load(file)
dates = dates_q + dates_a
publishingdates = [dt.datetime.strptime(date, '%Y-%m-%d').date() for date in dates]
stockdates = [index.strftime('%Y-%m-%d') for index in prices.index]
stockdates = [dt.datetime.strptime(date, '%Y-%m-%d').date() for date in stockdates]
# Find all indicies where there is a report published
allDates = stockdates
pOp = prices.Open
pCl = prices.Close
dataOut = np.zeros(shape=(len(publishingdates),2))
publishingdates.sort()
p_delta = [d+dt.timedelta(days=delta) for d in publishingdates]
l = 0
if delta>0:
for idx, (t_1,t_2) in enumerate(zip(publishingdates,p_delta)):
f_1 = [date-t_1 for date in allDates]
f_2 = [date-t_2 for date in allDates]
arr = []
arr_2 = []
for diff_1,diff_2 in zip(f_1,f_2):
arr = np.append(arr,diff_1.days)
arr_2 = np.append(arr_2,diff_2.days)
zeroInd = np.where(arr == 0)[0]
zeroInd_2 = np.where(arr_2 == 0)[0]
if not zeroInd.size > 0:
l = l + 1
continue
if not zeroInd_2.size > 0:
l = l + 1
continue
ret = np.log(pCl[zeroInd_2]/pOp[zeroInd][0])
dataOut[idx,0] = ret
if ret[0]>-1E-8:
dataOut[idx,1] = 1
else:
dataOut[idx,1] = -1
else:
for idx, tempDate in enumerate(publishingdates):
f = [date-tempDate for date in allDates]
arr = []
for diff in f:
arr = np.append(arr,diff.days)
zeroInd = np.where(arr == 0)[0]
if not zeroInd.size > 0:
l = l + 1
continue
ret = np.log(pCl[zeroInd]/pOp[zeroInd])
dataOut[idx,0] = ret
if ret[0]>-1E-8:
dataOut[idx,1] = 1
else:
dataOut[idx,1] = -1
years = [y.split('-')[0] for y in dates]
years_q = [y.split('-')[0] for y in dates_q]
years_k = [y.split('-')[0] for y in dates_a]
dates.sort()
dates_a.sort()
dates_q.sort()
years_k.sort()
years_q.sort()
years.sort()
self.data = dataOut
self.dates = dates
self.dates_k = dates_a
self.dates_q = dates_q
self.start_k = dates_a[0]
self.start_q = dates_q[0]
self.years = years
self.years_q = years_q
self.years_k = years_k
resp = pd.DataFrame(dataOut)
resp['Datetime'] = publishingdates
resp = resp.set_index('Datetime')
self.y = resp
return
def get_similarity(self,numWords=None):
'''
'''
if not numWords:
numWords = 50 # Set
numTop = 40 # Set
resT = np.empty((0,numTop))
res = np.empty((0,numTop))
dates = self.dates
y_q = self.years_q
y_k = self.years_k
d_k = self.dates_k
# Identify the first valid year to start estimation
for t in y_k[1:]:
num_inds = len([i for i,year in enumerate(y_q) if year in t])
if num_inds == 3: break
# Identify the last valid year to end estimation
last_ind = 4*(int(y_k[-2])-int(t)+1)
# t now yields the first valid year to start estimation. Find the
# corresponding index
start_ind = y_q.index(t)
dates = dates[1+start_ind:start_ind+last_ind+1]
temp = self.y
temp = temp.iloc[1+start_ind:start_ind+last_ind+1]
self.y = temp
# Find appropriate starting point for 10-K
start_y_ind = y_k.index(str(int(t)-1))
year_dates = d_k[start_y_ind:]
if numTop:
mod_path_k = self.mod_path+'\\10-K_'+ str(numTop) +'\\'+self.name
mod_path_q = self.mod_path+'\\10-Q_'+ str(numTop) +'\\'+self.name
else:
mod_path_k = self.mod_path+'\\10-K_40\\'+self.name
mod_path_q = self.mod_path+'\\10-K_40\\'+self.name
for idx, year in enumerate(y_k[:-1]):
os.chdir(mod_path_k) #self.mod_path+'\\10-K_40\\'+self.name #modelpath_10K
# ann_mod = gensim.models.ldamodel.LdaModel.load(ann[0])
ann_mod_bench=gensim.models.ldamodel.LdaModel.load(year_dates[idx])
d = [d for d in dates if str(int(year)+1) in d]
for quart in d:
if quart in year_dates:
os.chdir(mod_path_k)#self.mod_path+'\\10-K_40\\'+self.name
lda_model_q = gensim.models.ldamodel.LdaModel.load(quart)
else:
os.chdir(mod_path_q)#self.mod_path+'\\10-Q_40\\'+self.name
lda_model_q = gensim.models.ldamodel.LdaModel.load(quart)
mdiff, annotation = lda_model_q.diff(ann_mod_bench,
distance='jaccard', num_words=numWords)
resT = np.empty((0,numTop))
for ii in range(numTop):
g = mdiff[:,ii].tolist()
min_value = min(g)
min_index = g.index(min_value)
# Assign score
resT = np.append(resT, numTop-min_index)
res = np.append(res,resT)
del resT
length = len(dates)
resOut = np.reshape(res,(length,numTop))
X = | pd.DataFrame(resOut) | pandas.DataFrame |
# coding: utf-8
# ### Import
# In[5]:
import numpy as np
import pandas as pd
import xgboost
import xgboost as xgb
from xgboost.sklearn import XGBClassifier
from sklearn.metrics import *
from IPython.core.display import Image
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import confusion_matrix
from sklearn.tree import export_graphviz
import io
from sklearn.preprocessing import Imputer
import pydot
from sklearn import preprocessing
import lightgbm as lgb
from scipy.stats import mode
import re
from datetime import datetime
from lightgbm import plot_importance
import warnings
warnings.filterwarnings('ignore')
# ---
# ### Date read
# In[7]:
age_gender_bkts = pd.read_csv("age_gender_bkts.csv")
countries = pd.read_csv("countries.csv")
sessions = pd.read_csv("sessions.csv")
test_users = pd.read_csv("test_users.csv")
train_users_2 = pd.read_csv("train_users_2.csv")
sample_submission_NDF = pd.read_csv("sample_submission_NDF.csv")
# ---
# ### Date setting
# In[8]:
def pre_age_set_data(train_users_2, test_users):
check = pd.concat([train_users_2, test_users], ignore_index=True)
check["first_affiliate_tracked"] = check["first_affiliate_tracked"].replace(np.nan, "untracked")
check["date_account_created"] = pd.to_datetime(check["date_account_created"], format = "%Y-%m-%d")
check["timestamp_first_active"] = pd.to_datetime(check["timestamp_first_active"], format="%Y%m%d%H%M%S")
s_lag = check["timestamp_first_active"] - check["date_account_created"]
check["lag_days"] = s_lag.apply(lambda x : -1 * x.days)
check["lag_seconds"] = s_lag.apply(lambda x : x.seconds)
s_all_check = (check['age'] < 120) & (check['gender'] != '-unknown-')
check['faithless_sign'] = s_all_check.apply(lambda x : 0 if x == True else 1)
pre_age = check.drop("date_first_booking",axis = 1)
pre_age['date_account_created_y'] = pre_age["date_account_created"].apply(lambda x : x.year)
pre_age['date_account_created_m'] = pre_age["date_account_created"].apply(lambda x : x.month)
pre_age['date_account_created_d'] = pre_age["date_account_created"].apply(lambda x : x.day)
pre_age['timestamp_first_active_y'] = pre_age["timestamp_first_active"].apply(lambda x : x.year)
pre_age['timestamp_first_active_m'] = pre_age["timestamp_first_active"].apply(lambda x : x.month)
pre_age['timestamp_first_active_d'] = pre_age["timestamp_first_active"].apply(lambda x : x.day)
pre_age = pre_age.drop("date_account_created" , axis=1)
pre_age = pre_age.drop("timestamp_first_active" , axis=1)
return check, pre_age
# ---
# # Gender
# ### Gender predict data set
# In[11]:
def pre_gen_predict_data(pre_age):
pre_gen_sub = pre_age.filter(items = ['age', 'country_destination', 'id', 'gender'])
pre_gen_dum = pre_age.filter(items = ['affiliate_channel', 'affiliate_provider',
'first_affiliate_tracked', 'first_browser', 'first_device_type',
'language', 'signup_app', 'signup_flow',
'signup_method', 'date_account_created_y', 'date_account_created_m',
'date_account_created_d', 'timestamp_first_active_y',
'timestamp_first_active_m', 'timestamp_first_active_d',"lag_days","lag_seconds",
"faithless_sign"])
pre_gen_dum = pd.get_dummies(pre_gen_dum)
pre_gen_dum_con = pd.concat([pre_gen_dum, pre_gen_sub], axis=1)
pre_gen_dum_con["gender"] = pre_gen_dum_con["gender"].replace(['-unknown-', 'OTHER'], np.nan)
pre_gen_mission = pre_gen_dum_con[pre_gen_dum_con["gender"].isna()].reset_index()
pre_gen_train = pre_gen_dum_con[pre_gen_dum_con["gender"].notna()].reset_index()
pre_gen_mission_test = pre_gen_mission.drop("index", axis=1)
pre_gen_train_test = pre_gen_train.drop("index", axis=1)
pre_gen_mission_test_drop = pre_gen_mission_test.drop(['id', 'age', 'country_destination', "gender"], axis=1)
pre_gen_train_test_drop = pre_gen_train_test.drop(['id', 'age', 'country_destination', "gender"], axis=1)
return pre_gen_mission_test, pre_gen_train_test, pre_gen_mission, pre_gen_train, pre_gen_mission_test_drop, pre_gen_train_test_drop
# ### Gender predict LightGBM
# In[12]:
def predict_gen_LightGBM(pre_gen_train_test_drop, pre_gen_train_test, pre_gen_mission_test_drop):
X = pre_gen_train_test_drop
y = pre_gen_train_test["gender"]
model_gen_lgb = lgb.LGBMClassifier(nthread=3)
model_gen_lgb.fit(X,y)
print(classification_report(y, model_gen_lgb.predict(pre_gen_train_test_drop)))
model_gen_lgb = model_gen_lgb.predict(pre_gen_mission_test_drop)
model_gen_lgb = pd.DataFrame(model_gen_lgb)
return model_gen_lgb
# ### Gender predict data make CSV
# ---
# # Age
# ### Age predict data set
# In[13]:
def pre_age_predict_data(pre_age):
pre_age['age'] = pre_age['age'].fillna(-1)
pre_age_sub = pre_age.filter(items = ['age', 'country_destination','id'])
pre_age_dum = pre_age.filter(items = ['affiliate_channel', 'affiliate_provider',
'first_affiliate_tracked', 'first_browser', 'first_device_type',
'language', 'signup_app', 'signup_flow',
'signup_method', 'date_account_created_y', 'date_account_created_m',
'date_account_created_d', 'timestamp_first_active_y',
'timestamp_first_active_m', 'timestamp_first_active_d',"lag_days","lag_seconds",
"faithless_sign"])
pre_age_dum = pd.get_dummies(pre_age_dum)
pre_age_dum_con = | pd.concat([pre_age_dum, pre_age_sub], axis=1) | pandas.concat |
from abc import ABC, abstractmethod
from pclima.http_util import PClimaURL
import json
import pandas as pd
import requests
import io
import xarray as xr
import numpy as np
class RequestFactory:
def get_order(self, type_of_order,token,json):
if type_of_order == "NetCDF":
return Netcdf(token, json)
if type_of_order == "CSV":
return Csv(token, json)
if type_of_order == "CSVPontos":
return CSVPontos(token, json)
if type_of_order == "CSVPontosT":
return CSVPontosT(token, json)
if type_of_order == "JSON":
return JSON(token, json)
def save(self, type_of_order,content,file):
if type_of_order == "NetCDF":
saveNetcdf(content,file)
if type_of_order == "CSV":
saveCSV(content,file)
if type_of_order == "CSVPontos":
saveCSV(content,file)
if type_of_order == "CSVPontosT":
saveCSV(content,file)
if type_of_order == "JSON":
saveJSON(content,file)
class Product(ABC):
@abstractmethod
def download(self):
pass
class Netcdf(Product):
def __init__(self, token, json):
self.token = token
self.json = json
def download(self):
c1 = PClimaURL()
url = c1.get_url(self.json)
(anoInicial,anoFinal)=verificaIntervaloAnos(self.json)
if (anoInicial and anoFinal):
return (download_toNetCDFInterval(url, self.token,anoInicial,anoFinal))
else:
return (download_toNetCDF(url, self.token))
def __str__(self):
return self.token+" ["+str(self.json)+"] "
class Csv(Product):
def __init__(self, token, json):
self.token = token
self.json = json
def download(self):
c1 = PClimaURL()
url = c1.get_url(self.json)
(anoInicial,anoFinal)=verificaIntervaloAnos(self.json)
if (anoInicial and anoFinal):
return (download_toCSVInterval(url, self.token,anoInicial,anoFinal))
else:
return (download_toCSV(url, self.token))
class JSON(Product):
def __init__(self, token, json):
self.token = token
self.json = json
def download(self):
c1 = PClimaURL()
url = c1.get_url(self.json)
(anoInicial,anoFinal)=verificaIntervaloAnos(self.json)
if (anoInicial and anoFinal):
return (download_toCSVInterval(url, self.token,anoInicial,anoFinal))
else:
return (download_toJSON(url, self.token))
class CSVPontos(Product):
def __init__(self, token, json):
self.token = token
self.json = json
def download(self):
c1 = PClimaURL()
url = c1.get_url(self.json)
(anoInicial,anoFinal)=verificaIntervaloAnos(self.json)
if (anoInicial and anoFinal):
return (download_toCSVPontosInterval(url, self.token,anoInicial,anoFinal))
else:
print("download_toCSVPontos")
return (download_toCSVPontos(url, self.token))
class CSVPontosT(Product):
def __init__(self, token, json):
self.token = token
self.json = json
def download(self):
c1 = PClimaURL()
url = c1.get_url(self.json)
(anoInicial,anoFinal)=verificaIntervaloAnos(self.json)
if (anoInicial and anoFinal):
return (download_toCSVPontosTInterval(url, self.token,anoInicial,anoFinal))
else:
return (download_toCSVPontosT(url, self.token))
def __str__(self):
return self.token+" ["+str(self.json)+"]"
def download_toCSV( url, token):
r=downloadData(url, token)
rawData = pd.read_csv(io.StringIO(r.content.decode('utf-8')))
return rawData
def download_toJSON( url, token):
r=downloadData(url, token)
rawData = pd.read_json(io.StringIO(r.content.decode('utf-8')))
return rawData
def download_toCSVPontos( url, token):
r=downloadData(url, token)
rawData = pd.read_csv(io.StringIO(r.content.decode('utf-8')))
return rawData
def download_toCSVPontosT( url, token):
r=downloadData(url, token)
rawData = pd.read_csv(io.StringIO(r.content.decode('utf-8')))
return rawData
def download_toNetCDF(url, token):
r=downloadData(url, token)
return xr.open_dataset(r.content)
def saveNetcdf(content,file):
content.to_netcdf(file)
def saveCSV(content,file):
print("save CSV")
content.to_csv(file)
def saveJSON(content,file):
print("save JSON")
content.to_json(file)
def downloadData(url, token):
headers = { 'Authorization' : 'Token ' + token }
r = requests.get(url, headers=headers, verify=False)
if (r.status_code != requests.codes.ok):
print("Arquivo ou URL não encontrado. Favor verificar JSON de entrada")
raise SystemExit
return r
def verificaIntervaloAnos(json):
anoInicial=""
anoFinal=""
try:
(anoInicial, anoFinal) = json["ano"].split("-")
except:
pass
return (anoInicial, anoFinal)
def download_toNetCDFInterval(url,token,anoInicial,anoFinal):
mergedAno=0
ds=download_toNetCDF(url[:-9]+str(anoInicial), token)
for ano in range(int(anoInicial)+1, int(anoFinal)+1):
ds1=download_toNetCDF(url[:-9]+str(ano), token)
if (mergedAno==0):
dsmerged = xr.merge([ds,ds1])
else:
dsmerged = xr.merge([dsmerged,ds1])
mergedAno=1
if (ano==int(anoFinal)):
print("ano sair for return")
return (dsmerged)
def download_toCSVInterval(url,token,anoInicial,anoFinal):
df = pd.DataFrame()
for ano in range(int(anoInicial), int(anoFinal)+1):
print(ano)
df1=(download_toCSV(url[:-9]+str(ano), token))
frames = [df, df1]
df = pd.concat(frames)
df.reset_index(drop=True, inplace=True)
return (df)
def download_toJSONInterval(url,token,anoInicial,anoFinal):
df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
from cabi.cabi import _get_station_dataframe
if __name__ == "__main__":
df = _get_station_dataframe()
datalist = df.iloc[0, 0]
df = | pd.DataFrame(datalist) | pandas.DataFrame |
import multiprocessing.dummy as mp
import time
from exceptions import TestException
from functools import wraps
from sys import stdout, stderr
import numpy as np
import pandas as pd
import tweepy
from sqlalchemy.exc import IntegrityError, ProgrammingError
from database_handler import DataBaseHandler
from helpers import friends_details_dtypes
from setup import FileImport
# mp.set_start_method('spawn')
def get_latest_tweets(user_id, connection, fields=['lang', 'full_text']):
statuses = connection.api.user_timeline(user_id=user_id, count=200, tweet_mode='extended')
result = pd.DataFrame(columns=fields)
for status in statuses:
result = result.append({field: getattr(status, field) for field in fields},
ignore_index=True)
return result
def get_fraction_of_tweets_in_language(tweets):
"""Returns fraction of languages in a tweet dataframe as a dictionary
Args:
tweets (pandas.DataFrame): Tweet DataFrame as returned by `get_latest_tweets`
Returns:
language_fractions (dict): {languagecode (str): fraction (float)}
"""
language_fractions = tweets['lang'].value_counts(normalize=True)
language_fractions = language_fractions.to_dict()
return language_fractions
# TODO: there might be a better way to drop columns that we don't want than flatten everything
# and removing the columns thereafter.
def flatten_json(y: dict, columns: list, sep: str = "_",
nonetype: dict = {'date': None, 'num': None, 'str': None, 'bool': None}):
'''
Flattens nested dictionaries.
adapted from: https://medium.com/@amirziai/flattening-json-objects-in-python-f5343c794b10
Attributes:
y (dict): Nested dictionary to be flattened.
columns (list of str): Dictionary keys that should not be flattened.
sep (str): Separator for new dictionary keys of nested structures.
nonetype (Value): specify the value that should be used if a key's value is None
'''
out = {}
def flatten(x, name=''):
if type(x) is dict and str(name[:-1]) not in columns: # don't flatten nested fields
for a in x:
flatten(x[a], name + a + sep)
elif type(x) is list and str(name[:-1]) not in columns: # same
i = 0
for a in x:
flatten(a, name + str(i) + sep)
i += 1
elif type(x) is list and str(name[:-1]) in columns:
out[str(name[:-1])] = str(x) # Must be str so that nested lists are written to db
elif type(x) is dict and str(name[:-1]) in columns:
out[str(name[:-1])] = str(x) # Same here
elif type(x) is bool and str(name[:-1]) in columns:
out[str(name[:-1])] = int(x) # Same here
elif x is None and str(name[:-1]) in columns:
if friends_details_dtypes[str(name[:-1])] == np.datetime64:
out[str(name[:-1])] = nonetype["date"]
elif friends_details_dtypes[str(name[:-1])] == np.int64:
out[str(name[:-1])] = nonetype["num"]
elif friends_details_dtypes[str(name[:-1])] == str:
out[str(name[:-1])] = nonetype["str"]
elif friends_details_dtypes[str(name[:-1])] == np.int8:
out[str(name[:-1])] = nonetype["bool"]
else:
raise NotImplementedError("twitter user_detail does not have a supported"
"corresponding data type")
else:
out[str(name[:-1])] = x
flatten(y)
return out
# Decorator function for re-executing x times (with exponentially developing
# waiting times)
def retry_x_times(x):
def retry_decorator(func):
@wraps(func)
def func_wrapper(*args, **kwargs):
try:
if kwargs['fail'] is True:
# if we're testing fails:
return func(*args, **kwargs)
except KeyError:
try:
if kwargs['test_fail'] is True:
return func(*args, **kwargs)
except KeyError:
pass
i = 0
if 'restart' in kwargs:
restart = kwargs['restart']
if 'retries' in kwargs:
retries = kwargs['retries']
else:
retries = x
for i in range(retries - 1):
try:
if 'restart' in kwargs:
kwargs['restart'] = restart
return func(*args, **kwargs)
except Exception as e:
restart = True
waiting_time = 2**i
stdout.write(f"Encountered exception in {func.__name__}{args, kwargs}.\n{e}")
stdout.write(f"Retrying in {waiting_time}.\n")
stdout.flush()
time.sleep(waiting_time)
i += 1
return func(*args, **kwargs)
return func_wrapper
return retry_decorator
class MyProcess(mp.Process):
def run(self):
try:
mp.Process.run(self)
except Exception as err:
self.err = err
raise self.err
else:
self.err = None
class Connection(object):
"""Class that handles the connection to Twitter
Attributes:
token_file_name (str): Path to file with user tokens
"""
def __init__(self, token_file_name="tokens.csv", token_queue=None):
self.credentials = FileImport().read_app_key_file()
self.ctoken = self.credentials[0]
self.csecret = self.credentials[1]
if token_queue is None:
self.tokens = FileImport().read_token_file(token_file_name)
self.token_queue = mp.Queue()
for token, secret in self.tokens.values:
self.token_queue.put((token, secret, {}, {}))
else:
self.token_queue = token_queue
self.token, self.secret, self.reset_time_dict, self.calls_dict = self.token_queue.get()
self.auth = tweepy.OAuthHandler(self.ctoken, self.csecret)
self.auth.set_access_token(self.token, self.secret)
self.api = tweepy.API(self.auth, wait_on_rate_limit=False, wait_on_rate_limit_notify=False)
def next_token(self):
self.token_queue.put((self.token, self.secret, self.reset_time_dict, self.calls_dict))
(self.token, self.secret,
self.reset_time_dict, self.calls_dict) = self.token_queue.get()
self.auth = tweepy.OAuthHandler(self.ctoken, self.csecret)
self.auth.set_access_token(self.token, self.secret)
self.api = tweepy.API(self.auth)
def remaining_calls(self, endpoint='/friends/ids'):
"""Returns the number of remaining calls until reset time.
Args:
endpoint (str):
API endpoint.
Defaults to '/friends/ids'
Returns:
remaining calls (int)
"""
rate_limits = self.api.rate_limit_status()
path = endpoint.split('/')
path = path[1:]
rate_limits = rate_limits['resources'][path[0]]
key = "/" + path[0]
for item in path[1:]:
key = key + '/' + item
rate_limits = rate_limits[key]
rate_limits = rate_limits['remaining']
return rate_limits
def reset_time(self, endpoint='/friends/ids'):
"""Returns the time until reset time.
Args:
endpoint (str):
API endpoint.
Defaults to '/friends/ids'
Returns:
remaining time in seconds (int)
"""
reset_time = self.api.rate_limit_status()
path = endpoint.split('/')
path = path[1:]
reset_time = reset_time['resources'][path[0]]
key = "/" + path[0]
for item in path[1:]:
key = key + '/' + item
reset_time = reset_time[key]
reset_time = reset_time['reset'] - int(time.time())
return reset_time
class Collector(object):
"""Does the collecting of friends.
Attributes:
connection (Connection object):
Connection object with actually active credentials
seed (int): Twitter id of seed user
"""
def __init__(self, connection, seed, following_pages_limit=0):
self.seed = seed
self.connection = connection
self.token_blacklist = {}
self.following_pages_limit = following_pages_limit
class Decorators(object):
@staticmethod
def retry_with_next_token_on_rate_limit_error(func):
def wrapper(*args, **kwargs):
collector = args[0]
old_token = collector.connection.token
while True:
try:
try:
if kwargs['force_retry_token'] is True:
print('Forced retry with token.')
return func(*args, **kwargs)
except KeyError:
pass
try:
if collector.token_blacklist[old_token] <= time.time():
print(f'Token starting with {old_token[:4]} should work again.')
return func(*args, **kwargs)
else:
print(f'Token starting with {old_token[:4]} not ready yet.')
collector.connection.next_token()
time.sleep(10)
continue
except KeyError:
print(f'Token starting with {old_token[:4]} not tried yet. Trying.')
return func(*args, **kwargs)
except tweepy.RateLimitError:
collector.token_blacklist[old_token] = time.time() + 150
print(f'Token starting with {old_token[:4]} hit rate limit.')
print("Retrying with next available token.")
print(f"Blacklisted until {collector.token_blacklist[old_token]}")
collector.connection.next_token()
continue
break
return wrapper
@Decorators.retry_with_next_token_on_rate_limit_error
def check_API_calls_and_update_if_necessary(self, endpoint, check_calls=True):
"""Checks for an endpoint how many calls are left (optional), gets the reset time
and updates token if necessary.
If called with check_calls = False,
it will assume that the actual token calls for the specified endpoint are depleted
and return None for remaining calls
Args:
endpoint (str): API endpoint, e.g. '/friends/ids'
check_calls (boolean): Default True
Returns:
if check_calls=True:
remaining_calls (int)
else:
None
"""
def try_remaining_calls_except_invalid_token():
try:
remaining_calls = self.connection.remaining_calls(endpoint=endpoint)
except tweepy.error.TweepError as invalid_error:
if "'code': 89" in invalid_error.reason:
print(f"Token starting with {self.connection.token[:5]} seems to have expired or\
it has been revoked.")
print(invalid_error)
self.connection.next_token()
remaining_calls = self.connection.remaining_calls(endpoint=endpoint)
else:
raise invalid_error
print("REMAINING CALLS FOR {} WITH TOKEN STARTING WITH {}: ".format(
endpoint, self.connection.token[:4]), remaining_calls)
return remaining_calls
if check_calls is True:
self.connection.calls_dict[endpoint] = try_remaining_calls_except_invalid_token()
reset_time = self.connection.reset_time(endpoint=endpoint)
self.connection.reset_time_dict[endpoint] = time.time() + reset_time
while self.connection.calls_dict[endpoint] == 0:
stdout.write("Attempt with next available token.\n")
self.connection.next_token()
try:
next_reset_at = self.connection.reset_time_dict[endpoint]
if time.time() >= next_reset_at:
self.connection.calls_dict[endpoint] = \
self.connection.remaining_calls(endpoint=endpoint)
else:
time.sleep(10)
continue
except KeyError:
self.connection.calls_dict[endpoint] = \
try_remaining_calls_except_invalid_token()
reset_time = self.connection.reset_time(endpoint=endpoint)
self.connection.reset_time_dict[endpoint] = time.time() + reset_time
print("REMAINING CALLS FOR {} WITH TOKEN STARTING WITH {}: ".format(
endpoint, self.connection.token[:4]), self.connection.calls_dict[endpoint])
print(f"{time.strftime('%c')}: new reset of token {self.connection.token[:4]} for \
{endpoint} in {int(self.connection.reset_time_dict[endpoint] - time.time())} seconds.")
return self.connection.calls_dict[endpoint]
else:
self.connection.calls_dict[endpoint] = 0
if endpoint not in self.connection.reset_time_dict \
or self.connection.reset_time_dict[endpoint] <= time.time():
reset_time = self.connection.reset_time(endpoint=endpoint)
self.connection.reset_time_dict[endpoint] = time.time() + reset_time
print("REMAINING CALLS FOR {} WITH TOKEN STARTING WITH {}: ".format(
endpoint, self.connection.token[:4]), self.connection.calls_dict[endpoint])
print(f"{time.strftime('%c')}: new reset of token {self.connection.token[:4]} for \
{endpoint} in {int(self.connection.reset_time_dict[endpoint] - time.time())} seconds.")
while (endpoint in self.connection.reset_time_dict and
self.connection.reset_time_dict[endpoint] >= time.time() and
self.connection.calls_dict[endpoint] == 0):
self.connection.next_token()
time.sleep(1)
return None
def get_friend_list(self, twitter_id=None, follower=False):
"""Gets the friend list of an account.
Args:
twitter_id (int): Twitter Id of account,
if None defaults to seed account of Collector object.
Returns:
list with friends of user.
"""
if twitter_id is None:
twitter_id = self.seed
result = []
cursor = -1
following_page = 0
while self.following_pages_limit == 0 or following_page < self.following_pages_limit:
while True:
try:
if follower is False:
page = self.connection.api.friends_ids(user_id=twitter_id, cursor=cursor)
self.connection.calls_dict['/friends/ids'] = 1
else:
page = self.connection.api.followers_ids(user_id=twitter_id, cursor=cursor)
self.connection.calls_dict['/followers/ids'] = 1
break
except tweepy.RateLimitError:
if follower is False:
self.check_API_calls_and_update_if_necessary(endpoint='/friends/ids',
check_calls=False)
else:
self.check_API_calls_and_update_if_necessary(endpoint='/followers/ids',
check_calls=False)
if len(page[0]) > 0:
result += page[0]
else:
break
cursor = page[1][1]
following_page += 1
return result
def get_details(self, friends):
"""Collects details from friends of an account.
Args:
friends (list of int): list of Twitter user ids
Returns:
list of Tweepy user objects
"""
i = 0
user_details = []
while i < len(friends):
if i + 100 <= len(friends):
j = i + 100
else:
j = len(friends)
while True:
try:
try:
user_details += self.connection.api.lookup_users(user_ids=friends[i:j],
tweet_mode='extended')
except tweepy.error.TweepError as e:
if "No user matches for specified terms." in e.reason:
stdout.write(f"No user matches for {friends[i:j]}")
stdout.flush()
else:
raise e
self.connection.calls_dict['/users/lookup'] = 1
break
except tweepy.RateLimitError:
self.check_API_calls_and_update_if_necessary(endpoint='/users/lookup',
check_calls=False)
i += 100
return user_details
@staticmethod
def make_friend_df(friends_details, select=["id", "followers_count", "status_lang",
"created_at", "statuses_count"],
provide_jsons: bool = False, replace_nonetype: bool = True,
nonetype: dict = {'date': '1970-01-01',
'num': -1,
'str': '-1',
'bool': -1}):
"""Transforms list of user details to pandas.DataFrame
Args:
friends_details (list of Tweepy user objects)
select (list of str): columns to keep in DataFrame
provide_jsons (boolean): If true, will treat friends_details as list of jsons. This
allows creating a user details dataframe without having to
download the details first. Note that the jsons must have the
same format as the _json attribute of a user node of the
Twitter API.
replace_nonetype (boolean): Whether or not to replace values in the user_details that
are None. Setting this to False is experimental, since code
to avoid errors resulting from it has not yet been
implemented. By default, missing dates will be replaced by
1970/01/01, missing numericals by -1, missing strs by
'-1', and missing booleans by -1.
Use the 'nonetype' param to change the default.
nonetype (dict): Contains the defaults for nonetype replacement (see docs for
'replace_nonetype' param).
{'date': 'yyyy-mm-dd', 'num': int, 'str': 'str', 'bool': int}
Returns:
pandas.DataFrame with these columns or selected as by `select`:
['contributors_enabled',
'created_at',
'default_profile',
'default_profile_image',
'description',
'entities_description_urls',
'entities_url_urls',
'favourites_count',
'follow_request_sent',
'followers_count',
'following',
'friends_count',
'geo_enabled',
'has_extended_profile',
'id',
'id_str',
'is_translation_enabled',
'is_translator',
'lang',
'listed_count',
'location',
'name',
'needs_phone_verification',
'notifications',
'profile_background_color',
'profile_background_image_url',
'profile_background_image_url_https',
'profile_background_tile',
'profile_banner_url',
'profile_image_url',
'profile_image_url_https',
'profile_link_color',
'profile_sidebar_border_color',
'profile_sidebar_fill_color',
'profile_text_color',
'profile_use_background_image',
'protected',
'screen_name',
'status_contributors',
'status_coordinates',
'status_coordinates_coordinates',
'status_coordinates_type',
'status_created_at',
'status_entities_hashtags',
'status_entities_media',
'status_entities_symbols',
'status_entities_urls',
'status_entities_user_mentions',
'status_extended_entities_media',
'status_favorite_count',
'status_favorited',
'status_geo',
'status_geo_coordinates',
'status_geo_type',
'status_id',
'status_id_str',
'status_in_reply_to_screen_name',
'status_in_reply_to_status_id',
'status_in_reply_to_status_id_str',
'status_in_reply_to_user_id',
'status_in_reply_to_user_id_str',
'status_is_quote_status',
'status_lang',
'status_place',
'status_place_bounding_box_coordinates',
'status_place_bounding_box_type',
'status_place_contained_within',
'status_place_country',
'status_place_country_code',
'status_place_full_name',
'status_place_id',
'status_place_name',
'status_place_place_type',
'status_place_url',
'status_possibly_sensitive',
'status_quoted_status_id',
'status_quoted_status_id_str',
'status_retweet_count',
'status_retweeted',
'status_retweeted_status_contributors',
'status_retweeted_status_coordinates',
'status_retweeted_status_created_at',
'status_retweeted_status_entities_hashtags',
'status_retweeted_status_entities_media',
'status_retweeted_status_entities_symbols',
'status_retweeted_status_entities_urls',
'status_retweeted_status_entities_user_mentions',
'status_retweeted_status_extended_entities_media',
'status_retweeted_status_favorite_count',
'status_retweeted_status_favorited',
'status_retweeted_status_geo',
'status_retweeted_status_id',
'status_retweeted_status_id_str',
'status_retweeted_status_in_reply_to_screen_name',
'status_retweeted_status_in_reply_to_status_id',
'status_retweeted_status_in_reply_to_status_id_str',
'status_retweeted_status_in_reply_to_user_id',
'status_retweeted_status_in_reply_to_user_id_str',
'status_retweeted_status_is_quote_status',
'status_retweeted_status_lang',
'status_retweeted_status_place',
'status_retweeted_status_possibly_sensitive',
'status_retweeted_status_quoted_status_id',
'status_retweeted_status_quoted_status_id_str',
'status_retweeted_status_retweet_count',
'status_retweeted_status_retweeted',
'status_retweeted_status_source',
'status_retweeted_status_full_text',
'status_retweeted_status_truncated',
'status_source',
'status_full_text',
'status_truncated',
'statuses_count',
'suspended',
'time_zone',
'translator_type',
'url',
'verified'
'utc_offset'],
"""
if not provide_jsons:
json_list_raw = [friend._json for friend in friends_details]
else:
json_list_raw = friends_details
json_list = []
dtypes = {key: value for (key, value) in friends_details_dtypes.items() if key in select}
for j in json_list_raw:
flat = flatten_json(j, sep="_", columns=select, nonetype=nonetype)
# In case that there are keys in the user_details json that are not in select
newflat = {key: value for (key, value) in flat.items() if key in select}
json_list.append(newflat)
df = pd.json_normalize(json_list)
for var in select:
if var not in df.columns:
if dtypes[var] == np.datetime64:
df[var] = pd.to_datetime(nonetype["date"])
elif dtypes[var] == np.int64:
df[var] = nonetype["num"]
elif dtypes[var] == str:
df[var] = nonetype["str"]
elif dtypes[var] == np.int8:
df[var] = nonetype["bool"]
else:
df[var] = np.nan
else:
if dtypes[var] == np.datetime64:
df[var] = df[var].fillna(pd.to_datetime(nonetype["date"]))
elif dtypes[var] == np.int64:
df[var] = df[var].fillna(nonetype["num"])
elif dtypes[var] == str:
df[var] = df[var].fillna(nonetype["str"])
elif dtypes[var] == np.int8:
df[var] = df[var].fillna(nonetype["bool"])
df[var] = df[var].astype(dtypes[var])
df.sort_index(axis=1, inplace=True)
return df
def check_follows(self, source, target):
"""Checks Twitter API whether `source` account follows `target` account.
Args:
source (int): user id
target (int): user id
Returns:
- `True` if `source` follows `target`
- `False` if `source` does not follow `target`
"""
# TODO: check remaining API calls
friendship = self.connection.api.show_friendship(
source_id=source, target_id=target)
following = friendship[0].following
return following
class Coordinator(object):
"""Selects a queue of seeds and coordinates the collection with collectors
and a queue of tokens.
"""
def __init__(self, seeds=2, token_file_name="tokens.csv", seed_list=None,
following_pages_limit=0):
# Get seeds from seeds.csv
self.seed_pool = FileImport().read_seed_file()
# Create seed_list if none is given by sampling from the seed_pool
if seed_list is None:
self.number_of_seeds = seeds
try:
self.seeds = self.seed_pool.sample(n=self.number_of_seeds)
except ValueError: # seed pool too small
stderr.write("WARNING: Seed pool smaller than number of seeds.\n")
self.seeds = self.seed_pool.sample(n=self.number_of_seeds, replace=True)
self.seeds = self.seeds[0].values
else:
self.number_of_seeds = len(seed_list)
self.seeds = seed_list
self.seed_queue = mp.Queue()
for seed in self.seeds:
self.seed_queue.put(seed)
# Get authorized user tokens for app from tokens.csv
self.tokens = FileImport().read_token_file(token_file_name)
# and put them in a queue
self.token_queue = mp.Queue()
for token, secret in self.tokens.values:
self.token_queue.put((token, secret, {}, {}))
# Initialize DataBaseHandler for DB communication
self.dbh = DataBaseHandler()
self.following_pages_limit = following_pages_limit
def bootstrap_seed_pool(self, after_timestamp=0):
"""Adds all collected user details, i.e. friends with the desired properties
(e.g. language) of previously found seeds to the seed pool.
Args:
after_timestamp (int): filter for friends added after this timestamp. Default: 0
Returns:
None
"""
seed_pool_size = len(self.seed_pool)
stdout.write("Bootstrapping seeds.\n")
stdout.write(f"Old size: {seed_pool_size}. Adding after {after_timestamp} ")
stdout.flush()
query = f"SELECT id FROM user_details WHERE UNIX_TIMESTAMP(timestamp) >= {after_timestamp}"
more_seeds = pd.read_sql(query, self.dbh.engine)
more_seeds.columns = [0] # rename from id to 0 for proper append
self.seed_pool = self.seed_pool.merge(more_seeds, how='outer', on=[0])
seed_pool_size = len(self.seed_pool)
stdout.write(f"New size: {seed_pool_size}\n")
stdout.flush()
def lookup_accounts_friend_details(self, account_id, db_connection=None, select="*"):
"""Looks up and retrieves details from friends of `account_id` via database.
Args:
account_id (int)
db_connection (database connection/engine object)
select (str): comma separated list of required fields, defaults to all available ("*")
Returns:
None, if no friends found.
Otherwise DataFrame with all details. Might be empty if language filter is on.
"""
if db_connection is None:
db_connection = self.dbh.engine
query = f"SELECT target from friends WHERE source = {account_id} AND burned = 0"
friends = pd.read_sql(query, db_connection)
if len(friends) == 0:
return None
else:
friends = friends['target'].values
friends = tuple(friends)
if len(friends) == 1:
friends = str(friends).replace(',', '')
query = f"SELECT {select} from user_details WHERE id IN {friends}"
friend_detail = | pd.read_sql(query, db_connection) | pandas.read_sql |
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 8 08:56:36 2016
@author: davidangeles
"""
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import tissue_enrichment_analysis as tea
import os
import mpl_toolkits.mplot3d
import pyrnaseq_graphics as rsq
from sklearn.preprocessing import StandardScaler
# Package to perform PCA
import sklearn.datasets
import sklearn.decomposition
sns.set_context("notebook")
mag = 2 # value of beta from regression
qval = .1 # qvalue from regression
qvalEn = 0.05 # q value for enrichment analysis (tissues)
dirLists = '../output/Gene_lists_for_analysis'
if not os.path.exists(dirLists):
os.makedirs(dirLists)
dirGraphs = '../output/Graphs'
if not os.path.exists(dirLists):
os.makedirs(dirGraphs)
os.chdir('./')
# gene_lists from sleuth
# tpm vals for PCA
dfTPM = pd.read_csv("../input/tpm_table.csv")
dfTPM.dropna(inplace=True)
# pos beta means high old adults
dfBetaA = pd.read_csv("../input/agebeta_wt.csv")
dfBetaA.dropna(inplace=True)
# pos beta means high in fog2
dfBetaG = pd.read_csv("../input/genotypebeta_wt.csv")
dfBetaG.dropna(inplace=True)
# pos beta means high in fog2-aged
dfBetaAG = pd.read_csv("../input/genotypecrossagebeta_wt.csv")
dfBetaAG.dropna(inplace=True)
# likelihood ratio test results
dfLRT = pd.read_csv("../input/lrt.csv")
dfLRT.dropna(inplace=True)
# sort by target_id
dfBetaA.sort_values('target_id', inplace=True)
dfBetaG.sort_values('target_id', inplace=True)
dfBetaAG.sort_values('target_id', inplace=True)
dfLRT.sort_values('target_id', inplace=True)
# gold standard datasets
dfDaf12 = pd.read_csv('../input/daf12genes.csv')
dfDaf16 = pd.read_csv('../input/daf16genes.csv')
dfLund = pd.read_csv('../input/lund_data.csv', header=None, names=['gene'])
dfEckley = pd.read_csv('../input/eckley_data.csv', header=None, names=['gene'])
dfMurphyUp = pd.read_csv('../input/murphy_data_lifespan_extension.csv')
dfMurphyDown = pd.read_csv('../input/murphy_data_lifespan_decrease.csv')
dfHalaschek = pd.read_csv('../input/Halaschek-Wiener_data.csv')
# gpcrs
dfGPCR = pd.read_csv('../input/all_gpcrs.csv')
dfICh = pd.read_csv('../input/select_ion_transport_genes.csv')
dfAxon = pd.read_csv('../input/axonogenesis_genes.csv')
dfNP = | pd.read_csv('../input/neuropeptides.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 20 12:46:41 2018
@author: MichaelEK
"""
import numpy as np
from os import path
import pandas as pd
from pdsql.mssql import rd_sql
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from datetime import datetime
import matplotlib.ticker as plticker
import lowflows as lf
plt.ioff()
loc = plticker.MaxNLocator(integer=True)
datetime1 = pd.Timestamp.today()
#date1 = pd.Timestamp(datetime1.date())
pd.options.display.max_columns = 10
#####################################
### Parameters
usm_server = 'sql02prod'
usm_database = 'usm'
site_table = 'Site'
site_attr_table = 'SiteAttribute'
bad_sites = {'66101': '14240260', '65104': '165104', '69650': '696501'}
irr_mons1 = [10, 11, 12]
irr_mons2 = [1, 2, 3, 4]
include_flow_methods = ['Correlated from Telem', 'Gauged', 'Telemetered', 'Visually Gauged', 'GW manual']
##color palettes
full_color = sns.color_palette('Blues')
partial_color = sns.color_palette('Greens')
no_color = sns.color_palette('Greys')
base_dir = path.split(path.realpath(path.dirname(__file__)))[0]
export_path = path.join(base_dir, 'lowflow_plots')
#export_sel2 = 'lowflow_restr_2017-10-01.csv'
####################################
### Set up time ranges
mon_now = datetime1.month - 1
year_now = datetime1.year
if mon_now in irr_mons1:
from_date = '{year}-10-01'.format(year=year_now)
elif mon_now in irr_mons2:
from_date = '{year}-10-01'.format(year=year_now - 1)
else:
from_date = '{year}-05-01'.format(year=year_now)
end_mon_now = datetime1 - pd.DateOffset(months=1) + pd.tseries.offsets.MonthEnd(0)
to_date = str(end_mon_now.date())
#to_date = '2019-02-12'
export_name_fancy = '{start}_{end}_restrictions_fancy.png'.format(start=from_date, end=to_date)
export_name = '{start}_{end}_restrictions.png'.format(start=from_date, end=to_date)
export_man_calc_sites = '{start}_{end}_lowflow_sites.csv'.format(start=from_date, end=to_date)
####################################
### extract data
lf_sites1 = lf.sites().reset_index()
lowflow1 = lf.site_summary_ts(from_date, to_date).reset_index()
lowflow2 = lowflow1[lowflow1.SourceSystem.isin(['Hydrotel', 'Gauging'])].copy()
lowflow2.rename(columns={'ExtSiteID': 'site', 'RestrDate': 'date', 'RestrCategory': 'restr_category', 'MeasurementMethod': 'flow_method'}, inplace=True)
sites1 = lowflow2.site.unique().tolist()
sites1.extend(list(bad_sites.keys()))
sites2 = rd_sql(usm_server, usm_database, site_table, ['ID', 'UpstreamSiteID'], where_in={'UpstreamSiteID': sites1})
sites_attr1 = rd_sql(usm_server, usm_database, site_attr_table, ['SiteID', 'CwmsName'])
sites_attr1.rename(columns={'SiteID': 'ID'}, inplace=True)
sites = pd.merge(sites2, sites_attr1, on='ID').drop('ID', axis=1)
sites.rename(columns={'UpstreamSiteID': 'site', 'CwmsName': 'cwms'}, inplace=True)
bad_ones = sites[sites.site.isin(list(bad_sites.keys()))].copy()
bad_ones.replace({'site': bad_sites}, inplace=True)
sites2 = pd.concat([sites, bad_ones])
## Other - unused for now - but might later
#site_restr1 = lowflow2.groupby(['site', 'restr_category'])['crc_count'].count()
#site_restr1.name = 'count'
#max_days = (pd.to_datetime(to_date) - pd.to_datetime(from_date)).days + 1
#max_days1 = site_restr1.groupby(level=['site']).transform('sum')
#site_restr2 = (site_restr1/max_days1).round(3).unstack('restr_category')
##
## Combine cwms with lowflow sites
lowflow3 = pd.merge(sites2, lowflow2, on='site')
sites_zone_count = lowflow3[['site', 'cwms']].drop_duplicates().groupby('cwms').site.count()
sites_zone_count['All Canterbury'] = sites_zone_count.sum()
sites_zone_count_plot = ((sites_zone_count*0.1).apply(np.ceil) * 10).astype(int)
restr_all = lowflow3.groupby(['restr_category', 'flow_method', 'date'])[['site']].count()
restr_all['cwms'] = 'All Canterbury'
restr_all1 = restr_all.reset_index().set_index(['cwms', 'restr_category', 'flow_method', 'date']).site
restr1 = lowflow3.groupby(['cwms', 'restr_category', 'flow_method', 'date'])['site'].count()
restr2 = | pd.concat([restr_all1, restr1]) | pandas.concat |
import os
from pyspark.sql import SparkSession
import pyspark
import multiprocessing
import pretty_midi
from legacy.transcription import encode
import pandas as pd
# Functions for transcripting dataset stored in midi files using spark
# Load midi from 'processed_dir'
# Save resulted transcriptions to 'processed_dir'
# Names of particular midi files in given directory
def get_filenames(dataset_path=r'D:\ВКР\dataset\big midi\Lakh MIDI\lmd_full', ext=None):
# count = 0
filenames = []
# dirs = [ name for name in os.listdir(dataset_path) if os.path.isdir(os.path.join(dataset_path, name))]
for file_group in os.listdir(dataset_path):
item_name = os.path.join(dataset_path, file_group)
# for filename in os.listdir(group_path):
# item_name = os.path.join(group_path, filename)
if os.path.isdir(item_name):
filenames += get_filenames(item_name, ext)
else:
if ext:
for e in ext:
if item_name.endswith(e):
filenames.append(item_name)
break
else:
filenames.append(item_name)
print(len(filenames), "midi files in", dataset_path)
return filenames
# Read midi from file, make transcription and write result to 'processed_dir'
def file_preproccess(data):
filename, processed_dir, texts_dir = data
try:
pm = pretty_midi.PrettyMIDI(filename)
encoded, OG_res, OG_tempo = encode(pm)
if processed_dir:
procesed_file = processed_dir + "\\" + filename.split("\\")[-1].split('.')[0] + '.txt'
with open(procesed_file, "w") as text_file:
text_file.write(str([encoded, OG_res, OG_tempo]))
if texts_dir:
procesed_file = texts_dir + "\\" + filename.split("\\")[-1].split('.')[0] + '.txt'
with open(procesed_file, "w") as text_file:
text_file.write(str(encoded))
return True
except:
return False # str(list())
# Write transcripted files to 'processed_dir'
def preproccess_files(filenames,
processed_dir,
texts_dir,
spark,
n=-1,
cpu_count=4,
return_value=False):
num = len(filenames) if n == -1 else n
files = list(filenames[:num])
files = list([(f, processed_dir, texts_dir) for f in files])
print('cpu_count',cpu_count)
rdd = spark.sparkContext.parallelize(files, cpu_count)
if return_value:
return rdd.map(file_preproccess).collect()
rdd.map(file_preproccess).collect()
def spark_preprocess(num=-1,
dataset_dir=r'D:\ВКР\dataset\big midi\Lakh MIDI\lmd_full',
processed_dir=r"D:\ВКР\dataset\big midi\ProcessedLakh",
texts_dir=r'D:\ВКР\dataset\big midi\ProcessedLakh'):
cpu_count = multiprocessing.cpu_count()
spark = SparkSession.builder\
.master("local[8]")\
.appName("SparkApp")\
.getOrCreate()
sc = spark.sparkContext
conf = pyspark.SparkConf().setAll([
('spark.executor.cores', str(cpu_count)),
('spark.cores.max', str(cpu_count))])
spark.sparkContext.stop()
spark = SparkSession.builder.config(conf=conf).getOrCreate()
sc = spark.sparkContext
print(sc.getConf().getAll())
filenames = get_filenames(dataset_path=dataset_dir, ext=[".mid", ".midi"])
# filenames += get_filenames(dataset_path=dataset_dir, ext=)
preproccess_files(filenames, processed_dir, texts_dir, spark, num, cpu_count)
# Save DataFrames with processed midi to csv files
def to_csv(csv_dir=r"D:\ВКР\dataset\big midi\pdLakh" + "\\",
processed_dir=r'D:\ВКР\dataset\big midi\ProcessedLakh',
df_size=10 * 1000):
count = 0
file_num = 0
filenames_processed = get_filenames(dataset_path=processed_dir)
for i, filename in enumerate(filenames_processed):
if count == 0:
df = pd.DataFrame({'title': | pd.Series([], dtype='str') | pandas.Series |
from reframed import CBModel, Compartment, Metabolite, CBReaction, save_cbmodel
from reframed.io.sbml import parse_gpr_rule
from ..reconstruction.utils import to_rdf_annotation
import pandas as pd
import requests
import sys
UNIVERSE_URL = 'http://bigg.ucsd.edu/static/namespace/universal_model.json'
COMPARTMENTS_URL = 'http://bigg.ucsd.edu/api/v2/compartments'
METABOLITES_URL = 'http://bigg.ucsd.edu/api/v2/universal/metabolites/'
REACTIONS_URL = 'http://bigg.ucsd.edu/api/v2/universal/reactions/'
MODELS_URL = 'http://bigg.ucsd.edu/api/v2/models'
MAX_GPR_TOKENS = 50
def progress(i, n):
p = int((i+1)*100.0 / n)
sys.stdout.write(f"\r{p}%")
sys.stdout.flush()
def get_request(url, max_tries=10):
""" Get JSON data from BiGG RESTful API.
Args:
url (str): url request
max_tries (int): maximum number of communication attempts (default: 10)
Returns:
dict: json data
"""
resp, data = None, None
for i in range(max_tries):
try:
resp = requests.get(url)
except:
pass
if resp is not None:
data = resp.json()
break
if data is None:
print('max number of attempts exceeded:', max_tries)
print('try again? [y]/n')
resp = input()
if resp.lower() != 'n':
data = get_request(url)
return data
def extract_annotation(elem, data):
annotation = to_rdf_annotation(elem.id, [x[1] for x in data])
elem.metadata['XMLAnnotation'] = annotation
def load_compartments(model):
compartments = get_request(COMPARTMENTS_URL)
compartments = sorted(compartments['compartments'], key=lambda x: x['bigg_id'])
for entry in compartments:
c_id = 'C_' + entry['bigg_id']
comp = Compartment(c_id, entry['name'], external=(c_id == 'C_e'))
comp.metadata['SBOTerm'] = 'SBO:0000290'
model.add_compartment(comp)
def load_metabolites(json_model, model, cpds):
metabolites = sorted(json_model['metabolites'], key=lambda x: x['id'])
for entry in metabolites:
c_id = 'C_' + entry['id'].split('_')[-1]
m_id = 'M_' + entry['id']
met = Metabolite(m_id, str(entry['name']), c_id)
met.metadata['SBOTerm'] = 'SBO:0000247'
extract_annotation(met, entry['annotation'])
if m_id[2:-2] in cpds.index:
met.metadata['FORMULA'] = cpds.loc[m_id[2:-2], "formula"]
met.metadata['CHARGE'] = str(int(cpds.loc[m_id[2:-2], "charge"]))
model.add_metabolite(met)
def is_pseudo(rxn):
prefix = rxn.split('_', 1)[0].upper()
return prefix in ['ATPM', 'BIOMASS', 'DM', 'EX', 'SK']
def load_reactions(json_model, model):
reactions = sorted(json_model['reactions'], key=lambda x: x['id'])
for entry in reactions:
if is_pseudo(entry['id']):
continue
r_id = 'R_' + entry['id']
stoichiometry = {'M_' + met: coeff for met, coeff in entry['metabolites'].items()}
rxn = CBReaction(r_id, str(entry['name']), stoichiometry=stoichiometry)
extract_annotation(rxn, entry['annotation'])
model.add_reaction(rxn)
if len(model.get_reaction_compartments(r_id)) > 1:
rxn.metadata['SBOTerm'] = 'SBO:0000185'
else:
rxn.metadata['SBOTerm'] = 'SBO:0000176'
def download_universal_model(outputfile, cpd_annotation):
print("Downloading BiGG universe...")
cpds = | pd.read_csv(cpd_annotation, sep="\t", index_col=0) | pandas.read_csv |
import numpy as np
import pandas as pd
from rdt import HyperTransformer
from rdt.transformers import OneHotEncodingTransformer
def get_input_data_with_nan():
data = pd.DataFrame({
'integer': [1, 2, 1, 3, 1],
'float': [0.1, 0.2, 0.1, np.nan, 0.1],
'categorical': ['a', 'b', np.nan, 'b', 'a'],
'bool': [False, np.nan, False, True, False],
'datetime': [
np.nan, '2010-02-01', '2010-01-01', '2010-02-01', '2010-01-01'
],
'names': ['Jon', 'Arya', 'Sansa', 'Jon', 'Robb'],
})
data['datetime'] = pd.to_datetime(data['datetime'])
return data
def get_input_data_without_nan():
data = pd.DataFrame({
'integer': [1, 2, 1, 3],
'float': [0.1, 0.2, 0.1, 0.1],
'categorical': ['a', 'b', 'b', 'a'],
'bool': [False, False, True, False],
'datetime': [
'2010-02-01', '2010-01-01', '2010-02-01', '2010-01-01'
],
'names': ['Jon', 'Arya', 'Sansa', 'Jon'],
})
data['datetime'] = pd.to_datetime(data['datetime'])
data['bool'] = data['bool'].astype('O') # boolean transformer returns O instead of bool
return data
def get_transformed_data():
return pd.DataFrame({
'integer': [1, 2, 1, 3],
'float': [0.1, 0.2, 0.1, 0.1],
'categorical': [0.75, 0.25, 0.25, 0.75],
'bool': [0.0, 0.0, 1.0, 0.0],
'datetime': [
1.2649824e+18,
1.262304e+18,
1.2649824e+18,
1.262304e+18
],
'names': [0.25, 0.875, 0.625, 0.25]
})
def get_transformed_nan_data():
return pd.DataFrame({
'integer': [1, 2, 1, 3, 1],
'float': [0.1, 0.2, 0.1, 0.125, 0.1],
'float#1': [0.0, 0.0, 0.0, 1.0, 0.0],
'categorical': [0.6, 0.2, 0.9, 0.2, 0.6],
'bool': [0.0, -1.0, 0.0, 1.0, 0.0],
'bool#1': [0.0, 1.0, 0.0, 0.0, 0.0],
'datetime': [
1.2636432e+18, 1.2649824e+18, 1.262304e+18,
1.2649824e+18, 1.262304e+18
],
'datetime#1': [1.0, 0.0, 0.0, 0.0, 0.0],
'names': [0.2, 0.9, 0.5, 0.2, 0.7],
})
def get_transformers():
return {
'integer': {
'class': 'NumericalTransformer',
'kwargs': {
'dtype': np.int64,
}
},
'float': {
'class': 'NumericalTransformer',
'kwargs': {
'dtype': np.float64,
}
},
'categorical': {
'class': 'CategoricalTransformer'
},
'bool': {
'class': 'BooleanTransformer'
},
'datetime': {
'class': 'DatetimeTransformer'
},
'names': {
'class': 'CategoricalTransformer',
},
}
def test_hypertransformer_with_transformers():
data = get_input_data_without_nan()
transformers = get_transformers()
ht = HyperTransformer(transformers)
ht.fit(data)
transformed = ht.transform(data)
expected = get_transformed_data()
np.testing.assert_allclose(
transformed.sort_index(axis=1).values,
expected.sort_index(axis=1).values
)
reversed_data = ht.reverse_transform(transformed)
original_names = data.pop('names')
reversed_names = reversed_data.pop('names')
pd.testing.assert_frame_equal(data.sort_index(axis=1), reversed_data.sort_index(axis=1))
for name in original_names:
assert name not in reversed_names
def test_hypertransformer_with_transformers_nan_data():
data = get_input_data_with_nan()
transformers = get_transformers()
ht = HyperTransformer(transformers)
ht.fit(data)
transformed = ht.transform(data)
expected = get_transformed_nan_data()
np.testing.assert_allclose(
transformed.sort_index(axis=1).values,
expected.sort_index(axis=1).values
)
reversed_data = ht.reverse_transform(transformed)
original_names = data.pop('names')
reversed_names = reversed_data.pop('names')
pd.testing.assert_frame_equal(data.sort_index(axis=1), reversed_data.sort_index(axis=1))
for name in original_names:
assert name not in reversed_names
def test_hypertransformer_without_transformers():
data = get_input_data_without_nan()
ht = HyperTransformer()
ht.fit(data)
transformed = ht.transform(data)
expected = get_transformed_data()
np.testing.assert_allclose(
transformed.sort_index(axis=1).values,
expected.sort_index(axis=1).values
)
reversed_data = ht.reverse_transform(transformed)
original_names = data.pop('names')
reversed_names = reversed_data.pop('names')
pd.testing.assert_frame_equal(data.sort_index(axis=1), reversed_data.sort_index(axis=1))
for name in original_names:
assert name not in reversed_names
def test_hypertransformer_without_transformers_nan_data():
data = get_input_data_with_nan()
ht = HyperTransformer()
ht.fit(data)
transformed = ht.transform(data)
expected = get_transformed_nan_data()
np.testing.assert_allclose(
transformed.sort_index(axis=1).values,
expected.sort_index(axis=1).values
)
reversed_data = ht.reverse_transform(transformed)
original_names = data.pop('names')
reversed_names = reversed_data.pop('names')
pd.testing.assert_frame_equal(data.sort_index(axis=1), reversed_data.sort_index(axis=1))
for name in original_names:
assert name not in reversed_names
def test_single_category():
ht = HyperTransformer(transformers={
'a': OneHotEncodingTransformer()
})
data = pd.DataFrame({
'a': ['a', 'a', 'a']
})
ht.fit(data)
transformed = ht.transform(data)
reverse = ht.reverse_transform(transformed)
pd.testing.assert_frame_equal(data, reverse)
def test_dtype_category():
df = pd.DataFrame({'a': ['a', 'b', 'c']}, dtype='category')
ht = HyperTransformer()
ht.fit(df)
trans = ht.transform(df)
rever = ht.reverse_transform(trans)
| pd.testing.assert_frame_equal(df, rever) | pandas.testing.assert_frame_equal |
"""
Functions to add a model version to the ModMon database.
Run this script once, the first time an analyst submits a model file (including for a
new version of a model)
"""
import argparse
import json
import os
import sys
import pandas as pd
from ..db.connect import get_session
from ..db.utils import get_unique_id
from ..db.schema import (
Team,
Dataset,
Metric,
ResearchQuestion,
Model,
ModelVersion,
Score,
)
from .store import copy_model_to_storage
from .check import check_submission
from ..utils.utils import ask_for_confirmation
def setup_model(
model_path,
check_first=True,
confirm=True,
force=False,
warnings_ok=True,
create_envs=True,
repro_check=True,
set_old_inactive=True,
session=None,
):
"""Add a model version to the ModMon monitoring system. Includes copying the
directory model_path to the ModMon storage area and creating database entries.
Parameters
----------
model_path : str
Path to model version directory
check_first : bool, optional
Run model checks before attempting model version setup, by default True
confirm : bool, optional
Ask for user whether its ok to contitnue after performing model checks, by
default True
force : bool, optional
Continue with model setup even if checks fail. Only applies if confirm is False.
By default False
warnings_ok : bool, optional
Continue with model setup if warnings encountered during checks. Only applies
if confirm is False. By default True
create_envs : bool, optional
Check environment creation if performing model checks, by default True
repro_check : bool, optional
Check running the model and reproducing its results if performing model checks,
by default True
set_old_inactive : bool , optional
If True, set all previous versions of this model to be inactive, by default True
"""
if not os.path.exists(model_path):
raise FileNotFoundError(f"{model_path} does not exist")
if check_first:
check_result = check_submission(
model_path, create_envs=create_envs, repro_check=repro_check
)
if confirm:
message = "Add this model to the database?"
if check_result["error"] > 0:
message += " NOT RECOMMENDED WITH ERRORS ABOVE!"
confirmed = ask_for_confirmation(message)
if not confirmed:
print("Aborting model setup.")
return
elif not force:
if check_result["error"] > 0:
print("Model checks failed. Aborting model setup.")
return
elif check_result["warning"] > 0 and not warnings_ok:
print("Warnings during model checks. Aborting model setup.")
return
print("-" * 30)
print(f"Adding model {model_path}...")
# Set up SQLAlchemy session
if session is None:
session = get_session()
#############
# Files ###
#############
metadata_json = model_path + "/metadata.json"
training_metrics_csv = model_path + "/training_scores.csv"
prediction_metrics_csv = model_path + "/scores.csv"
#####################
# Load metadata ###
#####################
with open(metadata_json, "r") as f:
metadata = json.load(f)
#################
# Load data ###
#################
# Load model run reference metrics
metrics = pd.read_csv(prediction_metrics_csv)
# Load model train metrics, if included
try:
training_metrics = | pd.read_csv(training_metrics_csv) | pandas.read_csv |
"""SQL io tests
The SQL tests are broken down in different classes:
- `PandasSQLTest`: base class with common methods for all test classes
- Tests for the public API (only tests with sqlite3)
- `_TestSQLApi` base class
- `TestSQLApi`: test the public API with sqlalchemy engine
- `TestSQLiteFallbackApi`: test the public API with a sqlite DBAPI
connection
- Tests for the different SQL flavors (flavor specific type conversions)
- Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with
common methods, `_TestSQLAlchemyConn` tests the API with a SQLAlchemy
Connection object. The different tested flavors (sqlite3, MySQL,
PostgreSQL) derive from the base class
- Tests for the fallback mode (`TestSQLiteFallback`)
"""
import csv
from datetime import date, datetime, time
from io import StringIO
import sqlite3
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
isna,
to_datetime,
to_timedelta,
)
import pandas._testing as tm
import pandas.io.sql as sql
from pandas.io.sql import read_sql_query, read_sql_table
try:
import sqlalchemy
import sqlalchemy.schema
import sqlalchemy.sql.sqltypes as sqltypes
from sqlalchemy.ext import declarative
from sqlalchemy.orm import session as sa_session
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
"create_iris": {
"sqlite": """CREATE TABLE iris (
"SepalLength" REAL,
"SepalWidth" REAL,
"PetalLength" REAL,
"PetalWidth" REAL,
"Name" TEXT
)""",
"mysql": """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
"postgresql": """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)""",
},
"insert_iris": {
"sqlite": """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
"mysql": """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
"postgresql": """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);""",
},
"create_test_types": {
"sqlite": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TEXT,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" REAL,
"IntCol" INTEGER,
"BoolCol" INTEGER,
"IntColWithNull" INTEGER,
"BoolColWithNull" INTEGER
)""",
"mysql": """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`IntDateOnlyCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
"postgresql": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"DateColWithTz" TIMESTAMP WITH TIME ZONE,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)""",
},
"insert_test_types": {
"sqlite": {
"query": """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"mysql": {
"query": """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"postgresql": {
"query": """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"DateColWithTz",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
},
"read_parameters": {
"sqlite": "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
"mysql": 'SELECT * FROM iris WHERE `Name`="%s" AND `SepalLength`=%s',
"postgresql": 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s',
},
"read_named_parameters": {
"sqlite": """
SELECT * FROM iris WHERE Name=:name AND SepalLength=:length
""",
"mysql": """
SELECT * FROM iris WHERE
`Name`="%(name)s" AND `SepalLength`=%(length)s
""",
"postgresql": """
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
""",
},
"create_view": {
"sqlite": """
CREATE VIEW iris_view AS
SELECT * FROM iris
"""
},
}
class MixInBase:
def teardown_method(self, method):
# if setup fails, there may not be a connection to close.
if hasattr(self, "conn"):
for tbl in self._get_all_tables():
self.drop_table(tbl)
self._close_conn()
class MySQLMixIn(MixInBase):
def drop_table(self, table_name):
cur = self.conn.cursor()
cur.execute(f"DROP TABLE IF EXISTS {sql._get_valid_mysql_name(table_name)}")
self.conn.commit()
def _get_all_tables(self):
cur = self.conn.cursor()
cur.execute("SHOW TABLES")
return [table[0] for table in cur.fetchall()]
def _close_conn(self):
from pymysql.err import Error
try:
self.conn.close()
except Error:
pass
class SQLiteMixIn(MixInBase):
def drop_table(self, table_name):
self.conn.execute(
f"DROP TABLE IF EXISTS {sql._get_valid_sqlite_name(table_name)}"
)
self.conn.commit()
def _get_all_tables(self):
c = self.conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
return [table[0] for table in c.fetchall()]
def _close_conn(self):
self.conn.close()
class SQLAlchemyMixIn(MixInBase):
def drop_table(self, table_name):
sql.SQLDatabase(self.conn).drop_table(table_name)
def _get_all_tables(self):
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
table_list = meta.tables.keys()
return table_list
def _close_conn(self):
pass
class PandasSQLTest:
"""
Base class with common private methods for SQLAlchemy and fallback cases.
"""
def _get_exec(self):
if hasattr(self.conn, "execute"):
return self.conn
else:
return self.conn.cursor()
@pytest.fixture(params=[("data", "iris.csv")])
def load_iris_data(self, datapath, request):
import io
iris_csv_file = datapath(*request.param)
if not hasattr(self, "conn"):
self.setup_connect()
self.drop_table("iris")
self._get_exec().execute(SQL_STRINGS["create_iris"][self.flavor])
with io.open(iris_csv_file, mode="r", newline=None) as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS["insert_iris"][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _load_iris_view(self):
self.drop_table("iris_view")
self._get_exec().execute(SQL_STRINGS["create_view"][self.flavor])
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
assert issubclass(pytype, np.floating)
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _load_test1_data(self):
columns = ["index", "A", "B", "C", "D"]
data = [
(
"2000-01-03 00:00:00",
0.980268513777,
3.68573087906,
-0.364216805298,
-1.15973806169,
),
(
"2000-01-04 00:00:00",
1.04791624281,
-0.0412318367011,
-0.16181208307,
0.212549316967,
),
(
"2000-01-05 00:00:00",
0.498580885705,
0.731167677815,
-0.537677223318,
1.34627041952,
),
(
"2000-01-06 00:00:00",
1.12020151869,
1.56762092543,
0.00364077397681,
0.67525259227,
),
]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_test2_data(self):
df = DataFrame(
dict(
A=[4, 1, 3, 6],
B=["asd", "gsq", "ylt", "jkl"],
C=[1.1, 3.1, 6.9, 5.3],
D=[False, True, True, False],
E=["1990-11-22", "1991-10-26", "1993-11-26", "1995-12-12"],
)
)
df["E"] = to_datetime(df["E"])
self.test_frame2 = df
def _load_test3_data(self):
columns = ["index", "A", "B"]
data = [
("2000-01-03 00:00:00", 2 ** 31 - 1, -1.987670),
("2000-01-04 00:00:00", -29, -0.0412318367011),
("2000-01-05 00:00:00", 20000, 0.731167677815),
("2000-01-06 00:00:00", -290867, 1.56762092543),
]
self.test_frame3 = DataFrame(data, columns=columns)
def _load_raw_sql(self):
self.drop_table("types_test_data")
self._get_exec().execute(SQL_STRINGS["create_test_types"][self.flavor])
ins = SQL_STRINGS["insert_test_types"][self.flavor]
data = [
{
"TextCol": "first",
"DateCol": "2000-01-03 00:00:00",
"DateColWithTz": "2000-01-01 00:00:00-08:00",
"IntDateCol": 535852800,
"IntDateOnlyCol": 20101010,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": 1,
"BoolColWithNull": False,
},
{
"TextCol": "first",
"DateCol": "2000-01-04 00:00:00",
"DateColWithTz": "2000-06-01 00:00:00-07:00",
"IntDateCol": 1356998400,
"IntDateOnlyCol": 20101212,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": None,
"BoolColWithNull": None,
},
]
for d in data:
self._get_exec().execute(
ins["query"], [d[field] for field in ins["fields"]]
)
def _count_rows(self, table_name):
result = (
self._get_exec()
.execute(f"SELECT count(*) AS count_1 FROM {table_name}")
.fetchone()
)
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_query("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_parameter(self):
query = SQL_STRINGS["read_parameters"][self.flavor]
params = ["Iris-setosa", 5.1]
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_named_parameter(self):
query = SQL_STRINGS["read_named_parameters"][self.flavor]
params = {"name": "Iris-setosa", "length": 5.1}
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self, method=None):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=method)
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _to_sql_empty(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1.iloc[:0], "test_frame1")
def _to_sql_fail(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
assert self.pandasSQL.has_table("test_frame1")
msg = "Table 'test_frame1' already exists"
with pytest.raises(ValueError, match=msg):
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
self.drop_table("test_frame1")
def _to_sql_replace(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="replace")
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_append(self):
# Nuke table just in case
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="append")
assert self.pandasSQL.has_table("test_frame1")
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_method_callable(self):
check = [] # used to double check function below is really being used
def sample(pd_table, conn, keys, data_iter):
check.append(1)
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(pd_table.table.insert(), data)
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=sample)
assert self.pandasSQL.has_table("test_frame1")
assert check == [1]
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _roundtrip(self):
self.drop_table("test_frame_roundtrip")
self.pandasSQL.to_sql(self.test_frame1, "test_frame_roundtrip")
result = self.pandasSQL.read_query("SELECT * FROM test_frame_roundtrip")
result.set_index("level_0", inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _to_sql_save_index(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")], columns=["A", "B", "C"], index=["A"]
)
self.pandasSQL.to_sql(df, "test_to_sql_saves_index")
ix_cols = self._get_index_columns("test_to_sql_saves_index")
assert ix_cols == [["A"]]
def _transaction_test(self):
with self.pandasSQL.run_transaction() as trans:
trans.execute("CREATE TABLE test_trans (A INT, B TEXT)")
class DummyException(Exception):
pass
# Make sure when transaction is rolled back, no rows get inserted
ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
try:
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
raise DummyException("error")
except DummyException:
# ignore raised exception
pass
res = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res) == 0
# Make sure when transaction is committed, rows do get inserted
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
res2 = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res2) == 1
# -----------------------------------------------------------------------------
# -- Testing the public API
class _TestSQLApi(PandasSQLTest):
"""
Base class to test the public API.
From this two classes are derived to run these tests for both the
sqlalchemy mode (`TestSQLApi`) and the fallback mode
(`TestSQLiteFallbackApi`). These tests are run with sqlite3. Specific
tests for the different sql flavours are included in `_TestSQLAlchemy`.
Notes:
flavor can always be passed even in SQLAlchemy mode,
should be correctly ignored.
we don't use drop_table because that isn't part of the public api
"""
flavor = "sqlite"
mode: str
def setup_connect(self):
self.conn = self.connect()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
def load_test_data_and_sql(self):
self._load_iris_view()
self._load_test1_data()
self._load_test2_data()
self._load_test3_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_sql_view(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris_view", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, "test_frame1", self.conn)
assert sql.has_table("test_frame1", self.conn)
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
assert sql.has_table("test_frame2", self.conn)
msg = "Table 'test_frame2' already exists"
with pytest.raises(ValueError, match=msg):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
def test_to_sql_replace(self):
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="replace")
assert sql.has_table("test_frame3", self.conn)
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame3")
assert num_rows == num_entries
def test_to_sql_append(self):
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="append")
assert sql.has_table("test_frame4", self.conn)
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame4")
assert num_rows == num_entries
def test_to_sql_type_mapping(self):
sql.to_sql(self.test_frame3, "test_frame5", self.conn, index=False)
result = sql.read_sql("SELECT * FROM test_frame5", self.conn)
tm.assert_frame_equal(self.test_frame3, result)
def test_to_sql_series(self):
s = Series(np.arange(5, dtype="int64"), name="series")
sql.to_sql(s, "test_series", self.conn, index=False)
s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn)
tm.assert_frame_equal(s.to_frame(), s2)
def test_roundtrip(self):
sql.to_sql(self.test_frame1, "test_frame_roundtrip", con=self.conn)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
# HACK!
result.index = self.test_frame1.index
result.set_index("level_0", inplace=True)
result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_roundtrip_chunksize(self):
sql.to_sql(
self.test_frame1,
"test_frame_roundtrip",
con=self.conn,
index=False,
chunksize=2,
)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute("SELECT * FROM iris", con=self.conn)
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def test_date_parsing(self):
# Test date parsing in read_sql
# No Parsing
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn)
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["DateCol"]
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"IntDateOnlyCol": "%Y%m%d"},
)
assert issubclass(df.IntDateOnlyCol.dtype.type, np.datetime64)
assert df.IntDateOnlyCol.tolist() == [
pd.Timestamp("2010-10-10"),
pd.Timestamp("2010-12-12"),
]
def test_date_and_index(self):
# Test case where same column appears in parse_date and index_col
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
index_col="DateCol",
parse_dates=["DateCol", "IntDateCol"],
)
assert issubclass(df.index.dtype.type, np.datetime64)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_timedelta(self):
# see #6921
df = to_timedelta(Series(["00:00:01", "00:00:03"], name="foo")).to_frame()
with tm.assert_produces_warning(UserWarning):
df.to_sql("test_timedelta", self.conn)
result = sql.read_sql_query("SELECT * FROM test_timedelta", self.conn)
tm.assert_series_equal(result["foo"], df["foo"].astype("int64"))
def test_complex_raises(self):
df = DataFrame({"a": [1 + 1j, 2j]})
msg = "Complex datatypes not supported"
with pytest.raises(ValueError, match=msg):
df.to_sql("test_complex", self.conn)
@pytest.mark.parametrize(
"index_name,index_label,expected",
[
# no index name, defaults to 'index'
(None, None, "index"),
# specifying index_label
(None, "other_label", "other_label"),
# using the index name
("index_name", None, "index_name"),
# has index name, but specifying index_label
("index_name", "other_label", "other_label"),
# index name is integer
(0, None, "0"),
# index name is None but index label is integer
(None, 0, "0"),
],
)
def test_to_sql_index_label(self, index_name, index_label, expected):
temp_frame = DataFrame({"col1": range(4)})
temp_frame.index.name = index_name
query = "SELECT * FROM test_index_label"
sql.to_sql(temp_frame, "test_index_label", self.conn, index_label=index_label)
frame = sql.read_sql_query(query, self.conn)
assert frame.columns[0] == expected
def test_to_sql_index_label_multiindex(self):
temp_frame = DataFrame(
{"col1": range(4)},
index=MultiIndex.from_product([("A0", "A1"), ("B0", "B1")]),
)
# no index name, defaults to 'level_0' and 'level_1'
sql.to_sql(temp_frame, "test_index_label", self.conn)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[0] == "level_0"
assert frame.columns[1] == "level_1"
# specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["A", "B"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# using the index name
temp_frame.index.names = ["A", "B"]
sql.to_sql(temp_frame, "test_index_label", self.conn, if_exists="replace")
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# has index name, but specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["C", "D"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["C", "D"]
msg = "Length of 'index_label' should match number of levels, which is 2"
with pytest.raises(ValueError, match=msg):
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label="C",
)
def test_multiindex_roundtrip(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")],
columns=["A", "B", "C"],
index=["A", "B"],
)
df.to_sql("test_multiindex_roundtrip", self.conn)
result = sql.read_sql_query(
"SELECT * FROM test_multiindex_roundtrip", self.conn, index_col=["A", "B"]
)
tm.assert_frame_equal(df, result, check_index_type=True)
def test_integer_col_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])
sql.to_sql(df, "test_frame_integer_col_names", self.conn, if_exists="replace")
def test_get_schema(self):
create_sql = sql.get_schema(self.test_frame1, "test", con=self.conn)
assert "CREATE" in create_sql
def test_get_schema_dtypes(self):
float_frame = DataFrame({"a": [1.1, 1.2], "b": [2.1, 2.2]})
dtype = sqlalchemy.Integer if self.mode == "sqlalchemy" else "INTEGER"
create_sql = sql.get_schema(
float_frame, "test", con=self.conn, dtype={"b": dtype}
)
assert "CREATE" in create_sql
assert "INTEGER" in create_sql
def test_get_schema_keys(self):
frame = DataFrame({"Col1": [1.1, 1.2], "Col2": [2.1, 2.2]})
create_sql = sql.get_schema(frame, "test", con=self.conn, keys="Col1")
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("Col1")'
assert constraint_sentence in create_sql
# multiple columns as key (GH10385)
create_sql = sql.get_schema(
self.test_frame1, "test", con=self.conn, keys=["A", "B"]
)
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("A", "B")'
assert constraint_sentence in create_sql
def test_chunksize_read(self):
df = DataFrame(np.random.randn(22, 5), columns=list("abcde"))
df.to_sql("test_chunksize", self.conn, index=False)
# reading the query in one time
res1 = sql.read_sql_query("select * from test_chunksize", self.conn)
# reading the query in chunks with read_sql_query
res2 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_query(
"select * from test_chunksize", self.conn, chunksize=5
):
res2 = concat([res2, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res2)
# reading the query in chunks with read_sql_query
if self.mode == "sqlalchemy":
res3 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_table("test_chunksize", self.conn, chunksize=5):
res3 = concat([res3, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res3)
def test_categorical(self):
# GH8624
# test that categorical gets written correctly as dense column
df = DataFrame(
{
"person_id": [1, 2, 3],
"person_name": ["<NAME>", "<NAME>", "<NAME>"],
}
)
df2 = df.copy()
df2["person_name"] = df2["person_name"].astype("category")
df2.to_sql("test_categorical", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_categorical", self.conn)
tm.assert_frame_equal(res, df)
def test_unicode_column_name(self):
# GH 11431
df = DataFrame([[1, 2], [3, 4]], columns=["\xe9", "b"])
df.to_sql("test_unicode", self.conn, index=False)
def test_escaped_table_name(self):
# GH 13206
df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]})
df.to_sql("d1187b08-4943-4c8d-a7f6", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM `d1187b08-4943-4c8d-a7f6`", self.conn)
tm.assert_frame_equal(res, df)
@pytest.mark.single
@pytest.mark.skipif(not SQLALCHEMY_INSTALLED, reason="SQLAlchemy not installed")
class TestSQLApi(SQLAlchemyMixIn, _TestSQLApi):
"""
Test the public API as it would be used directly
Tests for `read_sql_table` are included here, as this is specific for the
sqlalchemy mode.
"""
flavor = "sqlite"
mode = "sqlalchemy"
def connect(self):
return sqlalchemy.create_engine("sqlite:///:memory:")
def test_read_table_columns(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
cols = ["A", "B"]
result = sql.read_sql_table("test_frame", self.conn, columns=cols)
assert result.columns.tolist() == cols
def test_read_table_index_col(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
result = sql.read_sql_table("test_frame", self.conn, index_col="index")
assert result.index.names == ["index"]
result = sql.read_sql_table("test_frame", self.conn, index_col=["A", "B"])
assert result.index.names == ["A", "B"]
result = sql.read_sql_table(
"test_frame", self.conn, index_col=["A", "B"], columns=["C", "D"]
)
assert result.index.names == ["A", "B"]
assert result.columns.tolist() == ["C", "D"]
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
iris_frame1 = sql.read_sql_table("iris", self.conn)
iris_frame2 = sql.read_sql("iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
def test_not_reflect_all_tables(self):
# create invalid table
qry = """CREATE TABLE invalid (x INTEGER, y UNKNOWN);"""
self.conn.execute(qry)
qry = """CREATE TABLE other_table (x INTEGER, y INTEGER);"""
self.conn.execute(qry)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
sql.read_sql_table("other_table", self.conn)
sql.read_sql_query("SELECT * FROM other_table", self.conn)
# Verify some things
assert len(w) == 0
def test_warning_case_insensitive_table_name(self):
# see gh-7815
#
# We can't test that this warning is triggered, a the database
# configuration would have to be altered. But here we test that
# the warning is certainly NOT triggered in a normal case.
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# This should not trigger a Warning
self.test_frame1.to_sql("CaseSensitive", self.conn)
# Verify some things
assert len(w) == 0
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes("test_index_saved")
ixs = [i["column_names"] for i in ixs]
return ixs
def test_sqlalchemy_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame(
{"time": to_datetime(["201412120154", "201412110254"], utc=True)}
)
db = sql.SQLDatabase(self.conn)
table = sql.SQLTable("test_type", db, frame=df)
# GH 9086: TIMESTAMP is the suggested type for datetimes with timezones
assert isinstance(table.table.c["time"].type, sqltypes.TIMESTAMP)
def test_database_uri_string(self):
# Test read_sql and .to_sql method with a database URI (GH10654)
test_frame1 = self.test_frame1
# db_uri = 'sqlite:///:memory:' # raises
# sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) near
# "iris": syntax error [SQL: 'iris']
with tm.ensure_clean() as name:
db_uri = "sqlite:///" + name
table = "iris"
test_frame1.to_sql(table, db_uri, if_exists="replace", index=False)
test_frame2 = sql.read_sql(table, db_uri)
test_frame3 = sql.read_sql_table(table, db_uri)
query = "SELECT * FROM iris"
test_frame4 = sql.read_sql_query(query, db_uri)
tm.assert_frame_equal(test_frame1, test_frame2)
tm.assert_frame_equal(test_frame1, test_frame3)
tm.assert_frame_equal(test_frame1, test_frame4)
# using driver that will not be installed on Travis to trigger error
# in sqlalchemy.create_engine -> test passing of this error to user
try:
# the rest of this test depends on pg8000's being absent
import pg8000 # noqa
pytest.skip("pg8000 is installed")
except ImportError:
pass
db_uri = "postgresql+pg8000://user:pass@host/dbname"
with pytest.raises(ImportError, match="pg8000"):
sql.read_sql("select * from table", db_uri)
def _make_iris_table_metadata(self):
sa = sqlalchemy
metadata = sa.MetaData()
iris = sa.Table(
"iris",
metadata,
sa.Column("SepalLength", sa.REAL),
sa.Column("SepalWidth", sa.REAL),
sa.Column("PetalLength", sa.REAL),
sa.Column("PetalWidth", sa.REAL),
sa.Column("Name", sa.TEXT),
)
return iris
def test_query_by_text_obj(self):
# WIP : GH10846
name_text = sqlalchemy.text("select * from iris where name=:name")
iris_df = sql.read_sql(name_text, self.conn, params={"name": "Iris-versicolor"})
all_names = set(iris_df["Name"])
assert all_names == {"Iris-versicolor"}
def test_query_by_select_obj(self):
# WIP : GH10846
iris = self._make_iris_table_metadata()
name_select = sqlalchemy.select([iris]).where(
iris.c.Name == sqlalchemy.bindparam("name")
)
iris_df = sql.read_sql(name_select, self.conn, params={"name": "Iris-setosa"})
all_names = set(iris_df["Name"])
assert all_names == {"Iris-setosa"}
class _EngineToConnMixin:
"""
A mixin that causes setup_connect to create a conn rather than an engine.
"""
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
super().load_test_data_and_sql()
engine = self.conn
conn = engine.connect()
self.__tx = conn.begin()
self.pandasSQL = sql.SQLDatabase(conn)
self.__engine = engine
self.conn = conn
yield
self.__tx.rollback()
self.conn.close()
self.conn = self.__engine
self.pandasSQL = sql.SQLDatabase(self.__engine)
# XXX:
# super().teardown_method(method)
@pytest.mark.single
class TestSQLApiConn(_EngineToConnMixin, TestSQLApi):
pass
@pytest.mark.single
class TestSQLiteFallbackApi(SQLiteMixIn, _TestSQLApi):
"""
Test the public sqlite connection fallback API
"""
flavor = "sqlite"
mode = "fallback"
def connect(self, database=":memory:"):
return sqlite3.connect(database)
def test_sql_open_close(self):
# Test if the IO in the database still work if the connection closed
# between the writing and reading (as in many real situations).
with tm.ensure_clean() as name:
conn = self.connect(name)
sql.to_sql(self.test_frame3, "test_frame3_legacy", conn, index=False)
conn.close()
conn = self.connect(name)
result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;", conn)
conn.close()
tm.assert_frame_equal(self.test_frame3, result)
@pytest.mark.skipif(SQLALCHEMY_INSTALLED, reason="SQLAlchemy is installed")
def test_con_string_import_error(self):
conn = "mysql://root@localhost/pandas_nosetest"
msg = "Using URI string without sqlalchemy installed"
with pytest.raises(ImportError, match=msg):
sql.read_sql("SELECT * FROM iris", conn)
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
msg = "Execution failed on sql 'iris': near \"iris\": syntax error"
with pytest.raises(sql.DatabaseError, match=msg):
sql.read_sql("iris", self.conn)
def test_safe_names_warning(self):
# GH 6798
df = DataFrame([[1, 2], [3, 4]], columns=["a", "b "]) # has a space
# warns on create table with spaces in names
with tm.assert_produces_warning():
sql.to_sql(df, "test_frame3_legacy", self.conn, index=False)
def test_get_schema2(self):
# without providing a connection object (available for backwards comp)
create_sql = sql.get_schema(self.test_frame1, "test")
assert "CREATE" in create_sql
def _get_sqlite_column_type(self, schema, column):
for col in schema.split("\n"):
if col.split()[0].strip('""') == column:
return col.split()[1]
raise ValueError(f"Column {column} not found")
def test_sqlite_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame(
{"time": to_datetime(["201412120154", "201412110254"], utc=True)}
)
db = sql.SQLiteDatabase(self.conn)
table = sql.SQLiteTable("test_type", db, frame=df)
schema = table.sql_schema()
assert self._get_sqlite_column_type(schema, "time") == "TIMESTAMP"
# -----------------------------------------------------------------------------
# -- Database flavor specific tests
class _TestSQLAlchemy(SQLAlchemyMixIn, PandasSQLTest):
"""
Base class for testing the sqlalchemy backend.
Subclasses for specific database types are created below. Tests that
deviate for each flavor are overwritten there.
"""
flavor: str
@pytest.fixture(autouse=True, scope="class")
def setup_class(cls):
cls.setup_import()
cls.setup_driver()
conn = cls.connect()
conn.connect()
def load_test_data_and_sql(self):
self._load_raw_sql()
self._load_test1_data()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
@classmethod
def setup_import(cls):
# Skip this test if SQLAlchemy not available
if not SQLALCHEMY_INSTALLED:
pytest.skip("SQLAlchemy not installed")
@classmethod
def setup_driver(cls):
raise NotImplementedError()
@classmethod
def connect(cls):
raise NotImplementedError()
def setup_connect(self):
try:
self.conn = self.connect()
self.pandasSQL = sql.SQLDatabase(self.conn)
# to test if connection can be made:
self.conn.connect()
except sqlalchemy.exc.OperationalError:
pytest.skip(f"Can't connect to {self.flavor} server")
def test_read_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_to_sql_method_multi(self):
self._to_sql(method="multi")
def test_to_sql_method_callable(self):
self._to_sql_method_callable()
def test_create_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, "temp_frame")
assert temp_conn.has_table("temp_frame")
def test_drop_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, "temp_frame")
assert temp_conn.has_table("temp_frame")
pandasSQL.drop_table("temp_frame")
assert not temp_conn.has_table("temp_frame")
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_read_table(self):
iris_frame = sql.read_sql_table("iris", con=self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_table_columns(self):
iris_frame = sql.read_sql_table(
"iris", con=self.conn, columns=["SepalLength", "SepalLength"]
)
tm.equalContents(iris_frame.columns.values, ["SepalLength", "SepalLength"])
def test_read_table_absent_raises(self):
msg = "Table this_doesnt_exist not found"
with pytest.raises(ValueError, match=msg):
sql.read_sql_table("this_doesnt_exist", con=self.conn)
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
assert issubclass(df.BoolCol.dtype.type, np.bool_)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Bool column with NA values becomes object
assert issubclass(df.BoolColWithNull.dtype.type, np.object)
def test_bigint(self):
# int64 should be converted to BigInteger, GH7433
df = DataFrame(data={"i64": [2 ** 62]})
df.to_sql("test_bigint", self.conn, index=False)
result = sql.read_sql_table("test_bigint", self.conn)
tm.assert_frame_equal(df, result)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
# MySQL SHOULD be converted.
assert issubclass(df.DateCol.dtype.type, np.datetime64)
def test_datetime_with_timezone(self):
# edge case that converts postgresql datetime with time zone types
# to datetime64[ns,psycopg2.tz.FixedOffsetTimezone..], which is ok
# but should be more natural, so coerce to datetime64[ns] for now
def check(col):
# check that a column is either datetime64[ns]
# or datetime64[ns, UTC]
if is_datetime64_dtype(col.dtype):
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
assert col[0] == Timestamp("2000-01-01 08:00:00")
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
assert col[1] == Timestamp("2000-06-01 07:00:00")
elif is_datetime64tz_dtype(col.dtype):
assert str(col.dt.tz) == "UTC"
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
# GH 6415
expected_data = [
Timestamp("2000-01-01 08:00:00", tz="UTC"),
Timestamp("2000-06-01 07:00:00", tz="UTC"),
]
expected = Series(expected_data, name=col.name)
tm.assert_series_equal(col, expected)
else:
raise AssertionError(
f"DateCol loaded with incorrect type -> {col.dtype}"
)
# GH11216
df = pd.read_sql_query("select * from types_test_data", self.conn)
if not hasattr(df, "DateColWithTz"):
pytest.skip("no column with datetime with time zone")
# this is parsed on Travis (linux), but not on macosx for some reason
# even with the same versions of psycopg2 & sqlalchemy, possibly a
# Postgresql server version difference
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
df = pd.read_sql_query(
"select * from types_test_data", self.conn, parse_dates=["DateColWithTz"]
)
if not hasattr(df, "DateColWithTz"):
pytest.skip("no column with datetime with time zone")
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
assert str(col.dt.tz) == "UTC"
check(df.DateColWithTz)
df = pd.concat(
list(
pd.read_sql_query(
"select * from types_test_data", self.conn, chunksize=1
)
),
ignore_index=True,
)
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
assert str(col.dt.tz) == "UTC"
expected = sql.read_sql_table("types_test_data", self.conn)
col = expected.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
tm.assert_series_equal(df.DateColWithTz, expected.DateColWithTz)
# xref #7139
# this might or might not be converted depending on the postgres driver
df = sql.read_sql_table("types_test_data", self.conn)
check(df.DateColWithTz)
def test_datetime_with_timezone_roundtrip(self):
# GH 9086
# Write datetimetz data to a db and read it back
# For dbs that support timestamps with timezones, should get back UTC
# otherwise naive data should be returned
expected = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3, tz="US/Pacific")}
)
expected.to_sql("test_datetime_tz", self.conn, index=False)
if self.flavor == "postgresql":
# SQLAlchemy "timezones" (i.e. offsets) are coerced to UTC
expected["A"] = expected["A"].dt.tz_convert("UTC")
else:
# Otherwise, timestamps are returned as local, naive
expected["A"] = expected["A"].dt.tz_localize(None)
result = sql.read_sql_table("test_datetime_tz", self.conn)
tm.assert_frame_equal(result, expected)
result = sql.read_sql_query("SELECT * FROM test_datetime_tz", self.conn)
if self.flavor == "sqlite":
# read_sql_query does not return datetime type like read_sql_table
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"])
tm.assert_frame_equal(result, expected)
def test_naive_datetimeindex_roundtrip(self):
# GH 23510
# Ensure that a naive DatetimeIndex isn't converted to UTC
dates = date_range("2018-01-01", periods=5, freq="6H")
expected = DataFrame({"nums": range(5)}, index=dates)
expected.to_sql("foo_table", self.conn, index_label="info_date")
result = sql.read_sql_table("foo_table", self.conn, index_col="info_date")
# result index with gain a name from a set_index operation; expected
tm.assert_frame_equal(result, expected, check_names=False)
def test_date_parsing(self):
# No Parsing
df = sql.read_sql_table("types_test_data", self.conn)
expected_type = object if self.flavor == "sqlite" else np.datetime64
assert issubclass(df.DateCol.dtype.type, expected_type)
df = sql.read_sql_table("types_test_data", self.conn, parse_dates=["DateCol"])
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"}
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data",
self.conn,
parse_dates={"DateCol": {"format": "%Y-%m-%d %H:%M:%S"}},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"IntDateCol": {"unit": "s"}}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_datetime(self):
df = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)}
)
df.to_sql("test_datetime", self.conn)
# with read_table -> type information from schema used
result = sql.read_sql_table("test_datetime", self.conn)
result = result.drop("index", axis=1)
tm.assert_frame_equal(result, df)
# with read_sql -> no type information -> sqlite has no native
result = sql.read_sql_query("SELECT * FROM test_datetime", self.conn)
result = result.drop("index", axis=1)
if self.flavor == "sqlite":
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"])
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_NaT(self):
df = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)}
)
df.loc[1, "A"] = np.nan
df.to_sql("test_datetime", self.conn, index=False)
# with read_table -> type information from schema used
result = sql.read_sql_table("test_datetime", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql -> no type information -> sqlite has no native
result = sql.read_sql_query("SELECT * FROM test_datetime", self.conn)
if self.flavor == "sqlite":
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"], errors="coerce")
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql("test_date", self.conn, index=False)
res = read_sql_table("test_date", self.conn)
result = res["a"]
expected = to_datetime(df["a"])
# comes back as datetime64
tm.assert_series_equal(result, expected)
def test_datetime_time(self):
# test support for datetime.time
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
df.to_sql("test_time", self.conn, index=False)
res = read_sql_table("test_time", self.conn)
tm.assert_frame_equal(res, df)
# GH8341
# first, use the fallback to have the sqlite adapter put in place
sqlite_conn = TestSQLiteFallback.connect()
sql.to_sql(df, "test_time2", sqlite_conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_time2", sqlite_conn)
ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(ref, res) # check if adapter is in place
# then test if sqlalchemy is unaffected by the sqlite adapter
sql.to_sql(df, "test_time3", self.conn, index=False)
if self.flavor == "sqlite":
res = sql.read_sql_query("SELECT * FROM test_time3", self.conn)
ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(ref, res)
res = sql.read_sql_table("test_time3", self.conn)
tm.assert_frame_equal(df, res)
def test_mixed_dtype_insert(self):
# see GH6509
s1 = Series(2 ** 25 + 1, dtype=np.int32)
s2 = Series(0.0, dtype=np.float32)
df = DataFrame({"s1": s1, "s2": s2})
# write and read again
df.to_sql("test_read_write", self.conn, index=False)
df2 = sql.read_sql_table("test_read_write", self.conn)
tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True)
def test_nan_numeric(self):
# NaNs in numeric float column
df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]})
df.to_sql("test_nan", self.conn, index=False)
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
tm.assert_frame_equal(result, df)
def test_nan_fullcolumn(self):
# full NaN column (numeric float column)
df = DataFrame({"A": [0, 1, 2], "B": [np.nan, np.nan, np.nan]})
df.to_sql("test_nan", self.conn, index=False)
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql -> not type info from table -> stays None
df["B"] = df["B"].astype("object")
df["B"] = None
result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
tm.assert_frame_equal(result, df)
def test_nan_string(self):
# NaNs in string column
df = DataFrame({"A": [0, 1, 2], "B": ["a", "b", np.nan]})
df.to_sql("test_nan", self.conn, index=False)
# NaNs are coming back as None
df.loc[2, "B"] = None
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
tm.assert_frame_equal(result, df)
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes(tbl_name)
ixs = [i["column_names"] for i in ixs]
return ixs
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
self._transaction_test()
def test_get_schema_create_table(self):
# Use a dataframe without a bool column, since MySQL converts bool to
# TINYINT (which read_sql_table returns as an int and causes a dtype
# mismatch)
self._load_test3_data()
tbl = "test_get_schema_create_table"
create_sql = sql.get_schema(self.test_frame3, tbl, con=self.conn)
blank_test_df = self.test_frame3.iloc[:0]
self.drop_table(tbl)
self.conn.execute(create_sql)
returned_df = sql.read_sql_table(tbl, self.conn)
tm.assert_frame_equal(returned_df, blank_test_df, check_index_type=False)
self.drop_table(tbl)
def test_dtype(self):
cols = ["A", "B"]
data = [(0.8, True), (0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql("dtype_test", self.conn)
df.to_sql("dtype_test2", self.conn, dtype={"B": sqlalchemy.TEXT})
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltype = meta.tables["dtype_test2"].columns["B"].type
assert isinstance(sqltype, sqlalchemy.TEXT)
msg = "The type of B is not a SQLAlchemy type"
with pytest.raises(ValueError, match=msg):
df.to_sql("error", self.conn, dtype={"B": str})
# GH9083
df.to_sql("dtype_test3", self.conn, dtype={"B": sqlalchemy.String(10)})
meta.reflect()
sqltype = meta.tables["dtype_test3"].columns["B"].type
assert isinstance(sqltype, sqlalchemy.String)
assert sqltype.length == 10
# single dtype
df.to_sql("single_dtype_test", self.conn, dtype=sqlalchemy.TEXT)
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltypea = meta.tables["single_dtype_test"].columns["A"].type
sqltypeb = meta.tables["single_dtype_test"].columns["B"].type
assert isinstance(sqltypea, sqlalchemy.TEXT)
assert isinstance(sqltypeb, sqlalchemy.TEXT)
def test_notna_dtype(self):
cols = {
"Bool": Series([True, None]),
"Date": Series([datetime(2012, 5, 1), None]),
"Int": Series([1, None], dtype="object"),
"Float": Series([1.1, None]),
}
df = DataFrame(cols)
tbl = "notna_dtype_test"
df.to_sql(tbl, self.conn)
returned_df = sql.read_sql_table(tbl, self.conn) # noqa
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
if self.flavor == "mysql":
my_type = sqltypes.Integer
else:
my_type = sqltypes.Boolean
col_dict = meta.tables[tbl].columns
assert isinstance(col_dict["Bool"].type, my_type)
assert isinstance(col_dict["Date"].type, sqltypes.DateTime)
assert isinstance(col_dict["Int"].type, sqltypes.Integer)
assert isinstance(col_dict["Float"].type, sqltypes.Float)
def test_double_precision(self):
V = 1.23456789101112131415
df = DataFrame(
{
"f32": Series([V], dtype="float32"),
"f64": Series([V], dtype="float64"),
"f64_as_f32": Series([V], dtype="float64"),
"i32": Series([5], dtype="int32"),
"i64": Series([5], dtype="int64"),
}
)
df.to_sql(
"test_dtypes",
self.conn,
index=False,
if_exists="replace",
dtype={"f64_as_f32": sqlalchemy.Float(precision=23)},
)
res = sql.read_sql_table("test_dtypes", self.conn)
# check precision of float64
assert np.round(df["f64"].iloc[0], 14) == np.round(res["f64"].iloc[0], 14)
# check sql types
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
col_dict = meta.tables["test_dtypes"].columns
assert str(col_dict["f32"].type) == str(col_dict["f64_as_f32"].type)
assert isinstance(col_dict["f32"].type, sqltypes.Float)
assert isinstance(col_dict["f64"].type, sqltypes.Float)
assert isinstance(col_dict["i32"].type, sqltypes.Integer)
assert isinstance(col_dict["i64"].type, sqltypes.BigInteger)
def test_connectable_issue_example(self):
# This tests the example raised in issue
# https://github.com/pandas-dev/pandas/issues/10104
def foo(connection):
query = "SELECT test_foo_data FROM test_foo_data"
return sql.read_sql_query(query, con=connection)
def bar(connection, data):
data.to_sql(name="test_foo_data", con=connection, if_exists="append")
def main(connectable):
with connectable.connect() as conn:
with conn.begin():
foo_data = conn.run_callable(foo)
conn.run_callable(bar, foo_data)
DataFrame({"test_foo_data": [0, 1, 2]}).to_sql("test_foo_data", self.conn)
main(self.conn)
def test_temporary_table(self):
test_data = "Hello, World!"
expected = DataFrame({"spam": [test_data]})
Base = declarative.declarative_base()
class Temporary(Base):
__tablename__ = "temp_test"
__table_args__ = {"prefixes": ["TEMPORARY"]}
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
spam = sqlalchemy.Column(sqlalchemy.Unicode(30), nullable=False)
Session = sa_session.sessionmaker(bind=self.conn)
session = Session()
with session.transaction:
conn = session.connection()
Temporary.__table__.create(conn)
session.add(Temporary(spam=test_data))
session.flush()
df = sql.read_sql_query(sql=sqlalchemy.select([Temporary.spam]), con=conn)
tm.assert_frame_equal(df, expected)
class _TestSQLAlchemyConn(_EngineToConnMixin, _TestSQLAlchemy):
def test_transactions(self):
pytest.skip("Nested transactions rollbacks don't work with Pandas")
class _TestSQLiteAlchemy:
"""
Test the sqlalchemy backend against an in-memory sqlite database.
"""
flavor = "sqlite"
@classmethod
def connect(cls):
return sqlalchemy.create_engine("sqlite:///:memory:")
@classmethod
def setup_driver(cls):
# sqlite3 is built-in
cls.driver = None
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
# sqlite has no boolean type, so integer type is returned
assert issubclass(df.BoolCol.dtype.type, np.integer)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Non-native Bool column with NA values stays as float
assert issubclass(df.BoolColWithNull.dtype.type, np.floating)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
def test_bigint_warning(self):
# test no warning for BIGINT (to support int64) is raised (GH7433)
df = DataFrame({"a": [1, 2]}, dtype="int64")
df.to_sql("test_bigintwarning", self.conn, index=False)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
sql.read_sql_table("test_bigintwarning", self.conn)
assert len(w) == 0
class _TestMySQLAlchemy:
"""
Test the sqlalchemy backend against an MySQL database.
"""
flavor = "mysql"
@classmethod
def connect(cls):
url = "mysql+{driver}://root@localhost/pandas_nosetest"
return sqlalchemy.create_engine(
url.format(driver=cls.driver), connect_args=cls.connect_args
)
@classmethod
def setup_driver(cls):
pymysql = pytest.importorskip("pymysql")
cls.driver = "pymysql"
cls.connect_args = {"client_flag": pymysql.constants.CLIENT.MULTI_STATEMENTS}
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
# MySQL has no real BOOL type (it's an alias for TINYINT)
assert issubclass(df.BoolCol.dtype.type, np.integer)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Bool column with NA = int column with NA values => becomes float
assert issubclass(df.BoolColWithNull.dtype.type, np.floating)
def test_read_procedure(self):
import pymysql
# see GH7324. Although it is more an api test, it is added to the
# mysql tests as sqlite does not have stored procedures
df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
df.to_sql("test_procedure", self.conn, index=False)
proc = """DROP PROCEDURE IF EXISTS get_testdb;
CREATE PROCEDURE get_testdb ()
BEGIN
SELECT * FROM test_procedure;
END"""
connection = self.conn.connect()
trans = connection.begin()
try:
r1 = connection.execute(proc) # noqa
trans.commit()
except pymysql.Error:
trans.rollback()
raise
res1 = sql.read_sql_query("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res1)
# test delegation to read_sql_query
res2 = sql.read_sql("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res2)
class _TestPostgreSQLAlchemy:
"""
Test the sqlalchemy backend against an PostgreSQL database.
"""
flavor = "postgresql"
@classmethod
def connect(cls):
url = "postgresql+{driver}://postgres@localhost/pandas_nosetest"
return sqlalchemy.create_engine(url.format(driver=cls.driver))
@classmethod
def setup_driver(cls):
pytest.importorskip("psycopg2")
cls.driver = "psycopg2"
def test_schema_support(self):
# only test this for postgresql (schema's not supported in
# mysql/sqlite)
df = DataFrame({"col1": [1, 2], "col2": [0.1, 0.2], "col3": ["a", "n"]})
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
# write dataframe to different schema's
df.to_sql("test_schema_public", self.conn, index=False)
df.to_sql(
"test_schema_public_explicit", self.conn, index=False, schema="public"
)
df.to_sql("test_schema_other", self.conn, index=False, schema="other")
# read dataframes back in
res1 = sql.read_sql_table("test_schema_public", self.conn)
tm.assert_frame_equal(df, res1)
res2 = sql.read_sql_table("test_schema_public_explicit", self.conn)
tm.assert_frame_equal(df, res2)
res3 = sql.read_sql_table(
"test_schema_public_explicit", self.conn, schema="public"
)
tm.assert_frame_equal(df, res3)
res4 = sql.read_sql_table("test_schema_other", self.conn, schema="other")
tm.assert_frame_equal(df, res4)
msg = "Table test_schema_other not found"
with pytest.raises(ValueError, match=msg):
sql.read_sql_table("test_schema_other", self.conn, schema="public")
# different if_exists options
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
# write dataframe with different if_exists options
df.to_sql("test_schema_other", self.conn, schema="other", index=False)
df.to_sql(
"test_schema_other",
self.conn,
schema="other",
index=False,
if_exists="replace",
)
df.to_sql(
"test_schema_other",
self.conn,
schema="other",
index=False,
if_exists="append",
)
res = sql.read_sql_table("test_schema_other", self.conn, schema="other")
tm.assert_frame_equal(concat([df, df], ignore_index=True), res)
# specifying schema in user-provided meta
# The schema won't be applied on another Connection
# because of transactional schemas
if isinstance(self.conn, sqlalchemy.engine.Engine):
engine2 = self.connect()
meta = sqlalchemy.MetaData(engine2, schema="other")
pdsql = sql.SQLDatabase(engine2, meta=meta)
pdsql.to_sql(df, "test_schema_other2", index=False)
pdsql.to_sql(df, "test_schema_other2", index=False, if_exists="replace")
pdsql.to_sql(df, "test_schema_other2", index=False, if_exists="append")
res1 = sql.read_sql_table("test_schema_other2", self.conn, schema="other")
res2 = pdsql.read_table("test_schema_other2")
tm.assert_frame_equal(res1, res2)
def test_copy_from_callable_insertion_method(self):
# GH 8953
# Example in io.rst found under _io.sql.method
# not available in sqlite, mysql
def psql_insert_copy(table, conn, keys, data_iter):
# gets a DBAPI connection that can provide a cursor
dbapi_conn = conn.connection
with dbapi_conn.cursor() as cur:
s_buf = StringIO()
writer = csv.writer(s_buf)
writer.writerows(data_iter)
s_buf.seek(0)
columns = ", ".join(f'"{k}"' for k in keys)
if table.schema:
table_name = f"{table.schema}.{table.name}"
else:
table_name = table.name
sql_query = f"COPY {table_name} ({columns}) FROM STDIN WITH CSV"
cur.copy_expert(sql=sql_query, file=s_buf)
expected = | DataFrame({"col1": [1, 2], "col2": [0.1, 0.2], "col3": ["a", "n"]}) | pandas.DataFrame |
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
import numpy as np
import pandas as pd
from mlos.Logger import create_logger
from mlos.Optimizers.ExperimentDesigner.UtilityFunctions.UtilityFunction import UtilityFunction
from mlos.Optimizers.ParetoFrontier import ParetoFrontier
from mlos.Optimizers.RegressionModels.MultiObjectiveRegressionModel import MultiObjectiveRegressionModel
from mlos.Optimizers.RegressionModels.Prediction import Prediction
from mlos.Optimizers.RegressionModels.MultiObjectivePrediction import MultiObjectivePrediction
from mlos.Spaces import SimpleHypergrid, DiscreteDimension, Point
from mlos.Spaces.Configs.ComponentConfigStore import ComponentConfigStore
from mlos.Tracer import trace
multi_objective_probability_of_improvement_utility_function_config_store = ComponentConfigStore(
parameter_space=SimpleHypergrid(
name="multi_objective_probability_of_improvement_config",
dimensions=[
DiscreteDimension(name="num_monte_carlo_samples", min=100, max=1000)
]
),
default=Point(
num_monte_carlo_samples=100
)
)
class MultiObjectiveProbabilityOfImprovementUtilityFunction(UtilityFunction):
"""Computes the probability of improvement (POI) of a set of configurations over the existing pareto frontier.
We are up against several requirements here: we need to be able to predict the probability of improvement in a multi-dimensional
objective space. Our assumptions (see below) make each distribution a multi-dimensional blob that's cut in two by a nearly
arbitrarily complex surface of the pareto frontier. This precludes any closed form solution to the POI question.
Thus, we take a Monte Carlo approach: we generate a bunch of points from the predictive distribution, compute the proportion of
these that are dominated by the existing pareto frontier, and use that proportion as an estimator for the probability of
improvement.
Assuming Prediction Error Independence:
We assume that we can sample independently from the distributions described by the multi-objective prediction object. That
is to say that if we assume:
P(objective_2 = y2 | objective_1 = y1) == P(objective_2 = y2)
In practice I do not know how often this assumption is true. On the one hand, correlations between objective_1 and
objective_2 should have been picked up by the model doing the predictions so by the time we get here, assuming that prediction
errors are uncorrelated seems at least partly defensible. On the other hand, if for example predictions came from leaves at
the edge of the parameter space then the predictions can effectively be extrapolations. In such a case, the prediction errors
are correlated and our Monte Carlo sampling is biased.
I don't know how important this is in practice so I propose going forward with this simple solution, and treating it as
as a baseline that more sophisticated approaches can improve upon in the future.
"""
def __init__(
self,
function_config: Point,
pareto_frontier: ParetoFrontier,
surrogate_model: MultiObjectiveRegressionModel,
logger=None
):
if logger is None:
logger = create_logger(self.__class__.__name__)
self.logger = logger
self.config = function_config
self.pareto_frontier = pareto_frontier
self.surrogate_model: MultiObjectiveRegressionModel = surrogate_model
@trace()
def __call__(self, feature_values_pandas_frame: pd.DataFrame):
self.logger.debug(f"Computing utility values for {len(feature_values_pandas_frame.index)} points.")
if self.pareto_frontier.empty or not self.surrogate_model.trained:
# All of the configs are equally likely to improve upon a non-existing solution.
#
return pd.DataFrame(columns=['utility'], dtype='float')
feature_values_pandas_frame = self.surrogate_model.input_space.filter_out_invalid_rows(original_dataframe=feature_values_pandas_frame)
multi_objective_predictions: MultiObjectivePrediction = self.surrogate_model.predict(features_df=feature_values_pandas_frame)
# Now that we have predictions for all of the features_df rows, we need to sample random points from the distribution
# described by each prediction and then we want to check how many of those random points are dominated by the existing
# pareto frontier. The proportion of non-dominated to all points is our estimator for the probability of improvement.
# Note that we could compute the confidence intervals on the POI, and we could in theory keep sampling more intelligently.
# That is, we could reject really dominated configurations after only a few samples, but if there are any close contenders,
# we could sample more aggressively from their distributions until we reach a statistically significant difference between
# their POI esitmates (and then sample a bit more, to fortify our conclusions).
valid_predictions_index = feature_values_pandas_frame.index
for _, prediction in multi_objective_predictions:
prediction_df = prediction.get_dataframe()
valid_predictions_index = valid_predictions_index.intersection(prediction_df.index)
# Let's make sure all predictions have a standard deviation available.
#
for _, objective_prediction in multi_objective_predictions:
std_dev_column_name = objective_prediction.add_standard_deviation_column()
batched_poi_df = self._batched_probability_of_improvement(
multi_objective_predictions=multi_objective_predictions,
valid_predictions_index=valid_predictions_index,
std_dev_column_name=std_dev_column_name
)
batched_poi_df['utility'] = | pd.to_numeric(arg=batched_poi_df['utility'], errors='raise') | pandas.to_numeric |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from django.db.models import Q
from django_pandas.io import read_frame
from shuup.core.models import OrderLine, OrderStatus
from shuup_recommender.models import ProductView
from ._base import BaseRecommender
from ._consts import EVERYTHING
def distance(x, y):
return np.sqrt(np.power(x, 2) + np.power(y, 2))
class MostSoldProducts(BaseRecommender):
""" Most sold products recommender
Recommend the most sold products
To filter orders or shops, use the kwargs param when
instantiating the recommender:
kwargs:
base_orders (queryset of shuup.Order): calculates the most
sold products from this queryset.
shop (shuup.Shop): filters orders based on this shop.
shops (iterator[shuup.Shop]): filters orders based on these shops.
Usage example:
```
most_sold_products = MostSoldProducts(shop=my_shop).recommend(20)
```
"""
def _get_order_lines(self):
orders = self.kwargs.get("base_orders")
if orders:
order_lines = OrderLine.objects.filter(order__in=orders).products()
else:
filters = Q(order__status=OrderStatus.objects.get_default_complete())
shop = self.kwargs.get("shop")
shops = self.kwargs.get("shops")
if shop:
filters &= Q(order__shop=shop)
elif shops:
filters &= Q(order__shop__in=shops)
order_lines = OrderLine.objects.products().filter(filters)
return order_lines
def recommend(self, n=10, **kwargs):
"""
:returns: DataFrame of [product_id, rank] sorted by rank DESC
product_id (int): ID of the product
sold_rank (float): rank of the product (from 0 to 1)
"""
order_lines = self._get_order_lines()
# read order lines into DataFrame
items_df = read_frame(order_lines, fieldnames=["product_id", "quantity"], verbose=False)
# group by product ID and sum quantities
sold_items = items_df.groupby(["product_id"]).sum()
# get the max value of quantity
max_value = sold_items["quantity"].max()
# normalize the values
sold_items["sold_rank"] = (sold_items["quantity"] / max_value).apply(pd.to_numeric)
# sort the products and remove auxiliar columns
ranked_products = sold_items[["sold_rank"]].sort_values("sold_rank", ascending=False)
# if `n` is 0 or None, return everything
return ranked_products.head(n) if n else ranked_products
class MostViewedProducts(BaseRecommender):
""" Most viwed products recommender
Recommend the most viwed products
This recommender works by using then
shuup_recommender.ProductView model instances.
Thus, it is your responsability to create instances
of that model everytime one actually view the product.
All ProductView will be used by this recommender by defaut.
To use a custom set of views, you should use the
`views` kwargs key when instantiating the recommender:
```
most_viewed_products = MostViewedProducts(
views=ProductView.objects.filter(user__isnull=True)
).recommend()
```
"""
def _get_views(self):
if self.kwargs.get("views"):
return self.kwargs["views"]
return ProductView.objects.all()
def recommend(self, n=10, **kwargs):
"""
:returns: DataFrame of [product_id, rank] sorted by rank DESC
product_id (int): ID of the product
view_rank (float): rank of the product (from 0 to 1)
"""
product_views_df = read_frame(self._get_views(), fieldnames=["product_id"], verbose=False)
product_views_df["views"] = 1
# group by product ID and sum views
viewed_products = product_views_df.groupby(["product_id"]).sum()
# get the max value of views
max_value = viewed_products["views"].max()
# normalize the values - this way we can easily plot them later
viewed_products["view_rank"] = (viewed_products["views"] / max_value).apply(pd.to_numeric)
# sort the products and remove auxiliar columns
ranked_products = viewed_products[["view_rank"]].sort_values("view_rank", ascending=False)
# if `n` is 0 or None, return everything
return ranked_products.head(n) if n else ranked_products
class PopularProducts(object):
"""Popular products recommender
Recommend the popular products
This is the mix of most sold and most viewed products recommenders.
All kwargs parameters from `MostViewedProducts` and `MostSoldProducts`
can be used.
"""
def recommend(self, n=10, **kwargs):
"""
:returns: DataFrame of [product_id, rank] sorted by rank DESC
product_id (int): ID of the product
rank (float): rank of the product (from 0 to 1)
"""
viewed_products_rank = MostViewedProducts(**kwargs).recommend(EVERYTHING)
sold_items_rank = MostSoldProducts(**kwargs).recommend(EVERYTHING)
products_rank = | pd.merge(sold_items_rank, viewed_products_rank, how="outer", left_index=True, right_index=True) | pandas.merge |
import math
import pandas as pd
from scipy import stats
import streamlit as st
st.title("Udacity A/B Testing Final Project")
"""
I recently completed Google and Udacity's introduction to A/B testing, which was pretty interesting! This is my take on the final project.
The problem definition below comes almost verbatim from the instructions found [here](https://docs.google.com/document/u/1/d/1aCquhIqsUApgsxQ8-SQBAigFDcfWVVohLEXcV6jWbdI/pub?embedded=True).
*At the time of this experiment, Udacity courses currently have two options on the course overview page: "start free trial", and "access course materials".
If the student clicks "start free trial", they will be asked to enter their credit card information, and then they will be enrolled in a free trial
for the paid version of the course. After 14 days, they will automatically be charged unless they cancel first. If the student clicks
"access course materials", they will be able to view the videos and take the quizzes for free, but they will not receive coaching support
or a verified certificate, and they will not submit their final project for feedback.*
*In the experiment, Udacity tested a change where if the student clicked "start free trial", they were asked how much time they had available
to devote to the course. If the student indicated 5 or more hours per week, they would be taken through the checkout process as usual.
If they indicated fewer than 5 hours per week, a message would appear indicating that Udacity courses usually require a greater time commitment
for successful completion, and suggesting that the student might like to access the course materials for free. At this point,
the student would have the option to continue enrolling in the free trial, or access the course materials for free instead.
The screenshot below shows what the experiment looks like.*
"""
from PIL import Image
image = Image.open("screenshot.png")
st.image(image, caption="The experimental pop-up", use_column_width=True)
"""
*The hypothesis was that this might set clearer expectations for students upfront, thus reducing the number of frustrated students who left the free trial
because they didn't have enough time—without significantly reducing the number of students to continue past the free trial and eventually
complete the course. If this hypothesis held true, Udacity could improve the overall student experience and improve coaches' capacity to support students
who are likely to complete the course.*
*The unit of diversion is a cookie, although if the student enrolls in the free trial, they are tracked by user-id from that point forward.
The same user-id cannot enroll in the free trial twice. For users that do not enroll, their user-id is not tracked in the experiment,
even if they were signed in when they visited the course overview page.*
## Metric choice
As evaluation metrics, I chose:
* Gross conversion, that is number of users to enroll in the free trial divided by number of users to click on "start free trial" ($d_{min}=0.01$)
* Net conversion, that is number of users to make at least one payment divided by number of users to click on "start free trial" ($d_{min}=0.0075$)
If the experiment has an effect, then we would expect gross conversion to decrease, and hope for net conversion not to significantly decrease.
The invariant metrics I used are:
* Number of cookies visiting the course overview page
* Number of clicks on the “start free trial” button
This makes sense because all of these activities occur before the user is even shown the modified page,
which is only shown after the button is clicked. Therefore, any change in these metrics after the experiments should be treated as suspicious,
and investigated further.
## Measuring variability
The next step is estimating the baseline variability of each evaluation metric. This allows us to later calculate a suitable sample size for
the experiment, and verify that the experiment is feasible. Below are the rough estimates of the baseline values for each metric.
"""
baseline_values = pd.read_csv("baseline.csv", names=["Metric", "Value"])
baseline_values
r"""
I use pageviews from here onwards as short-hand for the number of unique cookies viewing the course overview page
(in the given data, it shows as cookies).
The exercise calls for making an analytical estimate of the standard deviation based on a $N_{pageviews}=5000$.
However, for both metrics,
the standard deviation is based on the number of users who clicked the "start free trial" button, not the number of pageviews.
The above data allows us to estimate this number for our given sample size, knowing the proportion of people who do click the button.
The resulting standard deviations are shown in the following table.
"""
n = 5000
sd_gross_conversion = math.sqrt(
(baseline_values.loc[4, "Value"] * (1 - baseline_values.loc[4, "Value"]))
/ (n * (baseline_values.loc[3, "Value"]))
)
sd_net_conversion = math.sqrt(
(baseline_values.loc[6, "Value"] * (1 - baseline_values.loc[6, "Value"]))
/ (n * (baseline_values.loc[3, "Value"]))
)
sd_all = pd.DataFrame(
[[sd_gross_conversion, sd_net_conversion]],
columns=["Sd. Gross conversion", "Sd. Net conversion"],
)
sd_all
"""
Since both metrics are probabilities, they should approximately follow a binomial distribution.
Because of this, I would say we can expect the analytical estimates to be accurate.
There should be no need to try and collect empirical estimates for either.
"""
r"""
## Experiment sizing
### Sample size
The sample size is calculated on a metric-wise basis with the help of Evan Miller's
[sample size calculator](https://www.evanmiller.org/ab-testing/sample-size.html), which I have reimplemented here.
Since we have two evaluation metrics, we use the maximum of the two calculated sample sizes as the final sample size,
in order to ensure enough power for each metric.
We also need to keep in mind that the sample sizes we calculated refer to number of clicks needed, and needs to be converted to a sample size in pageviews.
With this in mind the results are given below.
The significance level and power used are standard picks, with $\alpha=.05$ and $\beta=.2$.
"""
from sample_size import sample_size
alpha = 0.05
beta = 0.2
d_min_gross_diff = 0.01
d_min_net_diff = 0.0075
gross_sample_size = (
sample_size(
alpha, 1 - beta, baseline_values.loc[4, "Value"], d_min_gross_diff,
)
* 2
/ baseline_values.loc[3, "Value"]
)
net_sample_size = (
sample_size(
alpha, 1 - beta, baseline_values.loc[6, "Value"], d_min_net_diff,
)
* 2
/ baseline_values.loc[3, "Value"]
)
# Index: metric columns: d_min, sample size
sample_sizes = pd.DataFrame(
[
[
baseline_values.loc[4, "Value"],
d_min_gross_diff,
gross_sample_size,
],
[baseline_values.loc[6, "Value"], d_min_net_diff, net_sample_size],
],
columns=["Baseline value", "Minimum detectable difference", "Sample size"],
index=["Gross conversion", "Net conversion"],
)
sample_sizes
total_sample_size = math.ceil(max(gross_sample_size, net_sample_size))
"""
The resulting sample size is $N_{pageviews}=""" + str(
total_sample_size
) + """$. Due to implementation difference this number differs slightly from the original sample size calculator."""
experiment_duration = math.ceil(
total_sample_size / baseline_values.loc[0, "Value"]
)
"""
### Duration vs. exposure
I decided to run the experiment on 100% of traffic, for a couple of reasons:
* No other running or anticipated future test is mentioned in the project instructions, so I assumed there would be enough bandwidth
for full traffic to be split 50/50.
* The potential harm in showing the feature to many users even if it ends up being taken away is very low.
The main disadvantage I identified for this approach is that there might be a bug in the feature that makes it harder for users to actually
complete their enrollment. However, this risk seemed minor, considering the feature is simply a pop-up.
A possible approach would be to run a smaller test on a tiny group of users beforehand,
just to make sure the feature works as expected before rolling it out to a larger experimental group.
When running the experiment on all traffic, the duration comes down to $n_{days}=""" + str(
experiment_duration
) + """$.
Since this time interval covers at least two weekends, we can expect the results not to be significantly biased by weekly seasonalities.
## Analysis
After running the mock experiment for the chosen number of days, we can analyze the results.
The first step, before we evaluate the significance of the results, is running sanity checks on the invariant metrics.
If, and only if, the sanity checks pass, we calculate statistical and practical significance, and run a sign test for additional confirmation.
**Note:** This is where the calculations started getting confusing for me, and perhaps for you too, if you are working on the project yourself.
First, In the Excel file we are given, enrollments and payments are missing after November 2 for some reason.
Moreover, the results Udacity expects you to find are not related to how many days your decided to run your experiment.
How many days you have to include (if you want your calculations to be marked as correct) even varies between the sanity checks and the effect size and sign
test calculations, which I found strange.
I consider the correct calculations to be based on the $n_{days}$ I calculated, so you will see my numbers based on that.
However, if you adjust the number of days on the slider, you can match Udacity's results. The tables will update immediately.
"""
max_days = len(pd.read_csv("control.csv"))
number_of_days = st.slider(
label="Number of days to run the experiment",
min_value=1,
max_value=max_days,
value=experiment_duration,
)
control_data = | pd.read_csv("control.csv") | pandas.read_csv |
# -*- coding:utf-8 -*-
import pandas as pd
import numpy as np
import warnings
def test(x):
print('类型:\n{}\n'.format(type(x)))
if isinstance(x, pd.Series):
print('竖标:\n{}\n'.format(x.index))
else:
print('竖标:\n{}\n'.format(x.index))
print('横标:\n{}\n'.format(x.columns))
# print('内容:\n{}\n'.format(x.values))
print('------------------------------------\n')
def func_pdf(x, a, u, o):
return a * (1 / (2 * np.pi * o ** 2) ** 0.5) * np.exp(-(x - u) ** 2 / (2 * o ** 2))
def choose_right(x, u):
if x < u:
x = u
return x
warnings.filterwarnings("ignore")
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
# ----------- 连接 -----------
# excel 内只能包含表格,要把表格外的东西删掉(比如 数据来源:wind)
addr = r'C:\Users\Administrator\Desktop\ANA'
addr_in = addr + '\原始表.xlsx'
addr_out = addr + '\函数拟合表.xlsx'
addr_final = addr + '\概率计算表.xlsx'
df_origin = pd.read_excel(addr_in, sheet_name='对数')
stata_list = ['均值', '标准差']
result_list = []
for i in stata_list:
df_stata = pd.read_excel(addr_out, sheet_name=i)
df_stata.rename(columns={'Unnamed: 0': '三级行业'}, inplace=True)
result = pd.merge(df_origin, df_stata, on='三级行业', how='left')
num = len(df_stata.columns) + 3
b = result[result.columns[num:]]
b.columns = [item.strip('_y') for item in b.columns]
result_list.append(b)
a = df_origin[df_origin.columns[4:]]
b = result_list[0]
a[a < b] = -100000000000
# 计算指标
c = func_pdf(a, 1, result_list[0], result_list[1])
c = c[df_stata.columns[2:]]
c.to_csv(addr_final, encoding='utf-8-sig')
c.replace(0, inplace=True)
product = []
for i, v in c.iterrows():
trans = v.sort_values().tolist()
product_trans = trans[0] * trans[1] * trans[2]
product.append(product_trans)
product_trans = pd.DataFrame(product)
c = | pd.concat([c, product_trans], axis=1) | pandas.concat |
# Recurrent Neural Network
# Part 1 - Data Preprocessing
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the training set
dataset_train = pd.read_csv('Google_Stock_Price_Train.csv')
training_set = dataset_train.iloc[:,1:2].values
# Feature Scaling
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range = (0, 1))
training_set_scaled = sc.fit_transform(training_set)
# Creating a data structure with 60 timesteps and 1 output
X_train = []
y_train = []
for i in range(60, 1258):
X_train.append(training_set_scaled[i-60:i, 0])
y_train.append(training_set_scaled[i, 0])
X_train, y_train = np.array(X_train), np.array(y_train)
# Reshaping
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
# Part 2 - Building the RNN
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
# Initialising the RNN
regressor = Sequential()
# Adding the first LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = 50, return_sequences = True, input_shape = (X_train.shape[1], 1)))
regressor.add(Dropout(0.2))
# Adding a second LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.2))
# Adding a third LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.2))
# Adding a fourth LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = 50))
regressor.add(Dropout(0.2))
# Adding the output layer
regressor.add(Dense(units = 1))
# Compiling the RNN
regressor.compile(optimizer = 'adam', loss = 'mean_squared_error')
# Fitting the RNN to the Training set
regressor.fit(X_train, y_train, epochs = 100, batch_size = 32)
# Part 3 - Making the predictions and visualising the results
# Getting the real stock price of 2017
dataset_test = | pd.read_csv('Google_Stock_Price_Test.csv') | pandas.read_csv |
import copy
import time
from functools import partial
import matplotlib
import pint
import os
from pint.quantity import _Quantity
from eam_core.YamlLoader import YamlLoader
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from eam_core import Q_, FormulaProcess, collect_process_variables, SimulationControl
import csv
import inspect
import logging
import subprocess
from operator import itemgetter
from typing import Dict, Any, Optional, Tuple
import numpy as np
import pandas as pd
import pypandoc
import simplejson
from networkx.drawing.nx_pydot import to_pydot
from tabulate import tabulate
import networkx as nx
from networkx.drawing.nx_agraph import graphviz_layout, write_dot
import errno
logger = logging.getLogger(__name__)
def find_node_by_name(model, name) -> FormulaProcess:
return [node for node in model.process_graph.nodes() if node.name == name][0]
def store_trace_data(model, trace_data: Dict[str, Dict[str, pd.Series]], simulation_control=None, average=True):
"""
:param simulation_control:
:type simulation_control:
:param trace_data: takes traces from :function:`ngmodel.collect_calculation_traces`
:return:
"""
if not os.path.exists(f'{simulation_control.output_directory}/traces/'):
os.makedirs(f'{simulation_control.output_directory}/traces/')
metadata = {}
for var, df in trace_data.items():
if isinstance(df, Q_):
metadata[var] = {'unit': df.units}
df = df.m
if average:
logger.warning(f'takign average of {var}')
if isinstance(df.index, pd.core.index.MultiIndex):
df = df.mean(level='time')
# h5store(f'{simulation_control.output_directory}/traces/{var}.h5', df, **metadata)
df.to_pickle(f'{simulation_control.output_directory}/traces/{var}.pdpkl')
def load_trace_data(output_directory, variable: str, base_dir='.') -> pd.DataFrame:
return pd.read_pickle(f'{base_dir}/{output_directory}/traces/{variable}.pdpkl')
def store_calculation_debug_info(model, simulation_control, store_input_vars=True, average=True, target_units=None,
result_variables=None):
"""
Store
:param model:
:param simulation_control:
:param store_input_vars:
:return:
"""
# store calculation traces
traces = model.collect_calculation_traces()
store_trace_data(model, traces, simulation_control=simulation_control, average=average)
if store_input_vars:
"calculate sensitivity for all variables"
all_vars = model.collect_input_variables()
flattened_vars = {}
with open(f'{simulation_control.output_directory}/input_vars.csv', 'w') as f:
writer = csv.writer(f)
for proc_name, vars in sorted(all_vars.items()):
for var_name, var in sorted(vars.items()):
val = to_target_dimension(var_name, var.mean(), target_units)
mean = val
writer.writerow((proc_name, var_name, mean.m, str(mean.units)))
flattened_vars[f'{proc_name}.{var_name}'] = mean
df = pd.DataFrame.from_dict(flattened_vars, orient='index')
df = df
if not os.path.exists(f'{simulation_control.output_directory}/pd_pickles/'):
os.makedirs(f'{simulation_control.output_directory}/pd_pickles/')
h5store(f'{simulation_control.output_directory}/pd_pickles/input_variables.hd5', df, **metadata)
df.to_pickle(f'{simulation_control.output_directory}/pd_pickles/input_variables.pdpkl')
def h5store(filename, df, **kwargs):
store = pd.HDFStore(filename)
store.put('model_data', df)
store.get_storer('model_data').attrs.metadata = kwargs
store.close()
def h5load(filename):
store = pd.HDFStore(filename)
data = store['model_data']
metadata = store.get_storer('model_data').attrs.metadata
store.close()
return data, metadata
def store_dataframe(q_dict: Dict[str, pd.Series], simulation_control=None, target_units=None, variable_name=None,
subdirectory=''):
storage_df, metadata = pandas_series_dict_to_dataframe(q_dict, target_units=target_units, var_name=variable_name,
simulation_control=simulation_control)
logger.info(f'metadata is {metadata}')
filename = f'{simulation_control.output_directory}/{subdirectory}/result_data_{variable_name}.hdf5'
if not os.path.exists(f'{simulation_control.output_directory}/{subdirectory}'):
os.mkdir(f'{simulation_control.output_directory}/{subdirectory}')
h5store(filename, storage_df.pint.dequantify(), **metadata)
def load_as_qantity_dict(filename) -> Dict[str, _Quantity]:
val, metadata = h5load(filename)
result_dict = {k: Q_(val[k], metadata[k]['unit']) for k in val.columns}
return result_dict
def load_as_df_qantity(filename) -> _Quantity:
val, metadata = h5load(filename)
# check all units are the same
# logger.debug(metadata.items())
# assert len({v['unit'] for _, v in metadata.items()}) == 1
return val.pint.quantify(level=-1), metadata
# return Q_(val, list(metadata.values())[0]['unit'])
def load_as_plain_df(filename):
data, metadata = load_as_df_qantity(filename)
units = {v[0]: v[1] for v in data.pint.dequantify().columns.values}
df = data.pint.dequantify()
df.columns = df.columns.droplevel(1)
return df, units
def load_as_df(filename) -> Tuple[pd.DataFrame, Dict[str, Dict[str, str]]]:
val, metadata = h5load(filename)
return val, metadata
def pandas_series_dict_to_dataframe(data: Dict[str, pd.Series], target_units=None, var_name=None,
simulation_control: SimulationControl = None):
"""
input dict of process keys with result pd.Series
output pd.DataFrame with with result pd.Series for columns of process keys
"""
metadata = {}
if simulation_control.with_group:
results_df = pd.DataFrame(index=simulation_control.group_df_multi_index)
else:
results_df = pd.DataFrame(index=simulation_control._df_multi_index)
for process, variable in data.items():
logger.debug(f'converting results for process {process}')
if target_units:
variable = to_target_dimension(var_name, variable, target_units)
results_df[process] = variable
metadata[process] = variable.pint.units
return results_df, metadata
def quantity_dict_to_dataframe(q_data: Dict[str, _Quantity], target_units=None, var_name=None,
simulation_control: SimulationControl = None) \
-> Tuple[pd.DataFrame, Dict[str, str]]:
data = None
metadata = {}
logger.debug(f'result data has the following processes {q_data.keys()}')
for process, results in q_data.items():
logger.debug(f'Converting <Quantity> back to <Pandas DF> {process}')
if isinstance(results, Q_):
if target_units:
results = to_target_dimension(var_name, results, target_units)
d = results.m
metadata[process] = {'unit': str(results.units)}
results_df = pd.DataFrame(data=d)
results_df.columns = [process]
if not isinstance(results_df.index, pd.MultiIndex):
results_df.index = simulation_control._df_multi_index
if data is None:
data = results_df
else:
data[process] = results_df
# print(data)
return data, metadata
def generate_model_definition_markdown(model, in_docker=False, output_directory=None):
"""
Generate markdown file with model code.
:param model:
:return:
"""
H = nx.relabel_nodes(model.process_graph, lambda n: n.name)
# write_dot(H, 'models/graphs/baseline.dot')
pydot = to_pydot(H)
input_vars = model.collect_input_variables()
traces = model.collect_calculation_traces()
df = load_df(model.name, 'all', output_directory=output_directory)
logger.debug("Building model documentation markdown")
with open(f'{output_directory}/{model.name}_model_documentation.md', 'w') as f:
f.write("# Model\n\n")
for node in pydot.get_node_list():
name = node.get_name().strip('"')
process_node = find_node_by_name(model, name)
# method = process_node.formulaModel.formula
# write_formula(f, method, name, 'energy_footprint')
#
# method = process_node.embodied_carbon_footprint
# write_method(f, method, name, 'embodied_carbon_footprint')
#
# if hasattr(process_node, 'get_allocation_coefficient'):
# method = process_node.get_allocation_coefficient
# write_method(f, method, name, 'get_allocation_coefficient')
items = []
logger.debug(f"Processing process {name}")
for item in input_vars[name].items():
logger.debug(f"Processing variable {item[0]}")
dataa = item[1]
if isinstance(dataa, Q_):
dataa = dataa.m
metadata[item[0]] = {'unit': dataa.units}
if dataa.index.nlevels == 2:
item__mean = dataa.mean(level='time').mean()
else:
item__mean = dataa.mean()
items.append((item[0], item__mean))
# collect all the traces of this process
process_traces = {key: traces[key][name] for key in traces.keys() if name in traces[key]}
for item in process_traces.items():
if item[1].index.nlevels == 2:
item__mean = item[1].mean(level='time').mean()
else:
item__mean = item[1].mean()
items.append((item[0], item__mean))
f.writelines(tabulate(items, ['variable', 'value'], tablefmt='simple'))
f.write('\n\n')
items = []
for metric, unit in zip(['use_phase_energy', 'use_phase_carbon', 'embodied_carbon'],
['J', 'gCO2e', 'gCO2e']):
if name + '_' + metric in df.columns:
v = df[name + '_' + metric].mean()
items.append([metric, v, unit])
f.writelines(tabulate(items, ['variable', 'value', 'unit'], tablefmt='simple'))
f.write('\n\n')
logger.info("writing pandoc")
# if in_docker:
# ps = subprocess.Popen((
# f"docker run -v `pwd`:/source jagregory/pandoc -f markdown -t latex {simulation_control.output_directory}/{model.name}_model_documentation.md -o {simulation_control.output_directory}/{model.name}_model_documentation.pdf -V geometry:margin=0.2in, landscape"),
# shell=True)
# else:
logger.info(f'converting model doc at {output_directory}/{model.name}_model_documentation.md')
output = pypandoc.convert_file(f'{output_directory}/{model.name}_model_documentation.md', 'pdf',
outputfile=f'{output_directory}/{model.name}_model_documentation.pdf',
extra_args=['-V', 'geometry:margin=0.2in, landscape'])
def write_method(f, method, name, suffix=None):
f.writelines(["## Process {} - {}\n".format(name, suffix), "\n", "```python\n"])
f.writelines(inspect.getsource(method))
f.writelines(["```\n", "\n"])
def get_unit(name, process_node=None):
unit = None
if process_node:
# try resolving the unit from the process documentation
logger.debug(f'searching for unit for var {name} in process {process_node.name}')
try:
units = [info.unit for info in process_node.variable_infos() if info.property_name == name]
except:
units = None
unit = units[0] if units else ''
if not unit:
# if that fails, use these defaults
unit_dict = {'data_volume': 'b', 'use_phase_energy': 'J', 'use_phase_carbon': 'tCO2e', 'on_energy': 'J',
'active_standby_energy': 'J', 'viewing_time_minutes_monthly': 'minutes/month'}
if name in unit_dict.keys():
return unit_dict[name]
return unit
def convert_for_graph(val, unit):
if unit == 'b':
return val * 1.25e-16, 'PB'
if unit == 'J':
return val * 2.77778e-13, 'GWh'
if unit == 'hours/month':
return val * 3.61984E-12, 'viewer-years/sec'
if unit == 'minutes/month':
return val * 2.60628E-09, 'viewer-years/h'
return val, unit
def get_unit_by_name(name):
if name == 'data_volume':
return 'b'
def draw_graph_from_dotfile(model, file_type='pdf', show_variables=True, metric=None, start_date=None, end_date=None,
colour_def=None, show_histograms=True, in_docker=False, output_directory=None,
edge_labels=False, target_units=None):
if show_histograms:
generate_graph_node_barcharts(model.name, metric, start_date=start_date, end_date=end_date,
base_directory=output_directory)
project_dir = os.getcwd() # '/Users/csxds/workspaces/ngmodel'
H = nx.relabel_nodes(model.process_graph, lambda n: n.name)
ref_period = 'monthly'
H.graph['graph'] = {'label': f'{model.name} ({ref_period})', 'labelloc': 't', 'nodesep': 1, 'ranksep': 1}
# write_dot(H, 'models/graphs/baseline.dot')
pydot = to_pydot(H)
for node in pydot.get_node_list():
node.set_shape('box')
node_name = node.get_name().strip('"')
process_node = find_node_by_name(model, node_name)
# attributes = '\n'.join(keys)
logger.debug(f'drawing node {process_node.name}')
node_colour = colour_def.get('colours', {}).get(
process_node.metadata.get(colour_def.get('category_name', ""), None),
'#EB0EAA')
lable_txt = f'<<TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0"><TR><TD BGCOLOR="{node_colour}"></TD></TR>' \
+ f'<TR><TD><FONT FACE="times bold italic">{node_name}</FONT></TD></TR>'
if show_variables:
import_vars, input_vars = collect_process_variables(process_node)
lable_txt += '<TR><TD ALIGN="CENTER" BORDER="1">INPUT VARS</TD></TR>'
for var_name, var_value in sorted(input_vars.items()):
val = to_target_dimension(var_name, var_value, target_units)
lable_txt = append_quantity_value_to_label(var_name, lable_txt,
val.pint.m.mean(), val.pint.units)
lable_txt += '<TR><TD ALIGN="CENTER" BORDER="1">IMPORT VARS</TD></TR>'
for var_name, var_value in sorted(import_vars.items()):
val = to_target_dimension(var_name, var_value, target_units)
lable_txt = append_quantity_value_to_label(var_name, lable_txt,
val.pint.m.mean(), val.pint.units)
if process_node._DSL.result is not None:
lable_txt += '<TR><TD ALIGN="CENTER" BORDER="1">RESULT</TD></TR>'
val = process_node._DSL.result
var_name = 'result'
val = to_target_dimension(var_name, val, target_units)
lable_txt = append_quantity_value_to_label(var_name, lable_txt,
val.pint.m.mean(), val.pint.units)
img_file = process_node.name.replace(' ', '_')
img_filename = f'{project_dir}/{output_directory}/subgraphs/{img_file}.png'
if os.path.isfile(img_filename):
lable_txt += f'<tr><td><IMG SCALE="TRUE" SRC="{img_filename}" /></td></tr>'
lable_txt += '</TABLE>>'
# lable_txt= "<<div><h1>{}</h1>{}</div>>".format(name, attributes)
node.set_label(lable_txt)
# node.set_tooltip("test")
node.set_tooltip(process_node.formulaModel.formula.text)
logger.info(f"Labelling edges to processes.")
for dot_edge in pydot.get_edge_list():
logger.debug(f"processing {dot_edge.obj_dict['points']}.")
source_name = dot_edge.get_source()
destination_name = dot_edge.get_destination()
# @todo - why are some processes wrapped in double quotes?...
def strip_quotes(string):
if string.startswith('"') and string.endswith('"'):
return string[1:-1]
else:
return string
source_name = strip_quotes(source_name)
destination_name = strip_quotes(destination_name)
source_node = find_node_by_name(model, source_name)
destination_node = find_node_by_name(model, destination_name)
# import_variable_names = defaultdict(list)
if edge_labels:
logger.debug(f'processing edge: {source_name} -> {destination_name}')
# find the edge object in
for graph_edge in model.process_graph.in_edges(destination_node, data=True):
# print(graph_edge)
if graph_edge[0] == source_node:
# logger.debug(f'processing edge: {source_name} -> {destination_name}')
label = ""
for key, var in graph_edge[2].items():
if key == 'processed':
continue
# pass
logger.debug(f'adding label for variable {var.name}')
val = var.data_source.get_value(None, None, )
val = to_target_dimension(var.name, val, target_units)
# val_m = val.m.mean()
# unit = val.units
val_m = val.pint.m.mean()
val_unit = val.pint.units
if abs(val_m) > 0.01 and abs(val_m) < 100000:
label += '{} = {:.02f} {}\n'.format(var.name, val_m, val_unit)
else:
label += '{} = {:.2E} {}\n'.format(var.name, val_m, val_unit)
# dot_edge.set_label("this is a test ")
dot_edge.set_label(label)
# dot_edge.set_labeltooltip(label)
# for k, v in edge_variables.items():
# import_variable_names[k].append(v)
# @todo check model name does not allow code execution
dot_file = f'{output_directory}/{model.name}.dot'
pydot.write_dot(dot_file)
cmd = r'perl -p -i.regexp_bak -e \'s/lp="\d+\.?\d*,\d*\.?\d*",\n?//\' "' + dot_file + '"'
# l_cmd = ["perl", "-p", "-i.regexp_bak", "-e", '\'s/lp="\d+\.?\d*,\d*\.?\d*",\n?//\'', "{dot_file}"]
import shlex
l_cmd = shlex.split(cmd)
logger.info(f'removing "lp" statements from {dot_file}')
with subprocess.Popen(l_cmd, stdout=subprocess.PIPE) as proc:
logger.info('output from shell process: ' + str(proc.stdout.read()))
# time.sleep(2)
dot_render_filename = f'{output_directory}/{model.name}_graph.{file_type}'
logger.info('generating new graph plot from dot file at %s' % dot_render_filename)
if in_docker:
cwd = os.getcwd()
cmd = f"docker run -v {cwd}:{project_dir} -w {project_dir} markfletcher/graphviz dot '{dot_file}' -T{file_type} -Gsplines=ortho -Grankdir=LR -Gnodesep=0.1 -Gratio=compress"
logger.info(f'running docker cmd {cmd}')
l_cmd = shlex.split(cmd)
logger.info(f'running docker cmd {l_cmd}')
with open(dot_render_filename, 'w') as output:
with subprocess.Popen(l_cmd, stdout=output) as proc:
pass
else:
cmd = f"dot '{dot_file}' -T{file_type} -Gsplines=ortho -Grankdir=BT > '{dot_render_filename}'"
logger.info(f'running local cmd {cmd}')
ps = subprocess.Popen((cmd), shell=True)
def to_target_dimension(name: str, q, target_units: Dict[str, str]):
for target in target_units:
func_name = list({v for v in target.keys() if v != 'to_unit'})[0]
method_to_call = getattr(name, func_name)
# @todo this is a security risk...
result = method_to_call(target[func_name])
if result:
logger.debug(f"converting {name} to {target['to_unit']}")
if isinstance(q, Q_):
return q.to(target['to_unit'])
if isinstance(q, pd.Series):
return q.pint.to(target['to_unit'])
return q
def append_quantity_value_to_label(att, lable_txt, val, unit):
if abs(val) > 0.01 and abs(val) < 100000:
lable_txt += '<TR><TD ALIGN="LEFT" >{} = {:.02f} {}</TD></TR>'.format(att, val, unit)
else:
lable_txt += '<TR><TD ALIGN="LEFT" >{} = {:.2E} {}</TD></TR>'.format(att, val, unit)
return lable_txt
def get_unit_and_convert(att, process_node, val):
if not isinstance(val, (float, int)):
val = val.mean()
if att in process_node.variables:
var = process_node.variables[att]
unit = get_unit(att, process_node)
val, unit = convert_for_graph(val, unit)
return unit, val
def generate_graph_node_barcharts(model_name, metric, start_date=None, end_date=None, base_directory=None):
logger.info('generating graph node barcharts')
filename = f'{base_directory}/result_data_{metric}.hdf5'
df, units = load_as_plain_df(filename)
if not start_date:
start_date = df.index[0][0].date()
if not end_date:
end_date = df.index[-1][0].date()
df = df.loc[start_date:end_date]
s = [*df.mean(level='time').sum().items()]
labels, values = zip(*sorted(s, key=itemgetter(1)))
indexes = np.arange(len(labels))
width = 1
if not os.path.exists(f'{base_directory}/subgraphs/'):
os.mkdir(f'{base_directory}/subgraphs/')
for k, v in df.mean(level='time').sum().items():
logger.debug(f'writing subgraph for {k}')
fig, ax = plt.subplots(figsize=(3, 1))
plt.bar(indexes, values, width, alpha=0.2)
plt.bar(labels.index(k), values[labels.index(k)], width, color='red', alpha=0.7)
plt.gca().annotate(k,
xy=(labels.index(k), values[labels.index(k)]), xycoords='data',
xytext=(-30, 20), textcoords='offset points',
arrowprops=dict(arrowstyle="->",
connectionstyle="angle3,angleA=0,angleB=-90"))
plt.xticks(indexes + width * 0.5, labels)
plt.gca().get_xaxis().set_visible(False)
plt.savefig(f'{base_directory}/subgraphs/%s.png' % k.replace(' ', '_'))
plt.close()
def store_results(process_footprint_dict, model, simulation_control=None, ):
if not os.path.exists(f'{simulation_control.output_directory}/pd_pickles/'):
os.makedirs(f'{simulation_control.output_directory}/pd_pickles/')
raw = {k: v.m for k, v in process_footprint_dict['use_phase_energy'].items()}
dfm = | pd.DataFrame.from_dict(raw) | pandas.DataFrame.from_dict |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 2 12:49:47 2021
@author: madeline
"""
import argparse
import pandas as pd
import os
def parse_args():
parser = argparse.ArgumentParser(
description='Creates two dataframes from a surveillance report TSV')
parser.add_argument('--tsv', type=str, default=None,
help='Path to surveillance report TSV file')
parser.add_argument('--functions_table', type=str, default=None,
help='TSV file containing Pokay category:NML category mappings')
parser.add_argument('--frequency_threshold', type=float, default=0.2,
help='Alternate frequency threshold cutoff for inclusion in report')
return parser.parse_args()
def make_functions_df(tsv, functions_df_template):
#load functions_df template
functions_df = pd.read_csv(functions_df_template, sep='\t', header=0)
#populate the Mutations column row by row
for row in functions_df['Sub-categories from POKAY']:
row_mutations_set = set()
category_list = row.split(',') #get list of Pokay categories
for category in category_list:
category = category.rstrip() #remove trailing spaces to enable matching
#find mutation names that match that category and add them to the set
cat_mutations = tsv[tsv['function_category']==category]['name']
cat_mutations_set = set(cat_mutations)
#add category mutations set to row mutations set
row_mutations_set.update(cat_mutations_set)
#save row mutations set in the 'Mutations' column, sorted alphabetically
row_list = sorted(list(row_mutations_set))
row_str = ', '.join(str(e) for e in row_list)
mask = functions_df['Sub-categories from POKAY']==row
functions_df.loc[mask, 'Mutations'] = row_str
return functions_df
def make_mutations_df(tsv, functions_dataframe):
named_mutations = functions_dataframe['Mutations'].values.tolist()
named_mutations = ', '.join(str(e) for e in named_mutations).split(', ')
named_mutations = set(named_mutations)
named_mutations.remove('')
#tsv columns to use
tsv_df_cols = ['name', 'function_category', 'function_description', 'viral_clade_defining', 'citation', 'ao', 'dp', 'Frequency (Variant)']
#create empty dataframe
mutations_df = pd.DataFrame(columns=tsv_df_cols)
for mutation in named_mutations:
#get rows of the tsv for that mutation
tsv_rows = tsv[tsv['name']==mutation]
#keep certain columns of the tsv rows
tsv_rows = tsv_rows[tsv_df_cols]
#concatenate dfs
mutations_df = pd.concat((mutations_df, tsv_rows))
#remove clade-defining values from strains column
mutations_df['viral_clade_defining'] = mutations_df['viral_clade_defining'].str.replace(r"=.*?;",",", regex=True)
#remove trailing commas
mutations_df['viral_clade_defining'] = mutations_df['viral_clade_defining'].str.rstrip(' ').str.rstrip(',')
#rename mutations_df columns
final_mutations_df_cols = ['Mutations', 'Sub-category', 'Function', 'Lineages', 'Citation', 'ao', 'dp', 'Frequency (Variant)']
renaming_dict = dict(zip(tsv_df_cols, final_mutations_df_cols))
mutations_df = mutations_df.rename(columns=renaming_dict)
#add 'Frequency (Functional)' column
mutations_df['Frequency (Functional)'] = mutations_df['ao'] / mutations_df['dp'] # ao / dp
#reorder mutations_df columns
mutations_df_cols = ['Mutations', 'Frequency (Variant)', 'Frequency (Functional)', 'Sub-category', 'Function', 'Lineages', 'Citation']
mutations_df = mutations_df[mutations_df_cols]
return mutations_df
if __name__ == '__main__':
args = parse_args()
functions_table = args.functions_table
report_tsv = args.tsv
#load tsv and remove all rows that don't meet the frequency cutoff
tsv_df = | pd.read_csv(args.tsv, sep='\t', header=0) | pandas.read_csv |
import logging
import os
import numpy as np
from torch_geometric.graphgym.config import cfg
from torch_geometric.graphgym.utils.io import (dict_list_to_json,
dict_list_to_tb, dict_to_json,
json_to_dict_list,
makedirs_rm_exist,
string_to_python)
try:
from tensorboardX import SummaryWriter
except ImportError:
SummaryWriter = None
def is_seed(s):
try:
int(s)
return True
except Exception:
return False
def is_split(s):
if s in ['train', 'val', 'test']:
return True
else:
return False
def join_list(l1, l2):
assert len(l1) == len(l2), \
'Results with different seeds must have the save format'
for i in range(len(l1)):
l1[i] += l2[i]
return l1
def agg_dict_list(dict_list):
"""
Aggregate a list of dictionaries: mean + std
Args:
dict_list: list of dictionaries
"""
dict_agg = {'epoch': dict_list[0]['epoch']}
for key in dict_list[0]:
if key != 'epoch':
value = np.array([dict[key] for dict in dict_list])
dict_agg[key] = np.mean(value).round(cfg.round)
dict_agg['{}_std'.format(key)] = np.std(value).round(cfg.round)
return dict_agg
def name_to_dict(run):
cols = run.split('-')[1:]
keys, vals = [], []
for col in cols:
try:
key, val = col.split('=')
except Exception:
print(col)
keys.append(key)
vals.append(string_to_python(val))
return dict(zip(keys, vals))
def rm_keys(dict, keys):
for key in keys:
dict.pop(key, None)
def agg_runs(dir, metric_best='auto'):
r'''
Aggregate over different random seeds of a single experiment
Args:
dir (str): Directory of the results, containing 1 experiment
metric_best (str, optional): The metric for selecting the best
validation performance. Options: auto, accuracy, auc.
'''
results = {'train': None, 'val': None, 'test': None}
results_best = {'train': None, 'val': None, 'test': None}
for seed in os.listdir(dir):
if is_seed(seed):
dir_seed = os.path.join(dir, seed)
split = 'val'
if split in os.listdir(dir_seed):
dir_split = os.path.join(dir_seed, split)
fname_stats = os.path.join(dir_split, 'stats.json')
stats_list = json_to_dict_list(fname_stats)
if metric_best == 'auto':
metric = 'auc' if 'auc' in stats_list[0] else 'accuracy'
else:
metric = metric_best
performance_np = np.array( # noqa
[stats[metric] for stats in stats_list])
best_epoch = \
stats_list[
eval("performance_np.{}()".format(cfg.metric_agg))][
'epoch']
print(best_epoch)
for split in os.listdir(dir_seed):
if is_split(split):
dir_split = os.path.join(dir_seed, split)
fname_stats = os.path.join(dir_split, 'stats.json')
stats_list = json_to_dict_list(fname_stats)
stats_best = [
stats for stats in stats_list
if stats['epoch'] == best_epoch
][0]
print(stats_best)
stats_list = [[stats] for stats in stats_list]
if results[split] is None:
results[split] = stats_list
else:
results[split] = join_list(results[split], stats_list)
if results_best[split] is None:
results_best[split] = [stats_best]
else:
results_best[split] += [stats_best]
results = {k: v for k, v in results.items() if v is not None} # rm None
results_best = {k: v
for k, v in results_best.items()
if v is not None} # rm None
for key in results:
for i in range(len(results[key])):
results[key][i] = agg_dict_list(results[key][i])
for key in results_best:
results_best[key] = agg_dict_list(results_best[key])
# save aggregated results
for key, value in results.items():
dir_out = os.path.join(dir, 'agg', key)
makedirs_rm_exist(dir_out)
fname = os.path.join(dir_out, 'stats.json')
dict_list_to_json(value, fname)
if cfg.tensorboard_agg:
if SummaryWriter is None:
raise ImportError(
'Tensorboard support requires `tensorboardX`.')
writer = SummaryWriter(dir_out)
dict_list_to_tb(value, writer)
writer.close()
for key, value in results_best.items():
dir_out = os.path.join(dir, 'agg', key)
fname = os.path.join(dir_out, 'best.json')
dict_to_json(value, fname)
logging.info('Results aggregated across runs saved in {}'.format(
os.path.join(dir, 'agg')))
def agg_batch(dir, metric_best='auto'):
r'''
Aggregate across results from multiple experiments via grid search
Args:
dir (str): Directory of the results, containing multiple experiments
metric_best (str, optional): The metric for selecting the best
validation performance. Options: auto, accuracy, auc.
'''
import pandas as pd
results = {'train': [], 'val': [], 'test': []}
for run in os.listdir(dir):
if run != 'agg':
dict_name = name_to_dict(run)
dir_run = os.path.join(dir, run, 'agg')
if os.path.isdir(dir_run):
for split in os.listdir(dir_run):
dir_split = os.path.join(dir_run, split)
fname_stats = os.path.join(dir_split, 'best.json')
dict_stats = json_to_dict_list(fname_stats)[
-1] # get best val epoch
rm_keys(dict_stats,
['lr', 'lr_std', 'eta', 'eta_std', 'params_std'])
results[split].append({**dict_name, **dict_stats})
dir_out = os.path.join(dir, 'agg')
makedirs_rm_exist(dir_out)
for key in results:
if len(results[key]) > 0:
results[key] = pd.DataFrame(results[key])
results[key] = results[key].sort_values(
list(dict_name.keys()), ascending=[True] * len(dict_name))
fname = os.path.join(dir_out, '{}_best.csv'.format(key))
results[key].to_csv(fname, index=False)
results = {'train': [], 'val': [], 'test': []}
for run in os.listdir(dir):
if run != 'agg':
dict_name = name_to_dict(run)
dir_run = os.path.join(dir, run, 'agg')
if os.path.isdir(dir_run):
for split in os.listdir(dir_run):
dir_split = os.path.join(dir_run, split)
fname_stats = os.path.join(dir_split, 'stats.json')
dict_stats = json_to_dict_list(fname_stats)[
-1] # get last epoch
rm_keys(dict_stats,
['lr', 'lr_std', 'eta', 'eta_std', 'params_std'])
results[split].append({**dict_name, **dict_stats})
dir_out = os.path.join(dir, 'agg')
for key in results:
if len(results[key]) > 0:
results[key] = | pd.DataFrame(results[key]) | pandas.DataFrame |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas import (Series, isnull, date_range,
MultiIndex, Index)
from pandas.tseries.index import Timestamp
from pandas.compat import range
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.pchip missing')
def _skip_if_no_akima():
try:
from scipy.interpolate import Akima1DInterpolator # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.Akima1DInterpolator missing')
class TestSeriesMissingData(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_timedelta_fillna(self):
# GH 3371
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
td = s.diff()
# reg fillna
result = td.fillna(0)
expected = Series([timedelta(0), timedelta(0), timedelta(1), timedelta(
days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
# interprested as seconds
result = td.fillna(1)
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
expected = Series([timedelta(days=1, seconds=1), timedelta(
0), timedelta(1), timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(np.timedelta64(int(1e9)))
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
from pandas import tslib
result = td.fillna(tslib.NaT)
expected = Series([tslib.NaT, timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)],
dtype='m8[ns]')
assert_series_equal(result, expected)
# ffill
td[2] = np.nan
result = td.ffill()
expected = td.fillna(0)
expected[0] = np.nan
assert_series_equal(result, expected)
# bfill
td[2] = np.nan
result = td.bfill()
expected = td.fillna(0)
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
assert_series_equal(result, expected)
def test_datetime64_fillna(self):
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
s[2] = np.nan
# reg fillna
result = s.fillna( | Timestamp('20130104') | pandas.tseries.index.Timestamp |
import kaggle
import argparse
import pandas as pd
import os
import json
import requests
import traceback
from requests.exceptions import ConnectionError, ChunkedEncodingError
import errno
from multiprocessing import Pool, cpu_count, Queue
from bs4 import BeautifulSoup
from urllib.parse import urlparse, parse_qs
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import NoSuchElementException
from tqdm import tqdm
from pathlib import Path
from utils import make_sure_path_exists
import logging
logpath = "./scraper.log"
logger = logging.getLogger('log')
logger.setLevel(logging.INFO)
ch = logging.FileHandler(logpath)
# ch.setFormatter(logging.Formatter('%(message)s'))
logger.addHandler(ch)
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
def list_datasets(page=1, all_pages = False, out_path =None,
min_size = None, max_size = None, sort_by = None):
results = []
current_page = page
get_next_page = True
while get_next_page:
dataset_page = kaggle.api.datasets_list(page=current_page, min_size= min_size,
max_size= max_size, sort_by=sort_by)
results = results + dataset_page
if not dataset_page or not all_pages:
get_next_page = False
else:
current_page += 1
# print("Page:{}, N_Datasets:{}".format(current_page,len(results)))
if out_path:
result_df = pd.DataFrame(results)
result_df.to_csv(out_path)
return results
def get_kaggle_cookie_str(cookie_dict):
keys = ["ka_sessionid", "CSRF-TOKEN","GCLB", "XSRF-TOKEN"]
values = [cookie_dict.get(k) for k in keys]
cookie_str_template = "{key}={value}"
cookie_str = "; ".join(cookie_str_template.format(key=k, value=v) for k,v in zip(keys, values))
return cookie_str
def get_kaggle_header():
get_response = requests.get("https://www.kaggle.com")
cookie_dict = requests.utils.dict_from_cookiejar(get_response.cookies)
header = { 'authority': 'www.kaggle.com',
'accept': 'application/json',
'sec-fetch-dest': 'empty',
'x-xsrf-token': cookie_dict.get("XSRF-TOKEN"),
'__requestverificationtoken': cookie_dict.get("XSRF-TOKEN"),
'content-type': 'application/json',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'cors',
'accept-language': 'en-US,en;q=0.9',
'cookie': get_kaggle_cookie_str(cookie_dict)
}
return header
def get_tasks_for_dataset(datasetId):
header = get_kaggle_header()
data = '{"datasetId":' + str(datasetId) + '}'
response = requests.post('https://www.kaggle.com/requests/GetDatasetTasksRequest',
headers=header, data=data)
if response:
result = response.json().get("result")
if result:
tasks = result["tasks"]
return tasks
else:
print("No response!")
def get_task_metadata(taskId):
header = get_kaggle_header()
data = '{{"taskId":{0}}}'.format(taskId)
response = requests.post('https://www.kaggle.com/requests/GetTaskRequest',
headers=header, data=data)
if response:
return response.json()
def get_task_submissions(taskId,offset=0,limit=1000):
data = '{{"taskId":{0},"offset":{1},"limit":{2}}}'.format(taskId,offset,limit)
header = get_kaggle_header()
response = requests.post('https://www.kaggle.com/requests/GetTaskSubmissionsRequest', headers=header, data=data)
if response:
results = response.json()["result"]
submissions = results["taskSubmissions"]
total_count = results["totalCount"]
if total_count > (offset + 1) * limit:
submissions = submissions + get_task_submissions(taskId,offset=offset+1,limit=limit)
return submissions
def download_task_submissions(taskId,out_path):
print("Downloading submissions...")
submissions = get_task_submissions(taskId)
if submissions is None:
return
for sub in submissions:
print(" {}".format(sub["url"]))
try:
download_kernel_from_path(sub["url"],out_path)
except kaggle.rest.ApiException as e:
print(" Not Found!")
def scrape_all_task_submissions(out_dir,max_gap_size=40):
# Kaggle uses sequential indices for its tasks.
# If we pass by more than gap_size indices without getting a result,
# assume we're at the end of the task
taskId = 1
get_next_task = True
gap_size = 0
while get_next_task:
print("Checking for task {}....".format(taskId))
task_metadata = get_task_metadata(taskId)
if "errors" in task_metadata:
print(task_metadata["errors"])
gap_size = gap_size + 1
if gap_size == max_gap_size:
break
else:
gap_size = 0
task_dir = os.path.join(out_dir,"taskId={}".format(taskId))
make_sure_path_exists(task_dir)
download_task_submissions(taskId,task_dir)
taskId = taskId + 1
#Assumes that there are not more than max_results datasets of the same size
def get_datasets_in_size_range(min_size,max_size,max_results=10000,out_path=None):
boundary = min_size + (max_size - min_size)//4
lower = list_datasets(all_pages=True,min_size = min_size, max_size = boundary)
upper = list_datasets(all_pages=True,min_size= boundary, max_size= max_size)
print("Got {} results in range ({},{})".format(len(lower),min_size, boundary))
if len(lower) >= max_results:
lower = get_datasets_in_size_range(min_size = min_size, max_size = boundary, max_results = max_results)
print("Got {} results in range ({},{})".format(len(upper), boundary, max_size))
if len(upper) >= max_results:
upper = get_datasets_in_size_range(min_size = boundary, max_size = max_size, max_results = max_results)
result = lower + upper
if out_path:
result_df = pd.DataFrame(result)
result_df.to_csv(out_path)
return result
def get_kernel_metadata(competition, page_limit = None, save_kernel_metadata = True):
to_return = []
response = kaggle.api.kernels_list_with_http_info(search = competition)
i = 0
while len(response[0]) > 0:
i = i+1
response = kaggle.api.kernels_list_with_http_info(search = competition, page = i)
to_return = to_return + response[0]
return to_return
def get_version_metadata_for_kernel(author,slug,driver):
try:
kernel_url = "https://www.kaggle.com/{author}/{slug}".format(author=author,slug=slug)
driver.get(kernel_url)
version_box_xpath = "//*[starts-with(@class, 'VersionsInfoBox_VersionTitle') and not(contains(@class,'ForkInfo'))]"
versions_box_present = EC.presence_of_element_located((By.XPATH, version_box_xpath))
versions_box_clickable = EC.element_to_be_clickable((By.XPATH, version_box_xpath))
WebDriverWait(driver, 5).until(versions_box_present)
version_info = driver.find_element_by_xpath(version_box_xpath)
# driver.save_screenshot("screenshot.png")
version_info.click()
versions_table_xpath = "//table[starts-with(@class,'VersionsPaneContent')]"
versions_table_present = EC.element_to_be_clickable((By.XPATH,versions_table_xpath))
WebDriverWait(driver, 5).until(versions_table_present)
version_table = driver.find_element_by_xpath(versions_table_xpath)
version_metadata = process_versions_table(version_table)
return version_metadata
except TimeoutException:
return []
def process_versions_table(table_elem):
version_rows = table_elem.find_elements_by_xpath(".//div")
results = []
for row in version_rows:
row_result = {"script_completed":False, "version_id":None}
status_icon = row.find_element_by_xpath(".//a/*[local-name() = 'svg']")
if status_icon.get_attribute("data-icon") == "check-circle":
row_result["script_completed"] = True
try:
version_href = row.find_element_by_xpath(".//a[contains(@href, 'scriptVersionId=')]").get_attribute("href")
except NoSuchElementException:
continue
version_id = parse_qs(urlparse(version_href).query).get("scriptVersionId",[None])[0]
row_result["version_id"] = version_id
results.append(row_result)
return results
def get_all_competitions(sort_by = "numberOfTeams",start = 1):
results = []
i = start
page = kaggle.api.competitions_list(page = i, sort_by = sort_by)
while page:
results = results + page
page = kaggle.api.competitions_list(page = i, sort_by = sort_by)
i = i+1
return results
def process_download_mapper(kernel_key):
global process_driver
driver = process_driver
return download_versions(*kernel_key, driver=driver)
def look_for_global_driver():
global driver
return driver
def fetch_kernels_for_competition(competition, competitions_path, save_kernel_metadata = True,
progress_bar=False,
driver=None, fetch_versions=True):
"""Download all submissions for a competition. Creates the following
directory structure:
-- <competitions_path>
- <competition>
- submission_a.json
- submission_b.json
- ....
- submission_z.json
- metadata.csv
If fetch_versions = true, then this st
"""
out_path = os.path.join(competitions_path,competition)
Path(out_path).mkdir(parents=True, exist_ok=True)
kernel_metadata = get_kernel_metadata(competition,save_kernel_metadata)
kernel_keys = [(*kernel["ref"].split("/"),out_path) for kernel in kernel_metadata]
n = len(kernel_metadata)
if fetch_versions:
if driver is None:
#Look for global driver in process:
try:
driver = look_for_global_driver()
except NameError:
driver = get_driver()
if progress_bar:
kernel_metadata = list(tqdm([download_versions(*k,driver = driver) for k in kernel_keys],total =n))
else:
kernel_metadata = [download_versions(*k,driver = driver) for k in kernel_keys]
else:
raise NotImplementedError
if save_kernel_metadata:
metadata_out_path = os.path.join(out_path,"metadata.csv")
pd.DataFrame(kernel_metadata).to_csv(metadata_out_path)
def download_versions(author,slug,out_path,driver = None):
if driver is None:
driver = get_driver()
version_metadata = get_version_metadata_for_kernel(author,slug,driver)
author_path = os.path.join(out_path,author)
make_sure_path_exists(author_path)
for version in version_metadata:
vid = version["version_id"]
version_filename = "{vid}.json".format(vid = vid)
download_kernel_with_version_id(vid,author_path,
filename = version_filename)
| pd.DataFrame(version_metadata) | pandas.DataFrame |
import math
import warnings
from typing import List, Union
import matplotlib
import numpy as np
import pandas as pd
matplotlib.rcParams["text.usetex"] = True
from pykelihood import kernels
from pykelihood.distributions import Exponential, MixtureExponentialModel
from pykelihood.stats_utils import Profiler
try:
from hawkeslib import UnivariateExpHawkesProcess as UEHP
except ImportError:
UEHP = None
from pykelihood.distributions import Distribution, opposite_log_likelihood
from pykelihood.parameters import Parameter
from pykelihood.visualisation.utils import (
get_quantiles_and_confidence_intervals,
get_quantiles_and_confidence_intervals_uniform_scale,
)
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
def qq_plot_gpd(
data: pd.DataFrame,
gpd_fit: Distribution,
path_to_figure: str,
threshold: Union[List, float, int, str] = "",
ci_confidence=0.99,
figure_name="qq_plot_gpd",
):
id_obs = True
if len(gpd_fit.flattened_params) != len(gpd_fit.params):
id_obs = False
theoretical, empirical, lower_bound, upper_bound = (
get_quantiles_and_confidence_intervals(gpd_fit, data, ci_confidence)
if id_obs
else get_quantiles_and_confidence_intervals_uniform_scale(
gpd_fit, data, ci_confidence
)
)
n = len(data)
text_title = ""
if type(gpd_fit.loc()) is not Parameter:
loc = {
r"$\mu_{}$".format(a): round(gpd_fit.loc.param_dict[a], 2)
for a in gpd_fit.loc.param_dict
}
for k in loc:
text_title += f"{k} = {loc[k]}, "
else:
loc = round(gpd_fit.loc(), 2)
text_title += r"$\mu$=" + str(loc) + ", "
text_title += "\n"
if type(gpd_fit.scale()) is not Parameter:
scale = {
r"$\sigma_{}$".format(a): round(gpd_fit.scale.param_dict[a], 2)
for a in gpd_fit.scale.param_dict
}
for k in scale:
text_title += f"{k} = {scale[k]}, "
else:
scale = round(gpd_fit.scale(), 2)
text_title += r"$\sigma$=" + str(scale) + ", "
text_title += "\n"
if type(gpd_fit.shape()) is not Parameter:
shape = {
r"$\xi_{}$".format(a): round(gpd_fit.shape.param_dict[a], 2)
for a in gpd_fit.shape.param_dict
}
for k in shape:
text_title += f"{k} = {shape[k]}, "
else:
shape = round(gpd_fit.shape(), 2)
text_title += r"$\xi$=" + str(shape)
if text_title.endswith(", "):
text_title = text_title[:-2]
threshold_text = (
str(tuple(threshold)) if hasattr(threshold, "__len__") else str(threshold)
)
plt.scatter(theoretical, empirical, s=5, marker="x", color="navy")
plt.plot(theoretical, theoretical, label=f"$x=y$", color="navy")
if id_obs:
plt.fill_betweenx(
y=empirical, x1=lower_bound, x2=upper_bound, alpha=0.2, color="navy"
)
else:
plt.fill_between(theoretical, lower_bound, upper_bound, alpha=0.2, color="navy")
plt.legend()
plt.title(
"QQ Plot of Exceedances over threshold "
+ threshold_text
+ " vs GPD distribution with parameters:\n"
+ text_title
)
plt.xlabel(f"Theoretical quantiles ({n} observations)")
plt.ylabel("Empirical quantiles")
plt.tight_layout()
plt.savefig(f"{path_to_figure}/{figure_name}.png")
plt.clf()
to_concat = pd.DataFrame(
[theoretical, lower_bound, upper_bound],
columns=empirical.index,
index=["theoretical", "lower_bound", "upper_bound"],
).T
return pd.concat([empirical.rename("realized"), to_concat], axis=1)
def qq_plot_gev(
data: pd.DataFrame,
gev_fit: Distribution,
path_to_figure: str,
ci_confidence=0.99,
figure_name="qq_plot_gev",
):
id_obs = True
if len(gev_fit.flattened_params) != len(gev_fit.params):
id_obs = False
theoretical, empirical, lower_bound, upper_bound = (
get_quantiles_and_confidence_intervals(gev_fit, data, ci_confidence)
if id_obs
else get_quantiles_and_confidence_intervals_uniform_scale(
gev_fit, data, ci_confidence
)
)
n = len(data)
text_title = ""
if type(gev_fit.loc()) is not Parameter:
loc = {
r"$\mu_{}$".format(a): round(gev_fit.loc.param_dict[a], 2)
for a in gev_fit.loc.param_dict
}
for k in loc:
text_title += f"{k} = {loc[k]}, "
else:
loc = round(gev_fit.loc(), 2)
text_title += r"$\mu$=" + str(loc) + ", "
text_title += "\n"
if type(gev_fit.scale()) is not Parameter:
scale = {
r"$\sigma_{}$".format(a): round(gev_fit.scale.param_dict[a], 2)
for a in gev_fit.scale.param_dict
}
for k in scale:
text_title += f"{k} = {scale[k]}, "
else:
scale = round(gev_fit.scale(), 2)
text_title += r"$\sigma$=" + str(scale) + ", "
text_title += "\n"
if type(gev_fit.shape()) is not Parameter:
shape = {
r"$\xi_{}$".format(a): round(gev_fit.shape.param_dict[a], 2)
for a in gev_fit.shape.param_dict
}
for k in shape:
text_title += f"{k} = {shape[k]}, "
else:
shape = round(gev_fit.shape(), 2)
text_title += r"$\xi$=" + str(shape)
if text_title.endswith(", "):
text_title = text_title[:-2]
plt.scatter(theoretical, empirical, s=5, marker="x", color="navy")
plt.plot(theoretical, theoretical, label=f"$x=y$", color="navy")
if id_obs:
plt.fill_betweenx(
y=empirical, x1=lower_bound, x2=upper_bound, alpha=0.2, color="navy"
)
else:
plt.fill_between(theoretical, lower_bound, upper_bound, alpha=0.2, color="navy")
plt.title("QQ Plot of Maxima vs GEV distribution with parameters:\n" + text_title)
plt.ylabel(f"Empirical ({n} observations)")
plt.xlabel("GEV distribution")
plt.tight_layout()
plt.savefig(f"{path_to_figure}/{figure_name}.png")
plt.clf()
to_concat = pd.DataFrame(
[theoretical, lower_bound, upper_bound],
columns=empirical.index,
index=["theoretical", "lower_bound", "upper_bound"],
).T
return pd.concat([empirical.rename("realized"), to_concat], axis=1)
def consecutive_days_above_value_plot(
data: pd.DataFrame, value: float, path_to_figure: str
):
above_value = data[data["data"] >= value].assign(
timedelta=lambda x: x["days_since_start"].diff()
)
consecutive = (
above_value[above_value["timedelta"] == 1]
.assign(timedelta=lambda x: x["days_since_start"].diff())
.fillna(0.0)
.assign(subgroup=lambda x: (x["timedelta"] != x["timedelta"].shift(1)).cumsum())
.groupby(["year", "subgroup"])
.agg({"data": "count"})
)
consecutive = consecutive + 1
consecutive_mean_per_year = consecutive.groupby(level="year").agg("mean")
quantile_inf = consecutive.groupby(level="year").agg(lambda x: np.quantile(x, 0.05))
quantile_sup = consecutive.groupby(level="year").agg(lambda x: np.quantile(x, 0.95))
plt.scatter(
consecutive_mean_per_year.index,
consecutive_mean_per_year,
label="Mean",
color="salmon",
s=7,
)
plt.vlines(
consecutive_mean_per_year.index,
quantile_inf,
quantile_sup,
label=r"5\% Quantiles",
alpha=0.6,
color="salmon",
)
plt.legend()
plt.title(f"Mean Number of Consecutive Days Above {value} per Year.")
plt.savefig(f"{path_to_figure}/mean_nb_days_cons_above_{value}_year.png")
plt.clf()
def consecutive_days_under_value_plot(
data: pd.DataFrame, value: float, path_to_figure: str
):
under_value = data[data["data"] <= value].assign(
timedelta=lambda x: x["days_since_start"].diff()
)
consecutive = (
under_value[under_value["timedelta"] == 1]
.assign(timedelta=lambda x: x["days_since_start"].diff())
.fillna(0.0)
.assign(subgroup=lambda x: (x["timedelta"] != x["timedelta"].shift(1)).cumsum())
.groupby(["year", "subgroup"])
.agg({"data": "count"})
)
consecutive = consecutive + 1
consecutive_mean_per_year = consecutive.groupby(level="year").agg("mean")
quantile_inf = consecutive.groupby(level="year").agg(lambda x: np.quantile(x, 0.05))
quantile_sup = consecutive.groupby(level="year").agg(lambda x: np.quantile(x, 0.95))
plt.scatter(
consecutive_mean_per_year.index,
consecutive_mean_per_year,
label="Mean",
color="salmon",
s=7,
)
plt.vlines(
consecutive_mean_per_year.index,
quantile_inf,
quantile_sup,
label=r"5\% Quantiles",
alpha=0.6,
color="salmon",
)
plt.legend()
plt.title(f"Mean Number of Consecutive Days Under {value} per Year.")
plt.savefig(f"{path_to_figure}/mean_nb_days_cons_under_{value}_year.png")
plt.clf()
### INTER EXCEEDANCES DIAGNOSTIC PLOTS, CLUSTERING ###
def mean_inter_exceedance_time_per_year(data: pd.DataFrame, path_to_figure: str):
"""
Plots the observed inter-exceedance times per year, useful to visualize an increase or decrease in the distance between two extreme events.
:param data: Dataframe pandas with columns "data", "threshold" (possibly seasonal or periodical), "days_since_start" (of the full period) and "year".
:param path_to_figure: Path to save the figure.
:return: Plots diagnostic graphs.
"""
data = data.dropna().reset_index()
empty_years = []
mean_year = []
quantile_sup_year = []
quantile_inf_year = []
for year in data["year"].unique():
try:
iat_days = (
data[(data["year"] == year) & (data["data"] >= data["threshold"])][
"days_since_start"
]
.diff()
.dropna()
)
quantile_sup_year.append(np.quantile(iat_days, q=0.99))
quantile_inf_year.append(np.quantile(iat_days, q=0.01))
mean_year.append(iat_days.mean())
except:
empty_years.append(year)
not_y = [y for y in data["year"].unique() if y not in empty_years]
plt.scatter(not_y, mean_year, s=7, color="salmon", label="Mean")
plt.vlines(
not_y,
quantile_inf_year,
quantile_sup_year,
alpha=0.6,
color="salmon",
label=r"10\% Quantiles",
)
plt.title("Inter-exceedance time per year")
plt.legend()
plt.savefig(f"{path_to_figure}/inter_exceedances_per_year.png")
plt.clf()
def qq_plot_exponential(
data: Union[pd.Series, np.array],
exp_fit: Distribution,
path_to_figure: str,
ci_confidence=0.99,
figure_name="qq_plot_exponential",
):
id_obs = True
if len(exp_fit.flattened_params) != len(exp_fit.params):
id_obs = False
theoretical, empirical, lower_bound, upper_bound = (
get_quantiles_and_confidence_intervals(exp_fit, data, ci_confidence)
if id_obs
else get_quantiles_and_confidence_intervals_uniform_scale(
exp_fit, data, ci_confidence
)
)
plt.scatter(theoretical, empirical, s=5, marker="x", color="navy")
plt.plot(theoretical, theoretical, label=f"$x=y$", color="navy")
plt.legend()
if id_obs:
plt.fill_betweenx(
y=empirical, x1=lower_bound, x2=upper_bound, alpha=0.2, color="navy"
)
else:
plt.fill_between(theoretical, lower_bound, upper_bound, alpha=0.2, color="navy")
plt.title("QQ Plot of positive spacing vs Exponential distribution")
plt.xlabel("Empirical")
plt.ylabel(
r"Exponential with parameter $\lambda$" + f"= {round(exp_fit.rate(), 2)}"
)
plt.savefig(f"{path_to_figure}/{figure_name}.png")
plt.clf()
to_concat = pd.DataFrame(
[theoretical, lower_bound, upper_bound],
columns=empirical.index,
index=["theoretical", "lower_bound", "upper_bound"],
).T
return pd.concat([empirical.rename("realized"), to_concat], axis=1)
def extremogram_plot(
data: pd.DataFrame,
h_range: Union[List, np.array],
path_to_figure: str,
compare_to_hawkes=False,
figure_name="extremogram",
):
"""
Plots the observed extremogram (ie proba of the observation in t+h to be an exceedance knowing that the observation in t was one.
Compares the estimate using an homogeneous poisson process vs a Hawkes process.
:param data: Dataframe pandas containing the columns "data" for the variable of interest, "threshold" for the (possibly seasonal) threshold(s) and "days_since_start" which is a non-normalized version
of the time in days unit.
:param h_range: Range for the h parameter to vary through.
:param path_to_figure: Path to plot the extremogram.
:return: Plots diagnostic graphs.
"""
def extremogram_loop(h_range, indices_exceedances):
counts = []
indices_exceedances = np.array(indices_exceedances)
for h in h_range:
mat1 = np.tile(indices_exceedances, (len(indices_exceedances), 1))
mat2 = np.tile(indices_exceedances + h, (len(indices_exceedances), 1))
diff = mat2.T - mat1
diff = diff[np.abs(diff) < 0.5]
counts.append(len(diff) / len(indices_exceedances))
return counts
# empirical
data_extremogram = data[["data", "days_since_start", "threshold"]]
data_extremogram = (
data_extremogram[data_extremogram["data"] >= data["threshold"]]
.assign(iat=lambda x: x["days_since_start"].diff())
.fillna(0.0)
)
indices_exceedances = [
int(round(e, 0)) for e in list(data_extremogram["days_since_start"])
]
extremogram_realized = extremogram_loop(h_range, indices_exceedances)
extremogram_realized = pd.Series(extremogram_realized, index=h_range)
# Simulated Poisson process
exp_fit = Exponential.fit(
data_extremogram["iat"], loc=0.0, x0=[len(data_extremogram) / len(data)]
)
exceedances_pp = []
for i in range(1000):
exceedances_pp.append(exp_fit.rvs(len(data_extremogram)).cumsum())
count_pp = []
for i in range(len(exceedances_pp)):
# PP
ex_pp = exceedances_pp[i].copy()
indices_exceedances = [e for e in ex_pp]
local_count_pp = extremogram_loop(h_range, indices_exceedances)
local_count_pp = pd.Series(local_count_pp, index=h_range)
count_pp.append(local_count_pp)
count_pp = pd.concat(count_pp, axis=1)
mean_pp = count_pp.mean(axis=1)
quantile_inf_pp = np.quantile(count_pp, q=0.001, axis=1)
quantile_sup_pp = np.quantile(count_pp, q=0.999, axis=1)
to_return = pd.concat(
[
extremogram_realized.rename("realized"),
mean_pp.rename("mean_pp"),
pd.DataFrame(
[quantile_inf_pp, quantile_sup_pp],
index=["pp_lb", "pp_ub"],
columns=h_range,
).T,
],
axis=1,
)
plt.bar(
x=h_range,
height=extremogram_realized,
label="Empirical",
width=0.8,
color="slategrey",
alpha=0.6,
)
plt.plot(h_range, mean_pp, label="Poisson Process Simulated", color="salmon")
plt.fill_between(
x=h_range, y1=quantile_inf_pp, y2=quantile_sup_pp, alpha=0.2, color="salmon"
)
# Simulated Hawkes process
if compare_to_hawkes:
if UEHP is not None:
uv = UEHP()
uv.fit(np.array(data_extremogram["days_since_start"]))
exceedances_hp = []
for _ in range(1000):
exceedances_hp.append(uv.sample(data["days_since_start"].max()))
else:
from pykelihood.samplers import HawkesByThinningModified
mu = 1 / (len(data_extremogram) / len(data))
alpha = 0
theta = 0
def score(dist, data):
if dist.rate.alpha >= 1.0:
return 10 ** 10
else:
return opposite_log_likelihood(dist, data)
hawkes_fit = Exponential.fit(
data_extremogram["iat"],
x0=(mu, alpha, theta),
loc=0.0,
rate=kernels.hawkes_with_exp_kernel(
np.array(data_extremogram["days_since_start"])
),
score=score,
)
mu, alpha, theta = hawkes_fit.optimisation_params
exceedances_hp = []
for _ in range(1000):
exceedances_hp.append(
HawkesByThinningModified(
data["days_since_start"].max(), mu, alpha, theta
)
)
count_hp = []
for i in range(len(exceedances_hp)):
# HP
ex_hp = exceedances_hp[i]
indices_exceedances = [e for e in ex_hp]
local_count_hp = extremogram_loop(h_range, indices_exceedances)
local_count_hp = | pd.Series(local_count_hp, index=h_range) | pandas.Series |
import matplotlib.image as mpimg
import matplotlib.style as style
import matplotlib.pyplot as plt
from matplotlib import rcParams
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
from sys import stdout
import seaborn as sns
from math import exp
import pandas as pd
import mdtraj as md
import pickle as pk
import numpy as np
import statistics
import itertools
import fileinput
import fnmatch
import shutil
import random
import math
import os
import re
def fix_cap_remove_ace(pdb_file):
"""
Removes the H atoms of the capped ACE residue.
"""
remove_words = [
"H1 ACE",
"H2 ACE",
"H3 ACE",
"H31 ACE",
"H32 ACE",
"H33 ACE",
]
with open(pdb_file) as oldfile, open("intermediate.pdb", "w") as newfile:
for line in oldfile:
if not any(word in line for word in remove_words):
newfile.write(line)
command = "rm -rf " + pdb_file
os.system(command)
command = "mv intermediate.pdb " + pdb_file
os.system(command)
def fix_cap_replace_ace(pdb_file):
"""
Replaces the alpha carbon atom of the
capped ACE residue with a standard name.
"""
fin = open(pdb_file, "rt")
data = fin.read()
data = data.replace("CA ACE", "CH3 ACE")
data = data.replace("C ACE", "CH3 ACE")
fin.close()
fin = open(pdb_file, "wt")
fin.write(data)
fin.close()
def fix_cap_remove_nme(pdb_file):
"""
Removes the H atoms of the capped NME residue.
"""
remove_words = [
"H1 NME",
"H2 NME",
"H3 NME",
"H31 NME",
"H32 NME",
"H33 NME",
]
with open(pdb_file) as oldfile, open("intermediate.pdb", "w") as newfile:
for line in oldfile:
if not any(word in line for word in remove_words):
newfile.write(line)
command = "rm -rf " + pdb_file
os.system(command)
command = "mv intermediate.pdb " + pdb_file
os.system(command)
def fix_cap_replace_nme(pdb_file):
"""
Replaces the alpha carbon atom of the
capped NME residue with a standard name.
"""
fin = open(pdb_file, "rt")
data = fin.read()
data = data.replace("CA NME", "CH3 NME")
data = data.replace("C NME", "CH3 NME")
fin.close()
fin = open(pdb_file, "wt")
fin.write(data)
fin.close()
def prepare_alanine_dipeptide():
"""
Prepares the alanine dipeptide system for Gaussian
Accelerated Molecular Dynamics (GaMD) simulations.
Downloads the pdb structure from
https://markovmodel.github.io/mdshare/ALA2/ and
parameterizes it using General Amber Force Field
(GAFF).
"""
os.system(
"curl -O http://ftp.imp.fu-berlin.de/pub/cmb-data/alanine-dipeptide-nowater.pdb"
)
os.system(
"rm -rf system_inputs"
) # Removes any existing directory named system_inputs
os.system("mkdir system_inputs") # Creates a directory named system_inputs
cwd = os.getcwd()
target_dir = cwd + "/" + "system_inputs"
os.system("pdb4amber -i alanine-dipeptide-nowater.pdb -o intermediate.pdb")
# Delete HH31, HH32 and HH33 from the ACE residue (tleap adds them later)
remove_words = ["HH31 ACE", "HH32 ACE", "HH33 ACE"]
with open("intermediate.pdb") as oldfile, open(
"system.pdb", "w"
) as newfile:
for line in oldfile:
if not any(word in line for word in remove_words):
newfile.write(line)
os.system("rm -rf intermediate*")
# save the tleap script to file
with open("input_TIP3P.leap", "w") as f:
f.write(
"""
source leaprc.protein.ff14SB
source leaprc.water.tip3p
set default FlexibleWater on
set default PBRadii mbondi2
pdb = loadpdb system.pdb
solvateBox pdb TIP3PBOX 15
saveamberparm pdb system_TIP3P.prmtop system_TIP3P.inpcrd
saveamberparm pdb system_TIP3P.parm7 system_TIP3P.rst7
savepdb pdb system_TIP3P.pdb
quit
"""
)
os.system("tleap -f input_TIP3P.leap")
os.system("rm -rf leap.log")
shutil.copy(
cwd + "/" + "system_TIP3P.inpcrd",
target_dir + "/" + "system_TIP3P.inpcrd",
)
shutil.copy(
cwd + "/" + "system_TIP3P.parm7",
target_dir + "/" + "system_TIP3P.parm7",
)
shutil.copy(
cwd + "/" + "system_TIP3P.pdb", target_dir + "/" + "system_TIP3P.pdb"
)
shutil.copy(
cwd + "/" + "system_TIP3P.prmtop",
target_dir + "/" + "system_TIP3P.prmtop",
)
shutil.copy(
cwd + "/" + "system_TIP3P.rst7", target_dir + "/" + "system_TIP3P.rst7"
)
shutil.copy(cwd + "/" + "system.pdb", target_dir + "/" + "system.pdb")
shutil.copy(
cwd + "/" + "alanine-dipeptide-nowater.pdb",
target_dir + "/" + "alanine-dipeptide-nowater.pdb",
)
shutil.copy(
cwd + "/" + "input_TIP3P.leap", target_dir + "/" + "input_TIP3P.leap"
)
os.system("rm -rf system_TIP3P.inpcrd")
os.system("rm -rf system_TIP3P.parm7")
os.system("rm -rf system_TIP3P.pdb")
os.system("rm -rf system_TIP3P.inpcrd")
os.system("rm -rf system_TIP3P.rst7")
os.system("rm -rf system_TIP3P.prmtop")
os.system("rm -rf system.pdb")
os.system("rm -rf input_TIP3P.leap")
os.system("rm -rf alanine-dipeptide-nowater.pdb")
def create_vectors(x):
"""
Extracts peridic box information from the
given line.
"""
x = str(x)
x = x.replace("Vec3", "")
x = re.findall("\d*\.?\d+", x)
for i in range(0, len(x)):
x[i] = float(x[i])
x = tuple(x)
n = int(len(x) / 3)
x = [x[i * n : (i + 1) * n] for i in range((len(x) + n - 1) // n)]
return x
def simulated_annealing(
parm="system_TIP3P.prmtop",
rst="system_TIP3P.inpcrd",
annealing_output_pdb="system_annealing_output.pdb",
annealing_steps=100000,
pdb_freq=100000,
starting_temp=0,
target_temp=300,
temp_incr=3,
):
"""
Performs simulated annealing of the system from
0K to 300 K (default) using OpenMM MD engine and
saves the last frame of the simulation to be
accessed by the next simulation.
Parameters
----------
parm: str
System's topology file
rst: str
System's coordinate file
annealing_output_pdb: str
System's output trajectory file
annealing_steps: int
Aneealing steps at each temperatrure jump
pdb_freq: int
Trajectory to be saved after every pdb_freq steps
starting_temp: int
Initial temperature of Simulated Annealing
target_temp: int
Final temperature of Simulated Annealing
temp_incr: int
Temmperature increase for every step
"""
prmtop = AmberPrmtopFile(parm)
inpcrd = AmberInpcrdFile(rst)
annealing_system = prmtop.createSystem(
nonbondedMethod=PME, nonbondedCutoff=1 * nanometer, constraints=HBonds
)
annealing_integrator = LangevinIntegrator(
0 * kelvin, 1 / picosecond, 2 * femtoseconds
)
total_steps = ((target_temp / temp_incr) + 1) * annealing_steps
annealing_temp_range = int((target_temp / temp_incr) + 1)
annealing_platform = Platform.getPlatformByName("CUDA")
annealing_properties = {"CudaDeviceIndex": "0", "CudaPrecision": "mixed"}
annealing_simulation = Simulation(
prmtop.topology,
annealing_system,
annealing_integrator,
annealing_platform,
annealing_properties,
)
annealing_simulation.context.setPositions(inpcrd.positions)
if inpcrd.boxVectors is not None:
annealing_simulation.context.setPeriodicBoxVectors(*inpcrd.boxVectors)
annealing_simulation.minimizeEnergy()
annealing_simulation.reporters.append(
PDBReporter(annealing_output_pdb, pdb_freq)
)
simulated_annealing_last_frame = (
annealing_output_pdb[:-4] + "_last_frame.pdb"
)
annealing_simulation.reporters.append(
PDBReporter(simulated_annealing_last_frame, total_steps)
)
annealing_simulation.reporters.append(
StateDataReporter(
stdout,
pdb_freq,
step=True,
time=True,
potentialEnergy=True,
totalSteps=total_steps,
temperature=True,
progress=True,
remainingTime=True,
speed=True,
separator="\t",
)
)
temp = starting_temp
while temp <= target_temp:
annealing_integrator.setTemperature(temp * kelvin)
if temp == starting_temp:
annealing_simulation.step(annealing_steps)
annealing_simulation.saveState("annealing.state")
else:
annealing_simulation.loadState("annealing.state")
annealing_simulation.step(annealing_steps)
temp += temp_incr
state = annealing_simulation.context.getState()
print(state.getPeriodicBoxVectors())
annealing_simulation_box_vectors = state.getPeriodicBoxVectors()
print(annealing_simulation_box_vectors)
with open("annealing_simulation_box_vectors.pkl", "wb") as f:
pk.dump(annealing_simulation_box_vectors, f)
print("Finshed NVT Simulated Annealing Simulation")
def npt_equilibration(
parm="system_TIP3P.prmtop",
npt_output_pdb="system_npt_output.pdb",
pdb_freq=500000,
npt_steps=5000000,
target_temp=300,
npt_pdb="system_annealing_output_last_frame.pdb",
):
"""
Performs NPT equilibration MD of the system
using OpenMM MD engine and saves the last
frame of the simulation to be accessed by
the next simulation.
Parameters
----------
parm: str
System's topology file
npt_output_pdb: str
System's output trajectory file
pdb_freq: int
Trajectory to be saved after every pdb_freq steps
npt_steps: int
NPT simulation steps
target_temp: int
Temperature for MD simulation
npt_pdb: str
Last frame of the simulation
"""
npt_init_pdb = PDBFile(npt_pdb)
prmtop = AmberPrmtopFile(parm)
npt_system = prmtop.createSystem(
nonbondedMethod=PME, nonbondedCutoff=1 * nanometer, constraints=HBonds
)
barostat = MonteCarloBarostat(25.0 * bar, target_temp * kelvin, 25)
npt_system.addForce(barostat)
npt_integrator = LangevinIntegrator(
target_temp * kelvin, 1 / picosecond, 2 * femtoseconds
)
npt_platform = Platform.getPlatformByName("CUDA")
npt_properties = {"CudaDeviceIndex": "0", "CudaPrecision": "mixed"}
npt_simulation = Simulation(
prmtop.topology,
npt_system,
npt_integrator,
npt_platform,
npt_properties,
)
npt_simulation.context.setPositions(npt_init_pdb.positions)
npt_simulation.context.setVelocitiesToTemperature(target_temp * kelvin)
with open("annealing_simulation_box_vectors.pkl", "rb") as f:
annealing_simulation_box_vectors = pk.load(f)
annealing_simulation_box_vectors = create_vectors(
annealing_simulation_box_vectors
)
npt_simulation.context.setPeriodicBoxVectors(
annealing_simulation_box_vectors[0],
annealing_simulation_box_vectors[1],
annealing_simulation_box_vectors[2],
)
npt_last_frame = npt_output_pdb[:-4] + "_last_frame.pdb"
npt_simulation.reporters.append(PDBReporter(npt_output_pdb, pdb_freq))
npt_simulation.reporters.append(PDBReporter(npt_last_frame, npt_steps))
npt_simulation.reporters.append(
StateDataReporter(
stdout,
pdb_freq,
step=True,
time=True,
potentialEnergy=True,
totalSteps=npt_steps,
temperature=True,
progress=True,
remainingTime=True,
speed=True,
separator="\t",
)
)
npt_simulation.minimizeEnergy()
npt_simulation.step(npt_steps)
npt_simulation.saveState("npt_simulation.state")
state = npt_simulation.context.getState()
print(state.getPeriodicBoxVectors())
npt_simulation_box_vectors = state.getPeriodicBoxVectors()
print(npt_simulation_box_vectors)
with open("npt_simulation_box_vectors.pkl", "wb") as f:
pk.dump(npt_simulation_box_vectors, f)
print("Finished NPT Simulation")
def nvt_equilibration(
parm="system_TIP3P.prmtop",
nvt_output_pdb="system_nvt_output.pdb",
pdb_freq=500000,
nvt_steps=5000000,
target_temp=300,
nvt_pdb="system_npt_output_last_frame.pdb",
):
"""
Performs NVT equilibration MD of the system
using OpenMM MD engine saves the last
frame of the simulation to be accessed by
the next simulation.
Parameters
----------
parm: str
System's topology file
nvt_output_pdb: str
System's output trajectory file
pdb_freq: int
Trajectory to be saved after every pdb_freq steps
nvt_steps: int
NVT simulation steps
target_temp: int
Temperature for MD simulation
nvt_pdb: str
Last frame of the simulation
"""
nvt_init_pdb = PDBFile(nvt_pdb)
prmtop = AmberPrmtopFile(parm)
nvt_system = prmtop.createSystem(
nonbondedMethod=PME, nonbondedCutoff=1 * nanometer, constraints=HBonds
)
nvt_integrator = LangevinIntegrator(
target_temp * kelvin, 1 / picosecond, 2 * femtoseconds
)
nvt_platform = Platform.getPlatformByName("CUDA")
nvt_properties = {"CudaDeviceIndex": "0", "CudaPrecision": "mixed"}
nvt_simulation = Simulation(
prmtop.topology,
nvt_system,
nvt_integrator,
nvt_platform,
nvt_properties,
)
nvt_simulation.context.setPositions(nvt_init_pdb.positions)
nvt_simulation.context.setVelocitiesToTemperature(target_temp * kelvin)
with open("npt_simulation_box_vectors.pkl", "rb") as f:
npt_simulation_box_vectors = pk.load(f)
npt_simulation_box_vectors = create_vectors(npt_simulation_box_vectors)
nvt_simulation.context.setPeriodicBoxVectors(
npt_simulation_box_vectors[0],
npt_simulation_box_vectors[1],
npt_simulation_box_vectors[2],
)
nvt_last_frame = nvt_output_pdb[:-4] + "_last_frame.pdb"
nvt_simulation.reporters.append(PDBReporter(nvt_output_pdb, pdb_freq))
nvt_simulation.reporters.append(PDBReporter(nvt_last_frame, nvt_steps))
nvt_simulation.reporters.append(
StateDataReporter(
stdout,
pdb_freq,
step=True,
time=True,
potentialEnergy=True,
totalSteps=nvt_steps,
temperature=True,
progress=True,
remainingTime=True,
speed=True,
separator="\t",
)
)
nvt_simulation.minimizeEnergy()
nvt_simulation.step(nvt_steps)
nvt_simulation.saveState("nvt_simulation.state")
state = nvt_simulation.context.getState()
print(state.getPeriodicBoxVectors())
nvt_simulation_box_vectors = state.getPeriodicBoxVectors()
print(nvt_simulation_box_vectors)
with open("nvt_simulation_box_vectors.pkl", "wb") as f:
pk.dump(nvt_simulation_box_vectors, f)
print("Finished NVT Simulation")
def run_equilibration():
"""
Runs systematic simulated annealing followed by
NPT and NVT equilibration MD simulation.
"""
cwd = os.getcwd()
target_dir = cwd + "/" + "equilibration"
os.system("rm -rf equilibration")
os.system("mkdir equilibration")
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.inpcrd",
target_dir + "/" + "system_TIP3P.inpcrd",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.parm7",
target_dir + "/" + "system_TIP3P.parm7",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.pdb",
target_dir + "/" + "system_TIP3P.pdb",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.prmtop",
target_dir + "/" + "system_TIP3P.prmtop",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.rst7",
target_dir + "/" + "system_TIP3P.rst7",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system.pdb",
target_dir + "/" + "system.pdb",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "alanine-dipeptide-nowater.pdb",
target_dir + "/" + "alanine-dipeptide-nowater.pdb",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "input_TIP3P.leap",
target_dir + "/" + "input_TIP3P.leap",
)
os.chdir(target_dir)
simulated_annealing()
npt_equilibration()
nvt_equilibration()
os.system("rm -rf system_TIP3P.inpcrd")
os.system("rm -rf system_TIP3P.parm7")
os.system("rm -rf system_TIP3P.pdb")
os.system("rm -rf system_TIP3P.rst7")
os.system("rm -rf system_TIP3P.prmtop")
os.system("rm -rf system.pdb")
os.system("rm -rf alanine-dipeptide-nowater.pdb")
os.system("rm -rf input_TIP3P.leap")
os.chdir(cwd)
def create_starting_structures():
"""
Prepares starting structures for Amber GaMD simulations.
All input files required to run Amber GaMD simulations are
placed in the starting_structures directory.
"""
cwd = os.getcwd()
target_dir = cwd + "/" + "starting_structures"
os.system("rm -rf starting_structures")
os.system("mkdir starting_structures")
shutil.copy(
cwd + "/" + "equilibration" + "/" + "system_nvt_output_last_frame.pdb",
target_dir + "/" + "system_nvt_output_last_frame.pdb",
)
os.chdir(target_dir)
fix_cap_remove_nme("system_nvt_output_last_frame.pdb")
fix_cap_replace_nme("system_nvt_output_last_frame.pdb")
# Save the tleap script to file
with open("final_input_TIP3P.leap", "w") as f:
f.write(
"""
source leaprc.protein.ff14SB
source leaprc.water.tip3p
set default FlexibleWater on
set default PBRadii mbondi2
pdb = loadpdb system_nvt_output_last_frame.pdb
saveamberparm pdb system_final.prmtop system_final.inpcrd
saveamberparm pdb system_final.parm7 system_final.rst7
savepdb pdb system_final.pdb
quit
"""
)
os.system("tleap -f final_input_TIP3P.leap")
os.system("rm -rf leap.log")
os.system("rm -rf system_nvt_output_last_frame.pdb")
os.chdir(cwd)
def add_vec_inpcrd():
"""
Adds box dimensions captured from the last saved
frame of the NVT simulations to the inpcrd file.
Only to be used when the box dimensions are not
present in the inpcrd file.
"""
cwd = os.getcwd()
target_dir = cwd + "/" + "starting_structures"
shutil.copy(
cwd + "/" + "equilibration" + "/" + "nvt_simulation_box_vectors.pkl",
target_dir + "/" + "nvt_simulation_box_vectors.pkl",
)
os.chdir(target_dir)
with open("nvt_simulation_box_vectors.pkl", "rb") as f:
nvt_simulation_box_vectors = pk.load(f)
nvt_simulation_box_vectors = create_vectors(nvt_simulation_box_vectors)
vectors = (
(nvt_simulation_box_vectors[0][0]) * 10,
(nvt_simulation_box_vectors[1][1]) * 10,
(nvt_simulation_box_vectors[2][2]) * 10,
)
vectors = (
round(vectors[0], 7),
round(vectors[1], 7),
round(vectors[2], 7),
)
last_line = (
" "
+ str(vectors[0])
+ " "
+ str(vectors[1])
+ " "
+ str(vectors[2])
+ " 90.0000000"
+ " 90.0000000"
+ " 90.0000000"
)
with open("system_final.inpcrd", "a+") as f:
f.write(last_line)
os.system("rm -rf nvt_simulation_box_vectors.pkl")
os.chdir(cwd)
def add_vec_prmtop():
"""
Adds box dimensions captured from the last saved
frame of the NVT simulations to the prmtop file.
Only to be used when the box dimensions are not
present in the prmtop file.
"""
cwd = os.getcwd()
target_dir = cwd + "/" + "starting_structures"
shutil.copy(
cwd + "/" + "equilibration" + "/" + "nvt_simulation_box_vectors.pkl",
target_dir + "/" + "nvt_simulation_box_vectors.pkl",
)
os.chdir(target_dir)
with open("nvt_simulation_box_vectors.pkl", "rb") as f:
nvt_simulation_box_vectors = pk.load(f)
nvt_simulation_box_vectors = create_vectors(nvt_simulation_box_vectors)
vectors = (
nvt_simulation_box_vectors[0][0],
nvt_simulation_box_vectors[1][1],
nvt_simulation_box_vectors[2][2],
)
vectors = round(vectors[0], 7), round(vectors[1], 7), round(vectors[2], 7)
oldbeta = "9.00000000E+01"
x = str(vectors[0]) + str(0) + "E+" + "01"
y = str(vectors[1]) + str(0) + "E+" + "01"
z = str(vectors[2]) + str(0) + "E+" + "01"
line1 = "%FLAG BOX_DIMENSIONS"
line2 = "%FORMAT(5E16.8)"
line3 = " " + oldbeta + " " + x + " " + y + " " + z
with open("system_final.prmtop") as i, open(
"system_intermediate_final.prmtop", "w"
) as f:
for line in i:
if line.startswith("%FLAG RADIUS_SET"):
line = line1 + "\n" + line2 + "\n" + line3 + "\n" + line
f.write(line)
os.system("rm -rf system_final.prmtop")
os.system("mv system_intermediate_final.prmtop system_final.prmtop")
os.system("rm -rf nvt_simulation_box_vectors.pkl")
os.chdir(cwd)
def create_filetree(
nst_lim=26000000,
ntw_x=1000,
nt_cmd=1000000,
n_teb=1000000,
n_tave=50000,
ntcmd_prep=200000,
nteb_prep=200000,
):
"""
Creates a directory named gamd_simulations. Inside
this directory, there are subdirectories for dihedral,
dual and total potential-boosted GaMD with upper and
lower threshold boosts separately.
Parameters
----------
nst_lim: int
Total simulation time including preparatory simulation.
For example, if nst_lim = 26000000, then, we may have
2 ns of preparatory simulation i.e. 1000000 preparation steps
and 50 ns of GaMD simulation i.e. 25000000 simulation steps
ntw_x: int
Saving coordinates of the simulation every ntw_x
timesteps. For example, 2 ps implies 1000 timesteps
nt_cmd: int
Number of initial MD simulation step, 2 ns of
preparatory simulation requires 1000000 preparation
timesteps
n_teb: int
Number of biasing MD simulation steps
n_tave: int
Number of simulation steps used to calculate the
average and standard deviation of potential energies
ntcmd_prep: int
Number of preparation conventional molecular dynamics
steps.This is used for system equilibration and
potential energies are not collected for statistics
nteb_prep: int
Number of preparation biasing molecular dynamics
simulation steps. This is used for system
equilibration
"""
cwd = os.getcwd()
os.system("rm -rf gamd_simulations")
os.system("mkdir gamd_simulations")
os.chdir(cwd + "/" + "gamd_simulations")
source_dir = cwd + "/" + "starting_structures"
target_dir = cwd + "/" + "gamd_simulations"
dir_list = [
"dihedral_threshold_lower",
"dihedral_threshold_upper",
"dual_threshold_lower",
"dual_threshold_upper",
"total_threshold_lower",
"total_threshold_upper",
]
for i in range(len(dir_list)):
os.mkdir(dir_list[i])
os.chdir(target_dir + "/" + dir_list[i])
shutil.copy(
source_dir + "/" + "system_final.inpcrd",
target_dir + "/" + dir_list[i] + "/" + "system_final.inpcrd",
)
shutil.copy(
source_dir + "/" + "system_final.prmtop",
target_dir + "/" + dir_list[i] + "/" + "system_final.prmtop",
)
if "lower" in dir_list[i]:
i_E = 1
if "upper" in dir_list[i]:
i_E = 2
if "total" in dir_list[i]:
i_gamd = 1
if "dihedral" in dir_list[i]:
i_gamd = 2
if "dual" in dir_list[i]:
i_gamd = 3
with open("md.in", "w") as f:
f.write("&cntrl" + "\n")
f.write(" imin = 0, irest = 0, ntx = 1," + "\n")
f.write(" nstlim = " + str(nst_lim) + ", dt = 0.002," + "\n")
f.write(" ntc = 2, ntf = 2, tol = 0.000001," + "\n")
f.write(" iwrap = 1, ntb = 1, cut = 8.0," + "\n")
f.write(" ntt = 3, temp0 = 300.0, gamma_ln = 1.0, " + "\n")
f.write(
" ntpr = 500, ntwx = " + str(ntw_x) + ", ntwr = 500," + "\n"
)
f.write(" ntxo = 2, ioutfm = 1, ig = -1, ntwprt = 0," + "\n")
f.write(
" igamd = "
+ str(i_gamd)
+ ", iE = "
+ str(i_E)
+ ", irest_gamd = 0,"
+ "\n"
)
f.write(
" ntcmd = "
+ str(nt_cmd)
+ ", nteb = "
+ str(n_teb)
+ ", ntave = "
+ str(n_tave)
+ ","
+ "\n"
)
f.write(
" ntcmdprep = "
+ str(ntcmd_prep)
+ ", ntebprep = "
+ str(nteb_prep)
+ ","
+ "\n"
)
f.write(" sigma0D = 6.0, sigma0P = 6.0" + " \n")
f.write("&end" + "\n")
os.chdir(target_dir)
os.chdir(cwd)
def run_simulations():
"""
Runs GaMD simulations for each of the dihedral, dual and total
potential boosts for both thresholds i.e. upper and lower potential
thresholds. (Remember to check md.in files for further details and
flag information).
"""
cwd = os.getcwd()
os.chdir(cwd + "/" + "gamd_simulations")
os.chdir(cwd + "/" + "gamd_simulations" + "/" + "dihedral_threshold_lower")
os.system(
"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc"
)
os.chdir(cwd + "/" + "gamd_simulations" + "/" + "dihedral_threshold_upper")
os.system(
"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc"
)
os.chdir(cwd + "/" + "gamd_simulations" + "/" + "dual_threshold_lower")
os.system(
"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc"
)
os.chdir(cwd + "/" + "gamd_simulations" + "/" + "dual_threshold_upper")
os.system(
"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc"
)
os.chdir(cwd + "/" + "gamd_simulations" + "/" + "total_threshold_lower")
os.system(
"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc"
)
os.chdir(cwd + "/" + "gamd_simulations" + "/" + "total_threshold_upper")
os.system(
"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc"
)
os.chdir(cwd + "/" + "gamd_simulations")
os.chdir(cwd)
def create_data_files(
jump=10,
traj="system_final.nc",
topology="system_final.prmtop",
T=300,
):
"""
Extracts data from GaMD log files and saves them as
weights.dat, Psi.dat and Phi_Psi.dat. gamd.log file
contains data excluding the initial equilibration MD
simulation steps but trajectory output file has all
the trajectories including the initial equilibration
MD steps. This part has ben taken care to make the
data consistent.
Parameters
----------
jump: int
Every nth frame to be considered for reweighting
traj: str
System's trajectory file
topology: str
System's topology file
T: int
MD simulation temperature
"""
# To make data consistent with gamd.log and .nc file
factor = 0.001987 * T
with open("md.in") as f:
lines = f.readlines()
for i in lines:
if "nstlim =" in i:
nstlim_line = i
if "ntcmd =" in i:
ntcmd_line = i
if "ntwx =" in i:
ntwx_line = i
x = re.findall(r"\b\d+\b", ntcmd_line)
ntcmd = int(x[0])
x = re.findall(r"\b\d+\b", nstlim_line)
nstlim = int(x[0])
x = re.findall(r"\b\d+\b", ntwx_line)
ntwx = int(x[1])
# From the .nc trajectory files, we will not consider ntcmd trajectories
leave_frames = int(ntcmd / ntwx)
no_frames = int(nstlim / ntwx)
# Recheck conditions
file = open("gamd.log", "r")
number_of_lines = 0
for line in file:
line = line.strip("\n")
number_of_lines += 1
file.close()
f = open("gamd.log")
fourth_line = f.readlines()[3]
if str(ntcmd) in fourth_line:
datapoints = number_of_lines - 4
if not str(ntcmd) in fourth_line:
datapoints = number_of_lines - 3
print(datapoints == int((nstlim - ntcmd) / ntwx))
# Creating Psi.dat and Phi_Psi.dat
traj = md.load(traj, top=topology)
traj = traj[leave_frames:no_frames:jump]
phi = md.compute_phi(traj)
phi = phi[1] # 0:indices, 1:phi angles
phi = np.array([math.degrees(i) for i in phi]) # radians to degrees
psi = md.compute_psi(traj)
psi = psi[1] # 0:indices, 1:psi angles
psi = np.array([math.degrees(i) for i in psi]) # radians to degrees
df_psi = pd.DataFrame(phi, columns=["Psi"])
df_psi = df_psi.tail(int(datapoints))
df_psi.to_csv("Psi.dat", sep="\t", index=False, header=False)
df_phi = pd.DataFrame(psi, columns=["Phi"])
df_phi = df_phi.tail(int(datapoints))
df_phi_psi = pd.concat([df_phi, df_psi], axis=1)
df_phi_psi.to_csv("Phi_Psi.dat", sep="\t", index=False, header=False)
# Creating weights.dat
with open("gamd.log") as f:
lines = f.readlines()
column_names = lines[2]
column_names = column_names.replace("#", "")
column_names = column_names.replace("\n", "")
column_names = column_names.replace(" ", "")
column_names = column_names.split(",")
list_words = ["#"]
with open("gamd.log") as oldfile, open("data.log", "w") as newfile:
for line in oldfile:
if not any(word in line for word in list_words):
newfile.write(line)
df = pd.read_csv("data.log", delim_whitespace=True, header=None)
df.columns = column_names
df["dV(kcal/mol)"] = (
df["Boost-Energy-Potential"] + df["Boost-Energy-Dihedral"]
)
df["dV(kbT)"] = df["dV(kcal/mol)"] / factor
df_ = df[["dV(kbT)", "total_nstep", "dV(kcal/mol)"]]
df_ = df_[::jump]
df_.to_csv("weights.dat", sep="\t", index=False, header=False)
os.system("rm -rf data.log")
print(df_phi_psi.shape)
print(df_phi.shape)
print(df_.shape)
def create_bins(lower_bound, width, upper_bound):
"""
Creates bin if given the lower and upper bound
with the wirdth information.
"""
bins = []
for low in range(lower_bound, upper_bound, width):
bins.append([low, low + width])
return bins
def find_bin(value, bins):
"""
Finds which value belongs to which bin.
"""
for i in range(0, len(bins)):
if bins[i][0] <= value < bins[i][1]:
return i
return -1
def reweight_1d(
binspace=10, n_structures=4, Xdim=[-180, 180], T=300.0, min_prob=0.000001
):
"""
Reweights boosted potential energies in one-dimension based on
Maclaurin series expansion to one, two and three degrees.
Parameters
----------
binspace: int
Spacing between the bins
n_structures: int
Number of structures per bin chosen
for Weighted Ensemble (WE) simulations
Xdim: list
Range of dihedral angles
T: float
MD simulation temperature
min_prob: float
minimum probability threshold
"""
beta = 1.0 / (0.001987 * float(T))
df_Psi = pd.read_csv("Psi.dat", delim_whitespace=True, header=None)
df_Psi.columns = ["Psi"]
df_weight = pd.read_csv("weights.dat", delim_whitespace=True, header=None)
df_weight.columns = ["dV_kBT", "timestep", "dVkcalmol"]
sum_total = df_Psi.shape[0]
binsX = np.arange(float(Xdim[0]), (float(Xdim[1]) + binspace), binspace)
hist, hist_edges = np.histogram(df_Psi[["Psi"]], bins=binsX, weights=None)
pstarA = [i / sum_total for i in list(hist)]
bins = create_bins(
lower_bound=int(Xdim[0]), width=binspace, upper_bound=int(Xdim[1])
)
data = df_Psi["Psi"].values.tolist()
binned_weights = []
for value in data:
bin_index = find_bin(value, bins)
binned_weights.append(bin_index)
df_index = pd.DataFrame(binned_weights)
df_index.columns = ["index"]
df = pd.concat([df_index, df_Psi, df_weight], axis=1)
dV_c1 = []
dV_c2 = []
dV_c3 = []
dV = []
for i in range(len(bins)):
df_i = df.loc[(df["index"] == i)]
dV_list = df_i["dVkcalmol"].values.tolist()
if len(dV_list) >= 10:
dV_c1.append(statistics.mean(dV_list))
dV_c2.append(
statistics.mean([i ** 2 for i in dV_list])
- (statistics.mean(dV_list)) ** 2
)
dV_c3.append(
statistics.mean([i ** 3 for i in dV_list])
- 3
* (statistics.mean([i ** 2 for i in dV_list]))
* (statistics.mean(dV_list))
+ 2 * (statistics.mean(dV_list)) ** 3
)
if len(dV_list) < 10:
dV_c1.append(0)
dV_c2.append(0)
dV_c3.append(0)
dV.append(dV_list)
c1 = [i * beta for i in dV_c1]
c2 = [i * ((beta ** 2) / 2) for i in dV_c2]
c3 = [i * ((beta ** 3) / 6) for i in dV_c3]
c1 = c1
c12 = [a + b for a, b in zip(c1, c2)]
c123 = [a + b for a, b in zip(c12, c3)]
for i in range(len(c1)):
if c1[i] >= 700:
c1[i] = 700
for i in range(len(c12)):
if c12[i] >= 700:
c12[i] = 700
for i in range(len(c123)):
if c123[i] >= 700:
c123[i] = 700
ensemble_average_c1 = [exp(i) for i in c1]
ensemble_average_c12 = [exp(i) for i in c12]
ensemble_average_c123 = [exp(i) for i in c123]
numerator_c1 = [a * b for a, b in zip(pstarA, ensemble_average_c1)]
numerator_c12 = [a * b for a, b in zip(pstarA, ensemble_average_c12)]
numerator_c123 = [a * b for a, b in zip(pstarA, ensemble_average_c123)]
#### c1
denominatorc1 = []
for i in range(len(bins)):
product_c1 = pstarA[i] * ensemble_average_c1[i]
denominatorc1.append(product_c1)
denominator_c1 = sum(denominatorc1)
pA_c1 = [i / denominator_c1 for i in numerator_c1]
#### c12
denominatorc12 = []
for i in range(len(bins)):
product_c12 = pstarA[i] * ensemble_average_c12[i]
denominatorc12.append(product_c12)
denominator_c12 = sum(denominatorc12)
pA_c12 = [i / denominator_c12 for i in numerator_c12]
#### c123
denominatorc123 = []
for i in range(len(bins)):
product_c123 = pstarA[i] * ensemble_average_c123[i]
denominatorc123.append(product_c123)
denominator_c123 = sum(denominatorc123)
pA_c123 = [i / denominator_c123 for i in numerator_c123]
data_c1 = list(zip(bins, pA_c1))
data_c12 = list(zip(bins, pA_c12))
data_c123 = list(zip(bins, pA_c123))
df_c1 = pd.DataFrame(data_c1, columns=["bins", "pA_c1"])
df_c12 = pd.DataFrame(data_c12, columns=["bins", "pA_c12"])
df_c123 = pd.DataFrame(data_c123, columns=["bins", "pA_c123"])
####c1
df_c1.to_csv("c1_1d.txt", header=True, index=None, sep=" ", mode="w")
with open("c1_1d.txt", "r") as f1, open("pA_c1_1d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_1d.txt")
####c12
df_c12.to_csv("c12_1d.txt", header=True, index=None, sep=" ", mode="w")
with open("c12_1d.txt", "r") as f1, open("pA_c12_1d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_1d.txt")
####c123
df_c123.to_csv("c123_1d.txt", header=True, index=None, sep=" ", mode="w")
with open("c123_1d.txt", "r") as f1, open("pA_c123_1d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_1d.txt")
####c1_arranged
df_c1_arranged = df_c1.sort_values(by="pA_c1", ascending=False)
df_c1_arranged = df_c1_arranged[df_c1_arranged.pA_c1 > min_prob]
df_c1_arranged.to_csv(
"c1_arranged_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c1_arranged_1d.txt", "r") as f1, open(
"pA_c1_arranged_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_arranged_1d.txt")
####c12_arranged
df_c12_arranged = df_c12.sort_values(by="pA_c12", ascending=False)
df_c12_arranged = df_c12_arranged[df_c12_arranged.pA_c12 > min_prob]
df_c12_arranged.to_csv(
"c12_arranged_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c12_arranged_1d.txt", "r") as f1, open(
"pA_c12_arranged_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_arranged_1d.txt")
####c123_arranged
df_c123_arranged = df_c123.sort_values(by="pA_c123", ascending=False)
df_c123_arranged = df_c123_arranged[df_c123_arranged.pA_c123 > min_prob]
df_c123_arranged.to_csv(
"c123_arranged_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c123_arranged_1d.txt", "r") as f1, open(
"pA_c123_arranged_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_arranged_1d.txt")
####c1_arranged
df_c1_arranged["index"] = df_c1_arranged.index
index_list_c1 = df_c1_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c1 = []
index_indces_c1 = []
for i in index_list_c1:
df_index_list_c1 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c1 = df_index_list_c1["frame_index"].tolist()
frame_indices_c1.append(frame_c1)
index_c1 = [i] * len(frame_c1)
index_indces_c1.append(index_c1)
frame_indices_c1 = [item for elem in frame_indices_c1 for item in elem]
index_indces_c1 = [item for elem in index_indces_c1 for item in elem]
df_c1_frame = pd.DataFrame(frame_indices_c1, columns=["frame_index"])
df_c1_index = pd.DataFrame(index_indces_c1, columns=["index"])
df_c1_frame_index = pd.concat([df_c1_frame, df_c1_index], axis=1)
df_c1_frame_index = df_c1_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c1_frame_index.to_csv(
"c1_frame_index_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c1_frame_index_1d.txt", "r") as f1, open(
"c1_frame_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_frame_index_1d.txt")
####c12_arranged
df_c12_arranged["index"] = df_c12_arranged.index
index_list_c12 = df_c12_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c12 = []
index_indces_c12 = []
for i in index_list_c12:
df_index_list_c12 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c12 = df_index_list_c12["frame_index"].tolist()
frame_indices_c12.append(frame_c12)
index_c12 = [i] * len(frame_c12)
index_indces_c12.append(index_c12)
frame_indices_c12 = [item for elem in frame_indices_c12 for item in elem]
index_indces_c12 = [item for elem in index_indces_c12 for item in elem]
df_c12_frame = pd.DataFrame(frame_indices_c12, columns=["frame_index"])
df_c12_index = pd.DataFrame(index_indces_c12, columns=["index"])
df_c12_frame_index = pd.concat([df_c12_frame, df_c12_index], axis=1)
df_c12_frame_index = df_c12_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c12_frame_index.to_csv(
"c12_frame_index_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c12_frame_index_1d.txt", "r") as f1, open(
"c12_frame_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_frame_index_1d.txt")
####c123_arranged
df_c123_arranged["index"] = df_c123_arranged.index
index_list_c123 = df_c123_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c123 = []
index_indces_c123 = []
for i in index_list_c123:
df_index_list_c123 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c123 = df_index_list_c123["frame_index"].tolist()
frame_indices_c123.append(frame_c123)
index_c123 = [i] * len(frame_c123)
index_indces_c123.append(index_c123)
frame_indices_c123 = [item for elem in frame_indices_c123 for item in elem]
index_indces_c123 = [item for elem in index_indces_c123 for item in elem]
df_c123_frame = pd.DataFrame(frame_indices_c123, columns=["frame_index"])
df_c123_index = pd.DataFrame(index_indces_c123, columns=["index"])
df_c123_frame_index = pd.concat([df_c123_frame, df_c123_index], axis=1)
df_c123_frame_index = df_c123_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c123_frame_index.to_csv(
"c123_frame_index_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c123_frame_index_1d.txt", "r") as f1, open(
"c123_frame_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_frame_index_1d.txt")
####c1
indices_c1_1d = df_c1_frame_index["index"].unique()
frames_c1 = []
for i in indices_c1_1d:
x = df_c1_frame_index.loc[df_c1_frame_index["index"] == i]
y = x["frame_index"].values.tolist()
z = random.sample(y, n_structures)
frames_c1.append(z)
frames_c1_1d = [item for elem in frames_c1 for item in elem]
with open("frames_c1_1d.pickle", "wb") as f:
pk.dump(frames_c1_1d, f)
with open("indices_c1_1d.pickle", "wb") as f:
pk.dump(indices_c1_1d, f)
####c12
indices_c12_1d = df_c12_frame_index["index"].unique()
frames_c12 = []
for i in indices_c12_1d:
x = df_c12_frame_index.loc[df_c12_frame_index["index"] == i]
y = x["frame_index"].values.tolist()
z = random.sample(y, n_structures)
frames_c12.append(z)
frames_c12_1d = [item for elem in frames_c12 for item in elem]
with open("frames_c12_1d.pickle", "wb") as f:
pk.dump(frames_c12_1d, f)
with open("indices_c12_1d.pickle", "wb") as f:
pk.dump(indices_c12_1d, f)
####c123
indices_c123_1d = df_c123_frame_index["index"].unique()
frames_c123 = []
for i in indices_c123_1d:
x = df_c123_frame_index.loc[df_c123_frame_index["index"] == i]
y = x["frame_index"].values.tolist()
z = random.sample(y, n_structures)
frames_c123.append(z)
frames_c123_1d = [item for elem in frames_c123 for item in elem]
with open("frames_c123_1d.pickle", "wb") as f:
pk.dump(frames_c123_1d, f)
with open("indices_c123_1d.pickle", "wb") as f:
pk.dump(indices_c123_1d, f)
##saving probabilities for each selected frame
####c1
prob_c1_1d_list = []
for i in indices_c1_1d:
prob_c1_1d_list.append(df_c1["pA_c1"][i])
prob_c1_1d_list = list(
itertools.chain.from_iterable(
itertools.repeat(x, n_structures) for x in prob_c1_1d_list
)
)
prob_c1_1d_list = [x / n_structures for x in prob_c1_1d_list]
with open("prob_c1_1d_list.pickle", "wb") as f:
pk.dump(prob_c1_1d_list, f)
####c12
prob_c12_1d_list = []
for i in indices_c12_1d:
prob_c12_1d_list.append(df_c12["pA_c12"][i])
prob_c12_1d_list = list(
itertools.chain.from_iterable(
itertools.repeat(x, n_structures) for x in prob_c12_1d_list
)
)
prob_c12_1d_list = [x / n_structures for x in prob_c12_1d_list]
with open("prob_c12_1d_list.pickle", "wb") as f:
pk.dump(prob_c12_1d_list, f)
####c123
prob_c123_1d_list = []
for i in indices_c123_1d:
prob_c123_1d_list.append(df_c123["pA_c123"][i])
prob_c123_1d_list = list(
itertools.chain.from_iterable(
itertools.repeat(x, n_structures) for x in prob_c123_1d_list
)
)
prob_c123_1d_list = [x / n_structures for x in prob_c123_1d_list]
with open("prob_c123_1d_list.pickle", "wb") as f:
pk.dump(prob_c123_1d_list, f)
ref_df_1d = pd.DataFrame(bins, columns=["dim0", "dim1"])
ref_df_1d["bins"] = ref_df_1d.agg(
lambda x: f"[{x['dim0']} , {x['dim1']}]", axis=1
)
ref_df_1d = ref_df_1d[["bins"]]
index_ref_1d = []
for i in range(len(bins)):
index_ref_1d.append(i)
index_ref_df_1d = pd.DataFrame(index_ref_1d, columns=["index"])
df_ref_1d = pd.concat([ref_df_1d, index_ref_df_1d], axis=1)
df_ref_1d.to_csv("ref_1d.txt", header=True, index=None, sep=" ", mode="w")
df.to_csv("df_1d.csv", index=False)
os.system("rm -rf __pycache__")
print("Successfully Completed Reweighing")
def reweight_2d(
binspace=10,
n_structures=4,
Xdim=[-180, 180],
Ydim=[-180, 180],
T=300.0,
min_prob=0.000001,
):
"""
Reweights boosted potential energies in two-dimensions
based on Maclaurin series expansion to one, two and
three degrees.
Parameters
----------
binspace: int
Spacing between the bins
n_structures: int
Number of structures per bin chosen
for Weighted Ensemble (WE) simulations
Xdim: list
Range of dihedral angles (1st dimension)
Ydim: list
Range of dihedral angles (2nd dimension)
T: float
MD simulation temperature
min_prob: float
minimum probability threshold
"""
beta = 1.0 / (0.001987 * float(T))
df_Phi_Psi = pd.read_csv("Phi_Psi.dat", delim_whitespace=True, header=None)
df_Phi_Psi.columns = ["Phi", "Psi"]
df_weight = pd.read_csv("weights.dat", delim_whitespace=True, header=None)
df_weight.columns = ["dV_kBT", "timestep", "dVkcalmol"]
sum_total = df_Phi_Psi.shape[0]
binsX = np.arange(float(Xdim[0]), (float(Xdim[1]) + binspace), binspace)
binsY = np.arange(float(Ydim[0]), (float(Ydim[1]) + binspace), binspace)
hist2D, hist_edgesX, hist_edgesY = np.histogram2d(
df_Phi_Psi["Phi"].values.tolist(),
df_Phi_Psi["Psi"].values.tolist(),
bins=(binsX, binsY),
weights=None,
)
pstarA_2D = [i / sum_total for i in list(hist2D)]
bins_tuple_X = create_bins(
lower_bound=int(Xdim[0]), width=binspace, upper_bound=int(Xdim[1])
)
bins_tuple_Y = create_bins(
lower_bound=int(Ydim[0]), width=binspace, upper_bound=int(Ydim[1])
)
bins = []
for i in range(len(bins_tuple_X)):
for j in range(len(bins_tuple_Y)):
bins.append([bins_tuple_X[i], bins_tuple_Y[j]])
pstarA = [item for elem in pstarA_2D for item in elem]
hist = [item for elem in hist2D for item in elem]
hist = [int(i) for i in hist]
data_X = df_Phi_Psi["Phi"].values.tolist()
binned_weights_X = []
for value in data_X:
bin_index_X = find_bin(value, bins_tuple_X)
binned_weights_X.append(bin_index_X)
data_Y = df_Phi_Psi["Psi"].values.tolist()
binned_weights_Y = []
for value in data_Y:
bin_index_Y = find_bin(value, bins_tuple_Y)
binned_weights_Y.append(bin_index_Y)
binned_weights_2D = []
for i in range(len(binned_weights_X)):
binned_weights_2D.append([binned_weights_X[i], binned_weights_Y[i]])
binned_weights = []
for i in range(len(binned_weights_2D)):
binned_weights.append(
(binned_weights_2D[i][0] * len(bins_tuple_Y))
+ (binned_weights_2D[i][1] + 1)
)
df_index = pd.DataFrame(binned_weights)
df_index.columns = ["index"]
df_index["index"] = df_index["index"] - 1
df = pd.concat([df_index, df_Phi_Psi, df_weight], axis=1)
dV_c1 = []
dV_c2 = []
dV_c3 = []
dV = []
for i in range(len(bins)):
df_i = df.loc[(df["index"] == i)]
dV_list = df_i["dVkcalmol"].values.tolist()
if len(dV_list) >= 10:
dV_c1.append(statistics.mean(dV_list))
dV_c2.append(
statistics.mean([i ** 2 for i in dV_list])
- (statistics.mean(dV_list)) ** 2
)
dV_c3.append(
statistics.mean([i ** 3 for i in dV_list])
- 3
* (statistics.mean([i ** 2 for i in dV_list]))
* (statistics.mean(dV_list))
+ 2 * (statistics.mean(dV_list)) ** 3
)
if len(dV_list) < 10:
dV_c1.append(0)
dV_c2.append(0)
dV_c3.append(0)
dV.append(dV_list)
c1 = [i * beta for i in dV_c1]
c2 = [i * ((beta ** 2) / 2) for i in dV_c2]
c3 = [i * ((beta ** 3) / 6) for i in dV_c3]
c1 = c1
c12 = [a + b for a, b in zip(c1, c2)]
c123 = [a + b for a, b in zip(c12, c3)]
for i in range(len(c1)):
if c1[i] >= 700:
c1[i] = 700
for i in range(len(c12)):
if c12[i] >= 700:
c12[i] = 700
for i in range(len(c123)):
if c123[i] >= 700:
c123[i] = 700
ensemble_average_c1 = [exp(i) for i in c1]
ensemble_average_c12 = [exp(i) for i in c12]
ensemble_average_c123 = [exp(i) for i in c123]
numerator_c1 = [a * b for a, b in zip(pstarA, ensemble_average_c1)]
numerator_c12 = [a * b for a, b in zip(pstarA, ensemble_average_c12)]
numerator_c123 = [a * b for a, b in zip(pstarA, ensemble_average_c123)]
#### c1
denominatorc1 = []
for i in range(len(bins)):
product_c1 = pstarA[i] * ensemble_average_c1[i]
denominatorc1.append(product_c1)
denominator_c1 = sum(denominatorc1)
pA_c1 = [i / denominator_c1 for i in numerator_c1]
#### c12
denominatorc12 = []
for i in range(len(bins)):
product_c12 = pstarA[i] * ensemble_average_c12[i]
denominatorc12.append(product_c12)
denominator_c12 = sum(denominatorc12)
pA_c12 = [i / denominator_c12 for i in numerator_c12]
#### c123
denominatorc123 = []
for i in range(len(bins)):
product_c123 = pstarA[i] * ensemble_average_c123[i]
denominatorc123.append(product_c123)
denominator_c123 = sum(denominatorc123)
pA_c123 = [i / denominator_c123 for i in numerator_c123]
data_c1 = list(zip(bins, pA_c1))
data_c12 = list(zip(bins, pA_c12))
data_c123 = list(zip(bins, pA_c123))
df_c1 = pd.DataFrame(data_c1, columns=["bins", "pA_c1"])
df_c12 = pd.DataFrame(data_c12, columns=["bins", "pA_c12"])
df_c123 = pd.DataFrame(data_c123, columns=["bins", "pA_c123"])
df_c1.to_csv("c1_2d.txt", header=True, index=None, sep=" ", mode="w")
with open("c1_2d.txt", "r") as f1, open("pA_c1_2d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_2d.txt")
####c12
df_c12.to_csv("c12_2d.txt", header=True, index=None, sep=" ", mode="w")
with open("c12_2d.txt", "r") as f1, open("pA_c12_2d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_2d.txt")
####c123
df_c123.to_csv("c123_2d.txt", header=True, index=None, sep=" ", mode="w")
with open("c123_2d.txt", "r") as f1, open("pA_c123_2d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_2d.txt")
####c1_arranged
df_c1_arranged = df_c1.sort_values(by="pA_c1", ascending=False)
df_c1_arranged = df_c1_arranged[df_c1_arranged.pA_c1 > min_prob]
df_c1_arranged.to_csv(
"c1_arranged_2d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c1_arranged_2d.txt", "r") as f1, open(
"pA_c1_arranged_2d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_arranged_2d.txt")
####c12_arranged
df_c12_arranged = df_c12.sort_values(by="pA_c12", ascending=False)
df_c12_arranged = df_c12_arranged[df_c12_arranged.pA_c12 > min_prob]
df_c12_arranged.to_csv(
"c12_arranged_2d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c12_arranged_2d.txt", "r") as f1, open(
"pA_c12_arranged_2d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_arranged_2d.txt")
####c123_arranged
df_c123_arranged = df_c123.sort_values(by="pA_c123", ascending=False)
df_c123_arranged = df_c123_arranged[df_c123_arranged.pA_c123 > min_prob]
df_c123_arranged.to_csv(
"c123_arranged_2d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c123_arranged_2d.txt", "r") as f1, open(
"pA_c123_arranged_2d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_arranged_2d.txt")
####c1_arranged
df_c1_arranged["index"] = df_c1_arranged.index
index_list_c1 = df_c1_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c1 = []
index_indces_c1 = []
for i in index_list_c1:
df_index_list_c1 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c1 = df_index_list_c1["frame_index"].tolist()
frame_indices_c1.append(frame_c1)
index_c1 = [i] * len(frame_c1)
index_indces_c1.append(index_c1)
frame_indices_c1 = [item for elem in frame_indices_c1 for item in elem]
index_indces_c1 = [item for elem in index_indces_c1 for item in elem]
df_c1_frame = pd.DataFrame(frame_indices_c1, columns=["frame_index"])
df_c1_index = | pd.DataFrame(index_indces_c1, columns=["index"]) | pandas.DataFrame |
import sys
import numpy as np
import pandas as pd
from scipy.stats import mannwhitneyu, norm, rankdata, tiecorrect
from statsmodels.stats.multitest import multipletests
from tqdm import tqdm_notebook as tqdm
from . import config
from .utils import precheck_align
try:
import cupy as cp
from cupyx.scipy.special import ndtr
except ModuleNotFoundError:
cupy = None
ndtr = None
def melt_mwu(effects, pvals, pos_ns, neg_ns, effect):
"""
Flatten matrix-form outputs to column-form.
Parameters
----------
effects: Pandas DataFrame
effect sizes matrix
pvals: Pandas DataFrame
p-values matrix
pos_ns neg_ns: Pandas DataFrames
sample group counts
a_num_cols: int
number of columns in first observations matrix
b_num_cols: int
number of columns in second observations matrix
effect: "mean", "median", or "rank_biserial"
the effect statistic
Returns
-------
series form statistics
"""
melted = pd.DataFrame()
melted[effect] = effects.unstack()
melted["pval"] = pvals.unstack()
melted["qval"] = multipletests(
10 ** (-melted["pval"]),
alpha=config.MULTIPLETESTS_ALPHA,
method=config.MULTIPLETESTS_METHOD,
)[1]
melted["qval"] = -np.log10(melted["qval"])
melted["pos_n"] = pos_ns.unstack()
melted["neg_n"] = neg_ns.unstack()
melted = melted.sort_values(by="pval", ascending=False)
melted.index.set_names(["b_col", "a_col"], inplace=True)
melted.index = melted.index.swaplevel(0, 1)
return melted
def mat_mwu_naive(
a_mat, b_mat, melt: bool, effect: str, use_continuity=True, pbar=False,
):
"""
Compute rank-biserial correlations and Mann-Whitney statistics
between every column-column pair of a_mat (continuous) and b_mat (binary)
using a double for loop.
In the case that a_mat or b_mat has a single column, the results are
re-formatted with the multiple hypothesis-adjusted q-value also returned.
Parameters
----------
a_mat: Pandas DataFrame
Continuous set of observations, with rows as samples and columns
as labels.
b_mat: Pandas DataFrame
Binary set of observations, with rows as samples and columns as labels.
Required to be castable to boolean datatype.
melt: boolean
Whether or not to melt the outputs into columns.
use_continuity: bool
Whether or not to use a continuity correction. True by default.
pbar: Boolean
Whether or not to show a progress bar.
effect: "mean", "median", or "rank_biserial"
The effect statistic.
Returns
-------
effects: rank-biserial correlations
pvals: -log10 p-values of correlations
"""
if effect not in ["mean", "median", "rank_biserial"]:
raise ValueError("effect must be 'mean', 'median', or 'rank_biserial'")
a_mat, b_mat = precheck_align(a_mat, b_mat, np.float64, np.float64)
a_names = a_mat.columns
b_names = b_mat.columns
a_num_cols = a_mat.shape[1] # number of variables in A
b_num_cols = b_mat.shape[1] # number of variables in B
effects = np.zeros((a_num_cols, b_num_cols)) # null value of r = 0
pvals = np.zeros((a_num_cols, b_num_cols)) + 1 # null value of p=1
pos_ns = np.zeros((a_num_cols, b_num_cols))
neg_ns = np.zeros((a_num_cols, b_num_cols))
if pbar:
sys.stderr.flush()
progress = tqdm(total=a_num_cols * b_num_cols)
for a_col_idx, a_col_name in enumerate(a_names):
for b_col_idx, b_col_name in enumerate(b_names):
a_col = a_mat[a_col_name].dropna()
b_col = b_mat[b_col_name].dropna()
a_col, b_col = a_col.align(b_col, join="inner")
b_pos = b_col == 1
b_neg = b_col == 0
pos_n = b_pos.sum()
neg_n = b_neg.sum()
pos_ns[a_col_idx][b_col_idx] = pos_n
neg_ns[a_col_idx][b_col_idx] = neg_n
if pos_n >= 1 and neg_n >= 1:
a_pos = a_col[b_pos]
a_neg = a_col[b_neg]
# handle identical values cases
if np.std(np.concatenate([a_pos, a_neg])) == 0:
pvals[a_col_idx][b_col_idx] = 1
else:
U2, pval = mannwhitneyu(
a_pos,
a_neg,
use_continuity=use_continuity,
alternative="two-sided",
)
if effect == "rank_biserial":
effects[a_col_idx][b_col_idx] = (
2 * U2 / (len(a_pos) * len(a_neg)) - 1
)
elif effect == "median":
pos_med = a_pos.median()
neg_med = a_neg.median()
effects[a_col_idx][b_col_idx] = pos_med - neg_med
elif effect == "mean":
pos_mean = a_pos.mean()
neg_mean = a_neg.mean()
effects[a_col_idx][b_col_idx] = pos_mean - neg_mean
pvals[a_col_idx][b_col_idx] = pval
if pbar:
progress.update(1)
if pbar:
progress.close()
# account for small p-values rounding to 0
pvals[pvals == 0] = np.finfo(np.float64).tiny
effects = pd.DataFrame(effects, index=a_names, columns=b_names)
pvals = | pd.DataFrame(pvals, index=a_names, columns=b_names) | pandas.DataFrame |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas import (Series, isnull, date_range,
MultiIndex, Index)
from pandas.tseries.index import Timestamp
from pandas.compat import range
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.pchip missing')
def _skip_if_no_akima():
try:
from scipy.interpolate import Akima1DInterpolator # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.Akima1DInterpolator missing')
class TestSeriesMissingData(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_timedelta_fillna(self):
# GH 3371
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
td = s.diff()
# reg fillna
result = td.fillna(0)
expected = Series([timedelta(0), timedelta(0), timedelta(1), timedelta(
days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
# interprested as seconds
result = td.fillna(1)
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
expected = Series([timedelta(days=1, seconds=1), timedelta(
0), timedelta(1), timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(np.timedelta64(int(1e9)))
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
from pandas import tslib
result = td.fillna(tslib.NaT)
expected = Series([tslib.NaT, timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)],
dtype='m8[ns]')
assert_series_equal(result, expected)
# ffill
td[2] = np.nan
result = td.ffill()
expected = td.fillna(0)
expected[0] = np.nan
assert_series_equal(result, expected)
# bfill
td[2] = np.nan
result = td.bfill()
expected = td.fillna(0)
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
assert_series_equal(result, expected)
def test_datetime64_fillna(self):
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
s[2] = np.nan
# reg fillna
result = s.fillna(Timestamp('20130104'))
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130104'), Timestamp('20130103 9:01:01')])
| assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
import datetime
from dateutil.relativedelta import *
from fuzzywuzzy import fuzz
import argparse
import glob
import numpy as np
import pandas as pd
from scipy.stats import ttest_1samp
import sys
import xarray as xr
from paths_bra import *
sys.path.append('./..')
from refuelplot import *
setup()
from utils import *
gen_path = bra_path + '/generation'
# get GWA version
parser = argparse.ArgumentParser(description='Insert optionally GWA')
parser.add_argument('-GWA')
args = parser.parse_args()
if(args.GWA == None):
GWA = "3"
else:
GWA = args.GWA
if GWA == "2":
results_path2 = results_path
results_path = results_path + '/results_GWA2'
# load generation data
print('load generation data')
# load usinas hourly
if gen_path + '/hourly/usinas.pkl' not in glob.glob(gen_path + '/hourly/*.pkl'):
USIh = pd.read_csv(gen_path + '/hourly/Comparativo_Geração_de_Energia_Semana_data_usinas.csv',
sep = ';', index_col = 0, parse_dates = True, dayfirst = True).iloc[1:,[6,8]].sort_index()
# remove missing values
USIh = USIh.loc[USIh.index.notnull()].dropna()
USIh.columns = ['usina','prod_GWh']
# in RIO DO FOGO there is one duplicate hour after one missing hour -> change timestamps of those hours
idxUSIh = USIh.index.values
midxUSIh = USIh.reset_index().set_index(['usina','Data Escala de Tempo 1 GE Comp 3']).index
idxUSIh[midxUSIh.duplicated(keep='last')] = idxUSIh[midxUSIh.duplicated(keep='first')] - np.timedelta64(1,'h')
USIh.index = pd.DatetimeIndex(idxUSIh)
USIhs = USIh.reset_index().set_index(['usina','index']).unstack(level=0).prod_GWh
USIhs.to_csv(gen_path + '/hourly/usinas.csv')
USIhs.to_pickle(gen_path + '/hourly/usinas.pkl')
wpUSIhs = pd.read_pickle(gen_path + '/hourly/usinas.pkl')
# load and match aneel and ons windparks
def get_cap_df(cap,comdate):
com = pd.DataFrame({'capacity': cap}).groupby(comdate).sum()
cap_cum = com.capacity.cumsum()
# if only years given for commissioning dates -> gradual capacity increase over year, full capacity at end of year
if type(cap_cum.index.values[0]) == np.int64:
cap_cum.index = [np.datetime64(str(int(year))+"-12-31 23:00:00") for year in cap_cum.index.values]
# create yearly dates at yearends
drcc = pd.date_range(np.datetime64('2005-12-31 23:00:00'),
np.datetime64('2019-12-31 23:00:00'),freq= 'y')
cap_cum = pd.Series(drcc.map(cap_cum),index = drcc)
# if first year emtpy: either year before or 0 if nothing before
if(sum(com.index<2000) > 0):
cap_cum[0] = com.cumsum()[com.index<2000].max()
else:
cap_cum[0] = 0
# if missing years -> put capacity of year before
cap_cum = cap_cum.ffill()
dr = pd.date_range('1/1/2006','31/12/2019 23:00:00',freq = 'h')
cap_ts = pd.Series(dr.map(cap_cum),index = dr)
cap_ts[0] = cap_cum[cap_cum.index<=pd.Timestamp('2006-01-01')].max()
if type(comdate[0]) == np.int64:
return(cap_ts.interpolate(method='linear'))
else:
return(cap_ts.fillna(method='ffill'))
def matchWords(word, statements):
# function to match a word to different statements
# output: ratio of matching (0-100) for all provided statements
results = []
for s in statements:
r = fuzz.ratio(word, s)
results.append(r)
return results
def match_string(string, array):
# function for matching casefolded strings
Slc = string.strip().casefold()
Alc = [arr.casefold() for arr in array.str.strip().unique()]
scores = matchWords(Slc, Alc)
mscore = max(scores)
strarr = array.unique()[np.where(np.array(scores)==mscore)][0]
return(string,strarr,mscore)
def match_anl(string):
# function to match ONS to ANL windparks
return(match_string(string,ANL2.name))
print('match wind parks')
# load ANEEL and ONS windparks
ONS = pd.read_csv(bra_path + '/ONS_windparks.csv', index_col = 0)
# remove those with CONJUNTO EOLICO - they're there twice and capacities don't match with ANEEL data
ONS = ONS[~ONS.usina.str.contains('CONJUNTO EOLICO')]
# remove some other duplicate windparks
ONS = ONS[[d not in [' CANOA QUEBRADA (E-RV-ACEP)',' PV DO NORDESTE',' SM (SANTA MARIA)',' SÃO BENTO NORTE II'] for d in ONS.usina]]
ANL = pd.read_csv(bra_path + '/turbine_data.csv', index_col = 0)
# characters and strings to replace for better matching
letters = {'õ':'õ',
'ó':'o',
'ã':'a',
'á':'a',
'â':'a',
'é':'e',
'Ã':'A',
'Á':'A',
'Â':'A',
'Ó':'O',
'É':'E',
'ú':'u',
'ô':'o',
'Ô':'O',
'ú':'u',
'Ú':'U',
'ç':'c',
'Ç':'C',
'í':'i',
'Í':'I',
'Ê':'E'}
remove = {' 2LER':'',
' 2LFA':'',
' LFA':'',
'EOL ':'',
' 3LER':'',
'Usina Eolica ':'',
'Eólica ':'',
' ENERGIAS RENOVAVEIS':'',
# ' CONJUNTO EOLICO':'',
'\(E-BV-ACEP\)':'',
'\(E-RV-ACEP\)':'',
'\(BELA BISTA\)':'',
'\(ENERGEN\)':'',
'\(Antiga Ventos Maranhenses 05\)':'',
'PARQUE EOLICO ':'',
' - N HORIZ':'',
'ENERGETICA S/A':'',
'\(ILHEUS\)':'',
' EOLOS':'',
'S\.A\.':''}
replace = {'LAG DO':'LAGOA DO',
'VENTOS S VICENTE':'VENTOS DE SAO VICENTE',
'SERRA BABILONIA':'SERRA DA BABILONIA',
'CORREDOR SENANDES':'CORREDOR DO SENANDES',
'SAO BENTO NORTE':'SAO BENTO DO NORTE',
'GAMELEIRAS':'GAMELERIAS',
'Lagoinha':'Lagoinh',
'PAPAGAIOS':'PAPAGAIO',
'VENTOS DE SAO ABRAAO':'VENTOS DO SANTO ABRAAO',
'VENTOS DO SAO MARIO':'VENTOS DE SAO MARIO',
'DAGUA':'D AGUA',
'B VEN':'BONS VENTOS',
'NOVA BURITI':'BURITI',
'NOVA CAJUCOCO':'CAJUCOCO',
'PALMAS':'DE PALMAS',
'DE PALMARES':'PALMARES',
'PV DO NORDESTE':'VENTOS DO NORDESTE',
'Aura Lagoa do Barro':'Lagoa do Barro',
'AURA LAGOA DO BARRO':'LAGOA DO BARRO',
'LAGOA BARRO':'LAGOA DO BARRO',
'GRAVATA':'GRAVATA FRUITRADE',
'FAZENDA DO ROSARIO':'FAZENDA ROSARIO',
'Parque Eolico do Horizonte':'Ventos de Horizonte',
'S BENTO':'SAO BENTO',
'SANTO ANTONIO (BTG PACTUAL)':'SANTO ANTONIO DE PADUA',
'SM \(SANTA MARIA\)':'SANTA MARIA',
'SAO JORGE CE':'SAO JORGE',
'VENT DA ST ESPERANCA':'VENTOS DA SANTA ESPERANCA',
'VENTOS DA STA DULCE':'VENTOS DA SANTA DULCE',
'ESPERANCA NORDESTE':'ESPERANCA DO NORDESTE',
'Eolica Delta':'Delta',
'Eolica Serra das Vacas':'Serra das Vacas',
'Ventos de Santo Augusto':'Santo Augusto',
'Ventos do Sao Gabriel':'Sao Gabriel',
'GE <NAME>':'<NAME>'}
numbers = {'10':'X',
'11':'XI',
'12':'XII',
'13':'XIII',
'14':'XIV',
'15':'XV',
'17':'XVII',
'19':'XIX',
'21':'XXI',
'23':'XXIII',
'24':'XXIV',
'25':'XXV',
'26':'XXVI',
'27':'XXVII',
'28':'XXVIII',
'29':'XXIX',
'31':'XXXI',
'34':'XXXIV',
'35':'XXXV',
'36':'XXXVI',
'01':'I',
'02':'II',
'03':'III',
'04':'IV',
'05':'V',
'06':'VI',
'07':'VII',
'08':'VIII',
'09':'IX',
'1':'I',
'2':'II',
'3':'III',
'4':'IV',
'5':'V',
'6':'VI',
'7':'VII',
'8':'VIII',
'9':'IX'}
# replace characters
ONS2 = ONS.copy(deep=True)
ANL2 = ANL.copy(deep=True)
for i in letters:
ONS2.usina = ONS2.usina.str.replace(i,letters.get(i))
ANL2.name = ANL2.name.str.replace(i,letters.get(i))
for i in replace:
ONS2.usina = ONS2.usina.str.replace(i,replace.get(i))
ANL2.name = ANL2.name.str.replace(i,replace.get(i))
for i in remove:
ONS2.usina = ONS2.usina.str.replace(i,remove.get(i))
for i in numbers:
ONS2.usina = ONS2.usina.str.replace(i,numbers.get(i))
ANL2.name = ANL2.name.str.replace(i,numbers.get(i))
# match windparks
matches = ONS2.usina.apply(match_anl).apply(pd.Series)
matches.columns = ['ONS_name','ANL_name','score']
ONSd = pd.Series(ONS.usina.values,index=ONS2.usina.values)
ANLd = pd.Series(ANL.name.values,index=ANL2.name.values)
ONSd.columns = ['simpl','orig']
ANLd.columns = ['simpl','orig']
# load simulated data
print('load simulated data')
# prepare simulated data as dataframe
if (results_path + '/wpUSI_MER.pkl' not in glob.glob(results_path + '/*.pkl')):
if GWA == "2":
wpERAxr = xr.open_dataset(results_path2 + '/windpower_stat_ERA5.nc',chunks={'time':80})
wpMERxr = xr.open_dataset(results_path2 + '/windpower_stat_MERRA2.nc',chunks={'time':80})
else:
wpERAxr = xr.open_dataset(results_path + '/windpower_stat_ERA5.nc',chunks={'time':80})
wpMERxr = xr.open_dataset(results_path + '/windpower_stat_MERRA2.nc',chunks={'time':80})
wpERAgxr = xr.open_mfdataset(results_path +'/windpower_??_ERA5_GWA.nc',chunks={'time':80})
wpMERgxr = xr.open_mfdataset(results_path +'/windpower_??_MERRA2_GWA.nc',chunks={'time':80})
turb_mer = pd.read_csv(bra_path + '/turbine_data_mer.csv',index_col=0)
turb_era = pd.read_csv(bra_path + '/turbine_data_era.csv',index_col=0)
turb_merg = pd.read_csv(bra_path + '/turbine_data_mer_gwa' + GWA + '.csv',index_col=0)
turb_erag = pd.read_csv(bra_path + '/turbine_data_era_gwa' + GWA + '.csv',index_col=0)
lbl = | pd.read_csv(bra_path+ '/labels_turbine_data_gwa' + GWA + '.csv',index_col=0) | pandas.read_csv |
import pandas as pd
import numpy as np
def btk_data_decoy_old():
df = pd.read_csv('btk_active_decoy/BTK_2810_old.csv')
df_decoy = pd.read_csv('btk_active_decoy/btk_finddecoy.csv')
df_decoy = pd.DataFrame(df_decoy['smile'])
df_decoy['label'] = 0
df_active = df[df['target2']<300]
df_active['target2'] = 1
df_ic_decoy = df[df['target2']>9000]
df_ic_decoy['target2'] = 0
del df_active['target1'],df_ic_decoy['target1']
df_active.columns = df_ic_decoy.columns = df_decoy.columns = ['smiles','label']
df_all = pd.concat([df_active,df_ic_decoy])
df_all = pd.concat([df_all,df_decoy])
df_all.to_csv('btk_active_decoy/btk_2810_add_decoy_old.csv',index=None)
def btk_data_cut_decoy():
df = pd.read_csv('btk_active_decoy/BTK_2810_old.csv')
df_decoy = pd.read_csv('btk_active_decoy/btk_finddecoy.csv')
df_cut_decoy = pd.read_csv('btk_active_decoy/similarity_active_decoy.csv')
df_cut_decoy = df_cut_decoy.head(1139)#1139是根据正样本1393个,乘以10比例13930,原先数据decoy总量为15069,15069-13930=1139
df_decoy = pd.DataFrame(df_decoy['smile'])
df_decoy['label'] = 0
df_active = df[df['target2']<300]
df_active['target2'] = 1
df_ic_decoy = df[df['target2']>9000]
df_ic_decoy['target2'] = 0
del df_active['target1'],df_ic_decoy['target1']
df_active.columns = df_ic_decoy.columns = df_decoy.columns = ['smiles','label']
df_all = pd.concat([df_active,df_ic_decoy])
df_all = pd.concat([df_all,df_decoy])
df_all_filter = df_all[~ df_all['smiles'].isin(df_cut_decoy['train_smiles'])]
df_all_filter.to_csv('btk_active_decoy/btk_2810_cut_decoy.csv',index=None)
def btk_data_decoy():
df = pd.read_csv('btk_active_decoy/BTK_2810.csv')
df_decoy = pd.read_csv('btk_active_decoy/btk_finddecoy.csv')
df_decoy = df_decoy.sample(frac=0.5, random_state=123)
df_decoy = pd.DataFrame(df_decoy['smile'])
df_decoy['label'] = 0
df_active = df[df['target2']<300]
df_active['target2'] = 1
df_ic_decoy = df[df['target2']>9000]
df_ic_decoy['target2'] = 0
del df_active['target1'],df_ic_decoy['target1']
df_active.columns = df_ic_decoy.columns = df_decoy.columns = ['smiles','label']
df_all = pd.concat([df_active,df_ic_decoy])
df_all = pd.concat([df_all,df_decoy])
df_all.to_csv('btk_active_decoy/btk_2810_add_decoy.csv',index=None)
def btk_2810_ic50():
df = pd.read_csv('btk_active_decoy/BTK_2810_old.csv')
df_decoy = pd.read_csv('btk_active_decoy/btk_finddecoy.csv')
df_decoy = pd.DataFrame(df_decoy['smile'])
df_decoy['label'] = 2
df_active = df[df['target2']<300]
df_ic_decoy = df[df['target2']>9000]
del df_active['target1'],df_ic_decoy['target1']
df_active.columns = df_ic_decoy.columns = df_decoy.columns = ['smiles','label']
df_all = pd.concat([df_active,df_ic_decoy])
df_all = pd.concat([df_all,df_decoy])
df_all.to_csv('btk_active_decoy/btk_2810_ic50.csv',index=None)
def btk_2610_data():
df = pd.read_csv('btk_active_decoy/BTK_2610.csv')
df_decoy = pd.read_csv('btk_active_decoy/btk_2610_find_decoy.csv')
df_decoy = pd.DataFrame(df_decoy['smile'])
df_decoy['label'] = 0
df_active = df[df['ic50']<100]
df_active['ic50'] = 1
df_ic_decoy = df[df['ic50']>=1000]
df_ic_decoy['ic50'] = 0
del df_active['chemblid'],df_ic_decoy['chemblid']
df_active.columns = df_ic_decoy.columns = df_decoy.columns = ['smiles','label']
df_all = pd.concat([df_active,df_ic_decoy])
df_all = pd.concat([df_all,df_decoy])
df_all.to_csv('btk_active_decoy/btk_2610_add_decoy.csv',index=None)
def btk_our_data():
df = pd.read_csv('btk_active_decoy/chembl_train_add_decoy.csv')
df_decoy = | pd.read_csv('btk_active_decoy/btk_our_decoy.csv') | pandas.read_csv |
from matplotlib import pyplot as plt
from matplotlib.ticker import FormatStrFormatter
import math
import numpy as np
import pandas as pd
from matplotlib import colors
from matplotlib.pyplot import cm
import sys
try:
# case = int(sys.argv[1])
infile = str(sys.argv[1])
except IndexError as err:
print("Not enough arguments: {0}".format(err))
sys.exit(1)
except ValueError as err:
print("Illegal value in argument: {0}".format(err))
sys.exit(1)
min_density = 50
from functools import reduce
plt.rcParams["font.family"] = "Times New Roman"
plt.rcParams['font.size'] = 20
plt.rc('text', usetex=True)
allfiles = open(infile, 'r')
allfiles = allfiles.readlines()
df = pd.read_csv(allfiles[0].strip(), header=0, index_col=0)
for i in range(1,len(allfiles)):
nextdf = pd.read_csv(allfiles[i].strip(), header=0, index_col=0)
df = pd.concat([df, nextdf], ignore_index=True)
# samp0 = df['samples'].unique()[0]
# samp1 = df['samples'].unique()[1]
# samp2 = df['samples'].unique()[2]
# tempdf = df[df['samples'] == samp2]['iterate rate']
# print("val range = ", tempdf.max() - tempdf.min())
# tempdf = df[df['samples'] == samp2]['grad rate']
# print("grd range = ", tempdf.max() - tempdf.min())
###### filter out small densitites
# print("*** Filtering out densities < ", min_density)
df = df[df['density'] > min_density]
df = df.reset_index()
# print("Available sample sizes: ", df['samples'].unique())
# print("Available densities: ", df['density'].unique())
# slurmid = str(allfiles[0][3:9])
funcname = str(df['function name'][0])
dimen = str(df['dim of intput'][0])
logbase = str(df['log base'][0])
# lb = str(df['left bound'][0])
# rb = str(df['right bound'][0])
tbscale = str(np.round(df['test grid scale'][0],2))
fig, ax = plt.subplots(nrows=2, ncols=1, figsize=(6,12))
fig.suptitle('Rates for '+funcname+', dim='+dimen+', b='+logbase, fontsize=20)
df['charL'] = df['right bound'] - df['left bound']
df['avg samp spacing'] = df['charL']/(np.power(df['samples'],1.0/float(dimen)))
# print("Available spacings: ", df['avg samp spacing'].unique())
###### compute zoom breaks
md = df['density'].unique().max()
zoom_breaks = df[df['density'] == md]['avg samp spacing'].unique()
for i in range(2):
if i == 0:
rate_to_plot = 'iterate rate'
else:
rate_to_plot = 'grad rate'
dfsub = df[[rate_to_plot, 'avg samp spacing']]
ir_mean =dfsub.groupby('avg samp spacing').mean().rename(columns={"iterate rate norm inside": "ir_mean"})
ir_25per =dfsub.groupby('avg samp spacing').quantile(q=.25).rename(columns={"iterate rate norm inside": "ir_25per"})
ir_75per =dfsub.groupby('avg samp spacing').quantile(q=.75).rename(columns={"iterate rate norm inside": "ir_75per"})
ir_max =dfsub.groupby('avg samp spacing').max().rename(columns={"iterate rate norm inside": "ir_max"})
ir_min =dfsub.groupby('avg samp spacing').min().rename(columns={"iterate rate norm inside": "ir_min"})
ir_10per =dfsub.groupby('avg samp spacing').quantile(q=.10).rename(columns={"iterate rate norm inside": "ir_10per"})
ir_90per =dfsub.groupby('avg samp spacing').quantile(q=.90).rename(columns={"iterate rate norm inside": "ir_90per"})
# ir_5per =dfsub.groupby('avg samp spacing').quantile(q=.05).rename(columns={"iterate rate norm inside": "ir_5per"})
# ir_95per =dfsub.groupby('avg samp spacing').quantile(q=.95).rename(columns={"iterate rate norm inside": "ir_95per"})
groupP=dfsub.groupby('avg samp spacing')
#groupby attributes
ir_mean =groupP.mean().rename(columns={rate_to_plot: "ir_mean"})
ir_25per =groupP.quantile(q=.25).rename(columns={rate_to_plot: "ir_25per"})
ir_75per =groupP.quantile(q=.75).rename(columns={rate_to_plot: "ir_75per"})
ir_max =groupP.max().rename(columns={rate_to_plot: "ir_max"})
ir_min =groupP.min().rename(columns={rate_to_plot: "ir_min"})
ir_10per =groupP.quantile(q=.10).rename(columns={rate_to_plot: "ir_10per"})
ir_90per =groupP.quantile(q=.90).rename(columns={rate_to_plot: "ir_90per"})
# ir_5per =groupP.quantile(q=.05).rename(columns={rate_to_plot: "ir_5per"})
# ir_95per =groupP.quantile(q=.95).rename(columns={rate_to_plot: "ir_95per"})
by_sample_count=groupP.count().rename(columns={rate_to_plot:"by_sample_count"})
# Make a list of the dataframes
data_frames = [ir_mean, ir_25per, ir_75per, ir_max, ir_min,
ir_10per, ir_90per, by_sample_count]
# Merge them all at once
merged_df = | pd.concat(data_frames, join='outer', axis=1) | pandas.concat |
'''
CIS 419/519 project: Using decision tree ensembles to infer the pathological
cause of age-related neurodegenerative changes based on clinical assessment
nadfahors: <NAME>, <NAME>, & <NAME>
This file contains code for preparing NACC data for analysis, including:
* synthesis of pathology data to create pathology class outcomes
* dropping uninformative variables from predictor set
* identifying and merging/resolving redundant clusters of variables
* identifying missing data codes and replacing with NaNs as appropriate
* creating change variables from longitudinal data
* imputation of missing data
* categorizing retained variables as interval/ratio, ordinal, or nominal
* creation of dummy variables for nominal variables
* standardizing interval/ratio and ordinal variables
* creating date variables, then converting these to useful ages or intervals
* quadratic expansion for interval/ratio variables?
'''
# Module imports
import pandas as pd
import numpy as np
import datetime
# Read in full dataset. Warning: this is about 340 MB.
fulldf = pd.read_csv('investigator_nacc48.csv')
# List of Uniform Data Set (UDS) values that will serve as potential
# predictors. Those with a "False" next to them will be excluded after data
# preparation; those with a True will be kept.
xvar = pd.read_csv('xvar.csv')
# Variables from the NACC neuropathology table that will be used to group
# individuals by pathology class:
# 1) Alzheimer's disease (AD);
# 2) frontotemporal lobar degeneration due to tauopathy (FTLD-tau)
# 3) frontotemporal lobar degeneration due to TDP-43 (FTLD-TDP)
# 4) Lewy body disease due to alpha synuclein (including Lewy body dementia and Parkinson's disease)
# 5) vascular disease
# Path classes: AD (ABC criteria); FTLD-tau; FTLD-TDP, including ALS; Lewy body disease (are PD patients captured here?); vascular
npvar = pd.DataFrame(np.array(["NPPMIH",0, # Postmortem interval--keep in as a potential confound variable?
"NPFIX",0,
"NPFIXX",0,
"NPWBRWT",0,
"NPWBRF",0,
"NACCBRNN",0,
"NPGRCCA",0,
"NPGRLA",0,
"NPGRHA",0,
"NPGRSNH",0,
"NPGRLCH",0,
"NACCAVAS",0,
"NPTAN",False,
"NPTANX",False,
"NPABAN",False,
"NPABANX",False,
"NPASAN",False,
"NPASANX",False,
"NPTDPAN",False,
"NPTDPANX",False,
"NPHISMB",False,
"NPHISG",False,
"NPHISSS",False,
"NPHIST",False,
"NPHISO",False,
"NPHISOX",False,
"NPTHAL",False,# Use for ABC scoring to create ordinal measure of AD change
"NACCBRAA",False,# Use for ABC scoring to create ordinal measure of AD change
"NACCNEUR",False,# Use for ABC scoring to create ordinal measure of AD change
"NPADNC",False,# Use for ABC scoring to create ordinal measure of AD change
"NACCDIFF",False,
"NACCVASC",False,# Vasc presence/absence
"NACCAMY",False,
"NPLINF",False,
"NPLAC",False,
"NPINF",False,# Derived variable summarizing several assessments of infarcts and lacunes
"NPINF1A",False,
"NPINF1B",False,
"NPINF1D",False,
"NPINF1F",False,
"NPINF2A",False,
"NPINF2B",False,
"NPINF2D",False,
"NPINF2F",False,
"NPINF3A",False,
"NPINF3B",False,
"NPINF3D",False,
"NPINF3F",False,
"NPINF4A",False,
"NPINF4B",False,
"NPINF4D",False,
"NPINF4F",False,
"NACCINF",False,
"NPHEM",False,
"NPHEMO",False,
"NPHEMO1",False,
"NPHEMO2",False,
"NPHEMO3",False,
"NPMICRO",False,
"NPOLD",False,
"NPOLD1",False,
"NPOLD2",False,
"NPOLD3",False,
"NPOLD4",False,
"NACCMICR",False,# Derived variable for microinfarcts
"NPOLDD",False,
"NPOLDD1",False,
"NPOLDD2",False,
"NPOLDD3",False,
"NPOLDD4",False,
"NACCHEM",False,# Derived variables for microbleeds and hemorrhages
"NACCARTE",False,
"NPWMR",False,
"NPPATH",False,# Other ischemic/vascular pathology
"NACCNEC",False,
"NPPATH2",False,
"NPPATH3",False,
"NPPATH4",False,
"NPPATH5",False,
"NPPATH6",False,
"NPPATH7",False,
"NPPATH8",False,
"NPPATH9",False,
"NPPATH10",False,
"NPPATH11",False,
"NPPATHO",False,
"NPPATHOX",False,
"NPART",False,
"NPOANG",False,
"NACCLEWY",False,# Note that limbic/transitional and amygdala-predominant are not differentiated
"NPLBOD",False,# But here they are differentiated!
"NPNLOSS",False,
"NPHIPSCL",False,
"NPSCL",False,
"NPFTDTAU",False,# FTLD-tau
"NACCPICK",False,# FTLD-tau
"NPFTDT2",False,# FTLD-tau
"NACCCBD",False,# FTLD-tau
"NACCPROG",False,# FTLD-tau
"NPFTDT5",False,# FTLD-tau
"NPFTDT6",False,# FTLD-tau
"NPFTDT7",False,# FTLD-tau
"NPFTDT8",False,# This is FTLD-tau but associated with ALS/parkinsonism--wut?
"NPFTDT9",False,# tangle-dominant disease--is this PART? Maybe exclude cases who have this as only path type.
"NPFTDT10",False,# FTLD-tau: other 3R+4R tauopathy. What is this if not AD? Maybe exclude. How many cases?
"NPFRONT",False,# FTLD-tau
"NPTAU",False,# FTLD-tau
"NPFTD",False,# FTLD-TDP
"NPFTDTDP",False,# FTLD-TDP
"NPALSMND",False,# FTLD-TDP (but exclude FUS and SOD1)
"NPOFTD",False,
"NPOFTD1",False,
"NPOFTD2",False,
"NPOFTD3",False,
"NPOFTD4",False,
"NPOFTD5",False,
"NPFTDNO",False,
"NPFTDSPC",False,
"NPTDPA",False,# In second pass, use anatomical distribution to stage
"NPTDPB",False,# In second pass, use anatomical distribution to stage
"NPTDPC",False,# In second pass, use anatomical distribution to stage
"NPTDPD",False,# In second pass, use anatomical distribution to stage
"NPTDPE",False,# In second pass, use anatomical distribution to stage
"NPPDXA",False,# Exclude?
"NPPDXB",False,# Exclude
"NACCPRIO",False,# Exclude
"NPPDXD",False,# Exclude
"NPPDXE",False,
"NPPDXF",False,
"NPPDXG",False,
"NPPDXH",False,
"NPPDXI",False,
"NPPDXJ",False,
"NPPDXK",False,
"NPPDXL",False,
"NPPDXM",False,
"NPPDXN",False,
"NACCDOWN",False,
"NACCOTHP",False,# Survey for exclusion criteria
"NACCWRI1",False,# Survey for exclusion criteria
"NACCWRI2",False,# Survey for exclusion criteria
"NACCWRI3",False,# Survey for exclusion criteria
"NACCBNKF",False,
"NPBNKB",False,
"NACCFORM",False,
"NACCPARA",False,
"NACCCSFP",False,
"NPBNKF",False,
"NPFAUT",False,
"NPFAUT1",False,
"NPFAUT2",False,
"NPFAUT3",False,
"NPFAUT4",False,
"NACCINT",False,
"NPNIT",False,
"NPCERAD",False,# What sort of variable?
"NPADRDA",False,
"NPOCRIT",False,
"NPVOTH",False,
"NPLEWYCS",False,
"NPGENE",True,# Family history--include in predictors?
"NPFHSPEC",False,# Code as dummy variables if useful.
"NPCHROM",False,# Exclusion factor? Genetic/chromosomal abnormalities
"NPPNORM",False,# Check all the following variables for redundancy with the ones above.
"NPCNORM",False,
"NPPADP",False,
"NPCADP",False,
"NPPAD",False,
"NPCAD",False,
"NPPLEWY",False,
"NPCLEWY",False,
"NPPVASC",False,
"NPCVASC",False,
"NPPFTLD",False,
"NPCFTLD",False,
"NPPHIPP",False,
"NPCHIPP",False,
"NPPPRION",False,
"NPCPRION",False,
"NPPOTH1",False,
"NPCOTH1",False,
"NPOTH1X",False,
"NPPOTH2",False,
"NPCOTH2",False,
"NPOTH2X",False,
"NPPOTH3",False,
"NPCOTH3",False,
"NPOTH3X",0]).reshape((-1,2)))
npvar.columns = ['Variable','Keep']
## Case selection process.
# Include only those with autopsy data.
aut = fulldf[fulldf.NACCAUTP == 1]
del fulldf
def table(a,b):
print(pd.crosstab(aut[a],aut[b],dropna=False,margins=True))
# Exclude for Down's, Huntington's, and other conditions.
aut = aut.loc[aut.DOWNS != 1]
aut = aut.loc[aut.HUNT != 1]
aut = aut.loc[aut.PRION != 1]
aut = aut.loc[~aut.MSAIF.isin([1,2,3])]
aut = aut.loc[~aut.NEOPIF.isin([1,2,3])]
aut = aut.loc[~aut.SCHIZOIF.isin([1,2,3])]
aut.index = list(range(aut.shape[0]))
# How many unique IDs?
# For now, keep in follow-up visits to increase our training data.
uids = aut.NACCID[~aut.NACCID.duplicated()]
#aut = aut[~aut.NACCID.duplicated()]
## Coding of pathology class outcomes.
# Create binary variables for the presence of each pathology class of interest.
# Code Alzheimer's disease pathology based on NPADNC, which implements
# ABC scoring based on Montine et al. (2012).
aut = aut.assign(ADPath = 0)
aut.loc[aut.NPADNC.isin((2,3)),'ADPath'] = 1
aut.loc[aut.NPPAD == 1,'ADPath'] = 1
# The following two commands make the ADPath variable false if the AD path
# diagnosis is as contributing, not as primary.
aut.loc[aut.NPPAD == 2,'ADPath'] = 0
aut.loc[aut.NPCAD == 1,'ADPath'] = 0
aut.loc[aut.NPPVASC == 1,'ADPath'] = 0
aut.loc[aut.NPPLEWY == 1,'ADPath'] = 0
aut.loc[aut.NPPFTLD == 1,'ADPath'] = 0
# Several variables pertain to FTLD tauopathies.
aut = aut.assign(TauPath = [0 for i in range(aut.shape[0])])
aut.loc[aut.NPFTDTAU == 1,'TauPath'] = 1
aut.loc[aut.NACCPICK == 1,'TauPath'] = 1
aut.loc[aut.NACCCBD == 1,'TauPath'] = 1
aut.loc[aut.NACCPROG == 1,'TauPath'] = 1
aut.loc[aut.NPFTDT2 == 1,'TauPath'] = 1
aut.loc[aut.NPFTDT5 == 1,'TauPath'] = 1
aut.loc[aut.NPFTDT6 == 1,'TauPath'] = 1
aut.loc[aut.NPFTDT7 == 1,'TauPath'] = 1
aut.loc[aut.NPFTDT9 == 1,'TauPath'] = 1
aut.loc[aut.NPFRONT == 1,'TauPath'] = 1
aut.loc[aut.NPTAU == 1,'TauPath'] = 1
aut.loc[aut.ADPath == 1, 'TauPath'] = 0
aut.loc[aut.NPCFTLD == 1, 'TauPath'] = 0
# Code Lewy body disease based on NPLBOD variable. Do not include amygdala-
# predominant, brainstem-predominant, or olfactory-only cases.
# See Toledo et al. (2016, Acta Neuropathol) and Irwin et al. (2018, Nat Rev
# Neuro).
aut = aut.assign(LBPath = [0 for i in range(aut.shape[0])])
aut.loc[aut.NPLBOD.isin((2,3)),'LBPath'] = 1
aut.loc[aut.NPPLEWY == 1,'LBPath'] = 1
aut.loc[aut.NPPLEWY == 2,'LBPath'] = 0
aut.loc[aut.NPCLEWY == 1,'LBPath'] = 0
aut.loc[aut.ADPath == 1 & (aut.NPPLEWY != 1), 'LBPath'] = 0
aut.loc[aut.TauPath == 1 & (aut.NPPLEWY != 1),'LBPath'] = 0
# Code TDP-43 pathology based on NPFTDTDP and NPALSMND, excluding FUS and SOD1
# cases.
aut = aut.assign(TDPPath = [0 for i in range(aut.shape[0])])
aut.loc[aut.NPFTD == 1,'TDPPath'] = 1
aut.loc[aut.NPFTDTDP == 1,'TDPPath'] = 1
aut.loc[aut.NPALSMND == 1,'TDPPath'] = 1
aut.loc[aut.ADPath == 1, 'TDPPath'] = 0
aut.loc[aut.LBPath == 1, 'TDPPath'] = 0
aut.loc[aut.TauPath == 1, 'TDPPath'] = 0
# Code vascular disease based on relevant derived variables:
aut = aut.assign(VPath = [0 for i in range(aut.shape[0])])
aut.loc[aut.NPINF == 1,'VPath'] = 1
aut.loc[aut.NACCMICR == 1,'VPath'] = 1
aut.loc[aut.NACCHEM == 1,'VPath'] = 1
aut.loc[aut.NPPATH == 1,'VPath'] = 1
aut.loc[aut.NPPVASC == 1,'VPath'] = 1
aut.loc[aut.NPPVASC == 2,'VPath'] = 0
aut.loc[aut.NPCVASC == 1,'VPath'] = 0
aut.loc[aut.ADPath == 1 & (aut.NPPVASC != 1), 'VPath'] = 0
aut.loc[aut.LBPath == 1 & (aut.NPPVASC != 1), 'VPath'] = 0
aut.loc[aut.NPPFTLD == 1 & (aut.NPPVASC != 1),'VPath'] = 0
aut.loc[aut.TDPPath == 1 & (aut.NPPVASC != 1), 'VPath'] = 0
aut.loc[aut.TauPath == 1 & (aut.NPPVASC != 1), 'VPath'] = 0
aut = aut.assign(Class = aut.ADPath)
aut.loc[aut.TauPath == 1,'Class'] = 2
aut.loc[aut.TDPPath == 1,'Class'] = 3
aut.loc[aut.LBPath == 1,'Class'] = 4
aut.loc[aut.VPath == 1,'Class'] = 5
aut = aut.loc[aut.Class != 0]
aut.index = list(range(aut.shape[0]))
## Predictor variable preparation: one-hot-encoding, date/age/interval operations,
# consolidating redundant variables, consolidating free-text variables.
aut = aut.assign(DOB = aut.BIRTHYR)
aut = aut.assign(DOD = aut.NACCYOD)
aut = aut.assign(VISITDATE = aut.VISITYR)
for i in range(aut.shape[0]):
aut.loc[i,'DOB'] = datetime.datetime.strptime('-'.join([str(aut.BIRTHYR.loc[i]),str(aut.BIRTHMO.loc[i]),'01']),'%Y-%m-%d')
aut.loc[i,'DOD'] = datetime.datetime.strptime('-'.join([str(aut.NACCYOD.loc[i]),str(aut.NACCMOD.loc[i]),'01']),'%Y-%m-%d')
aut.loc[i,'VISITDATE'] = datetime.datetime.strptime('-'.join([str(aut.VISITYR.loc[i]),str(aut.VISITMO.loc[i]),str(aut.VISITDAY.loc[i])]),'%Y-%m-%d')
# Some time/interval variables
aut = aut.assign(SinceQUITSMOK = aut.NACCAGE - aut.QUITSMOK) # Years since quitting smoking
aut = aut.assign(AgeStroke = aut.NACCSTYR - aut.BIRTHYR)
aut = aut.assign(AgeTIA = aut.NACCTIYR - aut.BIRTHYR)
aut = aut.assign(AgePD = aut.PDYR - aut.BIRTHYR)
aut = aut.assign(AgePDOTHR = aut.PDOTHRYR - aut.BIRTHYR)
aut = aut.assign(AgeTBI = aut.TBIYEAR - aut.BIRTHYR)
aut = aut.assign(Duration = aut.NACCAGE - aut.DECAGE)
# Hispanic origin
aut.HISPORX = aut.HISPORX.str.lower()
aut.loc[aut.HISPORX == 'spanish','HISPORX'] = 'spain'
# Race. RACESECX and RACETERX have too few values to be useful.
aut.RACEX = aut.RACEX.str.lower().str.replace(' ','').str.replace('-','')
aut.loc[aut.RACEX.isin(['hispanic','puerto rican']),'RACEX'] = 'latino'
aut.loc[aut.RACEX.isin(['guam - chamorro']),'RACEX'] = 'chamorro'
aut.loc[aut.RACEX.isin(['multi racial']),'RACEX'] = 'multiracial'
# Other language. But actually, let's just drop this and code as English/non-English.
#aut.PRIMLANX = aut.PRIMLANX.str.lower().str.replace(' ','').str.replace('-','')
# Drug list. First get a list of all the unique drug names, then code as dummy variables.
# Update as of 04/01/2020: drugs alone are going to be a huge amount of work.
# For now, just rely on the NACC derived variables for diabetes meds, cardiac drugs, etc.
drugcols = ['DRUG' + str(i) for i in range(1,41)]
drugs = aut[drugcols].stack()
# Several varieties of insulin--important to distinguish?
# drop "*not-codable"
# drop "diphtheria/hepb/pertussis,acel/polio/tetanus"
drugs = drugs.unique()
drugs = [eachdrug.lower() for eachdrug in drugs.tolist()]
drugs = pd.Series(drugs)
drug_corrections = [("multivitamin with minerals","multivitamin"),
("multivitamin, prenatal","multivitamin"),
("omega 3-6-9","omega369"),
("omega-3","omega3"),
("vitamin-d","vitamin d"),
("acetyl-l-carnitine","acetyl l carnitine"),
("levodopa","levadopa"),
("pro-stat","prostat"),
("alpha-d-galactosidase","alpha d galactosidase"),
("indium pentetate in-111","indium pentetate in111"),
("fludeoxyglucose f-18","fludeoxyglucose f18"),
("calcium with vitamins d and k", "calcium-vitamin d-vitamin k"),
("aloe vera topical", "aloe vera"),
("ammonium lactate topical", "ammonium lactate")]
for i in range(len(drug_corrections)):
oldval = drug_corrections[i][0]
newval = drug_corrections[i][1]
drugs = drugs.str.replace(pat = oldval, repl = newval)
drugs = drugs.loc[drugs != "*not codable*"]
drugs = drugs.loc[drugs != "diphtheria/hepb/pertussis,acel/polio/tetanus"]
drugs = np.unique([ss for eachdrug in drugs for ss in eachdrug.split('-')])
drugs = np.unique([ss for eachdrug in drugs for ss in eachdrug.split('/')])
drugs.sort()
## Combining redundant variables. Often this reflects a change in form or
# variable name between UDS version 2 & 3.
aut.loc[(aut.CVPACE == -4) & (aut.CVPACDEF == 0),'CVPACE'] = 0
aut.loc[(aut.CVPACE == -4) & (aut.CVPACDEF == 1),'CVPACE'] = 1
xvar.loc[xvar.Variable == 'CVPACDEF','Keep'] = False
# Combine TBIBRIEF and TRAUMBRF.
aut.loc[(aut.TBIBRIEF == -4) & (aut.TRAUMBRF.isin([0])),'TBIBRIEF'] = 0
aut.loc[(aut.TBIBRIEF == -4) & (aut.TRAUMBRF.isin([1,2])),'TBIBRIEF'] = 1
xvar.loc[xvar.Variable == 'TRAUMBRF','Keep'] = False
# More data cleaning
aut.ABRUPT = aut.ABRUPT.replace(to_replace = 2, value = 1)
aut.FOCLSYM = aut.FOCLSYM.replace(to_replace = 2, value = 1)
aut.FOCLSIGN = aut.FOCLSIGN.replace(to_replace = 2, value = 1)
# Convert language to a binary variable (English/non-English)
aut = aut.assign(English = 0)
aut.loc[aut.PRIMLANG == 1,'English'] = 1
xvar.loc[xvar.Variable == 'PRIMLANG','Keep'] = False
# Some dummy coding
vv = xvar.Variable.loc[(xvar.Keep) & (xvar.Comments == "Dummy coding for (95,96,97,98)")]
for v in vv:
aut[v + '_couldnt'] = 0
aut.loc[aut[v].isin([95,96,97,98]),v + '_couldnt'] = 1
vv = xvar.Variable.loc[xvar.Comments == "Dummy coding for (995,996,997,998)"]
for v in vv:
aut[v + '_couldnt'] = 0
aut.loc[aut[v].isin([995,996,997,998]),v + '_couldnt'] = 1
# Drop all columns where xvar.Keep == False.
aut2 = aut
xvar.loc[xvar.Variable == 'NACCID','Keep'] = True
xvar.loc[xvar.Variable == 'NACCID','Type'] = "ID"
xvar.loc[xvar.Variable == 'VISITDATE','Keep'] = True
xvar.loc[xvar.Variable == 'VISITDATE','Type'] = "ID"
aut = aut.drop(columns = xvar.Variable[~xvar.Keep])
# Fill with NA values
xvar = xvar.loc[xvar.Keep]
xvar.index = range(xvar.shape[0])
for i in range(xvar.shape[0]):
if not xvar.NaNValues.isna()[i]:
v = xvar.Variable[i]
badval = eval(xvar.NaNValues[i])
#print(v,badval)
if isinstance(badval,int):
badval = [badval]
aut[v].mask(aut[v].isin(badval),inplace = True)
# Get rid of variables with very few meaningful observations.
valcounts = aut.describe().iloc[0]
aut = aut.drop(columns = valcounts.loc[valcounts < 100].index)
#aut = aut[valcounts.loc[valcounts >= 100].index]
# Find correlated variables and drop.
ac = aut.corr()
acs = ac.unstack(level = 0)
acs = acs.loc[abs(acs)>0.8]
acsind = list(acs.index)
diagnames = [ind for ind in acsind if ind[0] == ind[1]]
acs = acs.drop(labels=diagnames)
acs = pd.DataFrame(acs)
acs.columns = ['r']
acs['v1'] = acs.index
acs[['v1','v2']] = pd.DataFrame(acs['v1'].tolist(),index = acs.index)
y = aut.Class
X = aut.drop(columns = npvar.Variable.loc[npvar.Variable.isin(aut.columns)])
X = X.drop(columns = ['Class','ADPath','TauPath','TDPPath','LBPath','VPath'])
xd = X.describe().iloc[0]
# Impute numeric variables with the mean.
from sklearn.impute import SimpleImputer
numvar = X.columns.intersection(xvar.Variable.loc[xvar.Type == "Numeric"])
imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
imp_mean.fit(X[numvar])
Xnumimp = imp_mean.transform(X[numvar])
Xnumimp = pd.DataFrame(Xnumimp)
Xnumimp.columns = X[numvar].columns
# Impute ordinal variables with the median.
ordvar = X.columns.intersection(xvar.Variable.loc[xvar.Type == "Ordinal"])
imp_med = SimpleImputer(missing_values=np.nan, strategy='median')
imp_med.fit(X[ordvar])
Xordimp = imp_med.transform(X[ordvar])
Xordimp = pd.DataFrame(Xordimp)
Xordimp.columns = X[ordvar].columns
# Impute boolean variables with zero.
boolvar = X.columns.intersection(xvar.Variable.loc[xvar.Type == "Boolean"])
boolenc = SimpleImputer(missing_values = np.nan, strategy = 'constant',
fill_value = 0)
boolenc.fit(X[boolvar])
Xbool = boolenc.transform(X[boolvar])
Xbool = | pd.DataFrame(Xbool) | pandas.DataFrame |
import hvplot.pandas
import pandas as pd
import panel as pn
def _get_chart_data() -> pd.DataFrame:
"""## Chart Data
Returns:
pd.DataFrame -- A DataFrame with dummy data and columns=["Day", "Orders"]
"""
chart_data = {
"Day": ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday",],
"Orders": [15539, 21345, 18483, 24003, 23489, 24092, 12034],
}
return | pd.DataFrame(chart_data) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 11 20:21:34 2020
@author: nickcostanzino
"""
def NN_structure(layers, perceptrons):
A = list()
for n in range(layers):
A.append(perceptrons)
return tuple(A)
def NN_structures(layers, perceptrons):
A = list()
for i in range(1, layers):
for j in range(1, perceptrons):
A.append(NN_structure(i, j))
A = array(A)
A = list(A)
return A
def MSE(prediction, true):
from sklearn.metrics import mean_squared_error
mse = mean_squared_error(prediction, true)
return mse
def process_simulator(f, sigma_X, sigma_e, N):
import pandas as pd
import numpy as np
from scipy.optimize import fsolve
from sklearn.linear_model import LinearRegression
from sklearn.neural_network import MLPRegressor
f = 'np.' + f
e = np.random.normal(0,sigma_e,N)
X = np.random.normal(0,sigma_X,N)
Y = eval(f) + e
df = pd.DataFrame()
df['X'] = X
df['Y'] = Y
df['e'] = e
return df
def performance_analyzer(func, sigma_X, sigma_e, N, number_of_partitions, number_of_simulations, output_folder):
import pandas as pd
import numpy as np
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV, TimeSeriesSplit
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.neural_network import MLPRegressor
COLUMNS = ['training_data_points', 'testing_data_points', 'LR_intercept', 'LR_slope', 'NN_layers', 'NN_perceptrons',
'NN_activation', 'NN_alpha', 'LR-In-Sample-R2', 'NN-In-Sample-R2', 'Best-Possible-In-Sample-R2',
'LR-Out-Sample-R2', 'NN-Out-Sample-R2', 'Best-Possible-Out-Sample-R2']
full_results = pd.DataFrame(columns = COLUMNS)
for k in range(number_of_simulations):
S = process_simulator(func, sigma_X, sigma_e, N)
X = | pd.DataFrame(S.X) | pandas.DataFrame |
'''
Created on Sep 2, 2016
@author: Gully
'''
from __future__ import print_function, division
import argparse
import argparse_config
import codecs
import os
import numpy as np
import pandas as pd
import warnings
from sets import Set
import re
from sets import Set
import re
from bokeh.plotting import figure, show, save, output_notebook, output_file
from bokeh.models import ColumnDataSource, Range1d
#
# This function checks to see if there is a boundary condition between clause 1 and clause 2
# Returns a tuple: (True / False, Explanation)
#
def checkForStartBoundary(clause1, clause2, expt_codes, tsv, c_s_lookup, s_c_lookup):
row1 = tsv.loc[clause1]
row2 = tsv.loc[clause2]
# both clauses are in the same sentence => false
if( row1['SentenceId'] == row2['SentenceId'] ):
return (False,"Same sentence")
# clause 1 is a title paragraph => true
elif( "header" in row1['Codes'] ):
return (True, "?/header")
#
# clause 1 is in a sentence where
# (A) there are hypotheses/problems/facts
# (B) there are results/implications with exLinks present
# clause 2 is in a sentence where
# (A) there are goals/methods
# (B) there are results/implications with no exLinks
#
sentence1 = c_s_lookup[clause1]
sentence2 = c_s_lookup[clause2]
go_condition_2 = False
for cs2 in s_c_lookup[sentence2]:
disc2 = tsv.loc[cs2]['Discourse Type']
inExHead2 = tsv.loc[cs2]['Codes']
if( (disc2 == 'result' or disc2 == 'implication')
and "exLink" not in inExHead2):
go_condition_2 = True
elif( disc2 == 'goal' or disc2 == 'method'):
go_condition_2 = True
if( go_condition_2 ) :
for cs1 in s_c_lookup[sentence1]:
disc1 = tsv.loc[cs1]['Discourse Type']
inExHead1 = tsv.loc[cs1]['Codes']
if(disc1 == 'hypothesis' or disc1 == 'problem' or disc1 == 'fact'):
#print(tsv.loc[cs2])
return (True, "A:"+disc1+inExHead1+"/"+disc2+inExHead2)
elif((disc1 == 'result' or disc1 != 'implication') and "exLink" in inExHead1):
#print(tsv.loc[cs2])
return (True, "B:"+disc1+inExHead1+"/"+disc2+inExHead2)
es1 = row1['ExperimentValues']
if( es1 == es1 and len(set(expt_codes).intersection(es1.split('|'))) == 0 ):
return (True, "|".join(expt_codes) + "!=" + es1 + "(1)")
es2 = row2['ExperimentValues']
if( es2 == es2 and len(set(expt_codes).intersection(es2.split('|'))) == 0 ):
return (True, "|".join(expt_codes) + "!=" + es2 + "(2)")
return (False,"end")
#
# This function checks to see if there is a boundary condition between clause 1 and clause 2
# Returns a tuple: (True / False, Explanation)
#
def checkForEndBoundary(clause1, clause2, expt_codes, tsv, c_s_lookup, s_c_lookup):
row1 = tsv.loc[clause1]
row2 = tsv.loc[clause2]
# both clauses are in the same sentence => false
if( row1['SentenceId'] == row2['SentenceId'] ):
return (False,"Same sentence")
# clause 2 is a title paragraph => true
elif( "header" in row2['Codes'] ):
return (True, "?/header")
#
# clause 1 is in a sentence where there are results/implications with no exLinks and
# clause 2 is in a sentence where
# (A) there are goals/methods/hypotheses/problems/facts
# (B) there are results/implications with exLinks present
#
sentence1 = c_s_lookup[clause1]
sentence2 = c_s_lookup[clause2]
go_condition_1 = False
for cs1 in s_c_lookup[sentence1]:
disc1 = tsv.loc[cs1]['Discourse Type']
inExHead1 = tsv.loc[cs1]['Codes']
if( (disc1 == 'result' or disc1 == 'implication')
and "exLink" not in inExHead1):
go_condition_1 = True
if( go_condition_1 ) :
for cs2 in s_c_lookup[sentence2]:
disc2 = tsv.loc[cs2]['Discourse Type']
inExHead2 = tsv.loc[cs2]['Codes']
if(disc1 != 'result' and disc1 != 'implication'):
#print(tsv.loc[cs2])
return (True, "C"+disc1+inExHead1+"/"+disc2+inExHead2)
elif((disc1 == 'result' or disc1 != 'implication') and "exLink" in inExHead2):
#print(tsv.loc[cs2])
return (True, "D"+disc1+inExHead1+"/"+disc2+inExHead2)
es1 = row1['ExperimentValues']
if( es1 == es1 and len(set(expt_codes).intersection(es1.split('|'))) == 0 ):
return (True, "|".join(expt_codes) + "!=" + es1 + "(1)")
es2 = row2['ExperimentValues']
if( es2 == es2 and len(set(expt_codes).intersection(es2.split('|'))) == 0 ):
return (True, "|".join(expt_codes) + "!=" + es2 + "(2)")
return (False,"end")
def add_spans(tsv):
c_s_lookup = {}
c_p_lookup = {}
s_c_lookup = {}
p_c_lookup = {}
fig_ref_set = Set()
expt_code_set = Set()
clause_max = -1
clause_min = 1000
for i,row in tsv.iterrows():
es = row['ExperimentValues']
dt = row['Discourse Type']
inExHead = row['Codes']
sid = row['SentenceId']
paragraph = row['Paragraph']
heading = str(row['Headings'])
floatingBox = row['FloatingBox?']
#print("i: " + str(i))
#print("refs: " + str(es))
#print("~~~~~~~~~~~~~~~~~~")
s = int(sid[1:])
if(paragraph!=paragraph):
continue
p = 0
if( paragraph == '-'):
p = 0
elif( paragraph[0:1] == 'p'):
p = int(paragraph[1:])
elif( paragraph[0:5] == 'title'):
p = int(paragraph[5:])
c_s_lookup[i] = s
c_p_lookup[i] = p
if( s_c_lookup.get(s) is None ):
s_c_lookup[s] = [i]
else:
s_c_lookup.get(s).append(i)
if( p_c_lookup.get(p) is None ):
p_c_lookup[p] = [i]
else:
p_c_lookup.get(p).append(i)
if( heading != heading ):
heading = ""
if( re.match('^Result', heading) is None or floatingBox):
continue
if( i > clause_max):
clause_max = i
if( i < clause_min):
clause_min = i
if(es!=es):
continue
try:
codes = str(es).split('|')
except AttributeError:
print(str(es) + " is not a string. Skipping...")
continue
fig_ref_set.add(i)
for c in codes:
expt_code_set.add(c)
fig_refs = sorted(fig_ref_set)
fig_spans = {}
for i_fig in fig_refs:
row = tsv.loc[i_fig]
es = row['ExperimentValues']
dt = row['Discourse Type']
inExHead = row['Codes']
sid = row['SentenceId']
paragraph = row['Paragraph']
heading = str(row['Headings'])
floatingBox = row['FloatingBox?']
try:
expt_codes = str(es).split('|')
except AttributeError:
print(str(es) + " is not a string. Skipping...")
continue
# search backwards for a boundary condition between sentences
c1 = i_fig - 1
c2 = i_fig
while( checkForStartBoundary(c1, c2, expt_codes, tsv, c_s_lookup, s_c_lookup)[0] is False ):
c1 = c1-1
c2 = c2-1
expt_start = c2
# search forwards for a boundary condition between sentences
c1 = i_fig
c2 = i_fig + 1
while( checkForEndBoundary(c1, c2, expt_codes, tsv, c_s_lookup, s_c_lookup)[0] is False ):
c1 = c1+1
c2 = c2+1
expt_end = c1
for c in range(expt_start, expt_end+1):
if( fig_spans.get(c) is None ):
fig_spans[c] = set(expt_codes)
else:
fig_spans.get(c).update(set(expt_codes))
#print("Figure Location: " + str(i_fig) )
#print("Experiment Label: " + es )
#print("Expt Start: " + str(expt_start) )
#print("Expt Start Expl: " + str(checkForStartBoundary(expt_start-1, expt_start, expt_codes, tsv, c_s_lookup, s_c_lookup)) )
#print("Expt End: " + str(expt_end) )
#print("Expt End Expl: " + str(checkForEndBoundary(expt_end, expt_end+1, expt_codes, tsv, c_s_lookup, s_c_lookup)) )
#print( "~~~~~~~~~~~~~~~~~~~~" )
for i in fig_spans:
fig_spans[i] = "|".join(fig_spans.get(i))
#print(fig_spans[i])
tsv['fig_spans'] = pd.Series(fig_spans, index=fig_spans)
return tsv
def prepare_and_draw_gannt(filename, title, tsv):
gantt_rows = []
gantt_rows2 = []
gantt_rows3 = []
dtypes = ["fact","hypothesis","problem","goal" ,"method","result","implication"]
colors = ["Snow" ,"Snow" ,"Snow" ,"LightGray","Gray" ,"LightBlue" ,"LightGreen"]
colors_s = pd.Series(colors, index=dtypes)
all_codes = Set()
clause_max = -1
clause_min = 1000
for i,row in tsv.iterrows():
fig_refs = row['ExperimentValues']
fig_spans = row['fig_spans']
dt = row['Discourse Type']
inExHead = row['Codes']
sid = row['SentenceId']
paragraph = row['Paragraph']
heading = str(row['Headings'])
floatingBox = row['FloatingBox?']
#print("i: " + str(i))
#print("refs: " + str(fig_refs))
#print("~~~~~~~~~~~~~~~~~~")
if( heading != heading ):
heading = ""
#if(not floatingBox):
# clause_max = i
if( re.match('^Result', heading) is None or floatingBox):
continue
if( i > clause_max):
clause_max = i
if( i < clause_min):
clause_min = i
if(fig_spans!=fig_spans):
continue
if(fig_refs!=fig_refs):
fig_refs = ""
fig_span_list = fig_spans.split('|')
fig_ref_list = fig_refs.split('|')
#print("i: " + str(i))
#print("spans: " + fig_spans)
#print("refs: " + fig_refs)
#print("~~~~~~~~~~~~~~~~~~")
for fs in fig_span_list:
all_codes.add(fs)
if( fs in fig_ref_list ):
gantt_rows2.append([fs, i])
if('exLink' in inExHead):
gantt_rows3.append([fs, i])
gantt_rows.append([fs, i, dt, heading])
codes_s = pd.Series(range(len(all_codes)), index=sorted(list(all_codes)))
gantt_df = pd.DataFrame.from_records(gantt_rows, columns=['fig_span', 'clause_id','discourse_type', 'heading'])
gantt_df = gantt_df.sort(columns=['clause_id'], ascending=True)
#print(codes_s.loc[gantt_df['expt'].tolist()].tolist())
gantt_df['fig_span_id'] = codes_s.loc[gantt_df['fig_span'].tolist()].tolist()
gantt_df['color'] = colors_s.loc[gantt_df['discourse_type'].tolist()].tolist()
gantt_df2 = | pd.DataFrame.from_records(gantt_rows2, columns=['fig_ref','clause_id']) | pandas.DataFrame.from_records |
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
from pandas.core.frame import DataFrame
from torch.utils.data import Dataset, DataLoader
import torch
import pickle
import datetime
class data_loader(Dataset):
def __init__(self, df_feature, df_label, df_label_reg, t=None):
assert len(df_feature) == len(df_label)
assert len(df_feature) == len(df_label_reg)
# df_feature = df_feature.reshape(df_feature.shape[0], df_feature.shape[1] // 6, df_feature.shape[2] * 6)
self.df_feature=df_feature
self.df_label=df_label
self.df_label_reg = df_label_reg
self.T=t
self.df_feature=torch.tensor(
self.df_feature, dtype=torch.float32)
self.df_label=torch.tensor(
self.df_label, dtype=torch.float32)
self.df_label_reg=torch.tensor(
self.df_label_reg, dtype=torch.float32)
def __getitem__(self, index):
sample, target, label_reg =self.df_feature[index], self.df_label[index], self.df_label_reg[index]
if self.T:
return self.T(sample), target
else:
return sample, target, label_reg
def __len__(self):
return len(self.df_feature)
def create_dataset(df, station, start_date, end_date, mean=None, std=None):
data=df[station]
feat, label, label_reg =data[0], data[1], data[2]
referece_start_time=datetime.datetime(2013, 3, 1, 0, 0)
referece_end_time=datetime.datetime(2017, 2, 28, 0, 0)
assert (pd.to_datetime(start_date) - referece_start_time).days >= 0
assert (pd.to_datetime(end_date) - referece_end_time).days <= 0
assert (pd.to_datetime(end_date) - pd.to_datetime(start_date)).days >= 0
index_start=(pd.to_datetime(start_date) - referece_start_time).days
index_end=(pd.to_datetime(end_date) - referece_start_time).days
feat=feat[index_start: index_end + 1]
label=label[index_start: index_end + 1]
label_reg=label_reg[index_start: index_end + 1]
# ori_shape_1, ori_shape_2=feat.shape[1], feat.shape[2]
# feat=feat.reshape(-1, feat.shape[2])
# feat=(feat - mean) / std
# feat=feat.reshape(-1, ori_shape_1, ori_shape_2)
return data_loader(feat, label, label_reg)
def create_dataset_shallow(df, station, start_date, end_date, mean=None, std=None):
data=df[station]
feat, label, label_reg =data[0], data[1], data[2]
referece_start_time=datetime.datetime(2013, 3, 1, 0, 0)
referece_end_time=datetime.datetime(2017, 2, 28, 0, 0)
assert (pd.to_datetime(start_date) - referece_start_time).days >= 0
assert (pd.to_datetime(end_date) - referece_end_time).days <= 0
assert (pd.to_datetime(end_date) - pd.to_datetime(start_date)).days >= 0
index_start=(pd.to_datetime(start_date) - referece_start_time).days
index_end=(pd.to_datetime(end_date) - referece_start_time).days
feat=feat[index_start: index_end + 1]
label=label[index_start: index_end + 1]
label_reg=label_reg[index_start: index_end + 1]
# ori_shape_1, ori_shape_2=feat.shape[1], feat.shape[2]
# feat=feat.reshape(-1, feat.shape[2])
# feat=(feat - mean) / std
# feat=feat.reshape(-1, ori_shape_1, ori_shape_2)
return feat, label_reg
def get_dataset_statistic(df, station, start_date, end_date):
data=df[station]
feat, label =data[0], data[1]
referece_start_time=datetime.datetime(2013, 3, 1, 0, 0)
referece_end_time=datetime.datetime(2017, 2, 28, 0, 0)
assert ( | pd.to_datetime(start_date) | pandas.to_datetime |
#!/usr/bin/env python
"""Calculate regionprops of segments.
"""
import sys
import argparse
# conda install cython
# conda install pytest
# conda install pandas
# pip install ~/workspace/scikit-image/ # scikit-image==0.16.dev0
import os
import re
import glob
import pickle
import numpy as np
import pandas as pd
from scipy import ndimage as ndi
from scipy.spatial import distance
from stapl3d import Image, LabelImage, wmeMPI
from stapl3d.channels import get_bias_field_block
from skimage.measure import regionprops, regionprops_table
from skimage.morphology import binary_dilation
from skimage.segmentation import find_boundaries
from stapl3d.segmentation.segment import extract_segments
def main(argv):
"""Calculate regionprops of segments."""
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'--seg_paths',
nargs='*',
help='paths to label volumes (xyz)',
)
parser.add_argument(
'--seg_names',
nargs='*',
help='names for (sub)segmentations',
)
parser.add_argument(
'--data_path',
nargs='*',
help='paths to data channels',
)
parser.add_argument(
'--data_names',
nargs='*',
help='names for channels',
)
parser.add_argument(
'--aux_data_path',
help='path to auxilliary data file (zyxc)',
)
parser.add_argument(
'--downsample_factors',
nargs='*',
type=int,
default=[],
help='the downsample factors applied to the aux_data_path image'
)
parser.add_argument(
'--csv_path',
default='',
help='path to output csv file',
)
parser.add_argument(
'-s', '--blocksize',
required=True,
nargs='*',
type=int,
default=[],
help='size of the datablock'
)
parser.add_argument(
'-m', '--blockmargin',
nargs='*',
type=int,
default=[],
help='the datablock overlap used'
)
parser.add_argument(
'--blockrange',
nargs=2,
type=int,
default=[],
help='a range of blocks to process'
)
parser.add_argument(
'--channels',
nargs='*',
type=int,
default=[],
help='a list of channel indices to extract intensity features for'
)
parser.add_argument(
'-f', '--filter_borderlabels',
action='store_true',
help='save intermediate results'
)
parser.add_argument(
'--min_labelsize',
type=int,
default=0,
help='minimum labelsize in voxels',
)
parser.add_argument(
'--split_features',
action='store_true',
help='save intermediate results'
)
parser.add_argument(
'--fset_morph',
default=['label'],
help='morphology feature set',
)
parser.add_argument(
'--fset_intens',
default=['mean_intensity'],
help='intensity feature set',
)
parser.add_argument(
'--fset_addit',
default=['com_z', 'com_y', 'com_x'],
help='auxilliary feature set',
)
args = parser.parse_args()
export_regionprops(
args.seg_paths,
args.seg_names,
args.data_path,
args.data_names,
args.aux_data_path,
args.downsample_factors,
args.csv_path,
args.blocksize,
args.blockmargin,
args.blockrange,
args.channels,
args.filter_borderlabels,
args.min_labelsize,
args.split_features,
args.fset_morph,
args.fset_intens,
args.fset_addit,
)
def export_regionprops(
seg_paths,
seg_names=['full', 'memb', 'nucl'],
data_paths=[],
data_names=[],
aux_data_path=[],
downsample_factors=[1, 1, 1],
outputstem='',
blocksize=[],
blockmargin=[],
blockrange=[],
channels=[],
filter_borderlabels=False,
min_labelsize=0,
split_features=False,
fset_morph=['label'],
fset_intens=['mean_intensity'],
fset_addit=['com_z', 'com_y', 'com_x'],
):
# load the segments: ['full'] or ['full', 'memb', 'nucl']
label_ims = {}
pfs = seg_names[:len(seg_paths)]
for pf, seg_path in zip(pfs, seg_paths):
im = LabelImage(seg_path, permission='r')
im.load(load_data=False)
label_ims[pf] = im
comps = label_ims['full'].split_path()
# prepare parallel processing
mpi_label = wmeMPI(usempi=False)
blocksize = blocksize or label_ims['full'].dims
mpi_label.set_blocks(label_ims['full'], blocksize, blockmargin, blockrange)
mpi_label.scatter_series()
# load the data
data_ims = {}
mpi_data = wmeMPI(usempi=False)
for i, data_path in enumerate(data_paths):
pf = 'im{:02d}'.format(i)
data = Image(data_path, permission='r')
data.load(load_data=False)
ch_idx = data.axlab.index('c')
# FIXME channels for multiple data_paths
chs = channels or [ch for ch in range(data.dims[ch_idx])]
names = [data_names.pop(0) for _ in range(len(chs))]
data_ims[pf] = {'im': data, 'ch': chs, 'names': names}
""" TODO
try:
mpi_data.blocks = [
{'id': split_filename(comps['file'])[0]['postfix'],
'slices': dset_name2slices(comps['file'], axlab=data.axlab, shape=data.dims),
'path': '',},
]
except:
"""
mpi_data.set_blocks(data, blocksize, blockmargin, blockrange)
border_labelset = set([])
# if filter_borderlabels:
# outstem = outputstem or label_ims['full'].split_path()['base']
# outstem += '_dataset'
# border_labelset |= filter_borders(label_ims['full'], outstem)
dfs = []
for i in mpi_label.series:
print('processing block {:03d} with id: {}'.format(i, mpi_label.blocks[i]['id']))
dfs.append(process_block(
mpi_label.blocks[i],
mpi_data.blocks[i],
label_ims,
split_features,
data_ims,
min_labelsize,
channels,
filter_borderlabels,
fset_morph,
fset_intens,
fset_addit,
border_labelset,
outputstem,
aux_data_path,
downsample_factors,
)
)
return dfs
def process_block(
block_label,
block_data,
label_ims,
split_features,
data_ims,
min_labelsize,
channels,
filter_borderlabels=False,
fset_morph=['label'],
fset_intens=['mean_intensity'],
fset_addit=['com_z', 'com_y', 'com_x'],
border_labelset=set([]),
outputstem='',
aux_data_path='',
downsample_factors=[1, 1, 1],
):
morph, intens, add = get_feature_set(fset_morph, fset_intens, fset_addit)
all_regions = {}
for pf, label_im in label_ims.items():
label_im.slices = block_label['slices'][:3]
all_regions[pf] = label_im.slice_dataset().astype('int')
all_data = {}
for dpf, datadict in data_ims.items():
data = datadict['im']
data.slices = block_data['slices']
for ch, name in zip(datadict['ch'], datadict['names']):
data.slices[data.axlab.index('c')] = slice(ch, ch + 1, 1)
ch_data = data.slice_dataset()
all_data[name] = ch_data
outstem = outputstem or label_ims['full'].split_path()['base']
outstem += '_{}'.format(block_label['id'])
if filter_borderlabels:
border_labelset |= filter_borders(label_ims['full'], outstem)
if min_labelsize:
all_regions = filter_size(all_regions, min_labelsize, outstem)
for pf, regions in all_regions.items():
try:
rpt = regionprops_table(regions, properties=morph)
except IndexError:
print('IndexError on MORPH {}: empty labelset'.format(block_label['id']))
df = get_empty_dataframe(morph, add, intens, channels)
except ValueError:
print('ValueError on MORPH {}'.format(block_label['id']))
df = get_empty_dataframe(morph, add, intens, channels)
else:
df = | pd.DataFrame(rpt) | pandas.DataFrame |
#!/usr/bin/env python3
#SBATCH --partition=mcs.default.q
#SBATCH --output=openme.out
# coding: utf-8
# In[1]:
import pandas as pd
import datetime as dt
import numpy as np
import time
from sklearn.feature_extraction.text import CountVectorizer
word_vectorizer = CountVectorizer(ngram_range=(1,2), analyzer='word')
import numpy as np
from nltk import ngrams
import nltk
from nltk.util import ngrams
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures
# In[2]:
df=pd.read_csv('./rawdata/BPI2016_Clicks_Logged_In.csv', sep=';', encoding='latin-1', keep_default_na=False)
# In[3]:
df['time'] = pd.to_datetime(df['TIMESTAMP'])
df['dates'] = df['time'].dt.date
# In[4]:
# pd.set_option("display.max_rows", None, "display.max_columns", None)
# df.head(100)
# In[5]:
qns= | pd.read_csv('./rawdata/BPI2016_Questions.csv', sep=';', encoding='latin-1', keep_default_na=False) | pandas.read_csv |
from flask import Flask,render_template,request,send_file
from flask_sqlalchemy import SQLAlchemy
import os
import pandas as pd
from openpyxl import load_workbook
import sqlalchemy as db
######### function#######################
def transform(df):
#count the number of columns in the data frame
col=len(df.columns)
if col>3:
#Transform a matrix to a vector
df=df.set_index(df.columns[0]).stack().reset_index()
df[['Date','level_1']]=df[['Date','level_1']].astype(str)
df['dtime']=df['Date']+' '+df['level_1']
df['dtime'] = pd.to_datetime(df['dtime'])
df=df.drop(['Date', 'level_1'], axis=1)
df.columns=['KW','dtime']
df=df[['dtime','KW']]
df=df.sort_values(by='dtime',ascending=False)
df.reset_index(inplace=True,drop=True)
#df.index = pd.to_datetime(df[df.columns[[0,1]]].astype(str).apply('-'.join,1))
else:
df.columns =(['dtime','kW'])
df['dtime'] = pd.to_datetime(df['dtime'])
df['dtime'] = pd.to_datetime(df['dtime'])
#find the interval by substracting the second date from the first one
a = df.loc[0, 'dtime']
b = df.loc[1, 'dtime']
c = a - b
minutes = c.total_seconds() / 60
d=int(minutes) #d can be only 15 ,30 or 60
#This function will create new row to the time series anytime when it finds gaps and will fill it with NaN or leave it blank.
#df.drop_duplicates(keep='first') keeps the first value of duplicates
if d==15:
df.drop_duplicates(keep='first',inplace=True)
df= df.set_index('dtime').asfreq('-15T')
elif d==30:
df.drop_duplicates(keep='first',inplace=True)
df= df.set_index('dtime').asfreq('-30T')
elif d==60:
df.drop_duplicates(keep='first',inplace=True)
df= df.set_index('dtime').asfreq('-60T')
else:
None
return df
###########Flask APP######################
app=Flask(__name__)
#app.config['Save_File']='C:\Users\......'
#db=SQLAlchemy(app)
#class FileContents(db.Model):
# id=db.Column(db.Integer,primary_key=True)
# name=db.Column(db.String(300))
# data=db.Column(db.LargeBinary)
@app.route('/')
def index():
return render_template('firstpage.html')
@app.route('/upload',methods=['Get','POST'])
def upload():
file=request.files['inputfile']
xls= | pd.ExcelFile(file) | pandas.ExcelFile |
#! /usr/bin/env python3
import os
import sys
import json
import numpy as np
import pandas as pd
from glob import glob
from enum import Enum
from dateutil import tz
from datetime import datetime, timedelta
map_station = {
1:"Castello, <NAME>", 2:"Hotel Carlton", 3:"Via del Podestà", 4:"Corso di P.Reno / Via Ragno" ,
5:"Piazza Trento Trieste", 6:"Piazza Stazione"
}
if __name__ == '__main__':
import argparse
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument('-sh', '--show', action='store_true')
parser.add_argument('-c', '--cfg', help='config file', required=True)
parser.add_argument('-d', '--data', help='counters data csv', required=True)
parser.add_argument('-tt', '--time_ticks', help='set time spacing between ticks', type=int, default=300)
parser.add_argument('-tl', '--time_labels', help='set time spacing between ticks\' labels', type=int, default=3600)
args = parser.parse_args()
filein = args.data
base = filein[:filein.rfind('/')]
base_save = os.path.join(os.environ['WORKSPACE'], 'slides', 'work_lavoro', 'ferrara', 'data', 'compare_presence')
if not os.path.exists(base_save): os.mkdir(base_save)
fname = filein[filein.find('/')+1:filein.rfind('.')].split('_')
fine_freq = fname[-2]
fine_freq_s = int(fine_freq[:-1])
interp = fname[-1]
dt_ticks = args.time_ticks
if dt_ticks > fine_freq_s:
tus = dt_ticks // fine_freq_s
else:
tus = 1
dt_ticks = fine_freq_s
dt_lbls = args.time_labels
if dt_lbls > dt_ticks:
lus = dt_lbls // dt_ticks
else:
lus = 1
dt_lbls = dt_ticks
print(f'Data sampling {fine_freq_s}. Ticks sampling {dt_ticks} u {tus}. Labels sampling {dt_lbls} u {lus}')
with open(args.cfg) as f:
config = json.load(f)
base_start_date = config['base_start_date']
base_stop_date = config['base_stop_date']
first_start_date = config['first_start_date']
first_stop_date = config['first_stop_date']
second_start_date = config['second_start_date']
second_stop_date = config['second_stop_date']
build_df = config['build']
conf_name = args.cfg
conf_name = conf_name[:conf_name.rfind('.')]
b_start_date = f'{conf_name}_{base_start_date}_{base_stop_date}_wifi'
b_first_date = f'{conf_name}_{first_start_date}_{first_stop_date}_wifi'
b_second_date = f'{conf_name}_{second_start_date}_{second_stop_date}_wifi'
file_base_date = f'{b_start_date}/counters_{fine_freq}_lin.csv'
file_first_date = f'{b_first_date}/counters_{fine_freq}_lin.csv'
file_second_date = f'{b_second_date}/counters_{fine_freq}_lin.csv'
def box_centered_kernel(tot_len, box_len):
pad_len = tot_len - box_len
kern = np.concatenate([
np.zeros((pad_len // 2)),
np.ones((box_len)) / box_len,
np.zeros((pad_len - pad_len // 2))
])
return kern
def building(filein):
base = filein[:filein.rfind('/')]
stats = | pd.read_csv(filein, sep=';', parse_dates=['time'], index_col='time') | pandas.read_csv |
# -*- coding: utf-8 -*-
from collections import OrderedDict
from datetime import date, datetime, timedelta
import numpy as np
import pytest
from pandas.compat import product, range
import pandas as pd
from pandas import (
Categorical, DataFrame, Grouper, Index, MultiIndex, Series, concat,
date_range)
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.reshape.pivot import crosstab, pivot_table
import pandas.util.testing as tm
@pytest.fixture(params=[True, False])
def dropna(request):
return request.param
class TestPivotTable(object):
def setup_method(self, method):
self.data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_pivot_table(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, values='D',
index=index, columns=columns)
table2 = self.data.pivot_table(
values='D', index=index, columns=columns)
tm.assert_frame_equal(table, table2)
# this works
pivot_table(self.data, values='D', index=index)
if len(index) > 1:
assert table.index.names == tuple(index)
else:
assert table.index.name == index[0]
if len(columns) > 1:
assert table.columns.names == columns
else:
assert table.columns.name == columns[0]
expected = self.data.groupby(
index + [columns])['D'].agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_table_nocols(self):
df = DataFrame({'rows': ['a', 'b', 'c'],
'cols': ['x', 'y', 'z'],
'values': [1, 2, 3]})
rs = df.pivot_table(columns='cols', aggfunc=np.sum)
xp = df.pivot_table(index='cols', aggfunc=np.sum).T
tm.assert_frame_equal(rs, xp)
rs = df.pivot_table(columns='cols', aggfunc={'values': 'mean'})
xp = df.pivot_table(index='cols', aggfunc={'values': 'mean'}).T
tm.assert_frame_equal(rs, xp)
def test_pivot_table_dropna(self):
df = DataFrame({'amount': {0: 60000, 1: 100000, 2: 50000, 3: 30000},
'customer': {0: 'A', 1: 'A', 2: 'B', 3: 'C'},
'month': {0: 201307, 1: 201309, 2: 201308, 3: 201310},
'product': {0: 'a', 1: 'b', 2: 'c', 3: 'd'},
'quantity': {0: 2000000, 1: 500000,
2: 1000000, 3: 1000000}})
pv_col = df.pivot_table('quantity', 'month', [
'customer', 'product'], dropna=False)
pv_ind = df.pivot_table(
'quantity', ['customer', 'product'], 'month', dropna=False)
m = MultiIndex.from_tuples([('A', 'a'), ('A', 'b'), ('A', 'c'),
('A', 'd'), ('B', 'a'), ('B', 'b'),
('B', 'c'), ('B', 'd'), ('C', 'a'),
('C', 'b'), ('C', 'c'), ('C', 'd')],
names=['customer', 'product'])
tm.assert_index_equal(pv_col.columns, m)
tm.assert_index_equal(pv_ind.index, m)
def test_pivot_table_categorical(self):
cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'],
dropna=True)
exp_index = pd.MultiIndex.from_arrays(
[cat1, cat2],
names=['A', 'B'])
expected = DataFrame(
{'values': [1, 2, 3, 4]},
index=exp_index)
tm.assert_frame_equal(result, expected)
def test_pivot_table_dropna_categoricals(self, dropna):
# GH 15193
categories = ['a', 'b', 'c', 'd']
df = DataFrame({'A': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'],
'B': [1, 2, 3, 1, 2, 3, 1, 2, 3],
'C': range(0, 9)})
df['A'] = df['A'].astype(CDT(categories, ordered=False))
result = df.pivot_table(index='B', columns='A', values='C',
dropna=dropna)
expected_columns = Series(['a', 'b', 'c'], name='A')
expected_columns = expected_columns.astype(
CDT(categories, ordered=False))
expected_index = Series([1, 2, 3], name='B')
expected = DataFrame([[0, 3, 6],
[1, 4, 7],
[2, 5, 8]],
index=expected_index,
columns=expected_columns,)
if not dropna:
# add back the non observed to compare
expected = expected.reindex(
columns=Categorical(categories)).astype('float')
tm.assert_frame_equal(result, expected)
def test_pivot_with_non_observable_dropna(self, dropna):
# gh-21133
df = pd.DataFrame(
{'A': pd.Categorical([np.nan, 'low', 'high', 'low', 'high'],
categories=['low', 'high'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3]},
index=pd.Index(
pd.Categorical.from_codes([0, 1],
categories=['low', 'high'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
# gh-21378
df = pd.DataFrame(
{'A': pd.Categorical(['left', 'low', 'high', 'low', 'high'],
categories=['low', 'high', 'left'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3, 0]},
index=pd.Index(
pd.Categorical.from_codes([0, 1, 2],
categories=['low', 'high', 'left'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
def test_pass_array(self):
result = self.data.pivot_table(
'D', index=self.data.A, columns=self.data.C)
expected = self.data.pivot_table('D', index='A', columns='C')
tm.assert_frame_equal(result, expected)
def test_pass_function(self):
result = self.data.pivot_table('D', index=lambda x: x // 5,
columns=self.data.C)
expected = self.data.pivot_table('D', index=self.data.index // 5,
columns='C')
tm.assert_frame_equal(result, expected)
def test_pivot_table_multiple(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, index=index, columns=columns)
expected = self.data.groupby(index + [columns]).agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_dtypes(self):
# can convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1, 2, 3, 4], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'int64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.sum)
result = z.get_dtype_counts()
expected = Series(dict(int64=2))
tm.assert_series_equal(result, expected)
# cannot convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1.5, 2.5, 3.5, 4.5], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'float64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.mean)
result = z.get_dtype_counts()
expected = Series(dict(float64=2))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('columns,values',
[('bool1', ['float1', 'float2']),
('bool1', ['float1', 'float2', 'bool1']),
('bool2', ['float1', 'float2', 'bool1'])])
def test_pivot_preserve_dtypes(self, columns, values):
# GH 7142 regression test
v = np.arange(5, dtype=np.float64)
df = DataFrame({'float1': v, 'float2': v + 2.0,
'bool1': v <= 2, 'bool2': v <= 3})
df_res = df.reset_index().pivot_table(
index='index', columns=columns, values=values)
result = dict(df_res.dtypes)
expected = {col: np.dtype('O') if col[0].startswith('b')
else np.dtype('float64') for col in df_res}
assert result == expected
def test_pivot_no_values(self):
# GH 14380
idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-01-02',
'2011-01-01', '2011-01-02'])
df = pd.DataFrame({'A': [1, 2, 3, 4, 5]},
index=idx)
res = df.pivot_table(index=df.index.month, columns=df.index.day)
exp_columns = pd.MultiIndex.from_tuples([('A', 1), ('A', 2)])
exp = pd.DataFrame([[2.5, 4.0], [2.0, np.nan]],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
df = pd.DataFrame({'A': [1, 2, 3, 4, 5],
'dt': pd.date_range('2011-01-01', freq='D',
periods=5)},
index=idx)
res = df.pivot_table(index=df.index.month,
columns=pd.Grouper(key='dt', freq='M'))
exp_columns = pd.MultiIndex.from_tuples([('A',
pd.Timestamp('2011-01-31'))])
exp_columns.names = [None, 'dt']
exp = pd.DataFrame([3.25, 2.0],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
res = df.pivot_table(index=pd.Grouper(freq='A'),
columns=pd.Grouper(key='dt', freq='M'))
exp = pd.DataFrame([3],
index=pd.DatetimeIndex(['2011-12-31']),
columns=exp_columns)
tm.assert_frame_equal(res, exp)
def test_pivot_multi_values(self):
result = pivot_table(self.data, values=['D', 'E'],
index='A', columns=['B', 'C'], fill_value=0)
expected = pivot_table(self.data.drop(['F'], axis=1),
index='A', columns=['B', 'C'], fill_value=0)
tm.assert_frame_equal(result, expected)
def test_pivot_multi_functions(self):
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
# margins not supported??
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func, margins=True)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_index_with_nan(self, method):
# GH 3588
nan = np.nan
df = DataFrame({'a': ['R1', 'R2', nan, 'R4'],
'b': ['C1', 'C2', 'C3', 'C4'],
'c': [10, 15, 17, 20]})
if method:
result = df.pivot('a', 'b', 'c')
else:
result = pd.pivot(df, 'a', 'b', 'c')
expected = DataFrame([[nan, nan, 17, nan], [10, nan, nan, nan],
[nan, 15, nan, nan], [nan, nan, nan, 20]],
index=Index([nan, 'R1', 'R2', 'R4'], name='a'),
columns=Index(['C1', 'C2', 'C3', 'C4'], name='b'))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df.pivot('b', 'a', 'c'), expected.T)
# GH9491
df = DataFrame({'a': pd.date_range('2014-02-01', periods=6, freq='D'),
'c': 100 + np.arange(6)})
df['b'] = df['a'] - pd.Timestamp('2014-02-02')
df.loc[1, 'a'] = df.loc[3, 'a'] = nan
df.loc[1, 'b'] = df.loc[4, 'b'] = nan
if method:
pv = df.pivot('a', 'b', 'c')
else:
pv = pd.pivot(df, 'a', 'b', 'c')
assert pv.notna().values.sum() == len(df)
for _, row in df.iterrows():
assert pv.loc[row['a'], row['b']] == row['c']
if method:
result = df.pivot('b', 'a', 'c')
else:
result = pd.pivot(df, 'b', 'a', 'c')
tm.assert_frame_equal(result, pv.T)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_tz(self, method):
# GH 5878
df = DataFrame({'dt1': [datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0),
datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0)],
'dt2': [datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 2, 9, 0),
datetime(2014, 1, 2, 9, 0)],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo'))
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'] * 2,
name='dt2', tz='Asia/Tokyo')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=exp_col)
if method:
pv = df.pivot(index='dt1', columns='dt2')
else:
pv = pd.pivot(df, index='dt1', columns='dt2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'],
name='dt2',
tz='Asia/Tokyo'))
if method:
pv = df.pivot(index='dt1', columns='dt2', values='data1')
else:
pv = pd.pivot(df, index='dt1', columns='dt2', values='data1')
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_periods(self, method):
df = DataFrame({'p1': [pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D'),
pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D')],
'p2': [pd.Period('2013-01', 'M'),
pd.Period('2013-01', 'M'),
pd.Period('2013-02', 'M'),
pd.Period('2013-02', 'M')],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.PeriodIndex(['2013-01', '2013-02'] * 2,
name='p2', freq='M')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=exp_col)
if method:
pv = df.pivot(index='p1', columns='p2')
else:
pv = pd.pivot(df, index='p1', columns='p2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=pd.PeriodIndex(['2013-01', '2013-02'],
name='p2', freq='M'))
if method:
pv = df.pivot(index='p1', columns='p2', values='data1')
else:
pv = pd.pivot(df, index='p1', columns='p2', values='data1')
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize('values', [
['baz', 'zoo'], np.array(['baz', 'zoo']),
pd.Series(['baz', 'zoo']), pd.Index(['baz', 'zoo'])
])
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_list_like_values(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
if method:
result = df.pivot(index='foo', columns='bar', values=values)
else:
result = pd.pivot(df, index='foo', columns='bar', values=values)
data = [[1, 2, 3, 'x', 'y', 'z'],
[4, 5, 6, 'q', 'w', 't']]
index = Index(data=['one', 'two'], name='foo')
columns = MultiIndex(levels=[['baz', 'zoo'], ['A', 'B', 'C']],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
names=[None, 'bar'])
expected = DataFrame(data=data, index=index,
columns=columns, dtype='object')
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('values', [
['bar', 'baz'], np.array(['bar', 'baz']),
pd.Series(['bar', 'baz']), pd.Index(['bar', 'baz'])
])
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_list_like_values_nans(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
if method:
result = df.pivot(index='zoo', columns='foo', values=values)
else:
result = pd.pivot(df, index='zoo', columns='foo', values=values)
data = [[np.nan, 'A', np.nan, 4],
[np.nan, 'C', np.nan, 6],
[np.nan, 'B', np.nan, 5],
['A', np.nan, 1, np.nan],
['B', np.nan, 2, np.nan],
['C', np.nan, 3, np.nan]]
index = Index(data=['q', 't', 'w', 'x', 'y', 'z'], name='zoo')
columns = MultiIndex(levels=[['bar', 'baz'], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[None, 'foo'])
expected = DataFrame(data=data, index=index,
columns=columns, dtype='object')
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(reason='MultiIndexed unstack with tuple names fails'
'with KeyError GH#19966')
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_multiindex(self, method):
# issue #17160
index = Index(data=[0, 1, 2, 3, 4, 5])
data = [['one', 'A', 1, 'x'],
['one', 'B', 2, 'y'],
['one', 'C', 3, 'z'],
['two', 'A', 4, 'q'],
['two', 'B', 5, 'w'],
['two', 'C', 6, 't']]
columns = MultiIndex(levels=[['bar', 'baz'], ['first', 'second']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
df = DataFrame(data=data, index=index, columns=columns, dtype='object')
if method:
result = df.pivot(index=('bar', 'first'),
columns=('bar', 'second'),
values=('baz', 'first'))
else:
result = pd.pivot(df,
index=('bar', 'first'),
columns=('bar', 'second'),
values=('baz', 'first'))
data = {'A': Series([1, 4], index=['one', 'two']),
'B': Series([2, 5], index=['one', 'two']),
'C': Series([3, 6], index=['one', 'two'])}
expected = DataFrame(data)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_tuple_of_values(self, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
with pytest.raises(KeyError):
# tuple is seen as a single column name
if method:
df.pivot(index='zoo', columns='foo', values=('bar', 'baz'))
else:
pd.pivot(df, index='zoo', columns='foo', values=('bar', 'baz'))
def test_margins(self):
def _check_output(result, values_col, index=['A', 'B'],
columns=['C'],
margins_col='All'):
col_margins = result.loc[result.index[:-1], margins_col]
expected_col_margins = self.data.groupby(index)[values_col].mean()
tm.assert_series_equal(col_margins, expected_col_margins,
check_names=False)
assert col_margins.name == margins_col
result = result.sort_index()
index_margins = result.loc[(margins_col, '')].iloc[:-1]
expected_ix_margins = self.data.groupby(columns)[values_col].mean()
tm.assert_series_equal(index_margins, expected_ix_margins,
check_names=False)
assert index_margins.name == (margins_col, '')
grand_total_margins = result.loc[(margins_col, ''), margins_col]
expected_total_margins = self.data[values_col].mean()
assert grand_total_margins == expected_total_margins
# column specified
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C',
margins=True, aggfunc=np.mean)
_check_output(result, 'D')
# Set a different margins_name (not 'All')
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C',
margins=True, aggfunc=np.mean,
margins_name='Totals')
_check_output(result, 'D', margins_col='Totals')
# no column specified
table = self.data.pivot_table(index=['A', 'B'], columns='C',
margins=True, aggfunc=np.mean)
for value_col in table.columns.levels[0]:
_check_output(table[value_col], value_col)
# no col
# to help with a buglet
self.data.columns = [k * 2 for k in self.data.columns]
table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
aggfunc=np.mean)
for value_col in table.columns:
totals = table.loc[('All', ''), value_col]
assert totals == self.data[value_col].mean()
# no rows
rtable = self.data.pivot_table(columns=['AA', 'BB'], margins=True,
aggfunc=np.mean)
assert isinstance(rtable, Series)
table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
aggfunc='mean')
for item in ['DD', 'EE', 'FF']:
totals = table.loc[('All', ''), item]
assert totals == self.data[item].mean()
def test_margins_dtype(self):
# GH 17013
df = self.data.copy()
df[['D', 'E', 'F']] = np.arange(len(df) * 3).reshape(len(df), 3)
mi_val = list(product(['bar', 'foo'], ['one', 'two'])) + [('All', '')]
mi = MultiIndex.from_tuples(mi_val, names=('A', 'B'))
expected = DataFrame({'dull': [12, 21, 3, 9, 45],
'shiny': [33, 0, 36, 51, 120]},
index=mi).rename_axis('C', axis=1)
expected['All'] = expected['dull'] + expected['shiny']
result = df.pivot_table(values='D', index=['A', 'B'],
columns='C', margins=True,
aggfunc=np.sum, fill_value=0)
tm.assert_frame_equal(expected, result)
@pytest.mark.xfail(reason='GH#17035 (len of floats is casted back to '
'floats)')
def test_margins_dtype_len(self):
mi_val = list(product(['bar', 'foo'], ['one', 'two'])) + [('All', '')]
mi = MultiIndex.from_tuples(mi_val, names=('A', 'B'))
expected = DataFrame({'dull': [1, 1, 2, 1, 5],
'shiny': [2, 0, 2, 2, 6]},
index=mi).rename_axis('C', axis=1)
expected['All'] = expected['dull'] + expected['shiny']
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C', margins=True,
aggfunc=len, fill_value=0)
tm.assert_frame_equal(expected, result)
def test_pivot_integer_columns(self):
# caused by upstream bug in unstack
d = date.min
data = list(product(['foo', 'bar'], ['A', 'B', 'C'], ['x1', 'x2'],
[d + timedelta(i)
for i in range(20)], [1.0]))
df = DataFrame(data)
table = df.pivot_table(values=4, index=[0, 1, 3], columns=[2])
df2 = df.rename(columns=str)
table2 = df2.pivot_table(
values='4', index=['0', '1', '3'], columns=['2'])
tm.assert_frame_equal(table, table2, check_names=False)
def test_pivot_no_level_overlap(self):
# GH #1181
data = DataFrame({'a': ['a', 'a', 'a', 'a', 'b', 'b', 'b', 'b'] * 2,
'b': [0, 0, 0, 0, 1, 1, 1, 1] * 2,
'c': (['foo'] * 4 + ['bar'] * 4) * 2,
'value': np.random.randn(16)})
table = data.pivot_table('value', index='a', columns=['b', 'c'])
grouped = data.groupby(['a', 'b', 'c'])['value'].mean()
expected = grouped.unstack('b').unstack('c').dropna(axis=1, how='all')
tm.assert_frame_equal(table, expected)
def test_pivot_columns_lexsorted(self):
n = 10000
dtype = np.dtype([
("Index", object),
("Symbol", object),
("Year", int),
("Month", int),
("Day", int),
("Quantity", int),
("Price", float),
])
products = np.array([
('SP500', 'ADBE'),
('SP500', 'NVDA'),
('SP500', 'ORCL'),
('NDQ100', 'AAPL'),
('NDQ100', 'MSFT'),
('NDQ100', 'GOOG'),
('FTSE', 'DGE.L'),
('FTSE', 'TSCO.L'),
('FTSE', 'GSK.L'),
], dtype=[('Index', object), ('Symbol', object)])
items = np.empty(n, dtype=dtype)
iproduct = np.random.randint(0, len(products), n)
items['Index'] = products['Index'][iproduct]
items['Symbol'] = products['Symbol'][iproduct]
dr = pd.date_range(date(2000, 1, 1),
date(2010, 12, 31))
dates = dr[np.random.randint(0, len(dr), n)]
items['Year'] = dates.year
items['Month'] = dates.month
items['Day'] = dates.day
items['Price'] = np.random.lognormal(4.0, 2.0, n)
df = DataFrame(items)
pivoted = df.pivot_table('Price', index=['Month', 'Day'],
columns=['Index', 'Symbol', 'Year'],
aggfunc='mean')
assert pivoted.columns.is_monotonic
def test_pivot_complex_aggfunc(self):
f = OrderedDict([('D', ['std']), ('E', ['sum'])])
expected = self.data.groupby(['A', 'B']).agg(f).unstack('B')
result = self.data.pivot_table(index='A', columns='B', aggfunc=f)
tm.assert_frame_equal(result, expected)
def test_margins_no_values_no_cols(self):
# Regression test on pivot table: no values or cols passed.
result = self.data[['A', 'B']].pivot_table(
index=['A', 'B'], aggfunc=len, margins=True)
result_list = result.tolist()
assert sum(result_list[:-1]) == result_list[-1]
def test_margins_no_values_two_rows(self):
# Regression test on pivot table: no values passed but rows are a
# multi-index
result = self.data[['A', 'B', 'C']].pivot_table(
index=['A', 'B'], columns='C', aggfunc=len, margins=True)
assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]
def test_margins_no_values_one_row_one_col(self):
# Regression test on pivot table: no values passed but row and col
# defined
result = self.data[['A', 'B']].pivot_table(
index='A', columns='B', aggfunc=len, margins=True)
assert result.All.tolist() == [4.0, 7.0, 11.0]
def test_margins_no_values_two_row_two_cols(self):
# Regression test on pivot table: no values passed but rows and cols
# are multi-indexed
self.data['D'] = ['a', 'b', 'c', 'd',
'e', 'f', 'g', 'h', 'i', 'j', 'k']
result = self.data[['A', 'B', 'C', 'D']].pivot_table(
index=['A', 'B'], columns=['C', 'D'], aggfunc=len, margins=True)
assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]
def test_pivot_table_with_margins_set_margin_name(self):
# see gh-3335
for margin_name in ['foo', 'one', 666, None, ['a', 'b']]:
with pytest.raises(ValueError):
# multi-index index
pivot_table(self.data, values='D', index=['A', 'B'],
columns=['C'], margins=True,
margins_name=margin_name)
with pytest.raises(ValueError):
# multi-index column
pivot_table(self.data, values='D', index=['C'],
columns=['A', 'B'], margins=True,
margins_name=margin_name)
with pytest.raises(ValueError):
# non-multi-index index/column
pivot_table(self.data, values='D', index=['A'],
columns=['B'], margins=True,
margins_name=margin_name)
def test_pivot_timegrouper(self):
df = DataFrame({
'Branch': 'A A A A A A A B'.split(),
'Buyer': '<NAME> <NAME>'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date': [datetime(2013, 1, 1),
datetime(2013, 1, 1),
datetime(2013, 10, 1),
datetime(2013, 10, 2),
datetime(2013, 10, 1),
datetime(2013, 10, 2),
datetime(2013, 12, 2),
datetime(2013, 12, 2), ]}).set_index('Date')
expected = DataFrame(np.array([10, 18, 3], dtype='int64')
.reshape(1, 3),
index=[datetime(2013, 12, 31)],
columns='<NAME>'.split())
expected.index.name = 'Date'
expected.columns.name = 'Buyer'
result = pivot_table(df, index=Grouper(freq='A'), columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer', columns=Grouper(freq='A'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
expected = DataFrame(np.array([1, np.nan, 3, 9, 18, np.nan])
.reshape(2, 3),
index=[datetime(2013, 1, 1),
datetime(2013, 7, 1)],
columns='<NAME>'.split())
expected.index.name = 'Date'
expected.columns.name = 'Buyer'
result = pivot_table(df, index=Grouper(freq='6MS'), columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer', columns=Grouper(freq='6MS'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
# passing the name
df = df.reset_index()
result = pivot_table(df, index=Grouper(freq='6MS', key='Date'),
columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer',
columns=Grouper(freq='6MS', key='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
pytest.raises(KeyError, lambda: pivot_table(
df, index=Grouper(freq='6MS', key='foo'),
columns='Buyer', values='Quantity', aggfunc=np.sum))
pytest.raises(KeyError, lambda: pivot_table(
df, index='Buyer',
columns=Grouper(freq='6MS', key='foo'),
values='Quantity', aggfunc=np.sum))
# passing the level
df = df.set_index('Date')
result = pivot_table(df, index=Grouper(freq='6MS', level='Date'),
columns='Buyer', values='Quantity',
aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer',
columns=Grouper(freq='6MS', level='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
pytest.raises(ValueError, lambda: pivot_table(
df, index=Grouper(freq='6MS', level='foo'),
columns='Buyer', values='Quantity', aggfunc=np.sum))
pytest.raises(ValueError, lambda: pivot_table(
df, index='Buyer',
columns=Grouper(freq='6MS', level='foo'),
values='Quantity', aggfunc=np.sum))
# double grouper
df = DataFrame({
'Branch': 'A A A A A A A B'.split(),
'Buyer': '<NAME>'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date': [datetime(2013, 11, 1, 13, 0), datetime(2013, 9, 1, 13, 5),
datetime(2013, 10, 1, 20, 0),
datetime(2013, 10, 2, 10, 0),
datetime(2013, 11, 1, 20, 0),
datetime(2013, 10, 2, 10, 0),
datetime(2013, 10, 2, 12, 0),
datetime(2013, 12, 5, 14, 0)],
'PayDay': [datetime(2013, 10, 4, 0, 0),
datetime(2013, 10, 15, 13, 5),
datetime(2013, 9, 5, 20, 0),
datetime(2013, 11, 2, 10, 0),
datetime(2013, 10, 7, 20, 0),
datetime(2013, 9, 5, 10, 0),
datetime(2013, 12, 30, 12, 0),
datetime(2013, 11, 20, 14, 0), ]})
result = pivot_table(df, index=Grouper(freq='M', key='Date'),
columns=Grouper(freq='M', key='PayDay'),
values='Quantity', aggfunc=np.sum)
expected = DataFrame(np.array([np.nan, 3, np.nan, np.nan,
6, np.nan, 1, 9,
np.nan, 9, np.nan, np.nan, np.nan,
np.nan, 3, np.nan]).reshape(4, 4),
index=[datetime(2013, 9, 30),
datetime(2013, 10, 31),
datetime(2013, 11, 30),
datetime(2013, 12, 31)],
columns=[datetime(2013, 9, 30),
datetime(2013, 10, 31),
datetime(2013, 11, 30),
datetime(2013, 12, 31)])
expected.index.name = 'Date'
expected.columns.name = 'PayDay'
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=Grouper(freq='M', key='PayDay'),
columns=Grouper(freq='M', key='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
tuples = [(datetime(2013, 9, 30), datetime(2013, 10, 31)),
(datetime(2013, 10, 31),
datetime(2013, 9, 30)),
(datetime(2013, 10, 31),
datetime(2013, 11, 30)),
(datetime(2013, 10, 31),
datetime(2013, 12, 31)),
(datetime(2013, 11, 30),
datetime(2013, 10, 31)),
(datetime(2013, 12, 31), datetime(2013, 11, 30)), ]
idx = MultiIndex.from_tuples(tuples, names=['Date', 'PayDay'])
expected = DataFrame(np.array([3, np.nan, 6, np.nan, 1, np.nan,
9, np.nan, 9, np.nan,
np.nan, 3]).reshape(6, 2),
index=idx, columns=['A', 'B'])
expected.columns.name = 'Branch'
result = pivot_table(
df, index=[Grouper(freq='M', key='Date'),
Grouper(freq='M', key='PayDay')], columns=['Branch'],
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=['Branch'],
columns=[Grouper(freq='M', key='Date'),
Grouper(freq='M', key='PayDay')],
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
def test_pivot_datetime_tz(self):
dates1 = ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00',
'2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00']
dates2 = ['2013-01-01 15:00:00', '2013-01-01 15:00:00',
'2013-01-01 15:00:00',
'2013-02-01 15:00:00', '2013-02-01 15:00:00',
'2013-02-01 15:00:00']
df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'dt1': dates1, 'dt2': dates2,
'value1': np.arange(6, dtype='int64'),
'value2': [1, 2] * 3})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo'))
exp_idx = pd.DatetimeIndex(['2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00'],
tz='US/Pacific', name='dt1')
exp_col1 = Index(['value1', 'value1'])
exp_col2 = Index(['a', 'b'], name='label')
exp_col = MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 3], [1, 4], [2, 5]],
index=exp_idx, columns=exp_col)
result = pivot_table(df, index=['dt1'], columns=[
'label'], values=['value1'])
tm.assert_frame_equal(result, expected)
exp_col1 = Index(['sum', 'sum', 'sum', 'sum',
'mean', 'mean', 'mean', 'mean'])
exp_col2 = Index(['value1', 'value1', 'value2', 'value2'] * 2)
exp_col3 = pd.DatetimeIndex(['2013-01-01 15:00:00',
'2013-02-01 15:00:00'] * 4,
tz='Asia/Tokyo', name='dt2')
exp_col = MultiIndex.from_arrays([exp_col1, exp_col2, exp_col3])
expected = DataFrame(np.array([[0, 3, 1, 2, 0, 3, 1, 2],
[1, 4, 2, 1, 1, 4, 2, 1],
[2, 5, 1, 2, 2, 5, 1, 2]],
dtype='int64'),
index=exp_idx,
columns=exp_col)
result = pivot_table(df, index=['dt1'], columns=['dt2'],
values=['value1', 'value2'],
aggfunc=[np.sum, np.mean])
tm.assert_frame_equal(result, expected)
def test_pivot_dtaccessor(self):
# GH 8103
dates1 = ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00',
'2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00']
dates2 = ['2013-01-01 15:00:00', '2013-01-01 15:00:00',
'2013-01-01 15:00:00',
'2013-02-01 15:00:00', '2013-02-01 15:00:00',
'2013-02-01 15:00:00']
df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'dt1': dates1, 'dt2': dates2,
'value1': np.arange(6, dtype='int64'),
'value2': [1, 2] * 3})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d))
result = pivot_table(df, index='label', columns=df['dt1'].dt.hour,
values='value1')
exp_idx = Index(['a', 'b'], name='label')
expected = DataFrame({7: [0, 3], 8: [1, 4], 9: [2, 5]},
index=exp_idx,
columns=Index([7, 8, 9], name='dt1'))
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=df['dt2'].dt.month,
columns=df['dt1'].dt.hour,
values='value1')
expected = DataFrame({7: [0, 3], 8: [1, 4], 9: [2, 5]},
index=Index([1, 2], name='dt2'),
columns=Index([7, 8, 9], name='dt1'))
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=df['dt2'].dt.year.values,
columns=[df['dt1'].dt.hour, df['dt2'].dt.month],
values='value1')
exp_col = MultiIndex.from_arrays(
[[7, 7, 8, 8, 9, 9], [1, 2] * 3], names=['dt1', 'dt2'])
expected = DataFrame(np.array([[0, 3, 1, 4, 2, 5]], dtype='int64'),
index=[2013], columns=exp_col)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=np.array(['X', 'X', 'X',
'X', 'Y', 'Y']),
columns=[df['dt1'].dt.hour, df['dt2'].dt.month],
values='value1')
expected = DataFrame(np.array([[0, 3, 1, np.nan, 2, np.nan],
[np.nan, np.nan, np.nan,
4, np.nan, 5]]),
index=['X', 'Y'], columns=exp_col)
tm.assert_frame_equal(result, expected)
def test_daily(self):
rng = date_range('1/1/2000', '12/31/2004', freq='D')
ts = Series(np.random.randn(len(rng)), index=rng)
annual = pivot_table(DataFrame(ts), index=ts.index.year,
columns=ts.index.dayofyear)
annual.columns = annual.columns.droplevel(0)
doy = np.asarray(ts.index.dayofyear)
for i in range(1, 367):
subset = ts[doy == i]
subset.index = subset.index.year
result = annual[i].dropna()
tm.assert_series_equal(result, subset, check_names=False)
assert result.name == i
def test_monthly(self):
rng = date_range('1/1/2000', '12/31/2004', freq='M')
ts = Series(np.random.randn(len(rng)), index=rng)
annual = pivot_table(pd.DataFrame(ts), index=ts.index.year,
columns=ts.index.month)
annual.columns = annual.columns.droplevel(0)
month = ts.index.month
for i in range(1, 13):
subset = ts[month == i]
subset.index = subset.index.year
result = annual[i].dropna()
tm.assert_series_equal(result, subset, check_names=False)
assert result.name == i
def test_pivot_table_with_iterator_values(self):
# GH 12017
aggs = {'D': 'sum', 'E': 'mean'}
pivot_values_list = pd.pivot_table(
self.data, index=['A'], values=list(aggs.keys()), aggfunc=aggs,
)
pivot_values_keys = pd.pivot_table(
self.data, index=['A'], values=aggs.keys(), aggfunc=aggs,
)
tm.assert_frame_equal(pivot_values_keys, pivot_values_list)
agg_values_gen = (value for value in aggs.keys())
pivot_values_gen = pd.pivot_table(
self.data, index=['A'], values=agg_values_gen, aggfunc=aggs,
)
tm.assert_frame_equal(pivot_values_gen, pivot_values_list)
def test_pivot_table_margins_name_with_aggfunc_list(self):
# GH 13354
margins_name = 'Weekly'
costs = pd.DataFrame(
{'item': ['bacon', 'cheese', 'bacon', 'cheese'],
'cost': [2.5, 4.5, 3.2, 3.3],
'day': ['M', 'M', 'T', 'T']}
)
table = costs.pivot_table(
index="item", columns="day", margins=True,
margins_name=margins_name, aggfunc=[np.mean, max]
)
ix = pd.Index(
['bacon', 'cheese', margins_name], dtype='object', name='item'
)
tups = [('mean', 'cost', 'M'), ('mean', 'cost', 'T'),
('mean', 'cost', margins_name), ('max', 'cost', 'M'),
('max', 'cost', 'T'), ('max', 'cost', margins_name)]
cols = pd.MultiIndex.from_tuples(tups, names=[None, None, 'day'])
expected = pd.DataFrame(table.values, index=ix, columns=cols)
tm.assert_frame_equal(table, expected)
@pytest.mark.xfail(reason='GH#17035 (np.mean of ints is casted back to '
'ints)')
def test_categorical_margins(self, observed):
# GH 10989
df = pd.DataFrame({'x': np.arange(8),
'y': np.arange(8) // 4,
'z': np.arange(8) % 2})
expected = pd.DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]])
expected.index = Index([0, 1, 'All'], name='y')
expected.columns = Index([0, 1, 'All'], name='z')
table = df.pivot_table('x', 'y', 'z', dropna=observed, margins=True)
tm.assert_frame_equal(table, expected)
@pytest.mark.xfail(reason='GH#17035 (np.mean of ints is casted back to '
'ints)')
def test_categorical_margins_category(self, observed):
df = pd.DataFrame({'x': np.arange(8),
'y': np.arange(8) // 4,
'z': np.arange(8) % 2})
expected = pd.DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]])
expected.index = Index([0, 1, 'All'], name='y')
expected.columns = Index([0, 1, 'All'], name='z')
df.y = df.y.astype('category')
df.z = df.z.astype('category')
table = df.pivot_table('x', 'y', 'z', dropna=observed, margins=True)
tm.assert_frame_equal(table, expected)
def test_categorical_aggfunc(self, observed):
# GH 9534
df = pd.DataFrame({"C1": ["A", "B", "C", "C"],
"C2": ["a", "a", "b", "b"],
"V": [1, 2, 3, 4]})
df["C1"] = df["C1"].astype("category")
result = df.pivot_table("V", index="C1", columns="C2",
dropna=observed, aggfunc="count")
expected_index = pd.CategoricalIndex(['A', 'B', 'C'],
categories=['A', 'B', 'C'],
ordered=False,
name='C1')
expected_columns = pd.Index(['a', 'b'], name='C2')
expected_data = np.array([[1., np.nan],
[1., np.nan],
[np.nan, 2.]])
expected = pd.DataFrame(expected_data,
index=expected_index,
columns=expected_columns)
tm.assert_frame_equal(result, expected)
def test_categorical_pivot_index_ordering(self, observed):
# GH 8731
df = pd.DataFrame({'Sales': [100, 120, 220],
'Month': ['January', 'January', 'January'],
'Year': [2013, 2014, 2013]})
months = ['January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November',
'December']
df['Month'] = df['Month'].astype('category').cat.set_categories(months)
result = df.pivot_table(values='Sales',
index='Month',
columns='Year',
dropna=observed,
aggfunc='sum')
expected_columns = pd.Int64Index([2013, 2014], name='Year')
expected_index = pd.CategoricalIndex(['January'],
categories=months,
ordered=False,
name='Month')
expected = pd.DataFrame([[320, 120]],
index=expected_index,
columns=expected_columns)
if not observed:
result = result.dropna().astype(np.int64)
tm.assert_frame_equal(result, expected)
def test_pivot_table_not_series(self):
# GH 4386
# pivot_table always returns a DataFrame
# when values is not list like and columns is None
# and aggfunc is not instance of list
df = DataFrame({'col1': [3, 4, 5],
'col2': ['C', 'D', 'E'],
'col3': [1, 3, 9]})
result = df.pivot_table('col1', index=['col3', 'col2'], aggfunc=np.sum)
m = MultiIndex.from_arrays([[1, 3, 9],
['C', 'D', 'E']],
names=['col3', 'col2'])
expected = DataFrame([3, 4, 5],
index=m, columns=['col1'])
tm.assert_frame_equal(result, expected)
result = df.pivot_table(
'col1', index='col3', columns='col2', aggfunc=np.sum
)
expected = DataFrame([[3, np.NaN, np.NaN],
[np.NaN, 4, np.NaN],
[np.NaN, np.NaN, 5]],
index=Index([1, 3, 9], name='col3'),
columns=Index(['C', 'D', 'E'], name='col2'))
tm.assert_frame_equal(result, expected)
result = df.pivot_table('col1', index='col3', aggfunc=[np.sum])
m = MultiIndex.from_arrays([['sum'],
['col1']])
expected = DataFrame([3, 4, 5],
index=Index([1, 3, 9], name='col3'),
columns=m)
tm.assert_frame_equal(result, expected)
def test_pivot_margins_name_unicode(self):
# issue #13292
greek = u'\u0394\u03bf\u03ba\u03b9\u03bc\u03ae'
frame = pd.DataFrame({'foo': [1, 2, 3]})
table = pd.pivot_table(frame, index=['foo'], aggfunc=len, margins=True,
margins_name=greek)
index = pd.Index([1, 2, 3, greek], dtype='object', name='foo')
expected = pd.DataFrame(index=index)
tm.assert_frame_equal(table, expected)
def test_pivot_string_as_func(self):
# GH #18713
# for correctness purposes
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar',
'bar', 'bar', 'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one',
'one', 'two', 'two', 'two', 'one'],
'C': range(11)})
result = pivot_table(data, index='A', columns='B', aggfunc='sum')
mi = MultiIndex(levels=[['C'], ['one', 'two']],
codes=[[0, 0], [0, 1]], names=[None, 'B'])
expected = DataFrame({('C', 'one'): {'bar': 15, 'foo': 13},
('C', 'two'): {'bar': 7, 'foo': 20}},
columns=mi).rename_axis('A')
tm.assert_frame_equal(result, expected)
result = pivot_table(data, index='A', columns='B',
aggfunc=['sum', 'mean'])
mi = MultiIndex(levels=[['sum', 'mean'], ['C'], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 0, 0, 0], [0, 1, 0, 1]],
names=[None, None, 'B'])
expected = DataFrame({('mean', 'C', 'one'): {'bar': 5.0, 'foo': 3.25},
('mean', 'C', 'two'): {'bar': 7.0,
'foo': 6.666666666666667},
('sum', 'C', 'one'): {'bar': 15, 'foo': 13},
('sum', 'C', 'two'): {'bar': 7, 'foo': 20}},
columns=mi).rename_axis('A')
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('f, f_numpy',
[('sum', np.sum),
('mean', np.mean),
('std', np.std),
(['sum', 'mean'], [np.sum, np.mean]),
(['sum', 'std'], [np.sum, np.std]),
(['std', 'mean'], [np.std, np.mean])])
def test_pivot_string_func_vs_func(self, f, f_numpy):
# GH #18713
# for consistency purposes
result = pivot_table(self.data, index='A', columns='B', aggfunc=f)
expected = pivot_table(self.data, index='A', columns='B',
aggfunc=f_numpy)
tm.assert_frame_equal(result, expected)
@pytest.mark.slow
def test_pivot_number_of_levels_larger_than_int32(self):
# GH 20601
df = DataFrame({'ind1': np.arange(2 ** 16),
'ind2': np.arange(2 ** 16),
'count': 0})
with pytest.raises(ValueError, match='int32 overflow'):
df.pivot_table(index='ind1', columns='ind2',
values='count', aggfunc='count')
class TestCrosstab(object):
def setup_method(self, method):
df = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
self.df = df.append(df, ignore_index=True)
def test_crosstab_single(self):
df = self.df
result = crosstab(df['A'], df['C'])
expected = df.groupby(['A', 'C']).size().unstack()
tm.assert_frame_equal(result, expected.fillna(0).astype(np.int64))
def test_crosstab_multiple(self):
df = self.df
result = crosstab(df['A'], [df['B'], df['C']])
expected = df.groupby(['A', 'B', 'C']).size()
expected = expected.unstack(
'B').unstack('C').fillna(0).astype(np.int64)
tm.assert_frame_equal(result, expected)
result = crosstab([df['B'], df['C']], df['A'])
expected = df.groupby(['B', 'C', 'A']).size()
expected = expected.unstack('A').fillna(0).astype(np.int64)
tm.assert_frame_equal(result, expected)
def test_crosstab_ndarray(self):
a = np.random.randint(0, 5, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 10, size=100)
df = DataFrame({'a': a, 'b': b, 'c': c})
result = crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'))
expected = crosstab(df['a'], [df['b'], df['c']])
tm.assert_frame_equal(result, expected)
result = crosstab([b, c], a, colnames=['a'], rownames=('b', 'c'))
expected = crosstab([df['b'], df['c']], df['a'])
tm.assert_frame_equal(result, expected)
# assign arbitrary names
result = crosstab(self.df['A'].values, self.df['C'].values)
assert result.index.name == 'row_0'
assert result.columns.name == 'col_0'
def test_crosstab_non_aligned(self):
# GH 17005
a = pd.Series([0, 1, 1], index=['a', 'b', 'c'])
b = pd.Series([3, 4, 3, 4, 3], index=['a', 'b', 'c', 'd', 'f'])
c = np.array([3, 4, 3])
expected = pd.DataFrame([[1, 0], [1, 1]],
index=Index([0, 1], name='row_0'),
columns=Index([3, 4], name='col_0'))
result = crosstab(a, b)
tm.assert_frame_equal(result, expected)
result = crosstab(a, c)
tm.assert_frame_equal(result, expected)
def test_crosstab_margins(self):
a = np.random.randint(0, 7, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 5, size=100)
df = DataFrame({'a': a, 'b': b, 'c': c})
result = crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'),
margins=True)
assert result.index.names == ('a',)
assert result.columns.names == ['b', 'c']
all_cols = result['All', '']
exp_cols = df.groupby(['a']).size().astype('i8')
# to keep index.name
exp_margin = Series([len(df)], index=Index(['All'], name='a'))
exp_cols = exp_cols.append(exp_margin)
exp_cols.name = ('All', '')
tm.assert_series_equal(all_cols, exp_cols)
all_rows = result.loc['All']
exp_rows = df.groupby(['b', 'c']).size().astype('i8')
exp_rows = exp_rows.append(Series([len(df)], index=[('All', '')]))
exp_rows.name = 'All'
exp_rows = exp_rows.reindex(all_rows.index)
exp_rows = exp_rows.fillna(0).astype(np.int64)
tm.assert_series_equal(all_rows, exp_rows)
def test_crosstab_margins_set_margin_name(self):
# GH 15972
a = np.random.randint(0, 7, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 5, size=100)
df = DataFrame({'a': a, 'b': b, 'c': c})
result = crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'),
margins=True, margins_name='TOTAL')
assert result.index.names == ('a',)
assert result.columns.names == ['b', 'c']
all_cols = result['TOTAL', '']
exp_cols = df.groupby(['a']).size().astype('i8')
# to keep index.name
exp_margin = Series([len(df)], index=Index(['TOTAL'], name='a'))
exp_cols = exp_cols.append(exp_margin)
exp_cols.name = ('TOTAL', '')
tm.assert_series_equal(all_cols, exp_cols)
all_rows = result.loc['TOTAL']
exp_rows = df.groupby(['b', 'c']).size().astype('i8')
exp_rows = exp_rows.append(Series([len(df)], index=[('TOTAL', '')]))
exp_rows.name = 'TOTAL'
exp_rows = exp_rows.reindex(all_rows.index)
exp_rows = exp_rows.fillna(0).astype(np.int64)
tm.assert_series_equal(all_rows, exp_rows)
for margins_name in [666, None, ['a', 'b']]:
with pytest.raises(ValueError):
crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'),
margins=True, margins_name=margins_name)
def test_crosstab_pass_values(self):
a = np.random.randint(0, 7, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 5, size=100)
values = np.random.randn(100)
table = crosstab([a, b], c, values, aggfunc=np.sum,
rownames=['foo', 'bar'], colnames=['baz'])
df = DataFrame({'foo': a, 'bar': b, 'baz': c, 'values': values})
expected = df.pivot_table('values', index=['foo', 'bar'],
columns='baz', aggfunc=np.sum)
tm.assert_frame_equal(table, expected)
def test_crosstab_dropna(self):
# GH 3820
a = np.array(['foo', 'foo', 'foo', 'bar',
'bar', 'foo', 'foo'], dtype=object)
b = np.array(['one', 'one', 'two', 'one',
'two', 'two', 'two'], dtype=object)
c = np.array(['dull', 'dull', 'dull', 'dull',
'dull', 'shiny', 'shiny'], dtype=object)
res = pd.crosstab(a, [b, c], rownames=['a'],
colnames=['b', 'c'], dropna=False)
m = MultiIndex.from_tuples([('one', 'dull'), ('one', 'shiny'),
('two', 'dull'), ('two', 'shiny')],
names=['b', 'c'])
tm.assert_index_equal(res.columns, m)
def test_crosstab_no_overlap(self):
# GS 10291
s1 = pd.Series([1, 2, 3], index=[1, 2, 3])
s2 = pd.Series([4, 5, 6], index=[4, 5, 6])
actual = crosstab(s1, s2)
expected = pd.DataFrame()
tm.assert_frame_equal(actual, expected)
def test_margin_dropna(self):
# GH 12577
# pivot_table counts null into margin ('All')
# when margins=true and dropna=true
df = pd.DataFrame({'a': [1, 2, 2, 2, 2, np.nan],
'b': [3, 3, 4, 4, 4, 4]})
actual = pd.crosstab(df.a, df.b, margins=True, dropna=True)
expected = pd.DataFrame([[1, 0, 1], [1, 3, 4], [2, 3, 5]])
expected.index = Index([1.0, 2.0, 'All'], name='a')
expected.columns = Index([3, 4, 'All'], name='b')
tm.assert_frame_equal(actual, expected)
df = DataFrame({'a': [1, np.nan, np.nan, np.nan, 2, np.nan],
'b': [3, np.nan, 4, 4, 4, 4]})
actual = pd.crosstab(df.a, df.b, margins=True, dropna=True)
expected = pd.DataFrame([[1, 0, 1], [0, 1, 1], [1, 1, 2]])
expected.index = Index([1.0, 2.0, 'All'], name='a')
expected.columns = Index([3.0, 4.0, 'All'], name='b')
tm.assert_frame_equal(actual, expected)
df = DataFrame({'a': [1, np.nan, np.nan, np.nan, np.nan, 2],
'b': [3, 3, 4, 4, 4, 4]})
actual = pd.crosstab(df.a, df.b, margins=True, dropna=True)
expected = pd.DataFrame([[1, 0, 1], [0, 1, 1], [1, 1, 2]])
expected.index = Index([1.0, 2.0, 'All'], name='a')
expected.columns = Index([3, 4, 'All'], name='b')
tm.assert_frame_equal(actual, expected)
# GH 12642
# _add_margins raises KeyError: Level None not found
# when margins=True and dropna=False
df = pd.DataFrame({'a': [1, 2, 2, 2, 2, np.nan],
'b': [3, 3, 4, 4, 4, 4]})
actual = pd.crosstab(df.a, df.b, margins=True, dropna=False)
expected = pd.DataFrame([[1, 0, 1], [1, 3, 4], [2, 4, 6]])
expected.index = Index([1.0, 2.0, 'All'], name='a')
expected.columns = Index([3, 4, 'All'], name='b')
tm.assert_frame_equal(actual, expected)
df = DataFrame({'a': [1, np.nan, np.nan, np.nan, 2, np.nan],
'b': [3, np.nan, 4, 4, 4, 4]})
actual = pd.crosstab(df.a, df.b, margins=True, dropna=False)
expected = pd.DataFrame([[1, 0, 1], [0, 1, 1], [1, 4, 6]])
expected.index = Index([1.0, 2.0, 'All'], name='a')
expected.columns = Index([3.0, 4.0, 'All'], name='b')
tm.assert_frame_equal(actual, expected)
a = np.array(['foo', 'foo', 'foo', 'bar',
'bar', 'foo', 'foo'], dtype=object)
b = np.array(['one', 'one', 'two', 'one',
'two', np.nan, 'two'], dtype=object)
c = np.array(['dull', 'dull', 'dull', 'dull',
'dull', 'shiny', 'shiny'], dtype=object)
actual = pd.crosstab(a, [b, c], rownames=['a'],
colnames=['b', 'c'], margins=True, dropna=False)
m = MultiIndex.from_arrays([['one', 'one', 'two', 'two', 'All'],
['dull', 'shiny', 'dull', 'shiny', '']],
names=['b', 'c'])
expected = DataFrame([[1, 0, 1, 0, 2], [2, 0, 1, 1, 5],
[3, 0, 2, 1, 7]], columns=m)
expected.index = Index(['bar', 'foo', 'All'], name='a')
tm.assert_frame_equal(actual, expected)
actual = pd.crosstab([a, b], c, rownames=['a', 'b'],
colnames=['c'], margins=True, dropna=False)
m = MultiIndex.from_arrays([['bar', 'bar', 'foo', 'foo', 'All'],
['one', 'two', 'one', 'two', '']],
names=['a', 'b'])
expected = DataFrame([[1, 0, 1], [1, 0, 1], [2, 0, 2], [1, 1, 2],
[5, 2, 7]], index=m)
expected.columns = Index(['dull', 'shiny', 'All'], name='c')
tm.assert_frame_equal(actual, expected)
actual = pd.crosstab([a, b], c, rownames=['a', 'b'],
colnames=['c'], margins=True, dropna=True)
m = MultiIndex.from_arrays([['bar', 'bar', 'foo', 'foo', 'All'],
['one', 'two', 'one', 'two', '']],
names=['a', 'b'])
expected = DataFrame([[1, 0, 1], [1, 0, 1], [2, 0, 2], [1, 1, 2],
[5, 1, 6]], index=m)
expected.columns = Index(['dull', 'shiny', 'All'], name='c')
tm.assert_frame_equal(actual, expected)
def test_crosstab_normalize(self):
# Issue 12578
df = pd.DataFrame({'a': [1, 2, 2, 2, 2], 'b': [3, 3, 4, 4, 4],
'c': [1, 1, np.nan, 1, 1]})
rindex = pd.Index([1, 2], name='a')
cindex = pd.Index([3, 4], name='b')
full_normal = pd.DataFrame([[0.2, 0], [0.2, 0.6]],
index=rindex, columns=cindex)
row_normal = pd.DataFrame([[1.0, 0], [0.25, 0.75]],
index=rindex, columns=cindex)
col_normal = pd.DataFrame([[0.5, 0], [0.5, 1.0]],
index=rindex, columns=cindex)
# Check all normalize args
tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize='all'),
full_normal)
tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize=True),
full_normal)
tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize='index'),
row_normal)
tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize='columns'),
col_normal)
tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize=1),
pd.crosstab(df.a, df.b, normalize='columns'))
tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize=0),
pd.crosstab(df.a, df.b, normalize='index'))
row_normal_margins = pd.DataFrame([[1.0, 0],
[0.25, 0.75],
[0.4, 0.6]],
index=pd.Index([1, 2, 'All'],
name='a',
dtype='object'),
columns=pd.Index([3, 4], name='b',
dtype='object'))
col_normal_margins = pd.DataFrame([[0.5, 0, 0.2], [0.5, 1.0, 0.8]],
index=pd.Index([1, 2], name='a',
dtype='object'),
columns=pd.Index([3, 4, 'All'],
name='b',
dtype='object'))
all_normal_margins = pd.DataFrame([[0.2, 0, 0.2],
[0.2, 0.6, 0.8],
[0.4, 0.6, 1]],
index=pd.Index([1, 2, 'All'],
name='a',
dtype='object'),
columns=pd.Index([3, 4, 'All'],
name='b',
dtype='object'))
tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize='index',
margins=True), row_normal_margins)
tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize='columns',
margins=True),
col_normal_margins)
tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize=True,
margins=True), all_normal_margins)
# Test arrays
pd.crosstab([np.array([1, 1, 2, 2]), np.array([1, 2, 1, 2])],
np.array([1, 2, 1, 2]))
# Test with aggfunc
norm_counts = pd.DataFrame([[0.25, 0, 0.25],
[0.25, 0.5, 0.75],
[0.5, 0.5, 1]],
index=pd.Index([1, 2, 'All'],
name='a',
dtype='object'),
columns=pd.Index([3, 4, 'All'],
name='b'))
test_case = pd.crosstab(df.a, df.b, df.c, aggfunc='count',
normalize='all',
margins=True)
tm.assert_frame_equal(test_case, norm_counts)
df = pd.DataFrame({'a': [1, 2, 2, 2, 2], 'b': [3, 3, 4, 4, 4],
'c': [0, 4, np.nan, 3, 3]})
norm_sum = pd.DataFrame([[0, 0, 0.],
[0.4, 0.6, 1],
[0.4, 0.6, 1]],
index=pd.Index([1, 2, 'All'],
name='a',
dtype='object'),
columns=pd.Index([3, 4, 'All'],
name='b',
dtype='object'))
test_case = pd.crosstab(df.a, df.b, df.c, aggfunc=np.sum,
normalize='all',
margins=True)
| tm.assert_frame_equal(test_case, norm_sum) | pandas.util.testing.assert_frame_equal |
import os
import joblib
import numpy as np
import pandas as pd
from joblib import Parallel
from joblib import delayed
from Fuzzy_clustering.version2.common_utils.logging import create_logger
from Fuzzy_clustering.version2.dataset_manager.common_utils import check_empty_nwp
from Fuzzy_clustering.version2.dataset_manager.common_utils import rescale_mean
from Fuzzy_clustering.version2.dataset_manager.common_utils import stack_2d_dense
from Fuzzy_clustering.version2.dataset_manager.common_utils import stack_3d
class DatasetCreatorDense:
def __init__(self, projects_group, projects, data, path_nwp, nwp_model, nwp_resolution, data_variables, njobs=1,
test=False, dates=None):
self.projects = projects
self.is_for_test = test
self.projects_group = projects_group
self.data = data
self.path_nwp = path_nwp
self.nwp_model = nwp_model
self.nwp_resolution = nwp_resolution
self.compress = True if self.nwp_resolution == 0.05 else False
self.n_jobs = njobs
self.variables = data_variables
self.logger = create_logger(logger_name=__name__, abs_path=self.path_nwp,
logger_path=f'log_{self.projects_group}.log', write_type='a')
if not self.data is None:
self.dates = self.check_dates()
elif not dates is None:
self.dates = dates
def check_dates(self):
start_date = pd.to_datetime(self.data.index[0].strftime('%d%m%y'), format='%d%m%y')
end_date = pd.to_datetime(self.data.index[-1].strftime('%d%m%y'), format='%d%m%y')
dates = pd.date_range(start_date, end_date)
data_dates = pd.to_datetime(np.unique(self.data.index.strftime('%d%m%y')), format='%d%m%y')
dates = [d for d in dates if d in data_dates]
self.logger.info('Dates are checked. Number of time samples is %s', str(len(dates)))
return pd.DatetimeIndex(dates)
def correct_nwps(self, nwp, variables):
if nwp['lat'].shape[0] == 0:
area_group = self.projects[0]['static_data']['area_group']
resolution = self.projects[0]['static_data']['NWP_resolution']
nwp['lat'] = np.arange(area_group[0][0], area_group[1][0] + resolution / 2,
resolution).reshape(-1, 1)
nwp['long'] = np.arange(area_group[0][1], area_group[1][1] + resolution / 2,
resolution).reshape(-1, 1).T
for var in nwp.keys():
if not var in {'lat', 'long'}:
if nwp['lat'].shape[0] != nwp[var].shape[0]:
nwp[var] = nwp[var].T
if 'WS' in variables and not 'WS' in nwp.keys():
if 'Uwind' in nwp.keys() and 'Vwind' in nwp.keys():
if nwp['Uwind'].shape[0] > 0 and nwp['Vwind'].shape[0] > 0:
r2d = 45.0 / np.arctan(1.0)
wspeed = np.sqrt(np.square(nwp['Uwind']) + np.square(nwp['Vwind']))
wdir = np.arctan2(nwp['Uwind'], nwp['Vwind']) * r2d + 180
nwp['WS'] = wspeed
nwp['WD'] = wdir
if 'Temp' in nwp.keys():
nwp['Temperature'] = nwp['Temp']
del nwp['Temp']
return nwp
def stack_by_sample(self, t, data, lats, longs, path_nwp, nwp_model, projects, variables, predictions):
timestep = 60
x = dict()
y = dict()
x_3d = dict()
file_name = os.path.join(path_nwp, f"{nwp_model}_{t.strftime('%d%m%y')}.pickle")
if os.path.exists(file_name):
nwps = joblib.load(file_name)
for project in projects:
preds = predictions[project['_id']]
hor = preds.columns[-1] + timestep
p_dates = [t + pd.DateOffset(minutes=hor)]
preds = preds.loc[t].to_frame().T
dates_pred = [t + pd.DateOffset(minutes=h) for h in preds.columns]
pred = pd.DataFrame(preds.values.ravel(), index=dates_pred, columns=[project['_id']])
data_temp = pd.concat([data[project['_id']].iloc[np.where(data.index < t)].to_frame(), pred])
project_id = project['_id'] # It's the project name, the park's name
x[project_id] = pd.DataFrame()
y[project_id] = pd.DataFrame()
x_3d[project_id] = np.array([])
areas = project['static_data']['areas']
if isinstance(areas, list):
for date in p_dates:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
try:
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample(date, nwp, nwp_prev, nwp_next, lats[project_id],
longs[project_id], project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data_temp.loc[(date - pd.DateOffset(hours=1))].values
inp['Obs_lag2'] = data_temp.loc[(date - pd.DateOffset(hours=2))].values
if not inp.isnull().any(axis=1).values and not np.isnan(data.loc[date, project_id]):
x[project_id] = pd.concat([x[project_id], inp])
x_3d[project_id] = stack_2d_dense(x_3d[project_id], inp_cnn, False)
y[project_id] = pd.concat([y[project_id], pd.DataFrame(data.loc[date, project_id],
columns=['target'],
index=[date])])
except Exception:
continue
else:
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_country(date, nwp, nwp_prev, nwp_next,
lats[project['_id']],
longs[project['_id']],
project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data_temp.loc[(date - pd.DateOffset(hours=1)), project_id].values
inp['Obs_lag2'] = data_temp.loc[(date - pd.DateOffset(hours=2)), project_id].values
if not inp.isnull().any(axis=1).values and not np.isnan(data.loc[date, project_id]):
x[project['_id']] = pd.concat([x[project['_id']], inp])
x_3d[project['_id']] = stack_2d_dense(x_3d[project['_id']], inp_cnn, False)
y[project['_id']] = pd.concat(
[y[project['_id']], pd.DataFrame(data.loc[date, project['_id']],
columns=['target'], index=[date])])
except Exception:
continue
print(t.strftime('%d%m%y%H%M'), ' extracted')
for project in projects:
if len(x_3d[project['_id']].shape) == 3:
x_3d[project['_id']] = x_3d[project['_id']][np.newaxis, :, :, :]
return x, y, x_3d, t.strftime('%d%m%y%H%M')
def stack_daily_nwps(self, t, data, lats, longs, path_nwp, nwp_model, projects, variables):
x = dict()
y = dict()
x_3d = dict()
file_name = os.path.join(path_nwp, f"{nwp_model}_{t.strftime('%d%m%y')}.pickle")
if os.path.exists(file_name):
nwps = joblib.load(file_name)
for project in projects:
if project['static_data']['horizon'] == 'day_ahead':
p_dates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H')
else:
p_dates = pd.date_range(t + pd.DateOffset(hours=1), t + pd.DateOffset(hours=24), freq='H')
project_id = project['_id'] # It's the project name, the park's name
x[project_id] = pd.DataFrame()
y[project_id] = pd.DataFrame()
x_3d[project_id] = np.array([])
areas = project['static_data']['areas']
if isinstance(areas, list):
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample(date, nwp, nwp_prev, nwp_next, lats[project_id],
longs[project_id], project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data.loc[(date - pd.DateOffset(hours=1)), project_id]
inp['Obs_lag2'] = data.loc[(date - pd.DateOffset(hours=2)), project_id]
if not self.is_for_test:
inp['Obs_lag1'] = inp['Obs_lag1'] + np.random.normal(0, 0.05) * inp['Obs_lag1']
inp['Obs_lag2'] = inp['Obs_lag2'] + np.random.normal(0, 0.05) * inp['Obs_lag2']
if not inp.isnull().any(axis=1).values and not np.isnan(data.loc[date, project_id]):
x[project_id] = pd.concat([x[project_id], inp])
x_3d[project_id] = stack_2d_dense(x_3d[project_id], inp_cnn, False)
y[project_id] = pd.concat([y[project_id], pd.DataFrame(data.loc[date, project_id],
columns=['target'],
index=[date])])
except Exception:
continue
else:
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_country(date, nwp, nwp_prev, nwp_next,
lats[project['_id']],
longs[project['_id']],
project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data.loc[(date - pd.DateOffset(hours=1)), project_id]
inp['Obs_lag2'] = data.loc[(date - pd.DateOffset(hours=2)), project_id]
if not inp.isnull().any(axis=1).values and not np.isnan(data.loc[date, project_id]):
x[project['_id']] = pd.concat([x[project['_id']], inp])
x_3d[project['_id']] = stack_2d_dense(x_3d[project['_id']], inp_cnn, False)
y[project['_id']] = pd.concat(
[y[project['_id']], pd.DataFrame(data.loc[date, project['_id']],
columns=['target'], index=[date])])
except Exception:
continue
print(t.strftime('%d%m%y%H%M'), ' extracted')
return x, y, x_3d, t.strftime('%d%m%y%H%M')
def stack_daily_nwps_rabbitmq(self, t, path_nwp, nwp_model, project, variables):
x = dict()
x_3d = dict()
nwps = project['nwp']
p_dates = pd.date_range(t, t + pd.DateOffset(days=3) - pd.DateOffset(hours=1), freq='H')
project_id = project['_id'] # It's the project name, the park's name
x[project_id] = pd.DataFrame()
x_3d[project_id] = np.array([])
areas = project['static_data']['areas']
if isinstance(areas, list):
for date in p_dates:
try:
date_nwp = date.strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_rabbitmq(date, nwp, nwp_prev, nwp_next, project['static_data']['type'])
x[project_id] = pd.concat([x[project_id], inp])
x_3d[project_id] = stack_2d_dense(x_3d[project_id], inp_cnn, False)
except Exception:
continue
else:
for date in p_dates:
try:
date_nwp = date.strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
date = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_country(date, nwp, nwp_prev, nwp_next,
lats[project['_id']],
longs[project['_id']],
project['static_data']['type'])
x[project['_id']] = pd.concat([x[project['_id']], inp])
x_3d[project['_id']] = stack_2d_dense(x_3d[project['_id']], inp_cnn, False)
except Exception:
continue
print(t.strftime('%d%m%y%H%M'), ' extracted')
return x, x_3d, t.strftime('%d%m%y%H%M')
def stack_daily_nwps_online(self, t, data, lats, longs, path_nwp, nwp_model, projects, variables):
x = dict()
x_3d = dict()
file_name = os.path.join(path_nwp, f"{nwp_model}_{t.strftime('%d%m%y')}.pickle")
if os.path.exists(file_name):
nwps = joblib.load(file_name)
for project in projects:
if project['static_data']['horizon'] == 'day_ahead':
p_dates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H')
else:
p_dates = pd.date_range(t + pd.DateOffset(hours=1), t + pd.DateOffset(hours=24), freq='15min')
project_id = project['_id'] # It's the project name, the park's name
x[project_id] = pd.DataFrame()
x_3d[project_id] = np.array([])
areas = project['static_data']['areas']
if isinstance(areas, list):
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - | pd.DateOffset(hours=1) | pandas.DateOffset |
import logging
import re
import tempfile
from os.path import exists, join
from unittest.mock import MagicMock, patch
import netCDF4 as nc
import numpy as np
import numpy.testing as npt
import packaging.version
import pandas as pd
import pytest
import xarray as xr
from scmdata import ScmRun
from scmdata.netcdf import _get_xr_dataset_to_write, nc_to_run, run_to_nc
from scmdata.testing import assert_scmdf_almost_equal
def test_run_to_nc(scm_run):
with tempfile.TemporaryDirectory() as tempdir:
out_fname = join(tempdir, "out.nc")
run_to_nc(scm_run, out_fname, dimensions=("scenario",))
assert exists(out_fname)
ds = nc.Dataset(out_fname)
assert ds.dimensions["time"].size == len(scm_run.time_points)
assert ds.dimensions["scenario"].size == 2
assert ds.variables["scenario"][0] == "a_scenario"
assert ds.variables["scenario"][1] == "a_scenario2"
npt.assert_allclose(
ds.variables["Primary_Energy"][0, :],
scm_run.filter(variable="Primary Energy", scenario="a_scenario").values[0],
)
npt.assert_allclose(
ds.variables["Primary_Energy"][1, :],
scm_run.filter(variable="Primary Energy", scenario="a_scenario2").values[0],
)
npt.assert_allclose(
ds.variables["Primary_Energy__Coal"][0, :],
scm_run.filter(
variable="Primary Energy|Coal", scenario="a_scenario"
).values[0],
)
@pytest.mark.parametrize(
"v", ["primary energy", "Primary Energy", "Primary Energy|Coal|Test"],
)
def test_run_to_nc_case(scm_run, v):
with tempfile.TemporaryDirectory() as tempdir:
out_fname = join(tempdir, "out.nc")
scm_run = scm_run.filter(variable="Primary Energy")
scm_run["variable"] = v
run_to_nc(scm_run, out_fname, dimensions=("scenario",))
res = nc_to_run(scm_run.__class__, out_fname)
assert res.get_unique_meta("variable", True) == v
@pytest.mark.parametrize("ch", "!@#$%^&*()~`+={}]<>,;:'\".")
@pytest.mark.parametrize("start_with_weird", (True, False))
def test_run_to_nc_weird_name(scm_run, ch, start_with_weird):
with tempfile.TemporaryDirectory() as tempdir:
out_fname = join(tempdir, "out.nc")
scm_run = scm_run.filter(variable="Primary Energy")
variable = scm_run.get_unique_meta("variable", True)
if start_with_weird:
variable = ch + " " + variable
else:
variable = variable + " " + ch
scm_run["variable"] = variable
if start_with_weird:
error_msg = re.escape("NetCDF: Name contains illegal characters")
with pytest.raises(RuntimeError, match=error_msg):
run_to_nc(scm_run, out_fname, dimensions=("scenario",))
else:
run_to_nc(scm_run, out_fname, dimensions=("scenario",))
res = nc_to_run(scm_run.__class__, out_fname)
assert res.get_unique_meta("variable", True) == variable
@pytest.mark.parametrize("ch", ("|", " ", " "))
def test_run_to_nc_special_character_pass(scm_run, ch):
with tempfile.TemporaryDirectory() as tempdir:
out_fname = join(tempdir, "out.nc")
scm_run = scm_run.filter(variable="Primary Energy")
variable = scm_run.get_unique_meta("variable", True)
variable = ch + variable
scm_run["variable"] = variable
run_to_nc(scm_run, out_fname, dimensions=("scenario",))
res = nc_to_run(scm_run.__class__, out_fname)
assert res.get_unique_meta("variable", True) == variable
def test_run_to_nc_4d(scm_run, tmpdir):
df = scm_run.timeseries().reset_index()
df["climate_model"] = "base_m"
df["run_id"] = 1
big_df = [df]
for climate_model in ["abc_m", "def_m", "ghi_m"]:
for run_id in range(10):
new_df = df.copy()
new_df["run_id"] = run_id
new_df["climate_model"] = climate_model
big_df.append(new_df)
scm_run = scm_run.__class__(pd.concat(big_df).reset_index(drop=True))
out_fname = join(tmpdir, "out.nc")
run_to_nc(scm_run, out_fname, dimensions=("scenario", "climate_model", "run_id"))
assert exists(out_fname)
ds = nc.Dataset(out_fname)
assert ds.dimensions["time"].size == len(scm_run.time_points)
assert ds.dimensions["scenario"].size == 2
assert ds.dimensions["climate_model"].size == 4
assert ds.dimensions["run_id"].size == 10
assert ds.variables["scenario"][0] == "a_scenario"
assert ds.variables["scenario"][1] == "a_scenario2"
assert ds.variables["climate_model"][0] == "abc_m"
assert ds.variables["climate_model"][1] == "base_m"
assert ds.variables["climate_model"][2] == "def_m"
assert ds.variables["climate_model"][3] == "ghi_m"
npt.assert_array_equal(ds.variables["run_id"][:], range(10))
# remove as order doesn't really matter unless I misunderstand something?
# assert ds.variables["Primary_Energy"].shape == (2, 4, 10, 3)
# assert ds.variables["Primary_Energy__Coal"].shape == (2, 4, 10, 3)
def test_run_to_nc_nan_dimension_error(scm_run, tmpdir):
scm_run["run_id"] = np.nan
out_fname = join(tmpdir, "out.nc")
with pytest.raises(AssertionError, match="nan in dimension: `run_id`"):
run_to_nc(scm_run, out_fname, dimensions=("scenario", "run_id"))
@pytest.mark.parametrize(
"dimensions",
(
("scenario", "variable"),
("scenario",),
("scenario", "time"),
("scenario", "variable", "time"),
),
)
def test_nc_to_run(scm_run, dimensions):
with tempfile.TemporaryDirectory() as tempdir:
out_fname = join(tempdir, "out.nc")
run_to_nc(scm_run, out_fname, dimensions=dimensions)
assert exists(out_fname)
run_read = nc_to_run(scm_run.__class__, out_fname)
assert isinstance(run_read, scm_run.__class__)
assert_scmdf_almost_equal(scm_run, run_read, check_ts_names=False)
def test_nc_to_run_4d(scm_run):
df = scm_run.timeseries()
val_cols = df.columns.tolist()
df = df.reset_index()
df["climate_model"] = "base_m"
df["run_id"] = 1
df.loc[:, val_cols] = np.random.rand(df.shape[0], len(val_cols))
big_df = [df]
for climate_model in ["abc_m", "def_m", "ghi_m"]:
for run_id in range(10):
new_df = df.copy()
new_df["run_id"] = run_id
new_df["climate_model"] = climate_model
new_df.loc[:, val_cols] = np.random.rand(df.shape[0], len(val_cols))
big_df.append(new_df)
scm_run = scm_run.__class__(pd.concat(big_df).reset_index(drop=True))
with tempfile.TemporaryDirectory() as tempdir:
out_fname = join(tempdir, "out.nc")
run_to_nc(
scm_run, out_fname, dimensions=("scenario", "climate_model", "run_id")
)
assert exists(out_fname)
run_read = nc_to_run(scm_run.__class__, out_fname)
assert isinstance(run_read, scm_run.__class__)
assert_scmdf_almost_equal(scm_run, run_read, check_ts_names=False)
def test_nc_to_run_with_extras_sparsity(scm_run):
df = scm_run.timeseries()
val_cols = df.columns.tolist()
df = df.reset_index()
df["climate_model"] = "base_m"
df["run_id"] = 1
df.loc[:, val_cols] = np.random.rand(df.shape[0], len(val_cols))
big_df = [df]
for climate_model in ["abc_m", "def_m", "ghi_m"]:
for run_id in range(10):
new_df = df.copy()
new_df["run_id"] = run_id
new_df["climate_model"] = climate_model
new_df.loc[:, val_cols] = np.random.rand(df.shape[0], len(val_cols))
big_df.append(new_df)
scm_run = scm_run.__class__( | pd.concat(big_df) | pandas.concat |
import pandas as pd
import os
def read_file(filename:str,return_type:str='dataframe'):
from .file_utils import FileHandler
file:FileHandler = FileHandler(filename)
return file.get_data(return_type=return_type)
# def calculate_top_marginal_roi(data_frame,colname:str,top_n:int=10):
# lookup_cols = ['gross','budget']
# lookup_cols.extend(colnames)
# missing_cols = set(lookup_cols).intersection(set(data_frame.columns)).difference(lookup_cols)
# if(missing_cols != {}):
# raise Error(f"Required columns {' '.join(missing_cols)} not in input data")
# sub_frame = movies[lookup_cols].groupby(colnames).sum()
# sub_frame['marginal_roi'] = (sub_frame['gross'] - sub_frame['budget'])/sub_frame['budget']
# return sub_frame.nlargest(top_n,'marginal_roi')
def calculate_top_gross_profit(data_frame,colnames:list,top_n:int=10,metric:str='sum'):
lookup_cols = ['gross','budget']
lookup_cols.extend(colnames)
if(metric=='sum'):
sub_frame = data_frame[lookup_cols].groupby(colnames).sum()
sub_frame['gross_profit'] = sub_frame['gross'] - sub_frame['budget']
if(metric=='mean'):
data_frame['gross_profit'] = data_frame['gross'] - data_frame['budget']
lookup_cols.append('gross_profit')
sub_frame = data_frame[lookup_cols].groupby(colnames).mean()
return sub_frame.nlargest(top_n,'gross_profit')
def union_actor_columns(movies):
sub_frame = movies[['actor_1_name','actor_2_name','actor_3_name','budget','gross']]
actor_1 = sub_frame[['actor_1_name','budget','gross']].rename(columns={'actor_1_name':'actor_name'})
actor_2 = sub_frame[['actor_2_name','budget','gross']].rename(columns={'actor_2_name':'actor_name'})
actor_3 = sub_frame[['actor_3_name','budget','gross']].rename(columns={'actor_3_name':'actor_name'})
return | pd.concat([actor_1,actor_2,actor_3]) | pandas.concat |
# coding: utf-8
from functools import wraps
from fastcache import clru_cache
from collections import Iterable
from datetime import datetime as pdDateTime
from FactorLib.data_source.trade_calendar import tc
from xlrd.xldate import xldate_as_datetime
import pandas as pd
import numpy as np
# 日期字符串(20120202)转成datetime(timestamp),如果不是日期字符串,则返回None
def DateStr2Datetime(date_str):
try:
return pdDateTime(int(date_str[0:4]),int(date_str[4:6]),int(date_str[6:8]))
except:
return None
# datetime(timestamp)转成日期字符串(20120202)
def Datetime2DateStr(date):
Year = date.year
Month = date.month
Day = date.day
if Month<10:
Month = '0'+str(Month)
else:
Month = str(Month)
if Day<10:
Day = '0'+str(Day)
else:
Day = str(Day)
return str(Year)+Month+Day
# datetime(timestamp)转成数字日期(20120202)
def Datetime2IntDate(date):
Year = date.year
Month = date.month
Day = date.day
return Year * 10000 + Month * 100 + Day
@clru_cache()
def IntDate2Datetime(date: int):
return pd.to_datetime(str(date))
# 日期字符串(20120202)转成datetime(timestamp),如果不是日期字符串,则返回None
def DateStr2Datetime(date_str):
try:
return pdDateTime(int(date_str[0:4]),int(date_str[4:6]),int(date_str[6:8]))
except:
return None
# matlab格式的日期
def Datetime2MatlabDatetime(dates):
if isinstance(dates, Iterable):
return ((np.array(dates, dtype='datetime64') - np.datetime64('1970-01-01T00:00:00')) /
np.timedelta64(1, 'D')).astype('int32')
else:
return int((np.datetime64(dates) - np.datetime64('1970-01-01T00:00:00')) /
np.timedelta64(1, 'D'))
# matlab格式转datetime
def MatlabDatetime2Datetime(dates):
return pd.to_datetime(dates, unit='D')
# excel数字格式转成datetime
def ExcelDatetime2Datetime(dates):
datetimes = [xldate_as_datetime(x, 0) for x in dates]
return pd.to_datetime(datetimes)
def DateRange2Dates(func):
"""
函数装饰器。
把func中的时间参数(start_date, end_date, dates)都转成dates。
"""
@wraps(func)
def wrapper(*args, **kwargs):
start = kwargs.get('start_date')
end = kwargs.get('end_date')
dates = kwargs.get('dates')
d = tc.get_trade_days(start, end, retstr=None)
if dates is not None:
try:
dates = pd.DatetimeIndex(dates)
except:
dates = | pd.DatetimeIndex([dates]) | pandas.DatetimeIndex |
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# import seaborn as sns
from aif360.datasets import AdultDataset
from aif360.datasets import GermanDataset
from aif360.datasets import MEPSDataset19
from torch.utils.data import Dataset
from sklearn.model_selection import train_test_split
from aif360.datasets import BinaryLabelDataset
from aif360.metrics import ClassificationMetric
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
def Adult_dataset(name_prot = 'sex'):
dataset_orig = AdultDataset(protected_attribute_names=['sex'],
privileged_classes= [['Male']],
features_to_keep=['age', 'education-num'])
privileged_groups = [{'sex': 1}]
unprivileged_groups = [{'sex': 0}]
data, _ = dataset_orig.convert_to_dataframe()
data.rename(columns={'income-per-year':'labels'}, inplace = True)
data.reset_index(inplace = True, drop = True)
sensitive = data[name_prot]
output = dataset_orig.labels
atribute = data.drop('labels', axis = 1, inplace = False)
atribute.drop(name_prot, axis = 1, inplace = True)
return data, atribute, sensitive, output, privileged_groups, unprivileged_groups
def german_dataset_age(name_prot=['age']):
dataset_orig = GermanDataset(
protected_attribute_names = name_prot,
privileged_classes=[lambda x: x >= 25],
features_to_drop=['personal_status', 'sex']
)
privileged_groups = [{'age': 1}]
unprivileged_groups = [{'age': 0}]
data, _ = dataset_orig.convert_to_dataframe()
data.rename(columns={'credit':'labels'}, inplace = True)
sensitive = data[name_prot]
output = data['labels']
output.replace((1,2),(0,1),inplace = True)
atribute = data.drop('labels', axis = 1, inplace = False)
atribute.drop(name_prot, axis = 1, inplace = True)
return data, atribute, sensitive, output, privileged_groups, unprivileged_groups
def german_dataset_sex(name_prot=['sex']):
dataset_orig = GermanDataset(
protected_attribute_names = name_prot,
features_to_drop=['personal_status', 'age']
)
privileged_groups = [{'sex': 1}]
unprivileged_groups = [{'sex': 0}]
data, _ = dataset_orig.convert_to_dataframe()
data.rename(columns={'credit':'labels'}, inplace = True)
sensitive = data[name_prot]
output = data['labels']
output.replace((1,2),(0,1),inplace = True)
atribute = data.drop('labels', axis = 1, inplace = False)
atribute.drop(name_prot, axis = 1, inplace = True)
return data, atribute, sensitive, output, privileged_groups, unprivileged_groups
def medical_dataset(name_prot = 'RACE'):
dataset_orig = MEPSDataset19()
privileged_groups = [{'RACE': 1}]
unprivileged_groups = [{'RACE': 0}]
data, _ = dataset_orig.convert_to_dataframe()
data.reset_index(inplace = True, drop = True)
data.rename(columns={'UTILIZATION':'labels'}, inplace = True)
sensitive = data[name_prot]
atribute = data.drop(name_prot, axis = 1, inplace = False)
atribute.drop(['labels'], axis =1, inplace =True)
output = data['labels']
return data, atribute, sensitive, output, privileged_groups, unprivileged_groups
def Readmission_dataset():
folder_name = os.path.join('datasets_raw','readmission.csv')
data = pd.read_csv(folder_name)
data.drop(['ID','readmitDAYS'], axis = 1, inplace = True)
data.rename(columns={'readmitBIN':'labels'}, inplace = True)
sensitive = data['FEMALE']
output = data['labels']
atribute = data.drop(['labels','FEMALE'], axis = 1)
pr_gr = [{'FEMALE': 0}]
un_gr = [{'FEMALE': 1}]
return data, atribute, sensitive, output, pr_gr, un_gr
def format_datasets(data, atribute, sensitive, output, out_name = "labels", sens_name = "sex", test_s = 0.15, val_s = 0.15):
data_train, data_test_all = train_test_split(data, test_size = test_s + val_s, random_state = 30)
data_val, data_test = train_test_split(data_test_all, test_size = test_s/(test_s + val_s), random_state = 30)
sensitive_train = data_train[sens_name]
sensitive_val = data_val[sens_name]
sensitive_test = data_test[sens_name]
output_train = data_train[out_name]
output_val = data_val[out_name]
output_test = data_test[out_name]
atribute_train = data_train.drop([out_name, sens_name], axis = 1, inplace=False)
atribute_val = data_val.drop([out_name, sens_name], axis = 1, inplace=False)
atribute_test = data_test.drop([out_name, sens_name], axis = 1, inplace=False)
return data_train, data_test, data_val, atribute_train, atribute_val, atribute_test, sensitive_train, sensitive_val, sensitive_test, output_train, output_val, output_test
def test(dataset_val, dataset_test,
model, y_val, y_test, A_val, A_test, thresh,
model_AIF, k, dataloader_val, dataloader_test, protected, unprivileged_groups, privileged_groups):
protected = [protected]
bld_val = BinaryLabelDataset(df = dataset_val, label_names = ['labels'],
protected_attribute_names=protected)
bld_test = BinaryLabelDataset(df = dataset_test, label_names = ['labels'],
protected_attribute_names=protected)
if np.isin(k ,model_AIF):
y_val_pred_prob_val = model.predict_proba(bld_val)
A_prob_val = 0
y_val_pred_prob_test = model.predict_proba(bld_test)
A_prob_test = 0
else:
y_val_pred_prob_val, A_prob_val = model.predict_proba(dataloader_val)
y_val_pred_prob_test, A_prob_test = model.predict_proba(dataloader_test)
def metrics_form(y_val_pred_prob, y_test, A_prob, A_test, bld, dataset):
metric_arrs = np.empty([0,8])
if np.isin(k ,model_AIF):
y_val_pred = (y_val_pred_prob > thresh).astype(np.float64)
else:
y_val_pred = (y_val_pred_prob > thresh).astype(np.float64)
# A_pred = (A_prob > thresh).astype(np.float64)
metric_arrs = np.append(metric_arrs, roc_auc_score(y_test, y_val_pred_prob))
# print("y {}".format(roc_auc_score(y_test, y_val_pred_prob)))
metric_arrs = np.append(metric_arrs, accuracy_score(y_test, y_val_pred))
if np.isin(k,model_AIF):
metric_arrs = np.append(metric_arrs, 0)
else:
# metric_arrs = np.append(metric_arrs, roc_auc_score(A_test, A_prob))
metric_arrs = np.append(metric_arrs, 0)
# print("A {}".format(roc_auc_score(A_test, A_prob)))
dataset_pred = dataset.copy()
dataset_pred.labels = y_val_pred
bld2 = BinaryLabelDataset(df = dataset_pred, label_names = ['labels'], protected_attribute_names = protected)
metric = ClassificationMetric(
bld, bld2,
unprivileged_groups = unprivileged_groups,
privileged_groups = privileged_groups)
metric_arrs = np.append(metric_arrs, ((metric.true_positive_rate() + metric.true_negative_rate()) / 2))
metric_arrs = np.append(metric_arrs, np.abs(metric.average_odds_difference()))
metric_arrs = np.append(metric_arrs, metric.disparate_impact())
metric_arrs = np.append(metric_arrs, np.abs(metric.statistical_parity_difference()))
metric_arrs = np.append(metric_arrs, np.abs(metric.equal_opportunity_difference()))
return metric_arrs
metric_val = metrics_form(y_val_pred_prob_val, y_val, A_prob_val, A_val, bld_val, dataset_val)
metric_test = metrics_form(y_val_pred_prob_test, y_test, A_prob_test, A_test, bld_test, dataset_test)
return metric_val, metric_test
class Dataset_format(Dataset):
def __init__(self, atribute, sensitive, output):
self.atribute = atribute.values
self.sensitive = sensitive.values
self.output = output.values
def __len__(self):
return len(self.atribute)
def __getitem__(self, idx):
return self.atribute[idx], self.output[idx], self.sensitive[idx]
def Pareto_optimal(dataset, FAIR = True):
def identify_pareto(scores):
# Count number of items
population_size = scores.shape[0]
# Create a NumPy index for scores on the pareto front (zero indexed)
population_ids = np.arange(population_size)
# Create a starting list of items on the Pareto front
# All items start off as being labelled as on the Parteo front
pareto_front = np.ones(population_size, dtype=bool)
# Loop through each item. This will then be compared with all other items
for i in range(population_size):
# Loop through all other items
for j in range(population_size):
# Check if our 'i' pint is dominated by out 'j' point
if all(scores[j] >= scores[i]) and any(scores[j] > scores[i]):
# j dominates i. Label 'i' point as not on Pareto front
pareto_front[i] = 0
# Stop further comparisons with 'i' (no more comparisons needed)
break
# Return ids of scenarios on pareto front
return population_ids[pareto_front]
points = pd.DataFrame()
for i in dataset.index.unique():
score = dataset[dataset.index == i].values.copy()
if FAIR == True:
score[:,1] = 100 - score[:,1]
population_ids = identify_pareto(score)
points = points.append(dataset[dataset.index == i].iloc[population_ids,[2,3]])
score = points.values.copy()
if FAIR == True:
score[:,1] = 100 - score[:,1]
population_ids = identify_pareto(score)
pareto_optimal = points.iloc[population_ids,:]
return pareto_optimal, points
def Pareto_optimal_total(dataset, FAIR = True, name = "proba"):
def identify_pareto(scores):
# Count number of items
population_size = scores.shape[0]
# Create a NumPy index for scores on the pareto front (zero indexed)
population_ids = np.arange(population_size)
# Create a starting list of items on the Pareto front
# All items start off as being labelled as on the Parteo front
pareto_front = np.ones(population_size, dtype=bool)
# Loop through each item. This will then be compared with all other items
for i in range(population_size):
# Loop through all other items
for j in range(population_size):
# Check if our 'i' pint is dominated by out 'j' point
if all(scores[j] >= scores[i]) and any(scores[j] > scores[i]):
# j dominates i. Label 'i' point as not on Pareto front
pareto_front[i] = 0
# Stop further comparisons with 'i' (no more comparisons needed)
break
# Return ids of scenarios on pareto front
return population_ids[pareto_front]
points = pd.DataFrame()
for i in dataset.index.unique():
score = dataset[dataset.index == i].values.copy()
if FAIR == True:
score[:,1] = 100 - score[:,1]
score[:,2] = 100 - score[:,2]
score[:,3] = 100 - score[:,3]
population_ids = identify_pareto(score)
points = points.append(dataset[dataset.index == i].iloc[population_ids,[4,5,6,7]])
score = points.values.copy()
if FAIR == True:
score[:,1] = 100 - score[:,1]
score[:,2] = 100 - score[:,2]
score[:,3] = 100 - score[:,3]
population_ids = identify_pareto(score)
pareto_optimal = points.iloc[population_ids,:]
pareto_optimal.to_excel("{}.xlsx".format(name))
return pareto_optimal
def plot_Pareto_fronts(PO_points_AOD, PO_points_ASPD, PO_points_AEOD, upper_bound = 0.1, lower_bound = -0.002, name = "Readmission"):
dict_marker = {"PR":'o', "DI-NN":'v', "DI-RF":'^', "Reweighing-NN":'>', "Reweighing-RF":'<', "FAD":'8',
'FAD-prob':'s', "FAIR-scalar":'p', 'FAIR-betaREP':'P', "FAIR-Bernoulli":"*", "FAIR-betaSF":"h"}
dict_color = {"PR":'b', "DI-NN":'g', "DI-RF":'r', "Reweighing-NN":'c', "Reweighing-RF":'m', "FAD":'y',
'FAD-prob':'k', "FAIR-scalar":'brown', 'FAIR-betaREP':'teal', "FAIR-Bernoulli":"blueviolet", "FAIR-betaSF":"crimson"}
size = 100
figure1 = plt.figure(figsize=(9, 12))
PO_points_AOD['labels'] = PO_points_AOD.index
ax1 = plt.subplot(311)
for k,d in PO_points_AOD.groupby('labels'):
if k == "FAD-prob":
continue
ax1.scatter(d.iloc[:,1], d.iloc[:,0], label=k, c=dict_color[k], marker = dict_marker[k], s=size)
# ax1.set_ylim(0.5,1)
ax1.set_xlim(lower_bound, upper_bound)
# ax1.set_xlim(0,model1.time_control[-1])
ax1.set_ylabel('AUC$_y$', fontweight="bold")
ax1.set_xlabel("AOD", fontweight="bold")
ax1.grid()
ax1.legend(loc = 'lower right')
PO_points_ASPD['labels'] = PO_points_ASPD.index
ax2 = plt.subplot(312)
for k,d in PO_points_ASPD.groupby('labels'):
if k == "FAD-prob":
continue
ax2.scatter(d.iloc[:,1], d.iloc[:,0], label=k, c=dict_color[k], marker = dict_marker[k], s=size)
# ax2.set_ylim(0.5,1)
ax2.set_xlim(lower_bound, upper_bound)
# ax1.set_xlim(0,model1.time_control[-1])
ax2.set_ylabel('AUC$_y$', fontweight="bold")
ax2.set_xlabel("ASD", fontweight="bold")
ax2.grid()
ax2.legend(loc = 'lower right')
PO_points_AEOD['labels'] = PO_points_AEOD.index
ax3 = plt.subplot(313)
for k,d in PO_points_AEOD.groupby('labels'):
if k == "FAD-prob":
continue
ax3.scatter(d.iloc[:,1], d.iloc[:,0], label=k, c=dict_color[k], marker = dict_marker[k], s=size)
# ax3.set_ylim(0.5,1)
ax3.set_xlim(lower_bound, upper_bound)
# ax1.set_xlim(0,model1.time_control[-1])
ax3.set_ylabel('AUC$_y$', fontweight="bold")
ax3.set_xlabel("AEOD", fontweight="bold")
ax3.grid()
ax3.legend(loc = 'lower right')
plt.setp([a.get_xticklabels() for a in [ax1, ax2]], visible=False)
plt.savefig('{}.png'.format(name))
def plot_AUC_Y_AUC_A(name):
figure2 = plt.figure(figsize=(9, 8))
points = pd.read_excel("Results/Ger_age.xls", index_col=0)
ax1 = plt.subplot(211)
ax1.plot(points[points.index == 'FAIR-scalar']["alpha"], points[points.index == 'FAIR-scalar'].iloc[:,0], label = "AUC$_y$")
ax1.plot(points[points.index == 'FAIR-scalar']["alpha"], points[points.index == 'FAIR-scalar'].iloc[:,2], label = "AUC$_s$")
plt.xscale("log")
ax1.set_ylabel('AUC', fontweight="bold")
ax1.set_title("German age", fontweight="bold")
# ax1.set_xlabel("alpha", fontweight="bold")
ax1.grid()
ax1.set_xlim(0, 1000)
ax1.legend()
points = pd.read_excel("Results/ger_sex.xlsx", index_col=0)
ax2 = plt.subplot(212)
ax2.plot(points[points.index == 'FAIR-scalar']["alpha"], points[points.index == 'FAIR-scalar'].iloc[:,0], label = "AUC$_y$")
ax2.plot(points[points.index == 'FAIR-scalar']["alpha"], points[points.index == 'FAIR-scalar'].iloc[:,2], label = "AUC$_s$")
plt.xscale("log")
ax2.set_ylabel('AUC', fontweight="bold")
ax2.set_title("German sex", fontweight="bold")
ax2.set_xlabel(r'$\alpha$', fontweight="bold")
ax2.grid()
ax2.set_xlim(0, 1000)
ax2.legend()
plt.setp([a.get_xticklabels() for a in [ax1]], visible=False)
plt.savefig('{}.png'.format(name))
if __name__ == "__main__":
col_AUC_y_val = 0
col_AUC_A_val = 7
add = 4
aaa = np.logspace(-2, np.log10(5), num = 8)
points = pd.read_excel("Results/readmission.xls", index_col=0)
po_Read_AOD, po_Read_AOD_all = Pareto_optimal(points.iloc[:,[col_AUC_y_val,col_AUC_y_val+add,col_AUC_A_val,col_AUC_A_val+add]], FAIR=True)
po_Read_total = Pareto_optimal_total(points.iloc[:,[0, 4, 5, 6, 7, 11, 12, 13]], FAIR=True, name = "Results/Readmission_PO")
points = pd.read_excel("Results/Adult.xls", index_col=0)
po_Adult_AOD, po_Adult_AOD_all = Pareto_optimal(points.iloc[:,[col_AUC_y_val,col_AUC_y_val+add,col_AUC_A_val,col_AUC_A_val+add]], FAIR=True)
po_Adult_total = Pareto_optimal_total(points.iloc[:,[0, 4, 5, 6, 7, 11, 12, 13]], FAIR=True, name = "Results/Adult_PO")
points = pd.read_excel("Results/Ger_age.xls", index_col=0)
po_Ger_age_AOD, po_Ger_age_AOD_all = Pareto_optimal(points.iloc[:,[col_AUC_y_val,col_AUC_y_val+add,col_AUC_A_val,col_AUC_A_val+add]], FAIR=True)
po_Ger_age_total = Pareto_optimal_total(points.iloc[:,[0, 4, 5, 6, 7, 11, 12, 13]], FAIR=True, name = "Results/Ger_age_PO")
points = pd.read_excel("Results/ger_sex.xlsx", index_col=0)
po_Ger_sex_AOD, po_Ger_sex_AOD_all = Pareto_optimal(points.iloc[:,[col_AUC_y_val,col_AUC_y_val+add,col_AUC_A_val,col_AUC_A_val+add]], FAIR=True)
po_Ger_sex_total = Pareto_optimal_total(points.iloc[:,[0, 4, 5, 6, 7, 11, 12, 13]], FAIR=True, name = "Results/Ger_sex_PO")
points = pd.read_excel("Results/MEPS19.xls", index_col=0)
po_MEPS19_AOD, po_MEPS19_AOD_all = Pareto_optimal(points.iloc[:,[col_AUC_y_val,col_AUC_y_val+add,col_AUC_A_val,col_AUC_A_val+add]], FAIR=True)
po_MEPS19_total = Pareto_optimal_total(points.iloc[:,[0, 4, 5, 6, 7, 11, 12, 13]], FAIR=True, name = "Results/MEPS19_PO")
add = 5
points = pd.read_excel("Results/readmission.xls", index_col=0)
po_Read_ASPD, po_Read_ASPD_all = Pareto_optimal(points.iloc[:,[col_AUC_y_val,col_AUC_y_val+add,col_AUC_A_val,col_AUC_A_val+add]], FAIR=True)
points = pd.read_excel("Results/Adult.xls", index_col=0)
po_Adult_ASPD, po_Adult_ASPD_all = Pareto_optimal(points.iloc[:,[col_AUC_y_val,col_AUC_y_val+add,col_AUC_A_val,col_AUC_A_val+add]], FAIR=True)
points = pd.read_excel("Results/Ger_age.xls", index_col=0)
po_Ger_age_ASPD, po_Ger_age_ASPD_all = Pareto_optimal(points.iloc[:,[col_AUC_y_val,col_AUC_y_val+add,col_AUC_A_val,col_AUC_A_val+add]], FAIR=True)
points = pd.read_excel("Results/ger_sex.xlsx", index_col=0)
po_Ger_sex_ASPD, po_Ger_sex_ASPD_all = Pareto_optimal(points.iloc[:,[col_AUC_y_val,col_AUC_y_val+add,col_AUC_A_val,col_AUC_A_val+add]], FAIR=True)
points = pd.read_excel("Results/MEPS19.xls", index_col=0)
po_MEPS19_ASPD, po_MEPS19_ASPD_all = Pareto_optimal(points.iloc[:,[col_AUC_y_val,col_AUC_y_val+add,col_AUC_A_val,col_AUC_A_val+add]], FAIR=True)
add = 6
points = pd.read_excel("Results/readmission.xls", index_col=0)
po_Read_AEOD, po_Read_AEOD_all = Pareto_optimal(points.iloc[:,[col_AUC_y_val,col_AUC_y_val+add,col_AUC_A_val,col_AUC_A_val+add]], FAIR=True)
points = pd.read_excel("Results/Adult.xls", index_col=0)
po_Adult_AEOD, po_Adult_AEOD_all = Pareto_optimal(points.iloc[:,[col_AUC_y_val,col_AUC_y_val+add,col_AUC_A_val,col_AUC_A_val+add]], FAIR=True)
points = pd.read_excel("Results/Ger_age.xls", index_col=0)
po_Ger_age_AEOD, po_Ger_age_AEOD_all = Pareto_optimal(points.iloc[:,[col_AUC_y_val,col_AUC_y_val+add,col_AUC_A_val,col_AUC_A_val+add]], FAIR=True)
points = | pd.read_excel("Results/ger_sex.xlsx", index_col=0) | pandas.read_excel |
# https://projects.datacamp.com/projects/441
# A Visual History of Nobel Prize Winners
## Task 1
# Loading in required libraries
import pandas as pd
import seaborn as sns
import numpy as np
import os as os
# Reading in the Nobel Prize data
fullnobelearly = os.path.abspath(os.path.join('dc','441_nobel_prize_winners','datasets', 'nobel.csv'))
nobel = | pd.read_csv(fullnobelearly) | pandas.read_csv |
# -*- coding: UTF-8 -*-
from __future__ import division
import re
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# 思路分析:
# # 问题: 这里是要求各个国家的GDP和Energy Supply之类的情况.
# # 数据源:
# ## Energy Indicators.xls: 国家及地区名: Energy Supply和Energy Supply每人;
# ## world_bank.csv: 国家及地区名: 历年GDP;
# ## scimagojr-3.xlsx: 国家及地区名: Citations相关数据;
# # 关键点:
# # 由于3个数据源之间的主键(Country Name)不一样, 所以需要对其做基本的数据清洗.
# # 数据清洗只是需要按题目要求, 实际上, 下列Top 15 Rank的国家最为重要, 因为Question 3~13都是基本围绕Top 15.
# # 清洗后, 相join得出最终需要处理的数据源.
# Question 1
# ----------------------------------------
# Quiz Question: Load the energy data from the file Energy Indicators.xls, which is a list of indicators
# of energy supply and renewable electricity production from the United Nations for the year 2013,
# and should be put into a DataFrame with the variable name of energy.
#
# Keep in mind that this is an Excel file, and not a comma separated values file. Also,
# make sure to exclude the footer and header information from the datafile. The first two columns are unneccessary,
# so you should get rid of them, and you should change the column labels so that the columns are:
# ['Country', 'Energy Supply', 'Energy Supply per Capita', '% Renewable]
#
# Convert Energy Supply to gigajoules (there are 1,000,000 gigajoules in a petajoule).
# For all countries which have missing data (e.g. data with "...") make sure this is reflected as np.NaN values.
# Rename the following list of countries (for use in later questions):
# "Republic of Korea": "South Korea",
# "United States of America": "United States",
# "United Kingdom of Great Britain and Northern Ireland": "United Kingdom",
# "China, Hong Kong Special Administrative Region": "Hong Kong"
# There are also several countries with parenthesis in their name. Be sure to remove these,
# e.g. 'Bolivia (Plurinational State of)' should be 'Bolivia'.
#
# Next, load the GDP data from the file world_bank.csv, which is a csv containing countries' GDP from 1960 to 2015
# from World Bank. Call this DataFrame GDP.
#
# Make sure to skip the header, and rename the following list of countries:
# "Korea, Rep.": "South Korea",
# "Iran, Islamic Rep.": "Iran",
# "Hong Kong SAR, China": "Hong Kong"
#
# Finally, load the Sciamgo Journal and Country Rank data for Energy Engineering and Power Technology from the file
# scimagojr-3.xlsx, which ranks countries based on their journal contributions in the aforementioned area.
# Call this DataFrame ScimEn.
# Join the three datasets: GDP, Energy, and ScimEn into a new dataset (using the intersection of country names).
# Use only the last 10 years (2006-2015) of GDP data and only the top 15 countries
# by Scimagojr 'Rank' (Rank 1 through 15).
#
# The index of this DataFrame should be the name of the country, and the columns should be
# ['Rank', 'Documents', 'Citable documents', 'Citations', 'Self-citations', 'Citations per document', 'H index',
# 'Energy Supply', 'Energy Supply per Capita', '% Renewable',
# '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015'].
# This function should return a DataFrame with 20 columns and 15 entries.
def get_energy(_debug=False):
if _debug: print('\nget_energy()')
# Load data
# 1. skip header and footer; 2. NaN data is '...'
df1 = pd.read_excel("data/Energy Indicators.xls", skiprows=17, skipfooter=38, na_values="...")
if _debug: print('Type: {}'.format(type(df1))) # dataframe
# Correct column names
df1 = df1.drop(df1.columns[[0, 1]], axis=1) # remove useless columns
df1.rename(columns={df1.columns[0]: 'Country'}, inplace=True)
df1.rename(columns={df1.columns[1]: 'Energy Supply'}, inplace=True)
df1.rename(columns={df1.columns[2]: 'Energy Supply per Capita'}, inplace=True)
df1.rename(columns={df1.columns[3]: '% Renewable'}, inplace=True)
if _debug: print('df1 Columns: {}'.format(df1.columns))
# Clean data
rename_country1 = {
"Republic of Korea": "South Korea",
"United States of America": "United States",
"United Kingdom of Great Britain and Northern Ireland": "United Kingdom",
"China, Hong Kong Special Administrative Region": "Hong Kong"}
df1['Energy Supply'] *= 1000000
# 理论上DataFrame不应该做循环处理 -> 用apply, 参考assignment 4, question 1
country = pd.Series(len(df1)) # to set value
for i in range(len(df1)):
row = df1.loc[i, :]
# Rename country name:
# Step 1: remove number. e.g. Greenland7 -> Greenland
country[i] = re.sub("\d", "", row['Country']) # 正则表达式替换
# Step 2: remove (), break the row and only keep index=0
country[i] = country[i].split(' (')[0]
# Step 3: replace name by rename_country
if country[i] in rename_country1.keys():
country[i] = rename_country1.get(country[i])
if _debug: print('{} -> {}'.format(row['Country'], country[i]))
df1['Country'] = country
df1.set_index('Country', inplace=True)
if _debug:
print('df1 Columns: {}'.format(df1.columns))
print('\nHong Kong:\n{}'.format(df1.loc['Hong Kong']))
# print(list(df1.index))
return df1
def get_gdp(_debug=False):
if _debug: print('\nget_gdp()')
# Load data
df2 = pd.read_csv('data/world_bank.csv', skiprows=4, encoding="utf8")
if _debug: print('df2 Columns: {}'.format(df2.columns))
# Only needs country name and last 10 years data
df2 = df2[['Country Name', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014',
'2015']] # Here is [[]]
df2.rename(columns={'Country Name': 'Country'}, inplace=True)
if _debug: print('df2 Columns: {}'.format(df2.columns))
# Clean data
rename_country2 = {"Korea, Rep.": "South Korea",
"Iran, Islamic Rep.": "Iran",
"Hong Kong SAR, China": "Hong Kong"}
country = pd.Series(len(df2)) # to set value
for i in range(len(df2)):
row = df2.iloc[i, :]
# replace name by rename_country
country[i] = row['Country']
if country[i] in rename_country2.keys():
country[i] = rename_country2.get(country[i])
if _debug: print('{} -> {}'.format(row['Country'], country[i]))
df2['Country'] = country
df2.set_index('Country', inplace=True)
if _debug:
print('df2 Columns: {}'.format(df2.columns))
print('South Korea:\n{}'.format(df2.loc['South Korea']))
# print(list(df2.index))
return df2
def get_rank(_debug=False):
if _debug: print('\nget_rank()')
# Load data
df3 = pd.read_excel("data/scimagojr-3.xlsx")
if _debug: print('df3 Columns: {}'.format(df3.columns))
df3.set_index('Country', inplace=True)
if _debug:
print('df3 Columns: {}'.format(df3.columns))
# print(list(df3.index))
return df3
def answer_one(_debug=False):
energy = get_energy(_debug)
GDP = get_gdp(_debug)
ScimEn = get_rank(_debug)
ScimEn = ScimEn[ScimEn['Rank'] < 16] # top 15
# left join by the index
df = pd.merge(ScimEn, energy, how='inner', left_index=True, right_index=True)
df = pd.merge(df, GDP, how='inner', left_index=True, right_index=True)
if _debug:
print('\nanswer_one()')
print('df Length: {}'.format(len(df)))
print('df Columns: {}'.format(df.columns))
print('Compare: {}'.format(df.columns == ['Rank', 'Documents', 'Citable documents', 'Citations', 'Self-citations',
'Citations per document', 'H index', 'Energy Supply',
'Energy Supply per Capita', '% Renewable', '2006', '2007', '2008',
'2009', '2010', '2011', '2012', '2013', '2014', '2015']))
# print(df)
return df
print('\nQuestion 1')
df = answer_one(True)
print('\ncountry:\n{}'.format(df))
print('\ndtypes:\n{}'.format(df.dtypes))
print('\nshape: {}'.format(df.shape))
# shape: (15, 20)
# Question 2
# ----------------------------------------
# Quiz Question: The previous question joined three datasets then reduced this to just the top 15 entries.
# When you joined the datasets, but before you reduced this to the top 15 items, how many entries did you lose?
# This function should return a single number.
# 其实这一题和Top 15无关, 直接求全集和子集之间的差便可.
def answer_two():
energy = get_energy()
GDP = get_gdp()
ScimEn = get_rank()
print('Length: energy={}, GDP={}, ScimEn={}'.format(len(energy), len(GDP), len(ScimEn)))
# Length: energy = 227, GDP = 264, ScimEn = 191
# outer join by index
df_inner = pd.merge(energy, GDP, how='inner', left_index=True, right_index=True)
df_inner = pd.merge(df_inner, ScimEn, how='inner', left_index=True, right_index=True)
df_outer = | pd.merge(energy, GDP, how='outer', left_index=True, right_index=True) | pandas.merge |
import pandas as pd
import numpy as np
from src.configs import *
from src.features.transform import categorical_to_ordinal
import collections
class HousePriceData:
'''
Load House Price data for Kaggle competition
'''
def __init__(self, train_path, test_path):
self.trainset = pd.read_csv(train_path)
self.testset = pd.read_csv(test_path)
self.target = self.trainset["SalePrice"]
self.workset = None
self.train_id = self.trainset["Id"]
self.test_id = self.testset["Id"]
# Keep track of feature to use to train in class
# Don'T really like that, but can't find a better way for now
self.original_feature_name = list(self.trainset)
self.created_feature = list()
self.ignored_feature = list()
self.usable_feature_name = list()
self._create_join_workset()
self._replace_significant_nan_values()
self._replace_all_nan_values()
self._change_col_type()
self._apply_feature_engineering()
def _replace_significant_nan_values(self, filler="MISSING"):
for col in COL_WITH_NAN_SIGNIFICATION:
self.workset[col] = self.workset[col].fillna(filler)
self.update_train_and_test_set(self.workset)
def get_usable_feature_name(self):
self.usable_feature_name = self.original_feature_name
for feature_name in self.created_feature:
self.usable_feature_name.append(feature_name)
for feature_name in self.ignored_feature:
try:
self.usable_feature_name.remove(feature_name)
except Exception as e:
pass
return self.usable_feature_name
def add_created_features(self, feature_names):
if isinstance(feature_names, str):
self.created_feature.append(feature_names)
elif isinstance(feature_names, collections.Iterable):
for i in feature_names:
self.created_feature.append(i)
else:
raise ValueError('Not valid feature_names type. Should be str or Iterable')
def add_ignore_features(self, feature_names):
if isinstance(feature_names, str):
self.ignored_feature.append(feature_names)
elif isinstance(feature_names, collections.Iterable):
for i in feature_names:
self.ignored_feature.append(i)
else:
raise ValueError('Not valid feature_names type. Should be str or Iterable')
def _replace_all_nan_values(self, filler="NO"):
self.workset = self.workset.fillna(filler)
self.update_train_and_test_set(self.workset)
def _create_join_workset(self):
self.workset = | pd.concat([self.trainset[ORIGINAL_FEATURE_COLS], self.testset[ORIGINAL_FEATURE_COLS]], axis=0) | pandas.concat |
"""
Tasks
-------
Search and transform jsonable structures, specifically to make it 'easy' to make tabular/csv output for other consumers.
Example
~~~~~~~~~~~~~
*give me a list of all the fields called 'id' in this stupid, gnarly
thing*
>>> Q('id',gnarly_data)
['id1','id2','id3']
Observations:
---------------------
1) 'simple data structures' exist and are common. They are tedious
to search.
2) The DOM is another nested / treeish structure, and jQuery selector is
a good tool for that.
3a) R, Numpy, Excel and other analysis tools want 'tabular' data. These
analyses are valuable and worth doing.
3b) Dot/Graphviz, NetworkX, and some other analyses *like* treeish/dicty
things, and those analyses are also worth doing!
3c) Some analyses are best done using 'one-off' and custom code in C, Python,
or another 'real' programming language.
4) Arbitrary transforms are tedious and error prone. SQL is one solution,
XSLT is another,
5) the XPATH/XML/XSLT family is.... not universally loved :) They are
very complete, and the completeness can make simple cases... gross.
6) For really complicated data structures, we can write one-off code. Getting
80% of the way is mostly okay. There will always have to be programmers
in the loop.
7) Re-inventing SQL is probably a failure mode. So is reinventing XPATH, XSLT
and the like. Be wary of mission creep! Re-use when possible (e.g., can
we put the thing into a DOM using
8) If the interface is good, people can improve performance later.
Simplifying
---------------
1) Assuming 'jsonable' structures
2) keys are strings or stringlike. Python allows any hashable to be a key.
for now, we pretend that doesn't happen.
3) assumes most dicts are 'well behaved'. DAG, no cycles!
4) assume that if people want really specialized transforms, they can do it
themselves.
"""
from __future__ import print_function
from collections import namedtuple
import csv
import itertools
from itertools import product
from operator import attrgetter as aget, itemgetter as iget
import operator
import sys
from pandas.compat import map, u, callable, Counter
import pandas.compat as compat
## note 'url' appears multiple places and not all extensions have same struct
ex1 = {
'name': 'Gregg',
'extensions': [
{'id':'hello',
'url':'url1'},
{'id':'gbye',
'url':'url2',
'more': dict(url='url3')},
]
}
## much longer example
ex2 = {u('metadata'): {u('accessibilities'): [{u('name'): u('accessibility.tabfocus'),
u('value'): 7},
{u('name'): u('accessibility.mouse_focuses_formcontrol'), u('value'): False},
{u('name'): u('accessibility.browsewithcaret'), u('value'): False},
{u('name'): u('accessibility.win32.force_disabled'), u('value'): False},
{u('name'): u('accessibility.typeaheadfind.startlinksonly'), u('value'): False},
{u('name'): u('accessibility.usebrailledisplay'), u('value'): u('')},
{u('name'): u('accessibility.typeaheadfind.timeout'), u('value'): 5000},
{u('name'): u('accessibility.typeaheadfind.enabletimeout'), u('value'): True},
{u('name'): u('accessibility.tabfocus_applies_to_xul'), u('value'): False},
{u('name'): u('accessibility.typeaheadfind.flashBar'), u('value'): 1},
{u('name'): u('accessibility.typeaheadfind.autostart'), | u('value') | pandas.compat.u |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for dm_c19_modelling.evaluation.dataset_factory."""
from unittest import mock
from absl.testing import absltest
from dm_c19_modelling.evaluation import constants
from dm_c19_modelling.evaluation import dataset_factory
import numpy as np
import pandas as pd
_DEFAULT_ARGS = {"directory": "", "dataset_name": "", "creation_date": "",
"cadence": 1}
def _get_raw_dataset():
dates = ["2020-05-01",
"2020-05-02",
"2020-05-03",
"2020-05-04",
"2020-05-05",
"2020-05-06",
"2020-05-07",
"2020-05-08",
"2020-05-09"]
sites = ["site_1", "site_2"]
df = pd.DataFrame({
constants.DATE: np.repeat(dates, len(sites)),
constants.SITE_ID: np.tile(sites, len(dates)),
"new_deceased": np.random.randint(0, 5,
len(sites) * len(dates)),
"new_confirmed": np.random.randint(0, 9,
len(sites) * len(dates)),
"feature_1": np.random.rand(len(sites) * len(dates)),
"feature_2": np.random.rand(len(sites) * len(dates)),
"feature_3": np.random.rand(len(sites) * len(dates)),
"feature_4": np.random.rand(len(sites) * len(dates))
})
df[constants.DATE] = | pd.to_datetime(df[constants.DATE]) | pandas.to_datetime |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas.core.groupby
import pandas as pd
from pandas.core.dtypes.common import is_list_like
import ray
from .utils import _map_partitions
from .utils import _inherit_docstrings
@_inherit_docstrings(pandas.core.groupby.DataFrameGroupBy)
class DataFrameGroupBy(object):
def __init__(self, df, by, axis, level, as_index, sort, group_keys,
squeeze, **kwargs):
self._columns = df.columns
self._index = df.index
self._axis = axis
self._row_metadata = df._row_metadata
self._col_metadata = df._col_metadata
if axis == 0:
partitions = [column for column in df._block_partitions.T]
self._index_grouped = pd.Series(self._index, index=self._index)\
.groupby(by=by, sort=sort)
else:
partitions = [row for row in df._block_partitions]
self._index_grouped = pd.Series(self._columns, index=self._index)\
.groupby(by=by, sort=sort)
self._keys_and_values = [(k, v)
for k, v in self._index_grouped]
self._grouped_partitions = \
list(zip(*(groupby._submit(args=(by,
axis,
level,
as_index,
sort,
group_keys,
squeeze) + tuple(part.tolist()),
num_return_vals=len(self))
for part in partitions)))
@property
def _iter(self):
from .dataframe import DataFrame
if self._axis == 0:
return [(self._keys_and_values[i][0],
DataFrame(col_partitions=part,
columns=self._columns,
index=self._keys_and_values[i][1].index,
row_metadata=self._row_metadata[
self._keys_and_values[i][1].index],
col_metadata=self._col_metadata))
for i, part in enumerate(self._grouped_partitions)]
else:
return [(self._keys_and_values[i][0],
DataFrame(row_partitions=part,
columns=self._keys_and_values[i][1].index,
index=self._index,
row_metadata=self._row_metadata,
col_metadata=self._col_metadata[
self._keys_and_values[i][1].index]))
for i, part in enumerate(self._grouped_partitions)]
@property
def ngroups(self):
return len(self)
def skew(self, **kwargs):
return self._apply_agg_function(lambda df: df.skew(**kwargs))
def ffill(self, limit=None):
return self._apply_agg_function(lambda df: df.ffill(limit=limit))
def sem(self, ddof=1):
return self._apply_agg_function(lambda df: df.sem(ddof=ddof))
def mean(self, *args, **kwargs):
return self._apply_agg_function(lambda df: df.mean(*args, **kwargs))
def any(self):
return self._apply_agg_function(lambda df: df.any())
@property
def plot(self):
raise NotImplementedError("Not Yet implemented.")
def ohlc(self):
raise NotImplementedError("Not Yet implemented.")
def __bytes__(self):
raise NotImplementedError("Not Yet implemented.")
@property
def tshift(self):
raise NotImplementedError("Not Yet implemented.")
@property
def groups(self):
return {k: pd.Index(v) for k, v in self._keys_and_values}
def min(self, **kwargs):
return self._apply_agg_function(lambda df: df.min(**kwargs))
def idxmax(self):
return self._apply_agg_function(lambda df: df.idxmax())
@property
def ndim(self):
return self._index_grouped.ndim
def shift(self, periods=1, freq=None, axis=0):
raise NotImplementedError("Not Yet implemented.")
def nth(self, n, dropna=None):
raise NotImplementedError("Not Yet implemented.")
def cumsum(self, axis=0, *args, **kwargs):
return self._apply_agg_function(lambda df: df.cumsum(axis,
*args,
**kwargs))
@property
def indices(self):
return dict(self._keys_and_values)
def pct_change(self):
return self._apply_agg_function(lambda df: df.pct_change())
def filter(self, func, dropna=True, *args, **kwargs):
raise NotImplementedError("Not Yet implemented.")
def cummax(self, axis=0, **kwargs):
return self._apply_agg_function(lambda df: df.cummax(axis=axis,
**kwargs))
def apply(self, func, *args, **kwargs):
return self._apply_df_function(lambda df: df.apply(func,
*args,
**kwargs)) \
if is_list_like(func) \
else self._apply_agg_function(lambda df: df.apply(func,
*args,
**kwargs))
@property
def dtypes(self):
return self._apply_agg_function(lambda df: df.dtypes)
def first(self, **kwargs):
return self._apply_agg_function(lambda df: df.first(offset=0,
**kwargs))
def backfill(self, limit=None):
return self.bfill(limit)
def __getitem__(self, key):
# This operation requires a SeriesGroupBy Object
raise NotImplementedError("Not Yet implemented.")
def cummin(self, axis=0, **kwargs):
return self._apply_agg_function(lambda df: df.cummin(axis=axis,
**kwargs))
def bfill(self, limit=None):
return self._apply_agg_function(lambda df: df.bfill(limit=limit))
def idxmin(self):
return self._apply_agg_function(lambda df: df.idxmin())
def prod(self, **kwargs):
return self._apply_agg_function(lambda df: df.prod(**kwargs))
def std(self, ddof=1, *args, **kwargs):
return self._apply_agg_function(lambda df: df.std(ddof=ddof,
*args, **kwargs))
def aggregate(self, arg, *args, **kwargs):
return self._apply_df_function(lambda df: df.agg(arg,
*args,
**kwargs)) \
if is_list_like(arg) \
else self._apply_agg_function(lambda df: df.agg(arg,
*args,
**kwargs))
def last(self, **kwargs):
return self._apply_df_function(lambda df: df.last(**kwargs))
def mad(self):
return self._apply_agg_function(lambda df: df.mad())
def rank(self):
return self._apply_df_function(lambda df: df.rank())
@property
def corrwith(self):
raise NotImplementedError("Not Yet implemented.")
def pad(self, limit=None):
raise NotImplementedError("Not Yet implemented.")
def max(self, **kwargs):
return self._apply_agg_function(lambda df: df.max(**kwargs))
def var(self, ddof=1, *args, **kwargs):
return self._apply_agg_function(lambda df: df.var(ddof,
*args,
**kwargs))
def get_group(self, name, obj=None):
raise NotImplementedError("Not Yet implemented.")
def __len__(self):
return len(self._keys_and_values)
def all(self):
return self._apply_agg_function(lambda df: df.all())
def size(self):
return self._apply_agg_function(lambda df: df.size)
def sum(self, **kwargs):
return self._apply_agg_function(lambda df:
df.sum(axis=self._axis, **kwargs))
def __unicode__(self):
raise NotImplementedError("Not Yet implemented.")
def describe(self, **kwargs):
raise NotImplementedError("Not Yet implemented.")
def boxplot(self, grouped, subplots=True, column=None, fontsize=None,
rot=0, grid=True, ax=None, figsize=None, layout=None, **kwds):
raise NotImplementedError("Not Yet implemented.")
def ngroup(self, ascending=True):
return self._index_grouped.ngroup(ascending)
def nunique(self, dropna=True):
return self._apply_agg_function(lambda df: df.nunique(dropna))
def resample(self, rule, *args, **kwargs):
raise NotImplementedError("Not Yet implemented.")
def median(self, **kwargs):
return self._apply_agg_function(lambda df: df.median(**kwargs))
def head(self, n=5):
return self._apply_df_function(lambda df: df.head(n))
def cumprod(self, axis=0, *args, **kwargs):
return self._apply_df_function(lambda df: df.cumprod(axis,
*args,
**kwargs))
def __iter__(self):
return self._iter.__iter__()
def agg(self, arg, *args, **kwargs):
def agg_help(df):
if isinstance(df, pd.Series):
return | pd.DataFrame(df) | pandas.DataFrame |
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
print("Python version: " + sys.version)
print("Numpy version: " + np.__version__)
# #find parent directory and import model
# parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parent_dir)
from ..trex_exe import Trex
test = {}
class TestTrex(unittest.TestCase):
"""
Unit tests for T-Rex model.
"""
print("trex unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for trex unit tests.
:return:
"""
pass
# setup the test as needed
# e.g. pandas to open trex qaqc csv
# Read qaqc csv and create pandas DataFrames for inputs and expected outputs
def tearDown(self):
"""
Teardown routine for trex unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_trex_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty trex object
trex_empty = Trex(df_empty, df_empty)
return trex_empty
def test_app_rate_parsing(self):
"""
unittest for function app_rate_testing:
method extracts 1st and maximum from each list in a series of lists of app rates
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([], dtype="object")
result = pd.Series([], dtype="object")
expected_results = [[0.34, 0.78, 2.34], [0.34, 3.54, 2.34]]
try:
trex_empty.app_rates = pd.Series([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]], dtype='object')
# trex_empty.app_rates = ([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]])
# parse app_rates Series of lists
trex_empty.app_rate_parsing()
result = [trex_empty.first_app_rate, trex_empty.max_app_rate]
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_initial(self):
"""
unittest for function conc_initial:
conc_0 = (app_rate * self.frac_act_ing * food_multiplier)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [12.7160, 9.8280, 11.2320]
try:
# specify an app_rates Series (that is a series of lists, each list representing
# a set of application rates for 'a' model simulation)
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='float')
trex_empty.food_multiplier_init_sg = pd.Series([110., 15., 240.], dtype='float')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
for i in range(len(trex_empty.frac_act_ing)):
result[i] = trex_empty.conc_initial(i, trex_empty.app_rates[i][0], trex_empty.food_multiplier_init_sg[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_timestep(self):
"""
unittest for function conc_timestep:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [6.25e-5, 0.039685, 7.8886e-30]
try:
trex_empty.foliar_diss_hlife = pd.Series([.25, 0.75, 0.01], dtype='float')
conc_0 = pd.Series([0.001, 0.1, 10.0])
for i in range(len(conc_0)):
result[i] = trex_empty.conc_timestep(i, conc_0[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_percent_to_frac(self):
"""
unittest for function percent_to_frac:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([.04556, .1034, .9389], dtype='float')
try:
trex_empty.percent_incorp = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.percent_to_frac(trex_empty.percent_incorp)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_inches_to_feet(self):
"""
unittest for function inches_to_feet:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([0.37966, 0.86166, 7.82416], dtype='float')
try:
trex_empty.bandwidth = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.inches_to_feet(trex_empty.bandwidth)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_bird(self):
"""
unittest for function at_bird:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([69.17640, 146.8274, 56.00997], dtype='float')
try:
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
# following variable is unique to at_bird and is thus sent via arg list
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
for i in range(len(trex_empty.aw_bird_sm)):
result[i] = trex_empty.at_bird(i, trex_empty.aw_bird_sm[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_bird1(self):
"""
unittest for function at_bird1; alternative approach using more vectorization:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([69.17640, 146.8274, 56.00997], dtype='float')
try:
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
# for i in range(len(trex_empty.aw_bird_sm)):
# result[i] = trex_empty.at_bird(i, trex_empty.aw_bird_sm[i])
result = trex_empty.at_bird1(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_fi_bird(self):
"""
unittest for function fi_bird:
food_intake = (0.648 * (aw_bird ** 0.651)) / (1 - mf_w_bird)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([4.19728, 22.7780, 59.31724], dtype='float')
try:
#?? 'mf_w_bird_1' is a constant (i.e., not an input whose value changes per model simulation run); thus it should
#?? be specified here as a constant and not a pd.series -- if this is correct then go ahead and change next line
trex_empty.mf_w_bird_1 = pd.Series([0.1, 0.8, 0.9], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.fi_bird(trex_empty.aw_bird_sm, trex_empty.mf_w_bird_1)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_sc_bird(self):
"""
unittest for function sc_bird:
m_s_a_r = ((self.app_rate * self.frac_act_ing) / 128) * self.density * 10000 # maximum seed application rate=application rate*10000
risk_quotient = m_s_a_r / self.noaec_bird
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([6.637969, 77.805, 34.96289, np.nan], dtype='float')
try:
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4], [3.]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.frac_act_ing = pd.Series([0.15, 0.20, 0.34, np.nan], dtype='float')
trex_empty.density = pd.Series([8.33, 7.98, 6.75, np.nan], dtype='float')
trex_empty.noaec_bird = pd.Series([5., 1.25, 12., np.nan], dtype='float')
result = trex_empty.sc_bird()
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_sa_bird_1(self):
"""
# unit test for function sa_bird_1
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm = pd.Series([0.228229, 0.704098, 0.145205], dtype = 'float')
expected_results_md = pd.Series([0.126646, 0.540822, 0.052285], dtype = 'float')
expected_results_lg = pd.Series([0.037707, 0.269804, 0.01199], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='float')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_bird_md = pd.Series([115., 120., 130.], dtype='float')
trex_empty.aw_bird_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reitierate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_bird_1 = 0.1
trex_empty.nagy_bird_coef_sm = 0.02
trex_empty.nagy_bird_coef_md = 0.1
trex_empty.nagy_bird_coef_lg = 1.0
result_sm = trex_empty.sa_bird_1("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_bird_1("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_bird_1("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_bird_2(self):
"""
# unit test for function sa_bird_2
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([0.018832, 0.029030, 0.010483], dtype = 'float')
expected_results_md = pd.Series([2.774856e-3, 6.945353e-3, 1.453192e-3], dtype = 'float')
expected_results_lg =pd.Series([2.001591e-4, 8.602729e-4, 8.66163e-5], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
trex_empty.max_seed_rate = pd.Series([33.19, 20.0, 45.6])
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_bird_md = pd.Series([115., 120., 130.], dtype='float')
trex_empty.aw_bird_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reitierate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.nagy_bird_coef_sm = 0.02
trex_empty.nagy_bird_coef_md = 0.1
trex_empty.nagy_bird_coef_lg = 1.0
result_sm = trex_empty.sa_bird_2("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_bird_2("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_bird_2("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_mamm_1(self):
"""
# unit test for function sa_mamm_1
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([0.022593, 0.555799, 0.010178], dtype = 'float')
expected_results_md = pd.Series([0.019298, 0.460911, 0.00376], dtype = 'float')
expected_results_lg =pd.Series([0.010471, 0.204631, 0.002715], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.ld50_mamm = pd.Series([321., 100., 400.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reitierate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_bird_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sa_mamm_1("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_mamm_1("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_mamm_1("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_mamm_2(self):
"""
# unit test for function sa_mamm_2
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([2.46206e-3, 3.103179e-2, 1.03076e-3], dtype = 'float')
expected_results_md = pd.Series([1.304116e-3, 1.628829e-2, 4.220702e-4], dtype = 'float')
expected_results_lg =pd.Series([1.0592147e-4, 1.24391489e-3, 3.74263186e-5], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
trex_empty.max_seed_rate = pd.Series([33.19, 20.0, 45.6])
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.ld50_mamm = pd.Series([321., 100., 400.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reitierate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_mamm_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sa_mamm_2("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_mamm_2("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_mamm_2("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sc_mamm(self):
"""
# unit test for function sc_mamm
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([2.90089, 15.87995, 8.142130], dtype = 'float')
expected_results_md = pd.Series([2.477926, 13.16889, 3.008207], dtype = 'float')
expected_results_lg =pd.Series([1.344461, 5.846592, 2.172211], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.noael_mamm = pd.Series([2.5, 3.5, 0.5], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reitierate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_mamm_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sc_mamm("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sc_mamm("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sc_mamm("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_ld50_rg_bird(self):
"""
# unit test for function ld50_rg_bird (LD50ft-2 for Row/Band/In-furrow granular birds)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([346.4856, 25.94132, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bg_bird
trex_empty.application_type = pd.Series(['Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'max app rate' per model simulation run
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
trex_empty.row_spacing = pd.Series([20., 32., 50.], dtype = 'float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rg_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
equal_nan=True, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rg_bird1(self):
"""
# unit test for function ld50_rg_bird1 (LD50ft-2 for Row/Band/In-furrow granular birds)
this is a duplicate of the 'test_ld50_rg_bird' method using a more vectorized approach to the
calculations; if desired other routines could be modified similarly
--comparing this method with 'test_ld50_rg_bird' it appears (for this test) that both run in the same time
--but I don't think this would be the case when 100's of model simulation runs are executed (and only a small
--number of the application_types apply to this method; thus I conclude we continue to use the non-vectorized
--approach -- should be revisited when we have a large run to execute
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([346.4856, 25.94132, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bg_bird
trex_empty.application_type = pd.Series(['Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'max app rate' per model simulation run
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
trex_empty.row_spacing = pd.Series([20., 32., 50.], dtype = 'float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rg_bird1(trex_empty.aw_bird_sm)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, equal_nan=True, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bl_bird(self):
"""
# unit test for function ld50_bl_bird (LD50ft-2 for broadcast liquid birds)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([46.19808, 33.77777, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bl_bird
trex_empty.application_type = pd.Series(['Broadcast-Liquid', 'Broadcast-Liquid',
'Non-Broadcast'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bl_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bg_bird(self):
"""
# unit test for function ld50_bg_bird (LD50ft-2 for broadcast granular)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([46.19808, np.nan, 0.4214033], dtype='float')
try:
# following parameter values are unique for ld50_bg_bird
trex_empty.application_type = pd.Series(['Broadcast-Granular', 'Broadcast-Liquid',
'Broadcast-Granular'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bg_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rl_bird(self):
"""
# unit test for function ld50_rl_bird (LD50ft-2 for Row/Band/In-furrow liquid birds)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([np.nan, 2.20701, 0.0363297], dtype='float')
try:
# following parameter values are unique for ld50_bg_bird
trex_empty.application_type = pd.Series(['Broadcast-Granular', 'Row/Band/In-furrow-Liquid',
'Row/Band/In-furrow-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rl_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_mamm(self):
"""
unittest for function at_mamm:
adjusted_toxicity = self.ld50_mamm * ((self.tw_mamm / aw_mamm) ** 0.25)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([705.5036, 529.5517, 830.6143], dtype='float')
try:
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
for i in range(len(trex_empty.ld50_mamm)):
result[i] = trex_empty.at_mamm(i, trex_empty.aw_mamm_sm[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_anoael_mamm(self):
"""
unittest for function anoael_mamm:
adjusted_toxicity = self.noael_mamm * ((self.tw_mamm / aw_mamm) ** 0.25)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([5.49457, 9.62821, 2.403398], dtype='float')
try:
trex_empty.noael_mamm = pd.Series([2.5, 5.0, 1.25], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.anoael_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_fi_mamm(self):
"""
unittest for function fi_mamm:
food_intake = (0.621 * (aw_mamm ** 0.564)) / (1 - mf_w_mamm)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([3.17807, 16.8206, 42.28516], dtype='float')
try:
trex_empty.mf_w_mamm_1 = pd.Series([0.1, 0.8, 0.9], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.fi_mamm(trex_empty.aw_mamm_sm, trex_empty.mf_w_mamm_1)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bl_mamm(self):
"""
# unit test for function ld50_bl_mamm (LD50ft-2 for broadcast liquid)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([4.52983, 9.36547, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bl_mamm
trex_empty.application_type = pd.Series(['Broadcast-Liquid', 'Broadcast-Liquid',
'Non-Broadcast'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
# following parameter values are needed for internal call to "test_at_mamm"
# results from "test_at_mamm" test using these values are [705.5036, 529.5517, 830.6143]
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bl_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='',
verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bg_mamm(self):
"""
# unit test for function ld50_bg_mamm (LD50ft-2 for broadcast granular)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([4.52983, 9.36547, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bl_mamm
trex_empty.application_type = pd.Series(['Broadcast-Granular', 'Broadcast-Granular',
'Broadcast-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
# following parameter values are needed for internal call to "at_mamm"
# results from "test_at_mamm" test using these values are [705.5036, 529.5517, 830.6143]
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bg_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rl_mamm(self):
"""
# unit test for function ld50_rl_mamm (LD50ft-2 for Row/Band/In-furrow liquid mammals)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([np.nan, 0.6119317, 0.0024497], dtype='float')
try:
# following parameter values are unique for ld50_bl_mamm
trex_empty.application_type = pd.Series(['Broadcast-Granular',
'Row/Band/In-furrow-Liquid',
'Row/Band/In-furrow-Liquid',], dtype='object')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
# following parameter values are needed for internal call to "at_mamm"
# results from "test_at_mamm" test using these values are [705.5036, 529.5517, 830.6143]
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
result = trex_empty.ld50_rl_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rg_mamm(self):
"""
# unit test for function ld50_rg_mamm
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([33.9737, 7.192681, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bl_mamm
trex_empty.application_type = pd.Series(['Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Liquid',], dtype='object')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
trex_empty.row_spacing = pd.Series([20., 32., 50.], dtype = 'float')
# following parameter values are needed for internal call to "at_mamm"
# results from "test_at_mamm" test using these values are [705.5036, 529.5517, 830.6143]
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rg_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_eec_diet_max(self):
"""
combined unit test for methods eec_diet_max & eec_diet_timeseries;
* this test calls eec_diet_max, which in turn calls eec_diet_timeseries (which produces
concentration timeseries), which in turn calls conc_initial and conc_timestep
* eec_diet_max processes the timeseries and extracts the maximum values
* this test tests both eec_diet_max & eec_diet_timeseries together (ok, so this violates the exact definition
* of 'unittest', get over it)
* the assertion check is that the maximum values from the timeseries match expectations
* this assumes that for the maximums to be 'as expected' then the timeseries are as well
* note: the 1st application day ('day_out') for the 2nd model simulation run is set to 0 here
* to make sure the timeseries processing works when an application occurs on 1st day of year
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([1.734, 145.3409, 0.702], dtype='float')
num_app_days = pd.Series([], dtype='int')
try:
#specifying 3 different application scenarios of 1, 4, and 2 applications
trex_empty.app_rates = | pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object') | pandas.Series |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.