prompt (string, 19 – 1.03M chars) | completion (string, 4 – 2.12k chars) | api (string, 8 – 90 chars)
---|---|---|
import os
import re
from datetime import datetime
from datetime import timedelta
import openpyxl
import pandas as pd
def max_min(html_text):
pd_html = pd.DataFrame({'index': html_text.index, 'Name': html_text.values})
a = pd_html.drop('index', axis=1)
for i in range(a.shape[0]):
if type(a.Name[i]) == str:
for y in range(10):
if ' ' + str(y) + ':' in a.Name[i]:
a.Name[i] = a.Name[i].replace(' ' + str(y) + ':', ' 0' + str(y) + ':')
else:
continue
if type(a.Name[i]) == str:
if '0000' in a.Name[i]:
a.Name[i] = pd.NA
elif '[' in a.Name[i]:
a.Name[i] = a.Name[i].replace(';', '')
a.Name[i] = datetime.strptime(a.Name[i].replace('[', '').replace(']', ''), '%d.%m.%Y %H:%M:%S:%f')
else:
a.Name[i] = pd.NA
else:
a.Name[i] = pd.NA
a = a.dropna()
print(min(a.Name).strftime("%d.%m.%Y %H:%M:%S:%f"))
print(max(a.Name).strftime("%d.%m.%Y %H:%M:%S:%f"))
# print(begin.strftime("%d.%m.%Y %H:%M:%S:%f"))
# print(end.strftime("%d.%m.%Y %H:%M:%S:%f"))
return min(a.Name), max(a.Name)
def read_xml(html_text, sheet, begin, end):
a = html_text
p = 0
p1 = 0
p2 = 0
p3 = []
for i in range(a.shape[0]):
if type(a.Name[i]) == str:
if '0000' in a.Name[i]:
b = a.Name[i].split(" ")
if int(b[0]) == 10:
a.Name[i - 1] += b[2].strip()
a.Name[i] = pd.NA
elif int(b[0]) == 20:
a.Name[i - 2] += b[2]
a.Name[i] = pd.NA
else:
a.Name[i] = b[2]
if b[2].split(" ")[0:2] == ['00', '04'] and b[1].split(" ")[5:7] == ['06', '01']:
p += 1
if b[2].split(" ")[0:2] == ['00', '60'] and b[1].split(" ")[5:7] == ['06', '01']:
p1 += 1
if b[2].split(" ")[0:2] == ['11', '08'] and b[1].split(" ")[5:7] == ['06', '01']:
p2 += 1
if '01' in b[1]:
aa = [' ']
p3.append(b[1:3])
elif '[' in a.Name[i]:
a.Name[i] = datetime.strptime(a.Name[i].replace('[', '').replace(']', ''), '%d.%m.%Y %H:%M:%S:%f')
print('First stage complete')
x = 0
if sheet == 'Спутник':
delta = timedelta(hours=1)
begin += delta
end += delta
count = 0
datetime_count = 0
for i in range(a.shape[0]):
if type(a.Name[i]) == datetime:
if a.Name[i] < begin or a.Name[i] > end:
a.Name[i] = pd.NA
y = 1
if i + y + 1 < len(a.Name):
while type(a.Name[i + y]) != datetime:
a.Name[i + y] = pd.NA
if i + y + 1 < len(a.Name):
y += 1
else:
break
else:
print('End of list reached')
break
else:
datetime_count += 1
# print('Date ' + str(datetime_count) + ' added')
elif type(a.Name[i]) == str:
a.Name[i] = len(str(a.Name[i]).split(' '))
x += a.Name[i]
count += 1
# print('Data block ' + str(count) + ' added')
# print('Records processed: ' + str(i))
print('Calculation finished')
a = a.dropna(how='all')
a = a.reindex()
print('Reindexing complete')
print(sheet)
print(str(x) + ' bytes')
print(str(count) + ' packets')
print(str(datetime_count) + ' dates')
print(a.shape)
print(begin.strftime("%d.%m.%Y %H:%M:%S:%f"))
print(end.strftime("%d.%m.%Y %H:%M:%S:%f"))
a.to_excel(sheet + ".xlsx")
print("Перепад даления 2 -\t\t\t", p, " раз запрошен")
print("Загазованность служебная -\t", p1, " раз запрошен")
print("GSM канал -\t\t\t\t\t", p2, " раз запрошен")
# for item in p3:
# print(item)
def html_reader(path_text):
if 'csv' in path_text:
html_text = pd.read_csv(path_text)
else:
html_text =
|
pd.read_table(path_text)
|
pandas.read_table
|
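A minimal, hypothetical driver for the helpers above, assuming html_reader returns the parsed table; the path is a placeholder and the call order simply mirrors the function signatures shown.
frame = html_reader('device_log.csv')      # placeholder path; parsed log table
begin, end = max_min(frame)                # overall time window found in the log
read_xml(frame, 'Спутник', begin, end)     # filters to the window, prints stats, writes '<sheet>.xlsx'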
import csv,yaml,os
import pandas as pd
import json
with open('player_map.json','r') as fd:
player_map = json.load(fd)
yaml_list = os.listdir()
def make_reg(temp):
if temp[1].islower():
return temp
temp=temp.split()
reg=temp[0][0]+'.*'+temp[-1]
return reg
def find_player(name,df):
if name in player_map:
k = player_map[name]
player = df.filter(regex=k,axis = 0)
return player.iloc[0]
else:
print(name)
return -1
bats_cluster =
|
pd.read_csv('batsman_cluster.csv',index_col='player_name')
|
pandas.read_csv
|
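A short, hypothetical usage sketch for the helpers above; the player name is illustrative, and find_player relies on player_map holding a regex for each mapped name.
pattern = make_reg('V Kohli')               # abbreviated scorecard name -> regex 'V.*Kohli'
row = find_player('V Kohli', bats_cluster)  # regex-filters the player_name index; prints the name and returns -1 if unmapped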
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pandas.core.frame import DataFrame
ef1 = 'shift-data.xlsx'
ef2 = 'third-shift-data.xlsx'
df_first_shift = pd.read_excel(ef1, sheet_name='first')
df_second_shift = pd.read_excel(ef1, sheet_name='second')
df_third_shift = pd.read_excel(ef2)
df_all =
|
pd.concat([df_first_shift, df_second_shift, df_third_shift])
|
pandas.concat
|
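For reference, a tiny self-contained sketch of the same pd.concat pattern; the toy frames and the ignore_index choice are illustrative, not taken from the row above.
import pandas as pd
a = pd.DataFrame({'hours': [8, 8]})
b = pd.DataFrame({'hours': [6, 7]})
combined = pd.concat([a, b], ignore_index=True)   # stacks rows and renumbers the index 0..n-1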
'''
This file is part of the PSL software.
Copyright 2011-2015 University of Maryland
Copyright 2013-2019 The Regents of the University of California
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
import pandas
from pslpython.partition import Partition
from pslpython.predicate import Predicate
from pslpython.predicate import PredicateError
from tests.base_test import PSLTest
class TestPredicate(PSLTest):
def test_name_normalization(self):
# [(input, expected), ...]
names = [
('a', 'A'),
('foo', 'FOO'),
('Bar', 'BAR'),
('BAZ', 'BAZ'),
('123', '123'),
]
for (input_name, expected_name) in names:
predicate = Predicate(input_name, closed = True, size = 2)
self.assertEqual(predicate.name(), expected_name)
def test_init_args(self):
failing_configs = [
({'raw_name': 'Foo', 'closed': False}, 'No size supplied.'),
({'raw_name': 'Foo', 'closed': False, 'size': -1}, 'Negative size.'),
({'raw_name': 'Foo', 'closed': False, 'size': 0}, 'Zero size.'),
({'raw_name': 'Foo', 'closed': False, 'size': 2, 'arg_types': [Predicate.ArgType.UNIQUE_INT_ID]}, 'Type size mismatch.'),
({'raw_name': 'Foo', 'closed': False, 'size': 1, 'arg_types': ['UniqueIntID']}, 'Non-enum arg type.'),
]
for (args, reason) in failing_configs:
try:
predicate = Predicate(**args)
self.fail('Failed to raise exception on: ' + reason)
except PredicateError as ex:
# Expected
pass
def test_add_record(self):
predicate = Predicate('Foo', closed = True, size = 2)
predicate.add_data_row(Partition.OBSERVATIONS, ['A', 'B'])
predicate.add_data_row(Partition.OBSERVATIONS, ['C', 'D'], 0.5)
predicate.add_data_row(Partition.OBSERVATIONS, [1, 2])
expected = pandas.DataFrame([
['A', 'B', 1.0],
['C', 'D', 0.5],
[1, 2, 1.0],
])
pandas.testing.assert_frame_equal(predicate._data[Partition.OBSERVATIONS], expected)
def test_add_frame(self):
predicate = Predicate('Foo', closed = True, size = 2)
input_data = pandas.DataFrame([
['A', 'B'],
['C', 'D'],
[1, 2],
])
predicate.add_data(Partition.OBSERVATIONS, input_data)
expected = pandas.DataFrame([
['A', 'B', 1.0],
['C', 'D', 1.0],
[1, 2, 1.0],
])
|
pandas.testing.assert_frame_equal(predicate._data[Partition.OBSERVATIONS], expected)
|
pandas.testing.assert_frame_equal
|
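A brief usage sketch distilled from the tests above, using only calls they exercise; it shows that an omitted truth value defaults to 1.0.
from pslpython.partition import Partition
from pslpython.predicate import Predicate
predicate = Predicate('Foo', closed=True, size=2)
predicate.add_data_row(Partition.OBSERVATIONS, ['A', 'B'])       # truth value defaults to 1.0
predicate.add_data_row(Partition.OBSERVATIONS, ['C', 'D'], 0.5)  # explicit truth value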
#!/usr/bin/env python
# coding: utf-8
# # IBM HR Employee Attrition & Performance.
# ## [Please star/upvote in case you find it helpful.]
# In[ ]:
from IPython.display import Image
Image("../../../input/pavansubhasht_ibm-hr-analytics-attrition-dataset/imagesibm/image-logo.png")
# ## CONTENTS ::->
# [ **1 ) Exploratory Data Analysis**](#content1)
# [ **2) Correlation b/w Features**](#content2)
# [** 3) Feature Selection**](#content3)
# [** 4) Preparing Dataset**](#content4)
# [ **5) Modelling**](#content5)
#
# Note that this notebook uses traditional ML algorithms. I have another notebook in which I have used an ANN on the same dataset. To check it out please follow the below link-->
#
# https://www.kaggle.com/rajmehra03/an-introduction-to-ann-keras-with-ibm-hr-dataset/
# [ **6) Conclusions**](#content6)
# <a id="content1"></a>
# ## 1 ) Exploratory Data Analysis
# ## 1.1 ) Importing Various Modules
# In[ ]:
# Ignore the warnings
import warnings
warnings.filterwarnings('always')
warnings.filterwarnings('ignore')
# data visualisation and manipulation
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import style
import seaborn as sns
import missingno as msno
#configure
# sets matplotlib to inline and displays graphs below the corresponding cell.
style.use('fivethirtyeight')
sns.set(style='whitegrid',color_codes=True)
#import the necessary modelling algos.
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
#model selection
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score,precision_score,recall_score,confusion_matrix,roc_curve,roc_auc_score
from sklearn.model_selection import GridSearchCV
from imblearn.over_sampling import SMOTE
#preprocess.
from sklearn.preprocessing import MinMaxScaler,StandardScaler,Imputer,LabelEncoder,OneHotEncoder
# ## 1.2 ) Reading the data from a CSV file
# In[ ]:
df=pd.read_csv(r"../../../input/pavansubhasht_ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv")
# In[ ]:
df.head()
# In[ ]:
df.shape
# In[ ]:
df.columns
# ## 1.3 ) Missing Values Treatment
# In[ ]:
df.info() # no null or 'Nan' values.
# In[ ]:
df.isnull().sum()
# In[ ]:
msno.matrix(df) # just to visualize. one final time.
# ## 1.4 ) The Features and the 'Target'
# In[ ]:
df.columns
# In[ ]:
df.head()
# In all we have 34 features, both categorical and numerical. The target variable is the
# 'Attrition' of the employee, which can be either a Yes or a No. This is what we have to predict.
# **Hence this is a binary classification problem.**
# ## 1.5 ) Univariate Analysis
# In this section I have done the univariate analysis i.e. I have analysed the range or distribution of the values that various features take. To better analyze the results I have plotted various graphs and visualizations wherever necessary. Univariate analysis helps us identify the outliers in the data.
# In[ ]:
df.describe()
# Let us first analyze the various numeric features. To do this we can actually plot a boxplot showing all the numeric features. Also the distplot or a histogram is a reasonable choice in such cases.
# In[ ]:
sns.factorplot(data=df,kind='box',size=10,aspect=3)
# Note that all the features have pretty different scales and so plotting a boxplot is not a good idea. Instead what we can do is plot histograms of various continuously distributed features.
#
# We can also plot a kdeplot showing the distribution of the feature. Below I have plotted a kdeplot for the 'Age' feature.
# Similarly we plot for other numeric features. We can also use a distplot from the seaborn library, which combines a histogram with a kde plot.
# In[ ]:
sns.kdeplot(df['Age'],shade=True,color='#ff4125')
# In[ ]:
sns.distplot(df['Age'])
# Similarly we can do this for all the numerical features. Below I have plotted the subplots for the other features.
# In[ ]:
warnings.filterwarnings('always')
warnings.filterwarnings('ignore')
fig,ax = plt.subplots(5,2, figsize=(9,9))
sns.distplot(df['TotalWorkingYears'], ax = ax[0,0])
sns.distplot(df['MonthlyIncome'], ax = ax[0,1])
sns.distplot(df['YearsAtCompany'], ax = ax[1,0])
sns.distplot(df['DistanceFromHome'], ax = ax[1,1])
sns.distplot(df['YearsInCurrentRole'], ax = ax[2,0])
sns.distplot(df['YearsWithCurrManager'], ax = ax[2,1])
sns.distplot(df['YearsSinceLastPromotion'], ax = ax[3,0])
sns.distplot(df['PercentSalaryHike'], ax = ax[3,1])
sns.distplot(df['YearsSinceLastPromotion'], ax = ax[4,0])
sns.distplot(df['TrainingTimesLastYear'], ax = ax[4,1])
plt.tight_layout()
print()
# Let us now analyze the various categorical features. Note that in these cases the best way is to use a count plot to show the relative count of observations of different categories.
# In[ ]:
cat_df=df.select_dtypes(include='object')
# In[ ]:
cat_df.columns
# In[ ]:
def plot_cat(attr,labels=None):
if(attr=='JobRole'):
sns.factorplot(data=df,kind='count',size=5,aspect=3,x=attr)
return
sns.factorplot(data=df,kind='count',size=5,aspect=1.5,x=attr)
# I have made a function that accepts the name of a column as a string. In our case this string will be the name of the column or attribute which we want to analyze. The function then plots the countplot for that feature, which makes it easier to visualize.
# In[ ]:
plot_cat('Attrition')
# **Note that the number of observations belonging to the 'No' category is way greater than that belonging to 'Yes' category. Hence we have skewed classes and this is a typical example of the 'Imbalanced Classification Problem'. To handle such types of problems we need to use the over-sampling or under-sampling techniques. I shall come back to this point later.**
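# A minimal, commented-out sketch (not run here) of how the class imbalance could later be handled with the SMOTE import from section 1.1; X and y are placeholders for the encoded feature matrix and the binary 'Attrition' target.
# In[ ]:
# sm = SMOTE(random_state=42)
# X_res, y_res = sm.fit_resample(X, y)   # oversamples the minority 'Yes' class until both classes are balanced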
# **Let us now similarly analyze other categorical features.**
# In[ ]:
plot_cat('BusinessTravel')
# The above plot clearly shows that most of the people belong to the 'Travel_Rarely' class. This indicates that most of the people did not have a job which asked them for frequent travelling.
# In[ ]:
plot_cat('OverTime')
# In[ ]:
plot_cat('Department')
# In[ ]:
plot_cat('EducationField')
# In[ ]:
plot_cat('Gender')
# Note that males are present in higher number.
# In[ ]:
plot_cat('JobRole')
# ** Similarly we can continue for other categorical features. **
# **Note that the same function can also be used to better analyze the numeric discrete features like 'Education', 'JobSatisfaction' etc.**
# In[ ]:
# just uncomment the following cell.
# In[ ]:
# num_disc=['Education','EnvironmentSatisfaction','JobInvolvement','JobSatisfaction','WorkLifeBalance','RelationshipSatisfaction','PerformanceRating']
# for i in num_disc:
# plot_cat(i)
# similarly we can interpret these graphs.
# <a id="content2"></a>
# ## 2 ) Correlation b/w Features
#
# In[ ]:
# correlation matrix.
cor_mat= df.corr()
mask = np.array(cor_mat)
mask[np.tril_indices_from(mask)] = False
fig=plt.gcf()
fig.set_size_inches(30,12)
sns.heatmap(data=cor_mat,mask=mask,square=True,annot=True,cbar=True)  # draw the heatmap discussed below
# ###### SOME INFERENCES FROM THE ABOVE HEATMAP
#
# 1. Self-correlation, i.e. of a feature with itself, is equal to 1 as expected.
#
# 2. JobLevel is highly related to Age, as expected, since older employees will generally tend to occupy higher positions in the company.
#
# 3. MonthlyIncome is very strongly related to JobLevel, as expected, since senior employees will definitely earn more.
#
# 4. PerformanceRating is highly related to PercentSalaryHike which is quite obvious.
#
# 5. Also note that TotalWorkingYears is highly related to JobLevel which is expected as senior employees must have worked for a larger span of time.
#
# 6. YearsWithCurrManager is highly related to YearsAtCompany.
#
# 7. YearsAtCompany is related to YearsInCurrentRole.
#
#
# **Note that we could drop some highly correlated features as they add redundancy to the model, but since the correlation is generally quite low, let us keep all the features for now. In the case of highly correlated features we can use something like Principal Component Analysis (PCA) to reduce our feature space.**
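# A minimal, commented-out sketch of how PCA could be applied to the scaled numeric features; 'num_scaled' is a hypothetical array of standardized numeric columns.
# In[ ]:
# from sklearn.decomposition import PCA
# pca = PCA(n_components=0.95)                  # keep enough components to explain 95% of the variance
# num_reduced = pca.fit_transform(num_scaled)   # num_scaled: hypothetical standardized numeric features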
# In[ ]:
df.columns
# <a id="content3"></a>
# ## 3 ) Feature Selection
#
# ## 3.1 ) Plotting the Features against the 'Target' variable.
# #### 3.1.1 ) Age
# Note that Age is a continuous quantity and therefore we can plot it against the Attrition using a boxplot.
# In[ ]:
sns.factorplot(data=df,y='Age',x='Attrition',size=5,aspect=1,kind='box')
# Note that the median as well as the maximum age of the people with 'No' attrition is higher than that of the 'Yes' category. This shows that older people have less tendency to leave the organisation, which makes sense as they may have settled in it.
# #### 3.1.2 ) Department
# Note that both Attrition (the target) and Department are categorical. In such cases a cross-tabulation is the most reasonable way to analyze the trends; it clearly shows the number of observations for each class, which makes the results easier to interpret.
# In[ ]:
df.Department.value_counts()
# In[ ]:
sns.factorplot(data=df,kind='count',x='Attrition',col='Department')
# In[ ]:
pd.crosstab(columns=[df.Attrition],index=[df.Department],margins=True,normalize='index') # set normalize=index to view rowwise %.
# Note that most of the observations correspond to 'No', as we saw previously. About 81% of the people in HR don't want to leave the organisation and only 19% want to leave. Similar conclusions can be drawn for the other departments from the above cross-tabulation.
# #### 3.1.3 ) Gender
# In[ ]:
pd.crosstab(columns=[df.Attrition],index=[df.Gender],margins=True,normalize='index') # set normalize=index to view rowwise %.
# About 85% of females want to stay in the organisation while only 15% want to leave. All in all, about 83% of employees want to stay in the organisation, with only about 16% wanting to leave.
# #### 3.1.4 ) Job Level
# In[ ]:
pd.crosstab(columns=[df.Attrition],index=[df.JobLevel],margins=True,normalize='index') # set normalize=index to view rowwise %.
# People in Joblevel 4 have a very high percent for a 'No' and a low percent for a 'Yes'. Similar inferences can be made for other job levels.
# #### 3.1.5 ) Monthly Income
# In[ ]:
sns.factorplot(data=df,kind='bar',x='Attrition',y='MonthlyIncome')
# Note that the average income for the 'No' class is quite a bit higher, which is expected, as those earning well will generally not be willing to exit the organisation, while employees who are not earning well are more likely to want to change company.
# #### 3.1.6 ) Job Satisfaction
# In[ ]:
sns.factorplot(data=df,kind='count',x='Attrition',col='JobSatisfaction')
# In[ ]:
pd.crosstab(columns=[df.Attrition],index=[df.JobSatisfaction],margins=True,normalize='index') # set normalize=index to view rowwise %.
# This shows an interesting trend: for higher values of job satisfaction (i.e. the more a person is satisfied with their job), a smaller percentage say 'Yes', which makes sense as highly contented workers are less likely to leave the organisation.
# #### 3.1.7 ) Environment Satisfaction
# In[ ]:
pd.crosstab(columns=[df.Attrition],index=[df.EnvironmentSatisfaction],margins=True,normalize='index') # set normalize=index to view rowwise %.
# Again we can notice that the relative percentage of 'No' is higher for people with a higher grade of environment satisfaction, which is expected.
# #### 3.1.8 ) Job Involvement
# In[ ]:
pd.crosstab(columns=[df.Attrition],index=[df.JobInvolvement],margins=True,normalize='index') # set normalize=index to view rowwise %.
# #### 3.1.9 ) Work Life Balance
# In[ ]:
|
pd.crosstab(columns=[df.Attrition],index=[df.WorkLifeBalance],margins=True,normalize='index')
|
pandas.crosstab
|
'''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
mode: python; py-indent-offset: 4; tab-width: 4; coding: utf-8
'''
import unittest
import pprint
from numpy import array
from numpy.testing import assert_array_equal, assert_array_almost_equal
from pandas import DataFrame
from pandas.testing import assert_frame_equal
from sos_trades_core.execution_engine.execution_engine import ExecutionEngine
class TestExtendDict(unittest.TestCase):
"""
Extend dict type for GEMSEO test class
"""
def setUp(self):
self.name = 'EE'
self.pp = pprint.PrettyPrinter(indent=4, compact=True)
def test_01_sosdiscipline_simple_dict(self):
exec_eng = ExecutionEngine(self.name)
exec_eng.ns_manager.add_ns('ns_test', self.name)
mod_list = 'sos_trades_core.sos_wrapping.test_discs.disc5dict.Disc5'
disc5_builder = exec_eng.factory.get_builder_from_module(
'Disc5', mod_list)
exec_eng.factory.set_builders_to_coupling_builder(disc5_builder)
exec_eng.configure()
# additional test to verify that values_in are used
values_dict = {}
values_dict['EE.z'] = [3., 0.]
values_dict['EE.dict_out'] = {'key1': 0.5, 'key2': 0.5}
exec_eng.dm.set_values_from_dict(values_dict)
exec_eng.execute()
target = {
'EE.z': [
3.0, 0.0], 'EE.dict_out': [
0.5, 0.5], 'EE.h': [
0.75, 0.75]}
res = {}
for key in target:
res[key] = exec_eng.dm.get_value(key)
if target[key] is dict:
self.assertDictEqual(res[key], target[key])
elif target[key] is array:
self.assertListEqual(list(target[key]), list(res[key]))
def test_02_sosdiscipline_simple_dict_and_dataframe(self):
exec_eng = ExecutionEngine(self.name)
exec_eng.ns_manager.add_ns('ns_test', self.name)
mod_list = 'sos_trades_core.sos_wrapping.test_discs.disc4_dict_df.Disc4'
disc4_builder = exec_eng.factory.get_builder_from_module(
'Disc4', mod_list)
exec_eng.factory.set_builders_to_coupling_builder(disc4_builder)
exec_eng.configure()
# -- build input data
values_dict = {}
# built my_dict (private in)
values_dict['EE.Disc4.mydict'] = {'md_1': array([3., 4.])}
# build dict of dataframe (coupling in)
h = {'dataframe': DataFrame(data={'col1': array([0.75, 0.75])})}
values_dict['EE.h'] = h
# store data
exec_eng.dm.set_values_from_dict(values_dict)
# -- exec
exec_eng.execute()
# compare output h (sos_trades format) to reference
rp = exec_eng.root_process.sos_disciplines[0]
z_out, dict_out = rp.get_sosdisc_outputs(["z", "dict_out"])
z_out_target = array([0.75, 1.5])
df_data = {'col1': [1, 2], 'col2': [3, 0.75]}
df =
|
DataFrame(data=df_data)
|
pandas.DataFrame
|
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
# Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
# Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
# reversion test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
"cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
# GH#4532
# smoke tests for valid DateOffsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
ser = tm.box_expected(ser, box_with_array)
offset_cls = getattr(pd.offsets, cls_name)
ser + offset_cls(5)
offset_cls(5) + ser
ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
# GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
tz = tz_aware_fixture
if tz == "US/Pacific":
dates = date_range("2012-11-01", periods=3, tz=tz)
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
expected = DatetimeIndex(
["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
freq="H",
tz=tz,
)
dates = tm.box_expected(dates, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# TODO: sub?
for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
offset = dates + scalar
tm.assert_equal(offset, expected)
offset = scalar + dates
tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# GH#10699
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
# DateOffset relativedelta fastpath
relative_kwargs = [
("years", 2),
("months", 5),
("days", 3),
("hours", 5),
("minutes", 10),
("seconds", 2),
("microseconds", 5),
]
for i, (unit, value) in enumerate(relative_kwargs):
off = DateOffset(**{unit: value})
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
off = DateOffset(**dict(relative_kwargs[: i + 1]))
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
off - vec
# -------------------------------------------------------------
# Non-Tick, Non-RelativeDelta DateOffsets
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
@pytest.mark.parametrize(
"cls_and_kwargs",
[
"YearBegin",
("YearBegin", {"month": 5}),
"YearEnd",
("YearEnd", {"month": 5}),
"MonthBegin",
"MonthEnd",
"SemiMonthEnd",
"SemiMonthBegin",
"Week",
("Week", {"weekday": 3}),
"Week",
("Week", {"weekday": 6}),
"BusinessDay",
"BDay",
"QuarterEnd",
"QuarterBegin",
"CustomBusinessDay",
"CDay",
"CBMonthEnd",
"CBMonthBegin",
"BMonthBegin",
"BMonthEnd",
"BusinessHour",
"BYearBegin",
"BYearEnd",
"BQuarterBegin",
("LastWeekOfMonth", {"weekday": 2}),
(
"FY5253Quarter",
{
"qtr_with_extra_week": 1,
"startingMonth": 1,
"weekday": 2,
"variation": "nearest",
},
),
("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
("WeekOfMonth", {"weekday": 2, "week": 2}),
"Easter",
("DateOffset", {"day": 4}),
("DateOffset", {"month": 5}),
],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [0, 5])
def test_dt64arr_add_sub_DateOffsets(
self, box_with_array, n, normalize, cls_and_kwargs
):
# GH#10699
# assert vectorized operation matches pointwise operations
if isinstance(cls_and_kwargs, tuple):
# If cls_name param is a tuple, then 2nd entry is kwargs for
# the offset constructor
cls_name, kwargs = cls_and_kwargs
else:
cls_name = cls_and_kwargs
kwargs = {}
if n == 0 and cls_name in [
"WeekOfMonth",
"LastWeekOfMonth",
"FY5253Quarter",
"FY5253",
]:
# passing n = 0 is invalid for these offset classes
return
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
offset_cls = getattr(pd.offsets, cls_name)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
offset = offset_cls(n, normalize=normalize, **kwargs)
expected = DatetimeIndex([x + offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + offset)
expected = DatetimeIndex([x - offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - offset)
expected = DatetimeIndex([offset + x for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, offset + vec)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
offset - vec
def test_dt64arr_add_sub_DateOffset(self, box_with_array):
# GH#10699
s = date_range("2000-01-01", "2000-01-31", name="a")
s = tm.box_expected(s, box_with_array)
result = s + DateOffset(years=1)
result2 = DateOffset(years=1) + s
exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
result = s - DateOffset(years=1)
exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-16 00:15:00", tz="US/Central"),
Timestamp("2000-02-16", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-31 00:15:00", tz="US/Central"),
Timestamp("2000-02-29", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
@pytest.mark.parametrize(
"other",
[
np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]),
np.array([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]),
np.array( # matching offsets
[pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]
),
],
)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
@pytest.mark.parametrize("box_other", [True, False])
def test_dt64arr_add_sub_offset_array(
self, tz_naive_fixture, box_with_array, box_other, op, other
):
# GH#18849
# GH#10699 array of offsets
tz = tz_naive_fixture
dti = date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
expected = DatetimeIndex([op(dti[n], other[n]) for n in range(len(dti))])
expected = tm.box_expected(expected, box_with_array)
if box_other:
other = tm.box_expected(other, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
res = op(dtarr, other)
tm.assert_equal(res, expected)
@pytest.mark.parametrize(
"op, offset, exp, exp_freq",
[
(
"__add__",
DateOffset(months=3, days=10),
[
Timestamp("2014-04-11"),
Timestamp("2015-04-11"),
Timestamp("2016-04-11"),
Timestamp("2017-04-11"),
],
None,
),
(
"__add__",
DateOffset(months=3),
[
Timestamp("2014-04-01"),
Timestamp("2015-04-01"),
Timestamp("2016-04-01"),
Timestamp("2017-04-01"),
],
"AS-APR",
),
(
"__sub__",
DateOffset(months=3, days=10),
[
Timestamp("2013-09-21"),
Timestamp("2014-09-21"),
Timestamp("2015-09-21"),
Timestamp("2016-09-21"),
],
None,
),
(
"__sub__",
DateOffset(months=3),
[
Timestamp("2013-10-01"),
Timestamp("2014-10-01"),
Timestamp("2015-10-01"),
Timestamp("2016-10-01"),
],
"AS-OCT",
),
],
)
def test_dti_add_sub_nonzero_mth_offset(
self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array
):
# GH 26258
tz = tz_aware_fixture
date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="AS", tz=tz)
date = tm.box_expected(date, box_with_array, False)
mth = getattr(date, op)
result = mth(offset)
expected = DatetimeIndex(exp, tz=tz)
expected = tm.box_expected(expected, box_with_array, False)
tm.assert_equal(result, expected)
class TestDatetime64OverflowHandling:
# TODO: box + de-duplicate
def test_dt64_overflow_masking(self, box_with_array):
# GH#25317
left = Series([Timestamp("1969-12-31")])
right = Series([NaT])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
expected = TimedeltaIndex([NaT])
expected = tm.box_expected(expected, box_with_array)
result = left - right
tm.assert_equal(result, expected)
def test_dt64_series_arith_overflow(self):
# GH#12534, fixed by GH#19024
dt = Timestamp("1700-01-31")
td = Timedelta("20000 Days")
dti = date_range("1949-09-30", freq="100Y", periods=4)
ser = Series(dti)
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
ser - dt
with pytest.raises(OverflowError, match=msg):
dt - ser
with pytest.raises(OverflowError, match=msg):
ser + td
with pytest.raises(OverflowError, match=msg):
td + ser
ser.iloc[-1] = NaT
expected = Series(
["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
)
res = ser + td
tm.assert_series_equal(res, expected)
res = td + ser
tm.assert_series_equal(res, expected)
ser.iloc[1:] = NaT
expected = Series(["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]")
res = ser - dt
tm.assert_series_equal(res, expected)
res = dt - ser
tm.assert_series_equal(res, -expected)
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
tsneg = Timestamp("1950-01-01")
ts_neg_variants = [
tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype("datetime64[ns]"),
tsneg.to_datetime64().astype("datetime64[D]"),
]
tspos = Timestamp("1980-01-01")
ts_pos_variants = [
tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype("datetime64[ns]"),
tspos.to_datetime64().astype("datetime64[D]"),
]
msg = "Overflow in int64 addition"
for variant in ts_neg_variants:
with pytest.raises(OverflowError, match=msg):
dtimax - variant
expected = Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError, match=msg):
dtimin - variant
def test_datetimeindex_sub_datetimeindex_overflow(self):
# GH#22492, GH#22508
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"])
ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"])
# General tests
expected = Timestamp.max.value - ts_pos[1].value
result = dtimax - ts_pos
assert result[1].value == expected
expected = Timestamp.min.value - ts_neg[1].value
result = dtimin - ts_neg
assert result[1].value == expected
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
dtimax - ts_neg
with pytest.raises(OverflowError, match=msg):
dtimin - ts_pos
# Edge cases
tmin = pd.to_datetime([Timestamp.min])
t1 = tmin + Timedelta.max + Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
t1 - tmin
tmax = pd.to_datetime([Timestamp.max])
t2 = tmax + Timedelta.min - Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
tmax - t2
class TestTimestampSeriesArithmetic:
def test_empty_series_add_sub(self):
# GH#13844
a = Series(dtype="M8[ns]")
b = Series(dtype="m8[ns]")
tm.assert_series_equal(a, a + b)
tm.assert_series_equal(a, a - b)
tm.assert_series_equal(a, b + a)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
b - a
def test_operators_datetimelike(self):
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[
Timestamp("20111230"),
Timestamp("20120101"),
Timestamp("20120103"),
]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[
Timestamp("20111231"),
Timestamp("20120102"),
Timestamp("20120104"),
]
)
dt1 - dt2
dt2 - dt1
        # datetime64 with timedelta
dt1 + td1
td1 + dt1
dt1 - td1
        # timedelta with datetime64
td1 + dt1
dt1 + td1
def test_dt64ser_sub_datetime_dtype(self):
ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
dt = datetime(1993, 6, 22, 13, 30)
ser = Series([ts])
result = pd.to_timedelta(np.abs(ser - dt))
assert result.dtype == "timedelta64[ns]"
# -------------------------------------------------------------
# TODO: This next block of tests came from tests.series.test_operators,
# needs to be de-duplicated and parametrized over `box` classes
def test_operators_datetimelike_invalid(self, all_arithmetic_operators):
        # these are all TypeError ops
op_str = all_arithmetic_operators
def check(get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
op = getattr(get_ser, op_str, None)
# Previously, _validate_for_numeric_binop in core/indexes/base.py
# did this for us.
with pytest.raises(
TypeError, match="operate|[cC]annot|unsupported operand"
):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")]
)
if op_str not in ["__sub__", "__rsub__"]:
check(dt1, dt2)
        # ## datetime64 with timedelta ###
# TODO(jreback) __rsub__ should raise?
if op_str not in ["__add__", "__radd__", "__sub__"]:
check(dt1, td1)
# 8260, 10763
# datetime64 with tz
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
if op_str not in ["__add__", "__radd__", "__sub__", "__rsub__"]:
check(dt2, td2)
def test_sub_single_tz(self):
# GH#12290
s1 = Series([Timestamp("2016-02-10", tz="America/Sao_Paulo")])
s2 = Series([Timestamp("2016-02-08", tz="America/Sao_Paulo")])
result = s1 - s2
expected = Series([Timedelta("2days")])
tm.assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta("-2days")])
tm.assert_series_equal(result, expected)
def test_dt64tz_series_sub_dtitz(self):
# GH#19071 subtracting tzaware DatetimeIndex from tzaware Series
# (with same tz) raises, fixed by #19024
dti = date_range("1999-09-30", periods=10, tz="US/Pacific")
ser = Series(dti)
expected = Series(TimedeltaIndex(["0days"] * 10))
res = dti - ser
tm.assert_series_equal(res, expected)
res = ser - dti
tm.assert_series_equal(res, expected)
def test_sub_datetime_compat(self):
# see GH#14088
s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), NaT])
dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
exp = Series([Timedelta("1 days"), NaT])
tm.assert_series_equal(s - dt, exp)
tm.assert_series_equal(s - Timestamp(dt), exp)
def test_dt64_series_add_mixed_tick_DateOffset(self):
# GH#4532
# operate with pd.offsets
s = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series(
[Timestamp("20130101 9:06:00.005"), Timestamp("20130101 9:07:00.005")]
)
tm.assert_series_equal(result, expected)
def test_datetime64_ops_nat(self):
# GH#11349
datetime_series = Series([NaT, Timestamp("19900315")])
nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
# subtraction
tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + datetime_series
tm.assert_series_equal(
-NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
# addition
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
# -------------------------------------------------------------
# Invalid Operations
# TODO: this block also needs to be de-duplicated and parametrized
@pytest.mark.parametrize(
"dt64_series",
[
Series([Timestamp("19900315"), Timestamp("19900315")]),
Series([NaT, Timestamp("19900315")]),
Series([NaT, NaT], dtype="datetime64[ns]"),
],
)
@pytest.mark.parametrize("one", [1, 1.0, np.array(1)])
def test_dt64_mul_div_numeric_invalid(self, one, dt64_series):
# multiplication
msg = "cannot perform .* with this index type"
with pytest.raises(TypeError, match=msg):
dt64_series * one
with pytest.raises(TypeError, match=msg):
one * dt64_series
# division
with pytest.raises(TypeError, match=msg):
dt64_series / one
with pytest.raises(TypeError, match=msg):
one / dt64_series
# TODO: parametrize over box
def test_dt64_series_add_intlike(self, tz_naive_fixture):
# GH#19123
tz = tz_naive_fixture
dti = DatetimeIndex(["2016-01-02", "2016-02-03", "NaT"], tz=tz)
ser = Series(dti)
other = Series([20, 30, 40], dtype="uint8")
msg = "|".join(
[
"Addition/subtraction of integers and integer-arrays",
"cannot subtract .* from ndarray",
]
)
assert_invalid_addsub_type(ser, 1, msg)
assert_invalid_addsub_type(ser, other, msg)
assert_invalid_addsub_type(ser, np.array(other), msg)
assert_invalid_addsub_type(ser, pd.Index(other), msg)
# -------------------------------------------------------------
# Timezone-Centric Tests
def test_operators_datetimelike_with_timezones(self):
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
assert td2._values.freq is None
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
td1[0] - dt1
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
with pytest.raises(TypeError, match=msg):
td2[0] - dt2
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
msg = "cannot (add|subtract)"
with pytest.raises(TypeError, match=msg):
td1 - dt1
with pytest.raises(TypeError, match=msg):
td2 - dt2
class TestDatetimeIndexArithmetic:
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_addsub_int(self, tz_naive_fixture, one):
# Variants of `one` for #19012
tz = tz_naive_fixture
rng = date_range("2000-01-01 09:00", freq="H", periods=10, tz=tz)
msg = "Addition/subtraction of integers"
with pytest.raises(TypeError, match=msg):
rng + one
with pytest.raises(TypeError, match=msg):
rng += one
with pytest.raises(TypeError, match=msg):
rng - one
with pytest.raises(TypeError, match=msg):
rng -= one
# -------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize("freq", ["H", "D"])
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_tick(self, int_holder, freq):
# GH#19959
dti = date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
msg = "|".join(
["Addition/subtraction of integers", "cannot subtract DatetimeArray from"]
)
assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("freq", ["W", "M", "MS", "Q"])
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_non_tick(self, int_holder, freq):
# GH#19959
dti = date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
msg = "|".join(
["Addition/subtraction of integers", "cannot subtract DatetimeArray from"]
)
assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_no_freq(self, int_holder):
# GH#19959
dti = DatetimeIndex(["2016-01-01", "NaT", "2017-04-05 06:07:08"])
other = int_holder([9, 4, -1])
msg = "|".join(
["cannot subtract DatetimeArray from", "Addition/subtraction of integers"]
)
assert_invalid_addsub_type(dti, other, msg)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz)
expected = expected._with_freq(None)
        # add with TimedeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
def test_dti_iadd_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz)
expected = expected._with_freq(None)
        # iadd with TimedeltaIndex
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result += tdi
tm.assert_index_equal(result, expected)
result = pd.timedelta_range("0 days", periods=10)
result += dti
tm.assert_index_equal(result, expected)
# iadd with timedelta64 array
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result += tdi.values
tm.assert_index_equal(result, expected)
result = pd.timedelta_range("0 days", periods=10)
result += dti
tm.assert_index_equal(result, expected)
def test_dti_sub_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
expected = expected._with_freq(None)
# sub with TimedeltaIndex
result = dti - tdi
tm.assert_index_equal(result, expected)
msg = "cannot subtract .*TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dti
# sub with timedelta64 array
result = dti - tdi.values
tm.assert_index_equal(result, expected)
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi.values - dti
def test_dti_isub_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
expected = expected._with_freq(None)
# isub with TimedeltaIndex
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result -= tdi
tm.assert_index_equal(result, expected)
# DTA.__isub__ GH#43904
dta = dti._data.copy()
dta -= tdi
tm.assert_datetime_array_equal(dta, expected._data)
out = dti._data.copy()
np.subtract(out, tdi, out=out)
tm.assert_datetime_array_equal(out, expected._data)
msg = "cannot subtract .* from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi -= dti
# isub with timedelta64 array
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result -= tdi.values
tm.assert_index_equal(result, expected)
msg = "cannot subtract DatetimeArray from ndarray"
with pytest.raises(TypeError, match=msg):
tdi.values -= dti
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi._values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
# TODO: A couple other tests belong in this section. Move them in
# A PR where there isn't already a giant diff.
@pytest.mark.parametrize(
"addend",
[
datetime(2011, 1, 1),
DatetimeIndex(["2011-01-01", "2011-01-02"]),
DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize("US/Eastern"),
np.datetime64("2011-01-01"),
Timestamp("2011-01-01"),
],
ids=lambda x: type(x).__name__,
)
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_add_datetimelike_and_dtarr(self, box_with_array, addend, tz):
# GH#9631
dti = DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize(tz)
dtarr = tm.box_expected(dti, box_with_array)
msg = "cannot add DatetimeArray and"
assert_cannot_add(dtarr, addend, msg)
# -------------------------------------------------------------
def test_dta_add_sub_index(self, tz_naive_fixture):
# Check that DatetimeArray defers to Index classes
dti = date_range("20130101", periods=3, tz=tz_naive_fixture)
dta = dti.array
result = dta - dti
expected = dti - dti
tm.assert_index_equal(result, expected)
tdi = result
result = dta + tdi
expected = dti + tdi
tm.assert_index_equal(result, expected)
result = dta - tdi
expected = dti - tdi
tm.assert_index_equal(result, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
        # return subtraction -> TimedeltaIndex (GH ...)
dti = date_range("20130101", periods=3)
dti_tz = date_range("20130101", periods=3).tz_localize("US/Eastern")
dti_tz2 = date_range("20130101", periods=3).tz_localize("UTC")
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
msg = "DatetimeArray subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dti_tz - dti
with pytest.raises(TypeError, match=msg):
dti - dti_tz
with pytest.raises(TypeError, match=msg):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range("20130101", periods=3)
dti2 = date_range("20130101", periods=4)
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(["2012-01-01", np.nan, "2012-01-03"])
dti2 =
|
DatetimeIndex(["2012-01-02", "2012-01-03", np.nan])
|
pandas.DatetimeIndex
|
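# Standalone sketch (not part of the test module above): the truncated test above
# checks NaN propagation, i.e. subtracting DatetimeIndexes yields a TimedeltaIndex
# with NaT wherever either operand is NaT.
import numpy as np
import pandas as pd

dti1 = pd.DatetimeIndex(["2012-01-01", np.nan, "2012-01-03"])
dti2 = pd.DatetimeIndex(["2012-01-02", "2012-01-03", np.nan])
diff = dti1 - dti2  # TimedeltaIndex(['-1 days', NaT, NaT])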
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.feature_selection import mutual_info_regression
from IPython.display import display  # display() is used by display_df below
# DATASET GENERATION
def generate_bias(series: pd.Series, effect_size: float = 1, power: float = 1) -> pd.Series:
"""
Calculate bias for sensitive attribute
Parameters
----------
series : pd.Series
sensitive attribute for which the bias is calculated.
effect_size : float, optional
Size of the bias for 1 std from the mean. The default is 1.
power : float, optional
power=1: linear bias, power=2: quadratic bias, etc. The default is 1.
Returns
-------
pd.Series
        Standardized bias values, scaled by effect_size.
"""
bias = series.sub(series.mean()).pow(power)
    bias = (bias - bias.mean())/bias.std()  # standardize the bias to zero mean and unit variance
return bias * effect_size
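# Illustrative usage (not part of the original module): a minimal sketch of
# generate_bias on a toy "Age" column; the values below are hypothetical.
def _example_generate_bias() -> pd.Series:
    age = pd.Series([25, 32, 41, 58, 63], name="Age")
    # power=1 gives a linear bias; power=2 would grow quadratically away from the mean
    return generate_bias(age, effect_size=0.5, power=1)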
def display_df(df: pd.DataFrame, n=10):
"""Nicely display all dataframes with column types
df: the DataFrame to display
n: the number of lines to show
"""
display(df.sample(n, random_state=42)
.style.format({'Age': "{:.2f}",
'Education': "{:.2f}",
'SocialSkills': "{:.2f}",
'Experience': '{:.2f}',
'Gender': "{:d}",
'PromotionEligibilitySkill': "{:.2f}",
'PromotionEligibilityTrue': "{:.2f}",
'SalarySkill': "€{:.2f}",
'SalaryTrue': '€{:.2f}'})
)
# PREDICTION
def predict_series(estimator, X: pd.DataFrame, method='predict') -> pd.Series:
''' Return predictions from an estimator as a series with index.
estimator: sklearn model
X: the set to be predicted
method: predict or predict_proba
'''
if method == 'predict':
y_pred = estimator.predict(X)
elif method == 'predict_proba':
y_pred = estimator.predict_proba(X)[:, 1]
else:
raise ValueError(f'method must be `predict` or `predict_proba`, not {method}')
return
|
pd.Series(y_pred, index=X.index)
|
pandas.Series
|
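# Illustrative usage of predict_series above (not part of the original module),
# assuming the completed return pd.Series(y_pred, index=X.index); the toy sklearn
# model and feature values below are hypothetical.
from sklearn.linear_model import LinearRegression

def _example_predict_series() -> pd.Series:
    X = pd.DataFrame({"Experience": [1.0, 2.0, 3.0, 4.0]}, index=[101, 102, 103, 104])
    y = pd.Series([30.0, 35.0, 40.0, 45.0], index=X.index)
    est = LinearRegression().fit(X, y)
    # the returned Series keeps X's index, so predictions can be joined back safely
    return predict_series(est, X, method="predict")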
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
** Script for calculating mCNN features (for each mutation), which is invoked by run_coord.py for the "qsub" action on the cluster,
   i.e., integrating all the features into a specific (k-neighbor) csv file.
** NOTICE
1. Both the rosetta ref and rosetta mut structures are considered. Note that all items are based on the
   successful runs of rosetta_[ref|mut], i.e., items whose rosetta runs failed were dropped.
2. Other spatial features, such as orientation and the sine or cosine values of dihedral angles, can be calculated from the coords in the csv file.
3. left blank
** File name: coord.py rather than mCNN.py, so that it does not conflict with the self-defined module named mCNN.
   The self-defined module mCNN is made importable via the following mapping:
<1> Create the mapping: /public/home/sry/opt/miniconda3/envs/bio/custom_path/mCNN -> /public/home/sry/projects/mCNN/src/.
<2> Adding pth file: /public/home/sry/opt/miniconda3/envs/bio/lib/python3.6/site-packages/custom_path.pth
$ cat custom_path.pth with the output: /public/home/sry/opt/miniconda3/envs/bio/custom_path
** 10/10/2019.
** --sry.
'''
import os, sys,argparse
import numpy as np
import pandas as pd
from mCNN.processing import aa_321dict,log,read_csv,str2bool,aa_123dict
def main_all_atom():
'''
    Compute all CA atoms of native_wild, TR_wild and TR_mutant and save them to csv files.
:return:
'''
csvpth = '/public/home/sry/mCNN/dataset/TR/S2648_TR500.csv'
df = pd.read_csv(csvpth)
for i in range(len(df)):
key, PDB, WILD_TYPE, CHAIN, POSITION, MUTANT, PH, TEMPERATURE, DDG = df.iloc[i, :].values
mutant_tag = '%s.%s.%s.%s.%s.%s' % (key, PDB, WILD_TYPE, CHAIN, POSITION, MUTANT)
## for wild
outdir = '/public/home/sry/mCNN/dataset/TR/feature/coord/wild'
pdbpth = '/public/home/sry/mCNN/dataset/TR/pdb_chain/%s.pdb'%PDB
stridepth = '/public/home/sry/mCNN/dataset/TR/feature/stride/wild/%s.stride'%PDB
df_pdb, center_coord = ParsePDB(pdbpth, mutant_tag, accept_atom=('CA',), center='CA')
FG = FeatureGenerator()
df_feature = FG.append_stride(df_pdb=df_pdb,stride_pth=stridepth)
save_csv(df_feature, outdir=outdir, filename='%s_neighbor_all'%PDB)
## for TR output
outdir = '/public/home/sry/mCNN/dataset/TR/feature/coord/TR'
# TR_wild
TR_wild_tag = '%s.%s.%s' % (key, PDB, CHAIN)
pdbpth = '/public/home/sry/mCNN/dataset/TR/output/%s/model1.pdb' % TR_wild_tag
stridepth = '/public/home/sry/mCNN/dataset/TR/feature/stride/TR/%s.stride' % TR_wild_tag
df_pdb, center_coord = ParsePDB(pdbpth, mutant_tag, accept_atom=('CA',), center='CA')
FG = FeatureGenerator()
df_feature = FG.append_stride(df_pdb=df_pdb, stride_pth=stridepth)
save_csv(df_feature, outdir=outdir, filename='%s_neighbor_all' % TR_wild_tag)
# TR_mut
pdbpth = '/public/home/sry/mCNN/dataset/TR/output/%s/model1.pdb' % mutant_tag
stridepth = '/public/home/sry/mCNN/dataset/TR/feature/stride/TR/%s.stride' % mutant_tag
df_pdb, center_coord = ParsePDB(pdbpth, mutant_tag, accept_atom=('CA',), center='CA')
FG = FeatureGenerator()
df_feature = FG.append_stride(df_pdb=df_pdb,stride_pth=stridepth)
save_csv(df_feature, outdir=outdir, filename='%s_neighbor_all' % mutant_tag)
def main_appending_wild_TR():
    '''Append the native_wild atoms after the TR_mutant atoms (the atoms correspond one-to-one, i.e. they come from the same residues).'''
kneighbor = 20
csvpth = '/public/home/sry/mCNN/dataset/TR/S2648_TR500.csv'
outdir = '/public/home/sry/mCNN/dataset/TR/feature/coord/wild_TR'
df = pd.read_csv(csvpth)
for i in range(len(df)):
key, PDB, WILD_TYPE, CHAIN, POSITION, MUTANT, PH, TEMPERATURE, DDG = df.iloc[i, :].values
mutant_tag = '%s.%s.%s.%s.%s.%s' % (key, PDB, WILD_TYPE, CHAIN, POSITION, MUTANT)
csvpth1 = '/public/home/sry/mCNN/dataset/TR/feature/coord/wild/%s_neighbor_all.csv'%PDB
csvpth2 = '/public/home/sry/mCNN/dataset/TR/feature/coord/TR/%s_neighbor_all.csv'%mutant_tag
df_neighbor = get_corresponding_coord_wild_TR(csvpth1, csvpth2, mutant_tag, kneighbor=kneighbor)
save_csv(df_neighbor,outdir=outdir,filename='%s_neighbor_%s' % (mutant_tag,kneighbor))
def main_appending_TR_TR():
    '''Append the TR_wild atoms after the TR_mutant atoms (the atoms correspond one-to-one, i.e. they come from the same residues).'''
kneighbor = 20
csvpth = '/public/home/sry/mCNN/dataset/TR/S2648_TR500.csv'
outdir = '/public/home/sry/mCNN/dataset/TR/feature/coord/TR_TR'
df = pd.read_csv(csvpth)
for i in range(len(df)):
key, PDB, WILD_TYPE, CHAIN, POSITION, MUTANT, PH, TEMPERATURE, DDG = df.iloc[i, :].values
mutant_tag = '%s.%s.%s.%s.%s.%s' % (key, PDB, WILD_TYPE, CHAIN, POSITION, MUTANT)
wild_tag = '%s.%s.%s'% (key, PDB, CHAIN)
csvpth1 = '/public/home/sry/mCNN/dataset/TR/feature/coord/TR/%s_neighbor_all.csv'%wild_tag
csvpth2 = '/public/home/sry/mCNN/dataset/TR/feature/coord/TR/%s_neighbor_all.csv'%mutant_tag
df_neighbor = get_corresponding_coord_TR_TR(csvpth1, csvpth2, mutant_tag, kneighbor=kneighbor)
save_csv(df_neighbor, outdir=outdir, filename='%s_neighbor_%s' % (mutant_tag, kneighbor))
# ----------------------------------------------------------------------------------------------------------------------
def save_csv(df,outdir,filename):
if not os.path.exists(outdir):
os.makedirs(outdir)
df.to_csv('%s/%s.csv'%(outdir,filename),index=False)
@log
def ParsePDB(pdbpth, mutant_tag, accept_atom = ('CA',), center='CA'):
"""
:param pdbpth:
:param mutant_tag:# ['key', 'PDB', 'WILD_TYPE', 'CHAIN', 'POSITION', 'MUTANT']
:param atom_list:
:param center:
:return:
"""
import warnings
from Bio import BiopythonWarning
from Bio.PDB.PDBParser import PDBParser
warnings.simplefilter('ignore', BiopythonWarning)
df_pdb = pd.DataFrame(
{'chain': [], 'res': [], 'het': [], 'posid': [], 'inode': [], 'full_name': [], 'atom_name': [],
'dist': [], 'x': [], 'y': [], 'z': [], 'occupancy': [], 'b_factor': []})
key,pdbid,wtaa,mtchain,pos,mtaa = mutant_tag.split('.')
print('The pdbid is:', pdbid, 'pth: %s' % pdbpth)
# --------------------------------------------------------------------------------------------------------------
# consider mapping
if pdbpth.split('/')[-1] == 'model1.pdb':
map_pos_pth = '/public/home/sry/mCNN/dataset/TR/map_pos/%s_mapping.csv'%pdbid
df_map = pd.read_csv(map_pos_pth)
df_map[['POSITION_OLD']] = df_map[['POSITION_OLD']].astype(str)
df_map[['POSITION_NEW']] = df_map[['POSITION_NEW']].astype(str)
pos = df_map.loc[(df_map.CHAIN == mtchain) & (df_map.POSITION_OLD == pos),'POSITION_NEW'].values[0] #CHAIN,POSITION_OLD,POSITION_NEW
# --------------------------------------------------------------------------------------------------------------
if pos.isdigit():
INODE = ' '
POSID = int(pos)
else:
INODE = pos[-1]
POSID = int(pos[:-1])
MT_pos = (' ',POSID,INODE)
parser = PDBParser(PERMISSIVE=1)
structure = parser.get_structure(pdbid, pdbpth)
model = structure[0]
if pdbpth.split('/')[-1] == 'model1.pdb':
try:
assert model['A'][MT_pos].get_resname() == aa_123dict[wtaa]#TR_wild
except:
assert model['A'][MT_pos].get_resname() == aa_123dict[mtaa]#TR_mut
else:
assert model[mtchain][MT_pos].get_resname() == aa_123dict[wtaa]
if center == 'CA':
if pdbpth.split('/')[-1] == 'model1.pdb':
center_coord = model['A'][MT_pos]['CA'].get_coord()
else:
center_coord = model[mtchain][MT_pos]['CA'].get_coord()
for chain in model:
chain_name = chain.get_id()
res_id_lst = [res.get_id() for res in chain]
print('The res_number in chain %s is: %d'%(chain_name,len(res_id_lst)))
res_list = [chain[res_id] for res_id in res_id_lst]
for res in res_list:
res_name = res.get_resname()
het, pos_id, inode = res.get_id()
for atom in res:
full_name, coord, occupancy, b_factor = atom.get_name(), atom.get_coord(), atom.get_occupancy(), atom.get_bfactor()
if not full_name in accept_atom:
continue
name = full_name.strip()[0]
# if name in ('0','1','2','3','4','5','6','7','8','9','H','D'):
# if not name in ('C','O','N','S'):
dist = np.linalg.norm(center_coord - coord)
x,y,z = coord
temp_array = np.array([chain_name,res_name,het,pos_id,inode,full_name,name,dist,x,y,z,occupancy,b_factor]).reshape(1, -1)
temp_df = pd.DataFrame(temp_array)
temp_df.columns = df_pdb.columns
df_pdb = pd.concat([df_pdb, temp_df], axis=0, ignore_index=True)
break
df_pdb[['dist']] = df_pdb[['dist']].astype(float)
print('The atom_number (only CA) is:',len(df_pdb))
return df_pdb, center_coord
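# Illustrative call of ParsePDB above (not part of the original pipeline): the pdb
# path and mutant tag below are hypothetical placeholders following the
# key.PDB.WILD_TYPE.CHAIN.POSITION.MUTANT convention used in this script.
def _example_parse_pdb():
    pdb_path = '/path/to/1abc.pdb'      # hypothetical chain pdb (not a model1.pdb, so no position mapping is applied)
    mutant_tag = '1.1abc.A.A.100.G'
    df_ca, center_coord = ParsePDB(pdb_path, mutant_tag, accept_atom=('CA',), center='CA')
    # df_ca holds one row per CA atom with its distance to the mutated residue's CA
    return df_ca, center_coord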
def get_corresponding_coord_TR_TR(csvpth1, csvpth2, mutant_tag, kneighbor=20):
print(csvpth1)
print(csvpth2)
success_cnt = 0
df_lst = []
key, PDB, WILD_TYPE, CHAIN, POSITION, MUTANT = mutant_tag.split('.')
df1 = pd.read_csv(csvpth1)
df2 =
|
pd.read_csv(csvpth2)
|
pandas.read_csv
|
import numpy as np
import pandas as pd
pd.options.mode.chained_assignment = None
class Draft:
def __init__(self, projection, number_teams = 12,
batters = {'C','1B','2B', '3B','SS','OF','UTIL'},
pitchers = {'SP','RP','P'},
number_positions = {'C':1,'1B':1,'2B':1, '3B':1,'SS':1,'OF':3,'UTIL':1,'SP':2,'RP':2,'P':3,'BN':5},
batter_statline = {'R':0,'1B':0,'2B':0, '3B':0,'HR':0,'RBI':0,'SB':0,'BB':0,'AVG':0,'OPS':0},
pitcher_statline = {'W':0, 'L':0,'CG':0,'SHO':0,'SV':0,'BB':0,'SO':0,'ERA':0,'WHIP':0,'BSV':0} ):
self.projection = projection
self.batters = batters
self.pitchers = pitchers
self.open_positions = number_positions
self.batter_statline = batter_statline
self.pitcher_statline = pitcher_statline
self.number_teams = number_teams
bat_rows = []
bat_cols = ['Name']
for ibat in self.batters:
for i in range(self.open_positions[ibat]):
bat_rows.append(ibat)
for ibat in self.batter_statline:
bat_cols.append(ibat)
print(bat_rows)
print(bat_cols)
bat_df = pd.DataFrame(np.zeros([len(bat_rows),len(bat_cols)]), columns = bat_cols)
bat_df['Position'] = bat_rows
        pit_rows = []
        pit_cols = ['Name']
for ipit in self.pitchers:
for i in range(self.open_positions[ipit]):
pit_rows.append(ipit)
for ipit in self.pitcher_statline:
pit_cols.append(ipit)
print(pit_rows)
print(pit_cols)
pit_df = pd.DataFrame(np.zeros([len(pit_rows),len(pit_cols)]), columns = pit_cols)
pit_df['Position'] = pit_rows
self.bat_df = bat_df
self.pit_df = pit_df
#return bat_df, pit_df
def draft_team(self, draft_position = 'Best'):
#batter_categories = ['R','1B','2B', '3B','HR','RBI','SB','BB','AVG','OPS']
#pitcher_categories = ['W', 'L','CG','SHO','SV','BB','SO','ERA','WHIP','BSV' ]
self.draftees = pd.DataFrame()
for psn in self.batters:
if psn != 'UTIL':
ind_position_df = (self.projection.statline['batters']['Position']==psn) & (self.projection.statline['batters']['Drafted']=='False')
nlst = min(len(np.unique(self.projection.statline['batters'][ind_position_df]['Name'])), self.number_teams * self.open_positions[psn] * 2)
ranked_position_df = self.projection.statline['batters'][ind_position_df].sort_values('Rank')[0:nlst]
ranked_stat = np.zeros([len(self.batter_statline),nlst])
for i in range(len(self.batter_statline)):
                    cat = list(self.batter_statline.keys())[i]
if cat in ['HR', 'R', 'RBI', '2B', 'SB', '1B', '3B', 'BB']:
ind = np.argsort(ranked_position_df[cat])
ranked_stat[i, ind[::-1]]= np.arange(len(ind),0,-1)
elif cat in ['AVG', 'OPS']:
weighted_ranked_position_df = ranked_position_df[cat] * (162. * 4.) / ranked_position_df['AB']
ind = np.argsort(weighted_ranked_position_df)
ranked_stat[i, ind[::-1]]= np.arange(len(ind),0,-1)
position_rank = np.sum(ranked_stat.T, axis=1)
#print(position_rank)
sorted_rank = np.sort(position_rank)
sorted_arg = np.argsort(position_rank)
for i in (1 + np.arange(nlst - 1)):
print(psn,ranked_position_df.Name.values[sorted_arg[-i]], sorted_rank[-i])
#print(psn,ranked_position_df.Name.values[np.argsort(position_rank)[-1]], np.sort(position_rank)[-1])
df = pd.DataFrame([psn,ranked_position_df.Name.values[np.argsort(position_rank)[-1]], np.sort(position_rank)[-1]])
if self.draftees.empty:
self.draftees = df.T
else:
self.draftees = pd.concat([self.draftees, df.T])
for psn in self.pitchers:
if psn != 'P':
ind_position_df = (self.projection.statline['pitchers']['Position']==psn) & (self.projection.statline['pitchers']['Drafted']=='False')
nlst = min(len(np.unique(self.projection.statline['pitchers'][ind_position_df]['Name'])), self.number_teams * self.open_positions[psn] * 2)
ranked_position_df = self.projection.statline['pitchers'][ind_position_df].sort_values('Rank')[0:nlst]
                ranked_stat = np.zeros([len(self.pitcher_statline),nlst])
for i in range(len(self.pitcher_statline)):
                    cat = list(self.pitcher_statline.keys())[i]
if cat in ['SO', 'W', 'SV', 'CG', 'SHO']:
ind = np.argsort(ranked_position_df[cat])
ranked_stat[i, ind[::-1]]= np.arange(len(ind),0,-1)
elif cat in ['L', 'BB', 'BSV']:
ind = np.argsort(ranked_position_df[cat])
ranked_stat[i, ind]= np.arange(len(ind),0,-1)
elif cat in ['WHIP', 'ERA']:
weighted_ranked_position_df = ranked_position_df[cat] * (1250. * 9.) / ranked_position_df['IP']
ind = np.argsort(weighted_ranked_position_df)
ranked_stat[i, ind]= np.arange(len(ind),0,-1)
position_rank = np.sum(ranked_stat.T, axis=1)
for i in (1 + np.arange(nlst - 1)):
print(psn,ranked_position_df.Name.values[np.argsort(position_rank)[-i]], np.sort(position_rank)[-i])
#print(psn,ranked_position_df.Name.values[np.argsort(position_rank)[-1]], np.sort(position_rank)[-1])
df = pd.DataFrame([psn,ranked_position_df.Name.values[np.argsort(position_rank)[-1]], np.sort(position_rank)[-1]])
self.draftees =
|
pd.concat([self.draftees, df.T])
|
pandas.concat
|
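# Standalone sketch (not part of the Draft class above) of the rank-sum scheme used
# in draft_team: each stat category is turned into per-player ranks via argsort and
# the ranks are summed, so the best overall player gets the highest total.
import numpy as np

def rank_sum(stats: np.ndarray, higher_is_better: np.ndarray) -> np.ndarray:
    """stats: (n_players, n_categories); higher_is_better: one bool per category."""
    n_players, n_categories = stats.shape
    ranks = np.zeros_like(stats, dtype=float)
    for j in range(n_categories):
        order = np.argsort(stats[:, j])
        if higher_is_better[j]:
            order = order[::-1]
        # best player in this category gets n_players points, worst gets 1
        ranks[order, j] = np.arange(n_players, 0, -1)
    return ranks.sum(axis=1)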
import csv
import logging
import os
import tempfile
import time
from hashlib import sha256
from ipaddress import IPv4Address, ip_address
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
from imblearn.under_sampling import RandomUnderSampler
from pandas.api.types import is_bool_dtype as is_bool
from pandas.api.types import is_datetime64_any_dtype as is_datetime
from pandas.api.types import (
is_float_dtype,
is_integer_dtype,
is_numeric_dtype,
is_string_dtype,
)
from pandas.core.dtypes.common import is_period_dtype
from upgini.errors import ValidationError
from upgini.http import UPGINI_API_KEY, get_rest_client
from upgini.metadata import (
EVAL_SET_INDEX,
SYSTEM_RECORD_ID,
DataType,
FeaturesFilter,
FileColumnMeaningType,
FileColumnMetadata,
FileMetadata,
FileMetrics,
ModelTaskType,
NumericInterval,
RuntimeParameters,
SearchCustomization,
)
from upgini.normalizer.phone_normalizer import phone_to_int
from upgini.search_task import SearchTask
class Dataset(pd.DataFrame):
MIN_ROWS_COUNT = 100
MAX_ROWS_REGISTERED = 299_999
MAX_ROWS_UNREGISTERED = 149_999
FIT_SAMPLE_ROWS = 100_000
FIT_SAMPLE_THRESHOLD = FIT_SAMPLE_ROWS * 3
IMBALANCE_THESHOLD = 0.4
MIN_TARGET_CLASS_COUNT = 100
MAX_MULTICLASS_CLASS_COUNT = 100
MIN_SUPPORTED_DATE_TS = 1114992000000 # 2005-05-02
_metadata = [
"dataset_name",
"description",
"meaning_types",
"search_keys",
"ignore_columns",
"hierarchical_group_keys",
"hierarchical_subgroup_keys",
"date_format",
"random_state",
"task_type",
"initial_data",
"file_upload_id",
"etalon_def",
"endpoint",
"api_key",
"columns_renaming",
"sampled",
]
def __init__(
self,
dataset_name: str,
description: Optional[str] = None,
df: Optional[pd.DataFrame] = None,
path: Optional[str] = None,
meaning_types: Optional[Dict[str, FileColumnMeaningType]] = None,
search_keys: Optional[List[Tuple[str, ...]]] = None,
model_task_type: Optional[ModelTaskType] = None,
date_format: Optional[str] = None,
random_state: Optional[int] = None,
endpoint: Optional[str] = None,
api_key: Optional[str] = None,
**kwargs,
):
if df is not None:
data = df.copy()
elif path is not None:
if "sep" in kwargs:
data = pd.read_csv(path, **kwargs)
else:
# try different separators: , ; \t ...
with open(path, mode="r") as csvfile:
sep = csv.Sniffer().sniff(csvfile.read(2048)).delimiter
kwargs["sep"] = sep
data = pd.read_csv(path, **kwargs)
else:
raise ValueError("DataFrame or path to file should be passed.")
if isinstance(data, pd.DataFrame):
super(Dataset, self).__init__(data) # type: ignore
else:
raise ValueError("Iteration is not supported. Remove `iterator` and `chunksize` arguments and try again.")
self.dataset_name = dataset_name
self.task_type = model_task_type
self.description = description
self.meaning_types = meaning_types
self.search_keys = search_keys
self.ignore_columns = []
self.hierarchical_group_keys = []
self.hierarchical_subgroup_keys = []
self.date_format = date_format
self.initial_data = data.copy()
self.file_upload_id: Optional[str] = None
self.etalon_def: Optional[Dict[str, str]] = None
self.endpoint = endpoint
self.api_key = api_key
self.random_state = random_state
self.columns_renaming: Dict[str, str] = {}
self.sampled: bool = False
@property
def meaning_types_checked(self) -> Dict[str, FileColumnMeaningType]:
if self.meaning_types is None:
raise ValueError("meaning_types is empty.")
else:
return self.meaning_types
@property
def search_keys_checked(self) -> List[Tuple[str, ...]]:
if self.search_keys is None:
raise ValueError("search_keys is empty.")
else:
return self.search_keys
@property
def etalon_def_checked(self) -> Dict[str, str]:
if self.etalon_def is None:
self.etalon_def = {
v.value: k for k, v in self.meaning_types_checked.items() if v != FileColumnMeaningType.FEATURE
}
return self.etalon_def
def __validate_min_rows_count(self):
if self.shape[0] < self.MIN_ROWS_COUNT:
raise ValueError(f"X should contain at least {self.MIN_ROWS_COUNT} valid distinct rows.")
def __validate_max_row_count(self):
api_key = self.api_key or os.environ.get(UPGINI_API_KEY)
is_registered = api_key is not None and api_key != ""
if is_registered:
if len(self) > self.MAX_ROWS_REGISTERED:
raise ValueError(
f"Total X + eval_set rows count limit is {self.MAX_ROWS_REGISTERED}. "
"Please sample X and eval_set"
)
else:
if len(self) > self.MAX_ROWS_UNREGISTERED:
raise ValueError(
f"For unregistered users total rows count limit for X + eval_set is {self.MAX_ROWS_UNREGISTERED}. "
"Please register to increase the limit"
)
def __rename_columns(self):
# logging.info("Replace restricted symbols in column names")
for column in self.columns:
if len(column) == 0:
raise ValueError("Some of column names are empty. Fill them and try again, please.")
new_column = str(column).lower()
if ord(new_column[0]) not in range(ord("a"), ord("z")):
new_column = "a" + new_column
for idx, c in enumerate(new_column):
if ord(c) not in range(ord("a"), ord("z")) and ord(c) not in range(ord("0"), ord("9")):
new_column = new_column[:idx] + "_" + new_column[idx + 1 :]
self.rename(columns={column: new_column}, inplace=True)
self.meaning_types = {
(new_column if key == str(column) else key): value for key, value in self.meaning_types_checked.items()
}
self.search_keys = [
tuple(new_column if key == str(column) else key for key in keys) for keys in self.search_keys_checked
]
self.columns_renaming[new_column] = str(column)
def __validate_too_long_string_values(self):
"""Check that string values less than 400 characters"""
# logging.info("Validate too long string values")
for col in self.columns:
if is_string_dtype(self[col]):
max_length: int = self[col].astype("str").str.len().max()
if max_length > 400:
raise ValueError(
f"Some of column {col} values are too long: {max_length} characters. "
"Remove this column or trim values to 50 characters."
)
def __clean_duplicates(self):
"""Clean DataSet from full duplicates."""
# logging.info("Clean full duplicates")
nrows = len(self)
unique_columns = self.columns.tolist()
logging.info(f"Dataset shape before clean duplicates: {self.shape}")
self.drop_duplicates(subset=unique_columns, inplace=True)
logging.info(f"Dataset shape after clean duplicates: {self.shape}")
nrows_after_full_dedup = len(self)
share_full_dedup = 100 * (1 - nrows_after_full_dedup / nrows)
if share_full_dedup > 0:
print(f"{share_full_dedup:.5f}% of the rows are fully duplicated")
target_column = self.etalon_def_checked.get(FileColumnMeaningType.TARGET.value)
if target_column is not None:
unique_columns.remove(target_column)
unique_columns.remove(SYSTEM_RECORD_ID)
self.drop_duplicates(subset=unique_columns, inplace=True)
nrows_after_tgt_dedup = len(self)
share_tgt_dedup = 100 * (1 - nrows_after_tgt_dedup / nrows_after_full_dedup)
if nrows_after_tgt_dedup < nrows_after_full_dedup:
msg = (
f"{share_tgt_dedup:.5f}% of rows in X are duplicates with different y values. "
"Please check the dataframe and restart fit"
)
logging.error(msg)
raise ValueError(msg)
def __convert_bools(self):
"""Convert bool columns True -> 1, False -> 0"""
# logging.info("Converting bool to int")
for col in self.columns:
if is_bool(self[col]):
self[col] = self[col].astype("Int64")
def __convert_float16(self):
"""Convert float16 to float"""
# logging.info("Converting float16 to float")
for col in self.columns:
if is_float_dtype(self[col]):
self[col] = self[col].astype("float64")
def __correct_decimal_comma(self):
"""Check DataSet for decimal commas and fix them"""
# logging.info("Correct decimal commas")
tmp = self.head(10)
# all columns with sep="," will have dtype == 'object', i.e string
# sep="." will be casted to numeric automatically
cls_to_check = [i for i in tmp.columns if is_string_dtype(tmp[i])]
for col in cls_to_check:
if tmp[col].astype(str).str.match("^[0-9]+,[0-9]*$").any():
self[col] = self[col].astype(str).str.replace(",", ".").astype(np.float64)
def __to_millis(self):
"""Parse date column and transform it to millis"""
date = self.etalon_def_checked.get(FileColumnMeaningType.DATE.value) or self.etalon_def_checked.get(
FileColumnMeaningType.DATETIME.value
)
def intToOpt(i: int) -> Optional[int]:
if i == -9223372036855:
return None
else:
return i
if date is not None and date in self.columns:
# logging.info("Transform date column to millis")
if is_string_dtype(self[date]):
self[date] = (
pd.to_datetime(self[date], format=self.date_format).dt.floor("D").view(np.int64) // 1_000_000
)
elif is_datetime(self[date]):
self[date] = self[date].dt.floor("D").view(np.int64) // 1_000_000
elif is_period_dtype(self[date]):
self[date] = pd.to_datetime(self[date].astype("string")).dt.floor("D").view(np.int64) // 1_000_000
elif is_numeric_dtype(self[date]):
msg = f"Unsupported type of date column {date}. Convert to datetime manually please."
logging.error(msg)
raise Exception(msg)
self[date] = self[date].apply(lambda x: intToOpt(x)).astype("Int64")
@staticmethod
def __email_to_hem(email: str) -> Optional[str]:
if email is None or not isinstance(email, str) or email == "":
return None
else:
return sha256(email.lower().encode("utf-8")).hexdigest()
def __hash_email(self):
"""Add column with HEM if email presented in search keys"""
email = self.etalon_def_checked.get(FileColumnMeaningType.EMAIL.value)
if email is not None and email in self.columns:
# logging.info("Hashing email")
generated_hem_name = "generated_hem"
self[generated_hem_name] = self[email].apply(self.__email_to_hem)
self.meaning_types_checked[generated_hem_name] = FileColumnMeaningType.HEM
self.meaning_types_checked.pop(email)
self.etalon_def_checked[FileColumnMeaningType.HEM.value] = generated_hem_name
del self.etalon_def_checked[FileColumnMeaningType.EMAIL.value]
self.search_keys = [
tuple(key if key != email else generated_hem_name for key in search_group)
for search_group in self.search_keys_checked
]
self["email_domain"] = self[email].str.split("@").str[1]
self.drop(columns=email, inplace=True)
@staticmethod
def __ip_to_int(ip: Union[str, int, IPv4Address]) -> Optional[int]:
try:
return int(ip_address(ip))
except Exception:
return None
def __convert_ip(self):
"""Convert ip address to int"""
ip = self.etalon_def_checked.get(FileColumnMeaningType.IP_ADDRESS.value)
if ip is not None and ip in self.columns:
# logging.info("Convert ip address to int")
self[ip] = self[ip].apply(self.__ip_to_int).astype("Int64")
def __normalize_iso_code(self):
iso_code = self.etalon_def_checked.get(FileColumnMeaningType.COUNTRY.value)
if iso_code is not None and iso_code in self.columns:
# logging.info("Normalize iso code column")
self[iso_code] = (
self[iso_code]
.astype(str)
.str.upper()
.str.replace(r"[^A-Z]", "", regex=True)
.str.replace("UK", "GB", regex=False)
)
def __normalize_postal_code(self):
postal_code = self.etalon_def_checked.get(FileColumnMeaningType.POSTAL_CODE.value)
if postal_code is not None and postal_code in self.columns:
# logging.info("Normalize postal code")
self[postal_code] = (
self[postal_code]
.astype(str)
.str.upper()
.str.replace(r"[^0-9A-Z]", "", regex=True)
.str.replace(r"^0+\B", "", regex=True)
)
def __remove_old_dates(self):
date_column = self.etalon_def_checked.get(FileColumnMeaningType.DATE.value) or self.etalon_def_checked.get(
FileColumnMeaningType.DATETIME.value
)
if date_column is not None:
old_subset = self[self[date_column] < self.MIN_SUPPORTED_DATE_TS]
if len(old_subset) > 0:
logging.info(f"df before dropping old rows: {self.shape}")
self.drop(index=old_subset.index, inplace=True)
logging.info(f"df after dropping old rows: {self.shape}")
                msg = "We don't have data before '2005-05-02' and removed all earlier records from the search dataset"
logging.warning(msg)
print("WARN: ", msg)
def __drop_ignore_columns(self):
"""Drop ignore columns"""
columns_to_drop = list(set(self.columns) & set(self.ignore_columns))
if len(columns_to_drop) > 0:
# logging.info(f"Dropping ignore columns: {self.ignore_columns}")
self.drop(columns_to_drop, axis=1, inplace=True)
def __target_value(self) -> pd.Series:
target_column = self.etalon_def_checked.get(FileColumnMeaningType.TARGET.value, "")
target: pd.Series = self[target_column]
# clean target from nulls
target.dropna(inplace=True)
if is_numeric_dtype(target):
target = target.loc[np.isfinite(target)] # type: ignore
else:
target = target.loc[target != ""]
return target
def __validate_target(self):
# logging.info("Validating target")
target_column = self.etalon_def_checked.get(FileColumnMeaningType.TARGET.value, "")
target = self[target_column]
if self.task_type == ModelTaskType.BINARY:
if not is_integer_dtype(target):
try:
self[target_column] = self[target_column].astype("int")
except ValueError:
logging.exception("Failed to cast target to integer for binary task type")
raise ValidationError(
f"Unexpected dtype of target for binary task type: {target.dtype}." " Expected int or bool"
)
target_classes_count = target.nunique()
if target_classes_count != 2:
msg = f"Binary task type should contain only 2 target values, but {target_classes_count} presented"
logging.error(msg)
raise ValidationError(msg)
elif self.task_type == ModelTaskType.MULTICLASS:
if not is_integer_dtype(target) and not is_string_dtype(target):
if is_numeric_dtype(target):
try:
self[target_column] = self[target_column].astype("int")
except ValueError:
logging.exception("Failed to cast target to integer for multiclass task type")
raise ValidationError(
f"Unexpected dtype of target for multiclass task type: {target.dtype}."
"Expected int or str"
)
else:
msg = f"Unexpected dtype of target for multiclass task type: {target.dtype}. Expected int or str"
logging.exception(msg)
raise ValidationError(msg)
elif self.task_type == ModelTaskType.REGRESSION:
if not is_float_dtype(target):
try:
self[target_column] = self[target_column].astype("float")
except ValueError:
logging.exception("Failed to cast target to float for regression task type")
raise ValidationError(
f"Unexpected dtype of target for regression task type: {target.dtype}. Expected float"
)
elif self.task_type == ModelTaskType.TIMESERIES:
if not is_float_dtype(target):
try:
self[target_column] = self[target_column].astype("float")
except ValueError:
logging.exception("Failed to cast target to float for timeseries task type")
raise ValidationError(
f"Unexpected dtype of target for timeseries task type: {target.dtype}. Expected float"
)
def __resample(self):
# logging.info("Resampling etalon")
# Resample imbalanced target. Only train segment (without eval_set)
if self.task_type in [ModelTaskType.BINARY, ModelTaskType.MULTICLASS]:
if EVAL_SET_INDEX in self.columns:
train_segment = self[self[EVAL_SET_INDEX] == 0]
validation_segment = self[self[EVAL_SET_INDEX] != 0]
else:
train_segment = self
validation_segment = None
count = len(train_segment)
min_class_count = count
min_class_value = None
target_column = self.etalon_def_checked.get(FileColumnMeaningType.TARGET.value, "")
target = train_segment[target_column]
target_classes_count = target.nunique()
if target_classes_count > self.MAX_MULTICLASS_CLASS_COUNT:
msg = (
f"The number of target classes {target_classes_count} exceeds the allowed threshold: "
f"{self.MAX_MULTICLASS_CLASS_COUNT}. Please, correct your data and try again"
)
logging.error(msg)
raise ValidationError(msg)
unique_target = target.unique()
for v in list(unique_target): # type: ignore
current_class_count = len(train_segment.loc[target == v])
if current_class_count < min_class_count:
min_class_count = current_class_count
min_class_value = v
if min_class_count < self.MIN_TARGET_CLASS_COUNT:
msg = (
f"The rarest class `{min_class_value}` occurs {min_class_count}. "
"The minimum number of observations for each class in a train dataset must be "
f"grater than {self.MIN_TARGET_CLASS_COUNT}. Please, correct your data and try again"
)
logging.error(msg)
raise ValidationError(msg)
min_class_percent = self.IMBALANCE_THESHOLD / target_classes_count
min_class_threshold = min_class_percent * count
if min_class_count < min_class_threshold:
logging.info(
f"Target is imbalanced. The rarest class `{min_class_value}` occurs {min_class_count} times. "
"The minimum number of observations for each class in a train dataset must be "
f"grater than or equal to {min_class_threshold} ({min_class_percent * 100} %). "
"It will be undersampled"
)
if is_string_dtype(target):
target_replacement = {v: i for i, v in enumerate(unique_target)} # type: ignore
prepared_target = target.replace(target_replacement)
else:
prepared_target = target
sampler = RandomUnderSampler(random_state=self.random_state)
X = train_segment[SYSTEM_RECORD_ID]
X = X.to_frame(SYSTEM_RECORD_ID)
new_x, _ = sampler.fit_resample(X, prepared_target) # type: ignore
resampled_data = train_segment[train_segment[SYSTEM_RECORD_ID].isin(new_x[SYSTEM_RECORD_ID])]
if validation_segment is not None:
resampled_data = pd.concat([resampled_data, validation_segment], ignore_index=True)
self._update_inplace(resampled_data)
logging.info(f"Shape after resampling: {self.shape}")
self.sampled = True
# Resample over fit threshold
if EVAL_SET_INDEX in self.columns:
train_segment = self[self[EVAL_SET_INDEX] == 0]
validation_segment = self[self[EVAL_SET_INDEX] != 0]
else:
train_segment = self
validation_segment = None
if len(train_segment) > self.FIT_SAMPLE_THRESHOLD:
logging.info(
f"Etalon has size {len(train_segment)} more than threshold {self.FIT_SAMPLE_THRESHOLD} "
f"and will be downsampled to {self.FIT_SAMPLE_ROWS}"
)
resampled_data = train_segment.sample(n=self.FIT_SAMPLE_ROWS, random_state=self.random_state)
if validation_segment is not None:
resampled_data =
|
pd.concat([resampled_data, validation_segment], ignore_index=True)
|
pandas.concat
|
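A minimal, self-contained sketch of the undersampling pattern used in __resample above, assuming imbalanced-learn is installed; the column names ("record_id", "eval_set_index", "target") are illustrative placeholders, not the library's real constants.

import pandas as pd
from imblearn.under_sampling import RandomUnderSampler

def undersample_train_only(df: pd.DataFrame, target_col: str = "target",
                           eval_col: str = "eval_set_index",
                           random_state: int = 42) -> pd.DataFrame:
    # Keep the validation segment (eval_set_index != 0) untouched.
    train = df[df[eval_col] == 0]
    valid = df[df[eval_col] != 0]
    # Undersample on a single id column so the full rows can be recovered afterwards.
    sampler = RandomUnderSampler(random_state=random_state)
    X_res, _ = sampler.fit_resample(train[["record_id"]], train[target_col])
    resampled = train[train["record_id"].isin(X_res["record_id"])]
    # Re-attach the untouched validation rows, mirroring the pd.concat in the class above.
    return pd.concat([resampled, valid], ignore_index=True)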
import argparse
import io
import os
import os.path
import pandas as pd
import sacrebleu
from dotenv import load_dotenv
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfpage import PDFPage
from ..common.common import call_translation, set_log_level, load_tmx_file, call_sentence_alignment
import logging
load_dotenv()
class Config:
"""
Read from .env file - These are params that are static across parallel jobs
"""
SUBSCRIPTION_KEY = os.environ.get("SUBSCRIPTION_KEY") # The Custom Translation Subscription key
CATEGORIES = os.environ.get("CATEGORIES") # The categories/model ids we are evaluating
REGION = os.environ.get("REGION") # The region our model is deployed in
ALIGNER_PATH = os.environ.get("ALIGNER_PATH") # The location of the alignment script
# https://www.microsoft.com/en-us/download/details.aspx?id=52608&from=https%3A%2F%2Fresearch.microsoft.com%2Fen-us%2Fdownloads%2Faafd5dcf-4dcc-49b2-8a22-f7055113e656%2F
DEBUG = bool(os.environ.get("DEBUG")) # Activate debugging
def pdf_parser(data):
"""
:param data: The file stream
:return: The converted text
"""
fp = open(data, 'rb')
rsrc_mgr = PDFResourceManager()
ret_str = io.StringIO()
la_params = LAParams()
device = TextConverter(rsrc_mgr, ret_str, laparams=la_params)
# Create a PDF interpreter object.
interpreter = PDFPageInterpreter(rsrc_mgr, device)
# Process each page contained in the document.
for page in PDFPage.get_pages(fp):
interpreter.process_page(page)
data = ret_str.getvalue()
return data
def build_HTML_aligned_report(categories, cat_sentences, lst_source_text, lst_target_txt, output_path, source_doc,
translated_doc):
"""
This function generates a simple HTML page that is sentence aligned, per model, containing the HYP, REF and MT text
:param categories: The models we are evaluating
:param cat_sentences: The translated sentences per model
:param lst_source_text: The source (HYP) text
:param lst_target_txt: The reference (REF) text
:param output_path: The path we want to write to
:param source_doc: The source document we are translating
:param translated_doc: The human-translated reference (REF) document
:return:
"""
for cat_id, category_id in enumerate(categories):
for i, source_text in enumerate(lst_source_text):
# Create a simple aligned html report
html_file = open(os.path.join(output_path, 'MT_' + translated_doc[:-3] + '_' + category_id + '.html'), 'a')
if i == 0:
html = """<!DOCTYPE html><html lang = "en" ><head><meta charset = "UTF-8">"""
html_file.write(html)
html = """<title>""" + 'MT_' + translated_doc[:-3] + '_' + category_id + """</title>"""
html_file.write(html)
html = """</head><div>"""
html_file.write(html)
html = """<table id =""" + '"' + source_doc + '"' + """style = "border:1px solid; width:50%;
float:left" frame=void rules=rows><tr>"""
html_file.write(html)
html = """<td><u>""" + source_doc + """</u></td></tr>"""
html_file.write(html)
# Now we add a table row
html = """<tr><td>ENU: """ + source_text + """</td></tr>"""
html_file.write(html)
html = """<tr><td>REF: """ + lst_target_txt[i] + """</td></tr>"""
html_file.write(html)
if i == len(lst_source_text) - 1:
html = """</table>"""
html_file.write(html)
html = """<table id =""" + '"' + translated_doc + '"' + """style = "border:1px solid; width:50%; float:left"
frame=void rules=rows><tr>"""
html_file.write(html)
html = """<td><u>""" + translated_doc + """</u></td></tr>"""
html_file.write(html)
# Now we add a table row
for j, sentence in enumerate(cat_sentences[cat_id]):
html = """<tr><td>MT: """ + sentence + """</td></tr>"""
html_file.write(html)
html = """<tr><td>REF: """ + lst_target_txt[j] + """</td></tr>"""
html_file.write(html)
html = """</table>"""
html_file.write(html)
html = """</div><body></body></html>"""
html_file.write(html)
html_file.close()
logging.debug(f"Generated HTML report "
f"{str(os.path.join(output_path, 'MT_' + translated_doc[:-3] + '_' + category_id + '.html'))}")
def main():
"""
This script takes a source document, reference translated document and:
* Converts the PDF to text
* Sentence aligns the source and reference documents
* Translates the document sentence by sentence against all models
* Generates various reports
:return: Full text translation, CSV file with BLEU scores and text, HTML report with sentences only
"""
# We pass these dynamic arguments in for parallel jobs
parser = argparse.ArgumentParser(description='Process docs for machine translation')
parser.add_argument('--translated-path', type=str,
help='path to translated documents')
parser.add_argument('--source-path', type=str,
help='path to source documents')
parser.add_argument('--translated-doc', type=str,
help='The translated document')
parser.add_argument('--source-doc', type=str, default='',
help='The document to translate')
parser.add_argument('--output-path', type=str, default='',
help='The output path for our translation scores and results')
parser.add_argument('--target-language', type=str, default='',
help='es or fr')
args = parser.parse_args()
set_log_level(Config.DEBUG)
translated_path = args.translated_path
translated_doc = args.translated_doc
source_path = args.source_path
source_doc = args.source_doc
output_path = args.output_path
fr_text = pdf_parser(os.path.join(translated_path, translated_doc))
en_text = pdf_parser(os.path.join(source_path, source_doc))
source_text_doc = source_doc[:-3] + 'txt'
translated_text_doc = translated_doc[:-3] + 'txt'
with open(os.path.join(translated_path, translated_text_doc), 'w') as fr:
fr.write(fr_text)
with open(os.path.join(source_path, source_text_doc), 'w') as en:
en.write(en_text)
target_aligner = os.path.join(translated_path, translated_text_doc)
source_aligner = os.path.join(source_path, source_text_doc)
# Now we call the Microsoft Bilingual Sentence Alignment script
alignment_results = call_sentence_alignment(source_aligner, target_aligner, Config.ALIGNER_PATH)
logging.info(f"Sentence Alignment {alignment_results}")
source_aligned_doc = source_text_doc + '.aligned'
translated_aligned_doc = translated_text_doc + '.aligned'
with open(os.path.join(translated_path, translated_aligned_doc), 'r') as fr:
fr_aligned = fr.read()
with open(os.path.join(source_path, source_aligned_doc), 'r') as en:
en_aligned = en.read()
lst_fr_aligned = fr_aligned.split('\n')
lst_en_aligned = en_aligned.split('\n')
subscription_key = Config.SUBSCRIPTION_KEY
categories = Config.CATEGORIES
categories = categories.strip().split(',')
cat_dicts = [{} for category_id in categories]
lst_target_txt = []
lst_source_text = []
with open(os.path.join(output_path, 'MT_' + translated_doc[:-3] + '_all_models' + '.txt'),
'w') as mt_file:
for i, etxt in enumerate(lst_en_aligned):
logging.debug(f"Processing {i} of {len(lst_en_aligned)}")
hypothesis = lst_fr_aligned[i]
lst_source_text.append(etxt)
lst_target_txt.append(hypothesis)
for cat_ind, category_id in enumerate(categories):
category_id = category_id.strip()
translation_results = call_translation([{'Text': etxt}], args.target_language, category_id,
subscription_key, Config.REGION)
for translations in translation_results:
logging.info(f"CategoryId {category_id} translation {translations}")
for translation in translations['translations']:
translated_text = translation['text']
if len(translated_text) == 0:
bleu_score = 0
bleu_scores = 0
else:
# corpus_bleu expects (system output, reference(s)): score the MT output against the human reference
bleu_scores = sacrebleu.corpus_bleu(translated_text, hypothesis)
bleu_score = bleu_scores.score
logging.info(f"*** Category {category_id}")
mt_file.write(f"\n*** Category {category_id}")
logging.info(f"ENG: {etxt}")
mt_file.write(f"\n ENG: {etxt}")
logging.info(f"REF: {hypothesis}")
mt_file.write(f"\n REF: {hypothesis}")
logging.info(f"MT : {translated_text}")
mt_file.write(f"\n MT : {translated_text}")
logging.info(f"{bleu_scores}")
logging.info(f"\n********************************")
cat_dicts[cat_ind][i] = []
cat_dicts[cat_ind][i].append(translated_text)
cat_dicts[cat_ind][i].append(bleu_score)
logging.info(f"_____________________")
logging.info('\n')
cat_scores = [[] for category_id in categories]
cat_sentences = [[] for category_id in categories]
# This creates a text file for the translated document for each model
for cat_id, category_id in enumerate(categories):
with open(os.path.join(output_path, 'MT_' + translated_doc[:-3] + '_' + category_id + '.txt'),
'w') as mt_file:
for key, value in cat_dicts[cat_id].items():
cat_sentences[cat_id].append(value[0])
cat_scores[cat_id].append(value[1])
mt_file.write(value[0])
data = {'Source': lst_source_text, 'Target': lst_target_txt}
for cat_id, category_id in enumerate(categories):
data[categories[cat_id] + '_score'] = cat_scores[cat_id]
data[categories[cat_id] + '_sentence'] = cat_sentences[cat_id]
df_translated =
|
pd.DataFrame(data)
|
pandas.DataFrame
|
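As a hedged illustration of the BLEU scoring the script above relies on (assuming sacrebleu is installed; the sentences below are invented placeholders, not data from the script):

import sacrebleu

machine_translations = ["le chat est sur le tapis"]        # system output (MT)
human_references = [["le chat est assis sur le tapis"]]    # one reference stream per system segment

corpus_score = sacrebleu.corpus_bleu(machine_translations, human_references)
sentence_score = sacrebleu.sentence_bleu(machine_translations[0], human_references[0])
print(corpus_score.score, sentence_score.score)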
import pickle
from abc import abstractmethod
from typing import List, Set, Optional
import pandas as pd
import os
from src.pipeline.config import Config
from src.pipeline.datasets.constants import DatasetType
class Dataset:
"""
A class that represents a dataset
"""
def __init__(self, dtype: DatasetType, path: str, label_column_name: str, categorical_feature_names: List[str],
numeric_feature_names: List[str], to_load: bool = True, raw_df: Optional[pd.DataFrame] = None,
name: str = '', original_label_column_name: str = ''):
assert os.path.exists(path)
self._path = path
self._to_load = to_load
self._raw_df = raw_df
self._name = self.__class__.__name__ if not name else name
if self._to_load:
self._raw_df = self.load()
print('loading dataset')
self._num_instances, self._num_features = self._raw_df.shape
self._dtype = dtype
self._label_column_name = label_column_name
self._categorical_feature_names = categorical_feature_names
self._numeric_feature_names = numeric_feature_names
self._original_label_column_name = original_label_column_name
self._update_types()
def _update_types(self):
for col in self._categorical_feature_names:
self._raw_df[col] = self._raw_df[col].astype('category')
@property
def original_label_column_name(self) -> str:
return self._original_label_column_name
@original_label_column_name.setter
def original_label_column_name(self, value: str):
self._original_label_column_name = value
@property
def name(self) -> str:
return self._name
@property
def num_features(self) -> int:
return self._num_features
@property
def num_instances(self) -> int:
return self._num_instances
@num_instances.setter
def num_instances(self, value: int):
self._num_instances = value
@property
def dtype(self) -> DatasetType:
return self._dtype
@property
def path(self) -> str:
return self._path
@property
def raw_df(self) -> pd.DataFrame:
return self._raw_df
@raw_df.setter
def raw_df(self, value: pd.DataFrame):
self._raw_df = value
@property
def to_load(self) -> bool:
return self._to_load
@to_load.setter
def to_load(self, value: bool):
self._to_load = value
@property
def label_column_name(self) -> str:
return self._label_column_name
@label_column_name.setter
def label_column_name(self, value: str):
self._label_column_name = value
@property
def numeric_feature_names(self) -> List[str]:
return self._numeric_feature_names
@numeric_feature_names.setter
def numeric_feature_names(self, value: List[str]):
self._numeric_feature_names = value
@property
def categorical_feature_names(self) -> List[str]:
return self._categorical_feature_names
@categorical_feature_names.setter
def categorical_feature_names(self, value: List[str]):
self._categorical_feature_names = value
@classmethod
def concatenate(cls, dataset_list: List['Dataset'], path: str) -> 'Dataset':
dataset_labels: Set[str] = {ds.label_column_name for ds in dataset_list}
assert len(dataset_labels) == 1
dataset_categorical_feature_names: List[List[str]] = [ds.categorical_feature_names for ds in dataset_list]
categorical_feature_names: Set[str] = set()
for inner_list in dataset_categorical_feature_names:
categorical_feature_names |= set(inner_list)
assert categorical_feature_names == set(dataset_list[0].categorical_feature_names)
dataset_numeric_feature_names: List[List[str]] = [ds.numeric_feature_names for ds in dataset_list]
numeric_feature_names: Set[str] = set()
for inner_list in dataset_numeric_feature_names:
numeric_feature_names |= set(inner_list)
assert numeric_feature_names == set(dataset_list[0].numeric_feature_names)
raw_df: pd.DataFrame =
|
pd.concat([ds.raw_df for ds in dataset_list])
|
pandas.concat
|
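A small illustration of the two pandas patterns the Dataset class above relies on, casting declared categorical columns (_update_types) and row-wise concatenation (concatenate); the column names are made up for the example.

import pandas as pd

frames = [
    pd.DataFrame({"color": ["red", "blue"], "price": [1.0, 2.0]}),
    pd.DataFrame({"color": ["blue", "green"], "price": [3.0, 4.0]}),
]
for f in frames:
    f["color"] = f["color"].astype("category")   # mirrors _update_types()

combined = pd.concat(frames)                      # mirrors Dataset.concatenate()
print(combined.dtypes)  # note: pandas may fall back to object dtype when the category sets differ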
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 26 19:54:12 2020
@author: kakdemi
"""
import pandas as pd
#importing daily interchange data
df_data_new = pd.read_excel('Interchange.xlsx',sheet_name='Daily', header=0)
#importing hourly interchange data
SALBRYNB_hourly = pd.read_excel('Interchange.xlsx',sheet_name='SALBRYNB', header=0)
ROSETON_hourly = pd.read_excel('Interchange.xlsx',sheet_name='ROSETON', header=0)
HQ_P1_P2_hourly = pd.read_excel('Interchange.xlsx',sheet_name='HQ_P1_P2', header=0)
HQHIGATE_hourly = pd.read_excel('Interchange.xlsx',sheet_name='HQHIGATE', header=0)
SHOREHAM_hourly = pd.read_excel('Interchange.xlsx',sheet_name='SHOREHAM', header=0)
NORTHPORT_hourly = pd.read_excel('Interchange.xlsx',sheet_name='NORTHPORT', header=0)
#creating a daily index and removing Feb 29s
daily_index = pd.date_range(start='2008-01-01', end='2018-12-31', freq='D')
daily_index = pd.DatetimeIndex.delete(daily_index, [59, 1520, 2981])
daily_interchange = df_data_new.set_index(daily_index)
#Adding all hourly data together and renaming the columns
SALBRYNB_all_hourly = pd.concat([SALBRYNB_hourly[2008], SALBRYNB_hourly[2009], SALBRYNB_hourly[2010], SALBRYNB_hourly[2011], SALBRYNB_hourly[2012], SALBRYNB_hourly[2013], SALBRYNB_hourly[2014], SALBRYNB_hourly[2015], SALBRYNB_hourly[2016], SALBRYNB_hourly[2017], SALBRYNB_hourly[2018]] ,ignore_index=True)
ROSETON_all_hourly = pd.concat([ROSETON_hourly[2008], ROSETON_hourly[2009], ROSETON_hourly[2010], ROSETON_hourly[2011], ROSETON_hourly[2012], ROSETON_hourly[2013], ROSETON_hourly[2014], ROSETON_hourly[2015], ROSETON_hourly[2016], ROSETON_hourly[2017], ROSETON_hourly[2018]] ,ignore_index=True)
HQ_P1_P2_all_hourly = pd.concat([HQ_P1_P2_hourly[2008], HQ_P1_P2_hourly[2009], HQ_P1_P2_hourly[2010], HQ_P1_P2_hourly[2011], HQ_P1_P2_hourly[2012], HQ_P1_P2_hourly[2013], HQ_P1_P2_hourly[2014], HQ_P1_P2_hourly[2015], HQ_P1_P2_hourly[2016], HQ_P1_P2_hourly[2017], HQ_P1_P2_hourly[2018]] ,ignore_index=True)
HQHIGATE_all_hourly = pd.concat([HQHIGATE_hourly[2008], HQHIGATE_hourly[2009], HQHIGATE_hourly[2010], HQHIGATE_hourly[2011], HQHIGATE_hourly[2012], HQHIGATE_hourly[2013], HQHIGATE_hourly[2014], HQHIGATE_hourly[2015], HQHIGATE_hourly[2016], HQHIGATE_hourly[2017], HQHIGATE_hourly[2018]] ,ignore_index=True)
SHOREHAM_all_hourly = pd.concat([SHOREHAM_hourly[2008], SHOREHAM_hourly[2009], SHOREHAM_hourly[2010], SHOREHAM_hourly[2011], SHOREHAM_hourly[2012], SHOREHAM_hourly[2013], SHOREHAM_hourly[2014], SHOREHAM_hourly[2015], SHOREHAM_hourly[2016], SHOREHAM_hourly[2017], SHOREHAM_hourly[2018]] ,ignore_index=True)
NORTHPORT_all_hourly = pd.concat([NORTHPORT_hourly[2008], NORTHPORT_hourly[2009], NORTHPORT_hourly[2010], NORTHPORT_hourly[2011], NORTHPORT_hourly[2012], NORTHPORT_hourly[2013], NORTHPORT_hourly[2014], NORTHPORT_hourly[2015], NORTHPORT_hourly[2016], NORTHPORT_hourly[2017], NORTHPORT_hourly[2018]] ,ignore_index=True)
All_Lines_hourly = pd.concat([SALBRYNB_all_hourly, ROSETON_all_hourly, HQ_P1_P2_all_hourly, HQHIGATE_all_hourly, SHOREHAM_all_hourly, NORTHPORT_all_hourly], axis=1)
All_Lines_hourly.columns = ['SALBRYNB', 'ROSETON', 'HQ_P1_P2', 'HQHIGATE', 'SHOREHAM', 'NORTHPORT']
#creating list to locate Feb 29s
omitted_dates = []
feb_2008 = list(range(1416,1440))
feb_2012 = list(range(36480,36504))
feb_2016 = list(range(71544,71568))
omitted_dates.extend(feb_2008)
omitted_dates.extend(feb_2012)
omitted_dates.extend(feb_2016)
#creating an hourly index and removing Feb 29s
hourly_index = pd.date_range(start='2008-01-01 00:00:00', end='2018-12-31 23:00:00', freq='H')
hourly_index = pd.DatetimeIndex.delete(hourly_index, omitted_dates)
hourly_interchange = All_Lines_hourly.set_index(hourly_index)
#turning daily dates into strings and saving them in a list
daily_date_list = list(daily_index)
daily_date_list = [str(a) for a in daily_date_list]
#defining interchange lines
Interchange_line = ['SALBRYNB', 'ROSETON', 'HQ_P1_P2', 'HQHIGATE', 'SHOREHAM', 'NORTHPORT']
#creating empty dictionaries to store hourly profiles
SALBRYNB_profile_dict = {}
ROSETON_profile_dict = {}
HQ_P1_P2_profile_dict = {}
HQHIGATE_profile_dict = {}
SHOREHAM_profile_dict = {}
NORTHPORT_profile_dict = {}
for line in Interchange_line:
for day_sp in range(len(daily_date_list)):
day_specific = daily_interchange.loc[daily_date_list[day_sp][:-9], line]
if day_specific < 0:
#if the specific day's value is negative, pull out the hourly data for that day, set positive values to zero,
#take the absolute value of the negative values, and build the hourly profile by dividing each hour by the day's total
#(see the small worked example after this loop)
hour_specific = hourly_interchange.loc[daily_date_list[day_sp][:-9], line]
hour_list = list(hour_specific)
transform_hour_list = [0 if i > 0 else i for i in hour_list]
final_hour_list = [abs(a) if a < 0 else a for a in transform_hour_list]
total_change = sum(final_hour_list)
hourly_profile = [b/total_change for b in final_hour_list]
#saving that day's profile into the relevant dictionary
if line == 'SALBRYNB':
SALBRYNB_profile_dict[daily_date_list[day_sp][:-9]] = hourly_profile
elif line == 'ROSETON':
ROSETON_profile_dict[daily_date_list[day_sp][:-9]] = hourly_profile
elif line == 'HQ_P1_P2':
HQ_P1_P2_profile_dict[daily_date_list[day_sp][:-9]] = hourly_profile
elif line == 'HQHIGATE':
HQHIGATE_profile_dict[daily_date_list[day_sp][:-9]] = hourly_profile
elif line == 'SHOREHAM':
SHOREHAM_profile_dict[daily_date_list[day_sp][:-9]] = hourly_profile
elif line == 'NORTHPORT':
NORTHPORT_profile_dict[daily_date_list[day_sp][:-9]] = hourly_profile
else:
pass
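# --- Illustrative aside (not part of the original script) -------------------------------
# A small worked example of the normalisation in the loop above, with made-up numbers:
# hourly values [2, -4, -6, 0] on a negative day become [0, 4, 6, 0] (positives zeroed,
# negatives flipped), total 10, giving the hourly profile [0.0, 0.4, 0.6, 0.0].
example_hours = [2, -4, -6, 0]
clipped = [0 if h > 0 else abs(h) for h in example_hours]
example_profile = [h / sum(clipped) for h in clipped]   # -> [0.0, 0.4, 0.6, 0.0]
# -----------------------------------------------------------------------------------------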
###############################################
######## AVERAGE YEAR FOR SALBRYNB ############
###############################################
#turn dictionary into dataframe, transpose it and store month and day in a list
SALBRYNB_df = pd.DataFrame(SALBRYNB_profile_dict).T
if not SALBRYNB_df.empty:
Month_day = []
for row in range(SALBRYNB_df.shape[0]):
Month_day.append(str(SALBRYNB_df.index[row])[5:10])
#take average of the hourly profiles to find an average year, and reindex the dataframe
SALBRYNB_df['Date'] = Month_day
Average_Year = SALBRYNB_df.groupby(['Date']).mean()
New_Index = [str(value) for value in list(Average_Year.index)]
Average_Year["New_Index"] = New_Index
Average_Year.set_index("New_Index")
#dropping unused column, and sorting
del Average_Year['New_Index']
Average_Year_SALBRYNB = Average_Year.sort_index()
#creating a hypothetical index to use get_loc function later
index_change = list(Average_Year_SALBRYNB.index)
index_change = [i+'-2010' for i in index_change]
Average_Year_SALBRYNB['Hyp_Date'] = index_change
Average_Year_SALBRYNB = Average_Year_SALBRYNB.set_index('Hyp_Date')
Average_Year_SALBRYNB.index = pd.to_datetime(Average_Year_SALBRYNB.index)
Average_Year_SALBRYNB = Average_Year_SALBRYNB.sort_index()
else:
pass
###############################################
######## AVERAGE YEAR FOR ROSETON #############
###############################################
#turn dictionary into dataframe, transpose it and store month and day in a list
ROSETON_df = pd.DataFrame(ROSETON_profile_dict).T
if not ROSETON_df.empty:
Month_day = []
for row in range(ROSETON_df.shape[0]):
Month_day.append(str(ROSETON_df.index[row])[5:10])
#take average of the hourly profiles to find an average year, and reindex the dataframe
ROSETON_df['Date'] = Month_day
Average_Year = ROSETON_df.groupby(['Date']).mean()
New_Index = [str(value) for value in list(Average_Year.index)]
Average_Year["New_Index"] = New_Index
Average_Year.set_index("New_Index")
#dropping unused column, and sorting
del Average_Year['New_Index']
Average_Year_ROSETON = Average_Year.sort_index()
#creating a hypothetical index to use get_loc function later
index_change = list(Average_Year_ROSETON.index)
index_change = [i+'-2010' for i in index_change]
Average_Year_ROSETON['Hyp_Date'] = index_change
Average_Year_ROSETON = Average_Year_ROSETON.set_index('Hyp_Date')
Average_Year_ROSETON.index =
|
pd.to_datetime(Average_Year_ROSETON.index)
|
pandas.to_datetime
|
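A compact sketch of the "average year" construction used in the script above, on synthetic data; the dummy year 2010 is an arbitrary choice for the hypothetical index, as in the original.

import pandas as pd

idx = pd.date_range("2008-01-01", "2010-12-31", freq="D")
df = pd.DataFrame({"flow": range(len(idx))}, index=idx)
df = df[~((df.index.month == 2) & (df.index.day == 29))]          # drop Feb 29, as in the script

month_day = df.index.strftime("%m-%d")
average_year = df.groupby(month_day).mean()                        # mean over all years per calendar day
average_year.index = pd.to_datetime(average_year.index + "-2010")  # hypothetical dummy year
average_year = average_year.sort_index()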
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 29 08:35:09 2019
@author: user
"""
# execute primary input data building script
# import build_input_res_heating
print('####################')
print('BUILDING INPUT DATA FOR INCLUDING DEMAND-SIDE RESPONSE, ENERGY EFFICIENCY AND DHW BOILERS')
print('####################')
import os
import itertools
import hashlib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import grimsel.auxiliary.sqlutils.aux_sql_func as aql
import datetime
import seaborn as sns
from grimsel.auxiliary.aux_general import print_full
from grimsel.auxiliary.aux_general import translate_id
import config_local as conf
from grimsel.auxiliary.aux_general import expand_rows
base_dir = conf.BASE_DIR
data_path = conf.PATH_CSV
data_path_prv = conf.PATH_CSV + '_res_heating'
seed = 2
np.random.seed(seed)
db = conf.DATABASE
sc = conf.SCHEMA
#db = 'grimsel_1'
#sc = 'lp_input_ht_ee_dsm'
def append_new_rows(df, tb):
list_col = list(aql.get_sql_cols(tb, sc, db).keys())
aql.write_sql(df[list_col], db=db, sc=sc, tb=tb, if_exists='append')
def del_new_rows(ind, tb, df):
del_list = df[ind].drop_duplicates()
for i in ind:
del_list[i] = '%s = '%i + del_list[i].astype(str)
del_str = ' OR '.join(del_list.apply(lambda x: '(' + ' AND '.join(x) + ')', axis=1))
exec_strg = '''
DELETE FROM {sc}.{tb}
WHERE {del_str}
'''.format(tb=tb, sc=sc, del_str=del_str)
aql.exec_sql(exec_strg, db=db)
def replace_table(df, tb):
print('Replace table %s'%tb)
# list_col = list(aql.get_sql_cols(tb, sc, db).keys())
aql.write_sql(df, db=db, sc=sc, tb=tb, if_exists='replace')
def append_new_cols(df, tb):
#
list_col = list(aql.get_sql_cols(tb, sc, db).keys())
col_new = dict.fromkeys((set(df.columns.tolist()) - set(list_col)))
for key, value in col_new.items():
col_new[key] = 'DOUBLE PRECISION'
# col_new = dict.fromkeys((set(list_col[0].columns.tolist()) - set(list_col)),1)
aql.add_column(df_src=df,tb_tgt=[sc,tb],col_new=col_new,on_cols=list_col, db=db)
# %% DHW loads
dfload_arch_dhw = pd.read_csv(base_dir + '/dsr_ee_dhw/demand/dmnd_archetypes_dhw_dec.csv')
dfload_arch_dhw['DateTime'] = dfload_arch_dhw['DateTime'].astype('datetime64[ns]')
# dfload_arch_dhw = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_dhw_dec')
dferg_arch_dhw = dfload_arch_dhw.groupby('nd_id')['erg_tot'].sum().reset_index()
dferg_arch_dhw['nd_id_new'] = dferg_arch_dhw.nd_id
dfload_arch_dhw_central = pd.read_csv(base_dir + '/dsr_ee_dhw/demand/dmnd_archetypes_dhw_cen.csv')
# dfload_arch_dhw_central = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_dhw_cen')
dferg_arch_dhw_central = dfload_arch_dhw_central.groupby('nd_id')['erg_tot'].sum().reset_index()
dferg_arch_dhw_central['nd_id_new'] = dferg_arch_dhw_central.nd_id
# dfload_dhw_elec = pd.read_csv(os.path.join(base_dir,'../heat_dhw/dhw_el_load_night_charge.csv'),sep=';')
# dfload_dhw_elec['DateTime'] = pd.to_datetime(dfload_dhw_elec.DateTime)
# dfload_dhw_remove = pd.merge(dfload_arch_dhw,dfload_dhw_elec.drop(columns='dhw_mw'), on='DateTime' )
# dfload_dhw_remove = pd.merge(dfload_dhw_remove,dferg_arch_dhw.drop(columns='nd_id_new').rename(columns={'erg_tot':'erg_year'}),on='nd_id'
# ).assign(load_dhw_rem = lambda x: x.dhw_rel_load*x.erg_year)
# %% Central DHW loads
#Bau load
dfload_arch_dhw_central = pd.read_csv(base_dir + '/dsr_ee_dhw/demand/dmnd_archetypes_dhw_cen.csv')
# dfload_arch_dhw_central = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_dhw_cen')
dfload_arch_dhw_central['erg_tot'] = dfload_arch_dhw_central.erg_tot/24 # MWh -> MW
dfload_arch_dhw_central['erg_tot_retr_1pc'] = dfload_arch_dhw_central.erg_tot # already converted to MW on the previous line
dfload_arch_dhw_central['erg_tot_retr_2pc'] = dfload_arch_dhw_central.erg_tot # already converted to MW on the previous line
dfload_arch_dhw_central = dfload_arch_dhw_central.set_index('DateTime')
dfload_arch_dhw_central.index = pd.to_datetime(dfload_arch_dhw_central.index)
#fossil load
dfload_arch_dhw_central_fossil = pd.read_csv(base_dir + '/dsr_ee_dhw/demand/dmnd_archetypes_dhw_cen_fossil.csv')
# dfload_arch_dhw_central_fossil = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_dhw_cen_fossil')
dfload_arch_dhw_central_fossil['erg_tot_fossil'] = dfload_arch_dhw_central_fossil.erg_tot/24 # MWh -> MW
dfload_arch_dhw_central_fossil['erg_tot_retr_1pc_fossil'] = dfload_arch_dhw_central_fossil.erg_tot/24 # MWh -> MW
dfload_arch_dhw_central_fossil['erg_tot_retr_2pc_fossil'] = dfload_arch_dhw_central_fossil.erg_tot/24 # MWh -> MW
dfload_arch_dhw_central_fossil = dfload_arch_dhw_central_fossil.drop(columns='erg_tot')
dfload_arch_dhw_central_fossil = dfload_arch_dhw_central_fossil.set_index('DateTime')
dfload_arch_dhw_central_fossil.index = pd.to_datetime(dfload_arch_dhw_central_fossil.index)
dfload_arch_dhw_central = dfload_arch_dhw_central.reset_index()
dfload_arch_dhw_central = pd.merge(dfload_arch_dhw_central,dfload_arch_dhw_central_fossil,on=['index','doy','nd_id'])
dfload_arch_dhw_central = dfload_arch_dhw_central.set_index('DateTime')
dfload_arch_dhw_central.index = pd.to_datetime(dfload_arch_dhw_central.index)
# %% Separation for aw and ww heat pumps DHW central
dfload_arch_dhw_central_aw = dfload_arch_dhw_central.copy()
dfload_arch_dhw_central_aw[['erg_tot', 'erg_tot_fossil',
'erg_tot_retr_1pc', 'erg_tot_retr_2pc', 'erg_tot_retr_1pc_fossil',
'erg_tot_retr_2pc_fossil']] *= 0.615
dfload_arch_dhw_central_ww = dfload_arch_dhw_central.copy()
dfload_arch_dhw_central_ww[['erg_tot', 'erg_tot_fossil',
'erg_tot_retr_1pc', 'erg_tot_retr_2pc', 'erg_tot_retr_1pc_fossil',
'erg_tot_retr_2pc_fossil']] *= 0.385
# %% DSR loads
dfload_arch_dsr_sfh_1day = pd.read_csv(base_dir + '/dsr_ee_dhw/demand/dmnd_archetypes_dsr_sfh_1day.csv')
dfload_arch_dsr_mfh_1day = pd.read_csv(base_dir + '/dsr_ee_dhw/demand/dmnd_archetypes_dsr_mfh_1day.csv')
# dfload_arch_dsr_sfh_1day = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_dsr_sfh_1day')
# dfload_arch_dsr_mfh_1day = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_dsr_mfh_1day')
dfload_arch_dsr_sfh_1h = pd.read_csv(base_dir + '/dsr_ee_dhw/demand/dmnd_archetypes_dsr_sfh_1h.csv')
dfload_arch_dsr_sfh_1h['DateTime'] = dfload_arch_dsr_sfh_1h['DateTime'].astype('datetime64[ns]')
dfload_arch_dsr_mfh_1h = pd.read_csv(base_dir + '/dsr_ee_dhw/demand/dmnd_archetypes_dsr_mfh_1h.csv')
dfload_arch_dsr_mfh_1h['DateTime'] = dfload_arch_dsr_mfh_1h['DateTime'].astype('datetime64[ns]')
# dfload_arch_dsr_sfh_1h = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_dsr_sfh_1h')
# dfload_arch_dsr_mfh_1h = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_dsr_mfh_1h')
dfload_arch_dsr_1day = pd.concat([dfload_arch_dsr_sfh_1day,dfload_arch_dsr_mfh_1day])
dfload_arch_dsr_1day['erg_dsr_1day_MW'] = dfload_arch_dsr_1day.erg_dsr_1day/24 # MWh -> MW
dfload_arch_dsr_1h = pd.concat([dfload_arch_dsr_sfh_1h,dfload_arch_dsr_mfh_1h])
dfload_arch_dsr_1h_2015 = dfload_arch_dsr_1h.loc[dfload_arch_dsr_1h.nd_id.str.contains('2015')]
dfload_arch_dsr_1h_2015 = dfload_arch_dsr_1h_2015.reset_index(drop=True)
dferg_arch_dsr = dfload_arch_dsr_1day.groupby('nd_id')['erg_dsr_1day'].sum().reset_index()
# dferg_arch_dsr_1h = dfload_arch_dsr_1h.groupby('nd_id')['erg_dsr_1h'].sum().reset_index()
dferg_arch_dsr_1day = dfload_arch_dsr_1day.groupby('nd_id')['erg_dsr_1day'].sum().reset_index()
dferg_arch_dsr['nd_id_new'] = dferg_arch_dsr.nd_id.str[0:13]
dferg_arch_dsr_1day['nd_id_new'] = dferg_arch_dsr.nd_id.str[0:13]
dferg_arch_dsr_1day['erg_dsr_2015'] = dferg_arch_dsr_1day.loc[dferg_arch_dsr_1day.nd_id.str.contains('DSR_2015')].erg_dsr_1day
dferg_arch_dsr_1day['erg_dsr_2035'] = dferg_arch_dsr_1day.loc[dferg_arch_dsr_1day.nd_id.str.contains('DSR_2035')].erg_dsr_1day
dferg_arch_dsr_1day['erg_dsr_2050'] = dferg_arch_dsr_1day.loc[dferg_arch_dsr_1day.nd_id.str.contains('DSR_2050')].erg_dsr_1day
dferg_arch_dsr_1day['erg_dsr_best_2035'] = dferg_arch_dsr_1day.loc[dferg_arch_dsr_1day.nd_id.str.contains('DSR_best_2035')].erg_dsr_1day
# dferg_arch_dsr_1day = dferg_arch_dsr_1day.fillna(method='ffill').loc[dferg_arch_dsr_1day.nd_id.str.contains('2050')].reset_index(drop=True)
dferg_arch_dsr_1day = dferg_arch_dsr_1day.fillna(method='ffill').loc[dferg_arch_dsr_1day.nd_id.str.contains('DSR_best_2035')].reset_index(drop=True)
# %% EE loads just others (without DSR hourly demand)
dfload_arch_ee_sfh = pd.read_csv(base_dir + '/dsr_ee_dhw/demand/dmnd_archetypes_ee_sfh_diff_wo_dsr.csv')
dfload_arch_ee_sfh['DateTime'] = dfload_arch_ee_sfh['DateTime'].astype('datetime64[ns]')
dfload_arch_ee_mfh = pd.read_csv(base_dir + '/dsr_ee_dhw/demand/dmnd_archetypes_ee_mfh_diff_wo_dsr.csv')
dfload_arch_ee_mfh['DateTime'] = dfload_arch_ee_mfh['DateTime'].astype('datetime64[ns]')
# dfload_arch_ee_sfh = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_ee_sfh_diff_wo_dsr')
# dfload_arch_ee_mfh = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_ee_mfh_diff_wo_dsr')
dfload_arch_ee = pd.concat([dfload_arch_ee_sfh,dfload_arch_ee_mfh])
dferg_arch_ee = dfload_arch_ee.groupby('nd_id')['erg_diff'].sum().reset_index()
# dferg_arch_ee['nd_id_new'] = dferg_arch_dhw.nd_id
# %% COP profile
#
#dfcop_pr_35 = aql.read_sql('grimsel_1', 'profiles_raw','cop_35')
dfcop_pr_60 = pd.read_csv(base_dir + '/dsr_ee_dhw/cop/cop_60.csv')
# dfcop_pr_60 = aql.read_sql('grimsel_1', 'profiles_raw','cop_60')
#
dfcop_pr_60_dhw = dfcop_pr_60
dfcop_pr_60_dhw['pp_id'] = dfcop_pr_60.pp_id.str.replace('HP','DHW')
#dfcop_pr_35 = dfcop_pr_35.set_index('DateTime')
#dfcop_pr_35.index = pd.to_datetime(dfcop_pr_35.index)
# dfcop_pr_60 = dfcop_pr_60.set_index('DateTime')
# dfcop_pr_60.index = pd.to_datetime(dfcop_pr_60.index)
dfcop_pr_60_dhw = dfcop_pr_60_dhw.set_index('DateTime')
dfcop_pr_60_dhw.index = pd.to_datetime(dfcop_pr_60_dhw.index)
#
#dfcop_pr_35['hy'] = 24*dfcop_pr_35.doy - 24
dfcop_pr_60_dhw['hy'] = 24*dfcop_pr_60_dhw.doy - 24
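# --- Illustrative aside (not part of the original script) -------------------------------
# The 'hy' column maps a 1-based day of year to the 0-based first hour of that day,
# e.g. doy=1 -> hy=0 and doy=2 -> hy=24. Quick sanity check with plain integers:
assert 24 * 1 - 24 == 0 and 24 * 2 - 24 == 24
# -----------------------------------------------------------------------------------------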
# %% ~~~~~~~~~~~~~~~~~~ DEF_NODE (we add DSR nodes)
color_nd = {'MFH_RUR_0_DSR': '#472503',
'MFH_RUR_1_DSR': '#472503',
'MFH_RUR_2_DSR': '#472503',
'MFH_RUR_3_DSR': '#472503',
'MFH_SUB_0_DSR': '#041FA3',
'MFH_SUB_1_DSR': '#041FA3',
'MFH_SUB_2_DSR': '#041FA3',
'MFH_SUB_3_DSR': '#041FA3',
'MFH_URB_0_DSR': '#484A4B',
'MFH_URB_1_DSR': '#484A4B',
'MFH_URB_2_DSR': '#484A4B',
'MFH_URB_3_DSR': '#484A4B',
'SFH_RUR_0_DSR': '#0A81EE',
'SFH_RUR_1_DSR': '#0A81EE',
'SFH_RUR_2_DSR': '#0A81EE',
'SFH_RUR_3_DSR': '#0A81EE',
'SFH_SUB_0_DSR': '#6D3904',
'SFH_SUB_1_DSR': '#6D3904',
'SFH_SUB_2_DSR': '#6D3904',
'SFH_SUB_3_DSR': '#6D3904',
'SFH_URB_0_DSR': '#818789',
'SFH_URB_1_DSR': '#818789',
'SFH_URB_2_DSR': '#818789',
'SFH_URB_3_DSR': '#818789'
}
col_nd_df = pd.DataFrame.from_dict(color_nd, orient='index').reset_index().rename(columns={'index': 'nd',0:'color'})
df_def_node_0 = pd.read_csv(data_path_prv + '/def_node.csv')
# df_def_node_0 = aql.read_sql(db, sc, 'def_node')
# df_nd_add = pd.DataFrame(pd.concat([dferg_arch_dsr.nd_id_new.rename('nd'),
# ], axis=0)).reset_index(drop=True)
df_nd_add = pd.DataFrame(dferg_arch_dsr.nd_id_new.unique()).rename(columns={0:'nd'}
).reset_index(drop=True)
#
nd_id_max = df_def_node_0.loc[~df_def_node_0.nd.isin(df_nd_add.nd)].nd_id.max()
df_nd_add['nd_id'] = np.arange(0, len(df_nd_add)) + nd_id_max + 1
#
df_nd_add = pd.merge(df_nd_add,col_nd_df, on = 'nd')
#
df_def_node = df_def_node_0.copy()
df_def_node = df_nd_add.reindex(columns=df_def_node_0.columns.tolist()).fillna(0).reset_index(drop=True)
# df_def_node_new = pd.concat([df_def_node,df_nd_add.reindex(columns=df_def_node_0.columns.tolist()).fillna(0)]).reset_index(drop=True)
#
dict_nd_id_dsr = df_nd_add.set_index('nd')['nd_id'].to_dict()
#
# dict_nd_id = {nd_old: dict_nd_id[nd] for nd_old, nd in dict_nd_ht.items()
# if nd in dict_nd_id}
df_nd_res_el = df_def_node_0.loc[~df_def_node_0.nd.str.contains('HT') & df_def_node_0.nd.str.contains('SFH|MFH')]
df_nd_not_res = df_def_node_0.loc[~df_def_node_0.nd.str.contains('MFH|SFH')]
df_nd_arch_el = df_def_node_0.loc[~df_def_node_0.nd.str.contains('HT') & df_def_node_0.nd.str.contains('SFH|MFH|OCO|IND')]
df_nd_arch_ht = df_def_node_0.loc[df_def_node_0.nd.str.contains('HT')]
df_nd_ch0_el = df_def_node_0.loc[df_def_node_0.nd.str.contains('CH0')]
dict_nd_res_el = df_nd_res_el.set_index('nd')['nd_id'].to_dict()
dict_nd_arch_ht = df_nd_arch_ht.set_index('nd')['nd_id'].to_dict()
# %% ~~~~~~~~~~~~~~~~~~~~~~~ DEF_PP_TYPE
df_def_pp_type_0 = pd.read_csv(data_path_prv + '/def_pp_type.csv')
# df_def_pp_type_0 = aql.read_sql(db, sc, 'def_pp_type')
df_def_pp_type = df_def_pp_type_0.copy().head(0)
for npt, pt, cat, color in ((0, 'DHW_BO_SFH', 'DHW_BOILER_SFH', '#D9F209'),
(1, 'DHW_BO_MFH', 'DHW_BOILER_MFH', '#D9F209'),
(2, 'DHW_STO_SFH', 'DHW_STORAGE_SFH', '#28A503'),
(3, 'DHW_STO_MFH', 'DHW_STORAGE_MFH', '#1A6703'),
(4, 'DHW_AW_SFH', 'DHW_HEATPUMP_AIR_SFH', '#F2D109'),
(5, 'DHW_WW_SFH', 'DHW_HEATPUMP_WAT_SFH', '#F2D109'),
(6, 'DHW_AW_MFH', 'DHW_HEATPUMP_AIR_MFH', '#F2D109'),
(7, 'DHW_WW_MFH', 'DHW_HEATPUMP_WAT_MFH', '#F2D109'),
# (8, 'STO_HT_SFH', 'HEAT_STORAGE_SFH', '#F2D109'),
# (9, 'STO_HT_MFH', 'HEAT_STORAGE_MFH', '#F2D109'),):
# (10, 'STO_CAES_CH0', 'NEW_STORAGE_CAES_CH0', '#D9F209')
):
df_def_pp_type.loc[npt] = (npt, pt, cat, color)
df_def_pp_type.loc[:,'pt_id'] = np.arange(0, len(df_def_pp_type)) + df_def_pp_type_0.pt_id.max() + 1
# %% ~~~~~~~~~~~~~~~~~~~~~~ DEF_FUEL for DHW
#
df_def_fuel_0 = pd.read_csv(data_path_prv + '/def_fuel.csv')
# df_def_fuel_0 = aql.read_sql(db, sc, 'def_fuel')
df_def_fuel = df_def_fuel_0.copy().head(0)
for nfl, fl, co2_int, ca, constr, color in ((0, 'ca_dhw', 0,0,0, 'p'),
(1, 'dhw_storage', 0,0,0, 'r'),
(2, 'ca_dhw_aw', 0,0,0, 'r'),
(3, 'ca_dhw_ww', 0,0,0, 'r'),
):
#
df_def_fuel.loc[nfl] = (nfl, fl, co2_int, ca, constr, color)
df_def_fuel.loc[:,'fl_id'] = np.arange(0, len(df_def_fuel)) + df_def_fuel_0.fl_id.max() + 1
# %% ~~~~~~~~~~~~~~~~~~~~~~ DEF_ENCAR for DHW
#
df_def_encar_0 = pd.read_csv(data_path_prv + '/def_encar.csv')
# df_def_encar_0 = aql.read_sql(db, sc, 'def_encar')
df_def_encar = df_def_encar_0.copy().head(0)
for nca, fl_id, ca in ((0, 27, 'HW'),
(1, 29, 'HA'),
(2, 30, 'HB'),
):
df_def_encar.loc[nca] = (nca, fl_id, ca)
df_def_encar.loc[:,'ca_id'] = np.arange(0, len(df_def_encar)) + df_def_encar_0.ca_id.max() + 1
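# --- Illustrative aside (not part of the original input data) ----------------------------
# The def_* tables above are all extended with the same pattern: build the new rows with the
# template's columns, then shift their ids past the existing maximum. A minimal version of
# that pattern with made-up data:
_df_existing = pd.DataFrame({'fl_id': [0, 1, 2], 'fl': ['gas', 'oil', 'wind']})
_df_new = pd.DataFrame({'fl': ['ca_dhw', 'dhw_storage']})
_df_new['fl_id'] = np.arange(0, len(_df_new)) + _df_existing.fl_id.max() + 1   # ids 3 and 4
_df_all = pd.concat([_df_existing, _df_new], ignore_index=True)
# -----------------------------------------------------------------------------------------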
# %% ~~~~~~~~~~~~~~~~~~~~~~~ DEF_PLANT
df_def_plant_0 = pd.read_csv(data_path_prv + '/def_plant.csv')
# df_def_plant_0 = aql.read_sql(db, sc, 'def_plant')
df_pp_add_arch = pd.DataFrame(df_nd_res_el.nd).rename(columns={'nd': 'nd_id'})
df_pp_add_1 = df_pp_add_arch.nd_id.str.slice(stop=3)
df_pp_add = pd.DataFrame()
# TODO: maybe add different boiler/storage types here
for sfx, fl_id, pt_id, set_1 in [('_DHW_BO', 'ca_electricity', 'DHW_BO_', ['set_def_pp']),
('_DHW_STO', 'dhw_storage', 'DHW_STO_', ['set_def_st']),
]:
new_pp_id = df_def_plant_0.pp_id.max() + 1
data = dict(pp=df_pp_add_arch + sfx,
fl_id=fl_id, pt_id=pt_id + df_pp_add_1 , pp_id=np.arange(new_pp_id, new_pp_id + len(df_pp_add_arch)),
**{st: 1 if st in set_1 else 0 for st in [c for c in df_def_plant_0.columns if 'set' in c]})
df_pp_add = df_pp_add.append(df_pp_add_arch.assign(**data), sort=True)
df_pp_add.pp_id = np.arange(0, len(df_pp_add)) + df_pp_add.pp_id.min()
df_pp_add_ht = pd.DataFrame(df_nd_arch_ht.nd).rename(columns={'nd': 'nd_id'})
df_pp_add_2 = df_pp_add_ht.nd_id.str.slice(stop=3)
for sfx, fl_id, pt_id, set_1 in [
('_DHW_AW', 'ca_electricity', 'DHW_AW_', ['set_def_pp']),
('_DHW_WW', 'ca_electricity', 'DHW_WW_', ['set_def_pp']),
]:
new_pp_id = df_def_plant_0.pp_id.max() + 1
data = dict(pp=df_pp_add_ht + sfx,
fl_id=fl_id, pt_id=pt_id + df_pp_add_2 , pp_id=np.arange(new_pp_id, new_pp_id + len(df_pp_add_ht)),
**{st: 1 if st in set_1 else 0 for st in [c for c in df_def_plant_0.columns if 'set' in c]})
df_pp_add = df_pp_add.append(df_pp_add_ht.assign(**data), sort=True)
df_pp_add.pp_id = np.arange(0, len(df_pp_add)) + df_pp_add.pp_id.min()
df_def_plant = df_pp_add[df_def_plant_0.columns].reset_index(drop=True)
for df, idx in [(pd.concat([df_def_fuel_0,df_def_fuel]), 'fl'), (df_def_pp_type, 'pt'), (pd.concat([df_def_node_0]), 'nd')]:
df_def_plant, _ = translate_id(df_def_plant, df, idx)
#df_def_plant_new = pd.concat([df_def_plant_0,df_def_plant]).reset_index(drop=True)
# %% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DEF_PROFILE for DSHW and DSR
df_def_profile_0 = pd.read_csv(data_path_prv + '/def_profile.csv')
# df_def_profile_0 = aql.read_sql(db, sc, 'def_profile')
## COP profile
df_def_profile_cop_60 = df_nd_arch_ht.nd.copy().rename('primary_nd').reset_index()
df_def_profile_cop_60['pf'] = 'cop_60_' + df_def_profile_cop_60.primary_nd
df_def_profile_cop_60['pf_id'] = df_def_profile_cop_60.index.rename('pf_id') + df_def_profile_0.pf_id.max() + 1
df_def_profile_cop_60 = df_def_profile_cop_60[df_def_profile_0.columns]
# Demand profile decentral DHW
df_def_profile_dmnd_dhw = df_nd_res_el.nd.copy().rename('primary_nd').reset_index()
df_def_profile_dmnd_dhw['pf'] = 'demand_DHW_' + df_def_profile_dmnd_dhw.primary_nd
df_def_profile_dmnd_dhw['pf_id'] = df_def_profile_dmnd_dhw.index.rename('pf_id') + df_def_profile_cop_60.pf_id.max() + 1
df_def_profile_dmnd_dhw = df_def_profile_dmnd_dhw[df_def_profile_0.columns]
# Demand profiles heat A/W and W/W
#
df_def_profile_dmnd_dhw_aw = df_nd_arch_ht.nd.copy().rename('primary_nd').reset_index()
df_def_profile_dmnd_dhw_aw['pf'] = 'demand_DHW_AW_' + df_def_profile_dmnd_dhw_aw.primary_nd
df_def_profile_dmnd_dhw_aw['pf_id'] = df_def_profile_dmnd_dhw_aw.index.rename('pf_id') + df_def_profile_dmnd_dhw.pf_id.max() + 1
df_def_profile_dmnd_dhw_aw = df_def_profile_dmnd_dhw_aw[df_def_profile_0.columns]
df_def_profile_dmnd_dhw_ww = df_nd_arch_ht.nd.copy().rename('primary_nd').reset_index()
df_def_profile_dmnd_dhw_ww['pf'] = 'demand_DHW_WW_' + df_def_profile_dmnd_dhw_ww.primary_nd
df_def_profile_dmnd_dhw_ww['pf_id'] = df_def_profile_dmnd_dhw_ww.index.rename('pf_id') + df_def_profile_dmnd_dhw_aw.pf_id.max() + 1
df_def_profile_dmnd_dhw_ww = df_def_profile_dmnd_dhw_ww[df_def_profile_0.columns]
#
# Demand profile for EE
df_def_profile_dmnd_ee_2035 = df_nd_res_el.nd.copy().rename('primary_nd').reset_index()
df_def_profile_dmnd_ee_2035['pf'] = 'demand_EL_' + df_def_profile_dmnd_ee_2035.primary_nd + '_diff_2035_2015'
df_def_profile_dmnd_ee_2035['pf_id'] = df_def_profile_dmnd_ee_2035.index.rename('pf_id') + df_def_profile_dmnd_dhw_ww.pf_id.max() + 1
df_def_profile_dmnd_ee_2035 = df_def_profile_dmnd_ee_2035[df_def_profile_0.columns]
df_def_profile_dmnd_ee_2050 = df_nd_res_el.nd.copy().rename('primary_nd').reset_index()
df_def_profile_dmnd_ee_2050['pf'] = 'demand_EL_' + df_def_profile_dmnd_ee_2050.primary_nd + '_diff_2050_2015'
df_def_profile_dmnd_ee_2050['pf_id'] = df_def_profile_dmnd_ee_2050.index.rename('pf_id') + df_def_profile_dmnd_ee_2035.pf_id.max() + 1
df_def_profile_dmnd_ee_2050 = df_def_profile_dmnd_ee_2050[df_def_profile_0.columns]
df_def_profile_dmnd_ee_best_2035 = df_nd_res_el.nd.copy().rename('primary_nd').reset_index()
df_def_profile_dmnd_ee_best_2035['pf'] = 'demand_EL_' + df_def_profile_dmnd_ee_best_2035.primary_nd + '_diff_best_2035_2015'
df_def_profile_dmnd_ee_best_2035['pf_id'] = df_def_profile_dmnd_ee_best_2035.index.rename('pf_id') + df_def_profile_dmnd_ee_2050.pf_id.max() + 1
df_def_profile_dmnd_ee_best_2035 = df_def_profile_dmnd_ee_best_2035[df_def_profile_0.columns]
# Demand profile for DSR
df_def_profile_dmnd_dsr_2015 = df_nd_add.nd.copy().rename('primary_nd').reset_index()
df_def_profile_dmnd_dsr_2015['pf'] = 'demand_EL_' + df_def_profile_dmnd_dsr_2015.primary_nd + '_2015'
df_def_profile_dmnd_dsr_2015['pf_id'] = df_def_profile_dmnd_dsr_2015.index.rename('pf_id') + df_def_profile_dmnd_ee_best_2035.pf_id.max() + 1
df_def_profile_dmnd_dsr_2015 = df_def_profile_dmnd_dsr_2015[df_def_profile_0.columns]
df_def_profile_dmnd_dsr_2035 = df_nd_add.nd.copy().rename('primary_nd').reset_index()
df_def_profile_dmnd_dsr_2035['pf'] = 'demand_EL_' + df_def_profile_dmnd_dsr_2035.primary_nd + '_2035'
df_def_profile_dmnd_dsr_2035['pf_id'] = df_def_profile_dmnd_dsr_2035.index.rename('pf_id') + df_def_profile_dmnd_dsr_2015.pf_id.max() + 1
df_def_profile_dmnd_dsr_2035 = df_def_profile_dmnd_dsr_2035[df_def_profile_0.columns]
df_def_profile_dmnd_dsr_2050 = df_nd_add.nd.copy().rename('primary_nd').reset_index()
df_def_profile_dmnd_dsr_2050['pf'] = 'demand_EL_' + df_def_profile_dmnd_dsr_2050.primary_nd + '_2050'
df_def_profile_dmnd_dsr_2050['pf_id'] = df_def_profile_dmnd_dsr_2050.index.rename('pf_id') + df_def_profile_dmnd_dsr_2035.pf_id.max() + 1
df_def_profile_dmnd_dsr_2050 = df_def_profile_dmnd_dsr_2050[df_def_profile_0.columns]
df_def_profile_dmnd_dsr_best_2035 = df_nd_add.nd.copy().rename('primary_nd').reset_index()
df_def_profile_dmnd_dsr_best_2035['pf'] = 'demand_EL_' + df_def_profile_dmnd_dsr_best_2035.primary_nd + '_best_2035'
df_def_profile_dmnd_dsr_best_2035['pf_id'] = df_def_profile_dmnd_dsr_best_2035.index.rename('pf_id') + df_def_profile_dmnd_dsr_2050.pf_id.max() + 1
df_def_profile_dmnd_dsr_best_2035 = df_def_profile_dmnd_dsr_best_2035[df_def_profile_0.columns]
df_def_profile = pd.concat([df_def_profile_cop_60,df_def_profile_dmnd_dhw,
df_def_profile_dmnd_dhw_aw,df_def_profile_dmnd_dhw_ww,
df_def_profile_dmnd_ee_2035,df_def_profile_dmnd_ee_2050,
df_def_profile_dmnd_ee_best_2035,
df_def_profile_dmnd_dsr_2015,
df_def_profile_dmnd_dsr_2035,df_def_profile_dmnd_dsr_2050,df_def_profile_dmnd_dsr_best_2035,
], axis=0)
df_def_profile = df_def_profile.reset_index(drop=True)
df_def_profile
# %% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ NODE_ENCAR for DHW and DSR
df_node_encar_0 = pd.read_csv(data_path_prv + '/node_encar.csv')
# df_node_encar_0 = aql.read_sql(db, sc, 'node_encar')
df_ndca_add_dhw_decentral = (dferg_arch_dhw.loc[dferg_arch_dhw.nd_id_new.isin(df_nd_res_el.nd), ['nd_id_new', 'erg_tot']]
.rename(columns={'erg_tot': 'dmnd_sum', 'nd_id_new': 'nd_id'}))
df_ndca_add_dhw_central = (dferg_arch_dhw_central.loc[dferg_arch_dhw_central.nd_id_new.isin(df_nd_arch_ht.nd), ['nd_id_new', 'erg_tot']]
.rename(columns={'erg_tot': 'dmnd_sum', 'nd_id_new': 'nd_id'}))
data_3 = dict(vc_dmnd_flex=0, ca_id=3, grid_losses=0, grid_losses_absolute=0)
data_4 = dict(vc_dmnd_flex=0, ca_id=4, grid_losses=0, grid_losses_absolute=0)
data_5 = dict(vc_dmnd_flex=0, ca_id=5, grid_losses=0, grid_losses_absolute=0)
df_node_encar_dhw = df_ndca_add_dhw_decentral.assign(**data_3).reindex(columns=df_node_encar_0.columns).reset_index(drop=True)
df_node_encar_dhw_aw = df_ndca_add_dhw_central.assign(**data_4).reindex(columns=df_node_encar_0.columns).reset_index(drop=True)
df_node_encar_dhw_ww = df_ndca_add_dhw_central.assign(**data_5).reindex(columns=df_node_encar_0.columns).reset_index(drop=True)
df_node_encar_dhw = pd.merge(df_node_encar_dhw, df_def_profile_dmnd_dhw, left_on='nd_id', right_on='primary_nd', how='inner')
df_node_encar_dhw_aw = pd.merge(df_node_encar_dhw_aw, df_def_profile_dmnd_dhw_aw, left_on='nd_id', right_on='primary_nd', how='inner')
df_node_encar_dhw_ww = pd.merge(df_node_encar_dhw_ww, df_def_profile_dmnd_dhw_ww, left_on='nd_id', right_on='primary_nd', how='inner')
df_ndca_add_dsr = (dferg_arch_dsr_1day.loc[dferg_arch_dsr_1day.nd_id_new.isin(df_nd_add.nd),
['nd_id_new', 'erg_dsr_2015','erg_dsr_2035', 'erg_dsr_2050']]
.rename(columns={'erg_dsr_2015': 'dmnd_sum',
'erg_dsr_2035': 'dmnd_sum_yr2035',
'erg_dsr_2050': 'dmnd_sum_yr2050',
'nd_id_new': 'nd_id'}))
data_0 = dict(vc_dmnd_flex=0, ca_id=0, grid_losses=0, grid_losses_absolute=0)
df_node_encar_dsr= df_ndca_add_dsr.assign(**data_0).reindex(columns=df_node_encar_0.columns).reset_index(drop=True)
df_node_encar_dsr = pd.merge(df_node_encar_dsr, df_def_profile_dmnd_dsr_2015, left_on='nd_id', right_on='primary_nd', how='inner')
df_node_encar_dhw = pd.concat([df_node_encar_dhw,df_node_encar_dhw_aw,df_node_encar_dhw_ww]).reset_index(drop=True)
list_dmnd = [c for c in df_node_encar_dhw if 'dmnd_sum' in c]
df_node_encar_dhw = df_node_encar_dhw.assign(**{c: df_node_encar_dhw.dmnd_sum
for c in list_dmnd})
#df_node_encar_dhw.update(df_node_encar_dhw.loc[df_node_encar_dhw.ca_id==0].assign(**{c: 0
# for c in list_dmnd}))
df_node_encar_dsr = df_node_encar_dsr.assign(
dmnd_sum_yr2020 = lambda x: x.dmnd_sum - (x.dmnd_sum - x.dmnd_sum_yr2035)*1/4,
dmnd_sum_yr2025 = lambda x: x.dmnd_sum - (x.dmnd_sum - x.dmnd_sum_yr2035)*2/4,
dmnd_sum_yr2030 = lambda x: x.dmnd_sum - (x.dmnd_sum - x.dmnd_sum_yr2035)*3/4,
dmnd_sum_yr2040 = lambda x: x.dmnd_sum_yr2035 - (x.dmnd_sum_yr2035 - x.dmnd_sum_yr2050)*1/3,
dmnd_sum_yr2045 = lambda x: x.dmnd_sum_yr2035 - (x.dmnd_sum_yr2035 - x.dmnd_sum_yr2050)*2/3,
)
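# --- Illustrative aside (not part of the original input data) ----------------------------
# The assign() above fills the 5-year steps by linear interpolation between the 2015, 2035
# and 2050 milestones. The same arithmetic on a single made-up value:
_d2015, _d2035, _d2050 = 100.0, 60.0, 30.0
_d2020 = _d2015 - (_d2015 - _d2035) * 1 / 4   # -> 90.0
_d2040 = _d2035 - (_d2035 - _d2050) * 1 / 3   # -> 50.0
# -----------------------------------------------------------------------------------------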
list_dmnd_dsr_rem = list_dmnd.copy()
list_dmnd_dsr_rem.append('nd_id')
df_dsr_remove = pd.merge(df_node_encar_dsr[list_dmnd_dsr_rem].assign(nd = lambda x: x.nd_id.str[:9]).drop(columns='nd_id'),
df_nd_res_el[['nd_id','nd']],on='nd')
# df_node_encar_dhw = pd.concat([df_node_encar_dhw,df_node_encar_dsr]).reset_index(drop=True)
df_node_encar_dhw['dmnd_pf_id'] = df_node_encar_dhw.pf
df_node_encar_dhw = df_node_encar_dhw.loc[:, df_node_encar_0.columns]
for df, idx in [(df_def_node_0, 'nd'), (df_def_profile, ['pf', 'dmnd_pf'])]:
df_node_encar_dhw, _ = translate_id(df_node_encar_dhw, df, idx)
df_node_encar_dsr['dmnd_pf_id'] = df_node_encar_dsr.pf
df_node_encar_dsr = df_node_encar_dsr.loc[:, df_node_encar_0.columns]
for df, idx in [(df_def_node, 'nd'), (df_def_profile, ['pf', 'dmnd_pf'])]:
df_node_encar_dsr, _ = translate_id(df_node_encar_dsr, df, idx)
#Subtract electricity from dhw
# Use this only to lower DHW later
df_node_encar_wo_dhw = (df_node_encar_0.set_index('nd_id').loc[:,list_dmnd] - df_node_encar_dhw.set_index('nd_id').loc[:,list_dmnd]).reset_index()
df_node_encar_wo_dhw = df_node_encar_wo_dhw.loc[df_node_encar_wo_dhw.nd_id.isin(dict_nd_res_el.values())].set_index('nd_id')
#Subtract electricity from dhw and from DSR
df_node_encar_wo_dhw_dsr = (df_node_encar_0.set_index('nd_id').loc[:,list_dmnd] - df_node_encar_dhw.set_index('nd_id').loc[:,list_dmnd]
- df_dsr_remove.set_index('nd_id').loc[:,list_dmnd]).reset_index()
df_node_encar_wo_dhw_dsr = df_node_encar_wo_dhw_dsr.loc[df_node_encar_wo_dhw_dsr.nd_id.isin(dict_nd_res_el.values())].set_index('nd_id')
#df_node_encar_ht = df_node_encar_ht.sort_values(by='ca_id', ascending=False).reset_index(drop=True)
#check if we add a factor for heat load (climate correction) or just retrofit scenario
fct_dmnd_dhw = pd.read_csv(base_dir+'/dsr_ee_dhw/demand/dhw_factor_dmnd_future_years_aw_ww.csv',sep=';')
fct_dhw = fct_dmnd_dhw.filter(like='dmnd_sum')
# df_0 = df_node_encar_dhw.copy().loc[df_node_encar_dhw.nd_id.isin(dict_nd_arch_ht.values())].reset_index(drop=True).filter(like='dmnd_sum')*fct_dhw
# # df_node_encar_dhw.loc[df_node_encar_dhw.nd_id.isin(dict_nd_arch_ht.values())].reset_index(drop=True).update(df_0)
# df_node_encar_dhw_cen = df_node_encar_dhw.loc[df_node_encar_dhw.nd_id.isin(dict_nd_arch_ht.values())].reset_index(drop=True)
# df_node_encar_dhw_cen.update(df_0)
df_0 = df_node_encar_dhw.loc[df_node_encar_dhw.ca_id.isin([4,5])].set_index(['nd_id','ca_id']).filter(like='dmnd_sum')
fct_dhw.index = df_0.index
df_0 = df_0*fct_dhw
df_node_encar_dhw_tmp = df_node_encar_dhw.set_index(['nd_id','ca_id'])
df_node_encar_dhw_tmp.update(df_0)
df_node_encar_dhw_tmp = df_node_encar_dhw_tmp.reset_index()
df_node_encar_dhw_cen = df_node_encar_dhw_tmp.loc[df_node_encar_dhw_tmp.ca_id.isin([4,5])].reset_index(drop=True)
# Use this only to lower DHW later
df_node_encar_0_wo_dhw = df_node_encar_0.set_index('nd_id')
df_node_encar_0_wo_dhw.update(df_node_encar_wo_dhw)
df_node_encar_0_wo_dhw_dsr = df_node_encar_0.set_index('nd_id')
df_node_encar_0_wo_dhw_dsr.update(df_node_encar_wo_dhw_dsr)
df_node_encar_dhw_dec = df_node_encar_dhw.loc[df_node_encar_dhw.nd_id.isin(dict_nd_res_el.values())]
# Use this only to lower DHW later
df_node_encar_dhw_for_fct_dmnd = pd.concat([df_node_encar_0_wo_dhw.reset_index(),df_node_encar_dhw_dec,df_node_encar_dhw_cen])
df_node_encar_dhw = pd.concat([df_node_encar_0_wo_dhw_dsr.reset_index(),df_node_encar_dhw_dec,df_node_encar_dhw_cen])
df_node_encar_add = pd.concat([df_node_encar_dhw, df_node_encar_dsr])
#df_node_encar_dhw = df_node_encar_dhw.set_index('nd_id')
#df_node_encar_dhw.update(df_node_encar_wo_dhw)
# df_node_encar_new = df_node_encar_dhw.reset_index(drop=True)
df_node_encar_new = df_node_encar_add.reset_index(drop=True)
# %% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PROFDMND for DHW and DSR
df_tm_st =pd.read_csv(base_dir+'/dsr_ee_dhw/timemap/timestamp_template.csv')
df_tm_st['datetime'] = df_tm_st['datetime'].astype('datetime64[ns]')
# df_tm_st = aql.read_sql(db, 'profiles_raw', 'timestamp_template',filt=[('year', [2015], '=')])
df_profdmnd_0 = pd.read_csv(data_path_prv + '/profdmnd.csv')
# df_profdmnd_0 = aql.read_sql(db, sc, 'profdmnd')
# Decrease DHW in electrical profile
dict_dmnd_pf_res_el = df_def_profile_0.loc[df_def_profile_0.pf.str.contains('EL_MFH|EL_SFH')&~df_def_profile_0.pf.str.contains('HT')]
dict_dmnd_pf_res_el = dict_dmnd_pf_res_el.set_index('pf')['pf_id'].to_dict()
dferg_arch_0 = df_profdmnd_0.groupby('dmnd_pf_id')['value'].sum().reset_index()
df_factor_dmnd = pd.merge(dferg_arch_0.loc[dferg_arch_0.dmnd_pf_id.isin(dict_dmnd_pf_res_el.values())],
df_node_encar_dhw_for_fct_dmnd[['dmnd_pf_id','dmnd_sum']], on = 'dmnd_pf_id').assign(
factor_dmnd = lambda x: x.dmnd_sum/x.value)
df_profdmnd_0_res_el = pd.merge(df_profdmnd_0.loc[df_profdmnd_0.dmnd_pf_id.isin(dict_dmnd_pf_res_el.values())],
df_factor_dmnd[['dmnd_pf_id','factor_dmnd']],on='dmnd_pf_id').assign(
value_wo_dhw = lambda x: x.value * x.factor_dmnd)
df_profdmnd_0_res_el_wo_dhw = df_profdmnd_0_res_el.drop(columns=['value','factor_dmnd']).rename(columns={'value_wo_dhw':'value'})
df_profdmnd_0_other = df_profdmnd_0.loc[~df_profdmnd_0.dmnd_pf_id.isin(dict_dmnd_pf_res_el.values())]
# Only without DHW el
# df_profdmnd_0_wo_dhw = pd.concat([df_profdmnd_0_other,df_profdmnd_0_res_el_wo_dhw])
# Decrease DSR load in electricity profile
# dict_dmnd_pf_res_dsr = df_def_profile_0.loc[df_def_profile_0.pf.str.contains('EL_MFH|EL_SFH')&~df_def_profile_0.pf.str.contains('HT')]
dfload_arch_dsr_1h_2015['nd_id_new'] = dfload_arch_dsr_1h_2015.nd_id.str[0:9]
dfload_arch_dsr_1h_2015['pf'] = 'demand_EL_' + dfload_arch_dsr_1h_2015.nd_id_new
dfload_arch_dsr_1h_2015['dmnd_pf_id'] = dfload_arch_dsr_1h_2015.pf.map(dict_dmnd_pf_res_el)
df_profdmnd_0_res_el_wo_dhw_dsr = pd.merge(df_profdmnd_0_res_el_wo_dhw,
dfload_arch_dsr_1h_2015[['hy','dmnd_pf_id','erg_dsr_1h']],
on=['dmnd_pf_id','hy']).assign(
value_wo_dsr = lambda x: x.value - x.erg_dsr_1h)
df_profdmnd_0_res_el_wo_dhw_dsr = df_profdmnd_0_res_el_wo_dhw_dsr.drop(columns=['value','erg_dsr_1h']
).rename(columns={'value_wo_dsr':'value'})
# Without DHW and DSR loads
df_profdmnd_0_wo_dhw_dsr = pd.concat([df_profdmnd_0_other,df_profdmnd_0_res_el_wo_dhw_dsr])
# DHW
df_dmnd_dhw_add = dfload_arch_dhw.copy()
df_dmnd_dhw_add_aw = dfload_arch_dhw_central_aw.copy()
df_dmnd_dhw_add_ww = dfload_arch_dhw_central_ww.copy()
df_dmnd_dhw_add['ca_id'] = 3
df_dmnd_dhw_add_aw['ca_id'] = 4
df_dmnd_dhw_add_ww['ca_id'] = 5
df_dmnd_dhw_add = pd.merge(df_dmnd_dhw_add, df_def_profile_dmnd_dhw[['pf_id', 'primary_nd']], left_on='nd_id', right_on='primary_nd')
df_dmnd_dhw_add = df_dmnd_dhw_add.rename(columns={'erg_tot': 'value', 'pf_id': 'dmnd_pf_id'})
df_dmnd_dhw_add['nd_id'] = df_dmnd_dhw_add.nd_id.replace(dict_nd_res_el)
df_dmnd_dhw_add['doy'] = (df_dmnd_dhw_add.hy + 24)//24
df_dmnd_dhw_add['erg_tot_fossil'] = 0
df_dmnd_dhw_add['erg_tot_retr_1pc'] = 0
df_dmnd_dhw_add['erg_tot_retr_2pc'] = 0
df_dmnd_dhw_add['erg_tot_retr_1pc_fossil'] = 0
df_dmnd_dhw_add['erg_tot_retr_2pc_fossil'] = 0
df_dmnd_dhw_add_aw = pd.merge(df_dmnd_dhw_add_aw, df_def_profile_dmnd_dhw_aw[['pf_id', 'primary_nd']], left_on='nd_id', right_on='primary_nd')
df_dmnd_dhw_add_aw = df_dmnd_dhw_add_aw.rename(columns={'erg_tot': 'value', 'pf_id': 'dmnd_pf_id'})
df_dmnd_dhw_add_aw['nd_id'] = df_dmnd_dhw_add_aw.nd_id.replace(dict_nd_arch_ht)
df_dmnd_dhw_add_aw['hy'] = 24*df_dmnd_dhw_add_aw.doy - 24
#
df_dmnd_dhw_add_ww = pd.merge(df_dmnd_dhw_add_ww, df_def_profile_dmnd_dhw_ww[['pf_id', 'primary_nd']], left_on='nd_id', right_on='primary_nd')
df_dmnd_dhw_add_ww = df_dmnd_dhw_add_ww.rename(columns={'erg_tot': 'value', 'pf_id': 'dmnd_pf_id'})
df_dmnd_dhw_add_ww['nd_id'] = df_dmnd_dhw_add_ww.nd_id.replace(dict_nd_arch_ht)
df_dmnd_dhw_add_ww['hy'] = 24*df_dmnd_dhw_add_ww.doy - 24
# Energy efficiency
df_dmnd_ee_add = dfload_arch_ee.copy()
df_dmnd_ee_add['ca_id'] = 0
df_def_profile_dmnd_ee_2035_new = df_def_profile_dmnd_ee_2035.copy()
df_def_profile_dmnd_ee_2050_new = df_def_profile_dmnd_ee_2050.copy()
df_def_profile_dmnd_ee_best_2035_new = df_def_profile_dmnd_ee_best_2035.copy()
df_def_profile_dmnd_ee_2035_new['primary_nd_new'] = df_def_profile_dmnd_ee_2035_new.primary_nd+'_diff_2035_2015'
df_def_profile_dmnd_ee_2050_new['primary_nd_new'] = df_def_profile_dmnd_ee_2050_new.primary_nd+'_diff_2050_2015'
df_def_profile_dmnd_ee_best_2035_new['primary_nd_new'] = df_def_profile_dmnd_ee_best_2035_new.primary_nd+'_diff_best_2035_2015'
df_def_profile_dmnd_ee = pd.concat([df_def_profile_dmnd_ee_2035_new,df_def_profile_dmnd_ee_2050_new,
df_def_profile_dmnd_ee_best_2035_new])
df_dmnd_ee_add = pd.merge(df_dmnd_ee_add, df_def_profile_dmnd_ee[['pf_id', 'primary_nd_new']], left_on='nd_id', right_on='primary_nd_new')
# df_dmnd_dhw_add_ee = pd.merge(df_dmnd_dhw_add_ee, df_def_profile_dmnd_ee_2050[['pf_id', 'primary_nd']], left_on='nd_id', right_on='primary_nd')
df_dmnd_ee_add = df_dmnd_ee_add.rename(columns={'erg_diff': 'value', 'pf_id': 'dmnd_pf_id'})
df_dmnd_ee_add['nd_id'] = df_dmnd_ee_add.nd_id.replace(dict_nd_res_el)
df_dmnd_ee_add['doy'] = (df_dmnd_ee_add.hy + 24)//24
# DSR dmnd
# TODO: continue here and check whether we add both the 1h and 1day profiles; that would need another pf_id
# df_dmnd_dsr_1day_add = dfload_arch_dsr_1day.copy().loc[dfload_arch_dsr_1day.nd_id.str.contains('2015')]
df_dmnd_dsr_1day_add = dfload_arch_dsr_1day.copy()
df_dmnd_dsr_1h_add = dfload_arch_dsr_1h.copy().loc[dfload_arch_dsr_1h.nd_id.str.contains('2015')]
df_dmnd_dsr_1day_add['ca_id'] = 0
df_dmnd_dsr_1h_add['ca_id'] = 0
df_dmnd_dsr_1day_add['hy'] = 24*df_dmnd_dsr_1day_add.doy - 24
df_dmnd_dsr_1h_add['doy'] = (df_dmnd_dsr_1h_add.hy + 24)//24
df_def_profile_dmnd_dsr_2015_new = df_def_profile_dmnd_dsr_2015.copy()
df_def_profile_dmnd_dsr_2035_new = df_def_profile_dmnd_dsr_2035.copy()
df_def_profile_dmnd_dsr_2050_new = df_def_profile_dmnd_dsr_2050.copy()
df_def_profile_dmnd_dsr_best_2035_new = df_def_profile_dmnd_dsr_best_2035.copy()
df_def_profile_dmnd_dsr_2015_new['primary_nd_new'] = df_def_profile_dmnd_dsr_2015_new.primary_nd+'_2015'
df_def_profile_dmnd_dsr_2035_new['primary_nd_new'] = df_def_profile_dmnd_dsr_2035_new.primary_nd+'_2035'
df_def_profile_dmnd_dsr_2050_new['primary_nd_new'] = df_def_profile_dmnd_dsr_2050_new.primary_nd+'_2050'
df_def_profile_dmnd_dsr_best_2035_new['primary_nd_new'] = df_def_profile_dmnd_dsr_best_2035_new.primary_nd+'_best_2035'
df_def_profile_dmnd_dsr = pd.concat([df_def_profile_dmnd_dsr_2015_new, df_def_profile_dmnd_dsr_2035_new,
df_def_profile_dmnd_dsr_2050_new,
df_def_profile_dmnd_dsr_best_2035_new])
# df_dmnd_dsr_1day_add = pd.merge(df_dmnd_dsr_1day_add, df_def_profile_dmnd_dsr_2015_new[['pf_id', 'primary_nd_new']], left_on='nd_id', right_on='primary_nd_new')
df_dmnd_dsr_1day_add = pd.merge(df_dmnd_dsr_1day_add, df_def_profile_dmnd_dsr[['pf_id', 'primary_nd_new']], left_on='nd_id', right_on='primary_nd_new')
df_dmnd_dsr_1h_add = pd.merge(df_dmnd_dsr_1h_add, df_def_profile_dmnd_dsr_2015_new[['pf_id', 'primary_nd_new']], left_on='nd_id', right_on='primary_nd_new')
df_dmnd_dsr_1day_add = df_dmnd_dsr_1day_add.rename(columns={'erg_dsr_1day_MW': 'value', 'pf_id': 'dmnd_pf_id'})
df_dmnd_dsr_1h_add = df_dmnd_dsr_1h_add.rename(columns={'erg_dsr_1h': 'value', 'pf_id': 'dmnd_pf_id'})
df_dmnd_dsr_add = df_dmnd_dsr_1day_add.copy()  # alternative: pd.concat([df_dmnd_dsr_1day_add, df_dmnd_dsr_1h_add])
df_dmnd_add = pd.concat([df_dmnd_dhw_add,df_dmnd_dhw_add_aw,df_dmnd_dhw_add_ww,df_dmnd_ee_add,df_dmnd_dsr_add])
#df_profdmnd_dhw = df_dmnd_dhw_add[df_profdmnd_0.columns.tolist()].reset_index(drop=True)
# Without DHW only
# df_profdmnd_add = df_dmnd_add[df_profdmnd_0_wo_dhw.columns.tolist()].reset_index(drop=True)
# Without DHW and DSR loads
df_profdmnd_add = df_dmnd_add[df_profdmnd_0_wo_dhw_dsr.columns.tolist()].reset_index(drop=True)
#df_profdmnd_ht_el = df_dmnd_ht_el_add[df_profdmnd_0.columns.tolist()].reset_index(drop=True)
#df_profdmnd_new = pd.concat([df_profdmnd_0,df_profdmnd_dhw])#,df_profdmnd_ht_el])
# Without DHW only
# df_profdmnd_new = pd.concat([df_profdmnd_0_wo_dhw,df_profdmnd_add])
# Without DHW and DSR
df_profdmnd_new = pd.concat([df_profdmnd_0_wo_dhw_dsr,df_profdmnd_add])
# %% ~~~~~~~~~~~~~~~~~~~~~~~~~~ PROFPRICE
# --> NO CHANGES! HOUSEHOLDS USE CH0 PRICE PROFILES
# %% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PROFSUPPLY
# --> NO CHANGES!
# %% ~~~~~~~~~~~~~~~~~~~~~~~ PLANT_ENCAR (needs profsupply data)
dict_pp_new = pd.Series(df_def_plant.pp_id.values,index=df_def_plant.pp).to_dict()
#dict_nd_id_all = dict(pd.Series(df_def_node_0.nd_id.values,index=df_def_node_0.nd).to_dict(), **dict_nd_id)
dict_pt_id_all = dict(pd.Series(df_def_pp_type_0.pt_id.values,index=df_def_pp_type_0.pt).to_dict(),
**pd.Series(df_def_pp_type.pt_id.values,index=df_def_pp_type.pt))
df_plant_encar = pd.read_csv(data_path_prv + '/plant_encar.csv')
# df_plant_encar = aql.read_sql(db, sc, 'plant_encar')
df_bo_dhw_scen = pd.read_csv(base_dir + '/dsr_ee_dhw/dhw_capacity/dhw_pp_bo_cap.csv',sep=';')
df_hp_cen_full_dhw_scen = pd.read_csv(base_dir + '/dsr_ee_dhw/dhw_capacity/dhw_pp_hp_full_cap.csv',sep=';')
df_hp_cen_fossil_dhw_scen = pd.read_csv(base_dir + '/dsr_ee_dhw/dhw_capacity/dhw_pp_hp_fossil_cap.csv',sep=';')
df_bo_dhw_scen['pp_id'] = df_bo_dhw_scen['pp'].map(dict_pp_new)
df_hp_cen_full_dhw_scen['pp_id'] = df_hp_cen_full_dhw_scen['pp'].map(dict_pp_new)
df_hp_cen_fossil_dhw_scen['pp_id'] = df_hp_cen_fossil_dhw_scen['pp'].map(dict_pp_new)
df_pp_add = df_bo_dhw_scen.drop(columns='pp')
df_pp_add_1 = pd.merge(df_hp_cen_full_dhw_scen,df_hp_cen_fossil_dhw_scen).drop(columns='pp')
df_plant_encar_1 = pd.DataFrame()
df_plant_encar_1 = pd.concat([df_plant_encar, df_pp_add,df_pp_add_1])
df_plant_encar_new = df_plant_encar_1.reset_index(drop=True)
# %% ~~~~~~~~~~~~~~~~~~~~ NODE_CONNECT
df_node_connect_0 = pd.read_csv(data_path_prv + '/node_connect.csv').query(
'nd_id in %s and nd_2_id not in %s'%(
df_nd_ch0_el.nd_id.tolist(),df_nd_not_res.nd_id.tolist())).reset_index(drop=True)
# df_node_connect = aql.read_sql(db, sc, 'node_connect',
# filt=[('nd_id', df_nd_ch0_el.nd_id.tolist(), ' = ', ' AND '),
# ('nd_2_id', df_nd_not_res.nd_id.tolist(), ' != ', ' AND ')])
node_res_el = df_def_node_0.loc[df_def_node_0.nd_id.isin(dict_nd_res_el.values())].nd.values
node_res_dsr = df_def_node.loc[df_def_node.nd.str.contains('DSR')].nd.values
data_res = dict(nd_id=node_res_el, nd_2_id=node_res_dsr, ca_id=0, mt_id='all',cap_trme_leg=9e9,cap_trmi_leg=0)
data_res_df = pd.DataFrame(data_res)
data_res_df = expand_rows(data_res_df, ['mt_id'], [range(12)])
data_res_df[['mt_id']] = data_res_df.mt_id.astype(int)
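# expand_rows (a helper defined/imported elsewhere in this script) is assumed to replicate each
# mt_id='all' row across mt_id values 0-11, i.e. one connection row per month.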
df_node_connect = data_res_df[df_node_connect_0.columns]
dft = pd.concat([
|
pd.read_csv(data_path_prv + '/def_node.csv')
|
pandas.read_csv
|
import pandas as pd
import numpy as np
from rdtools import energy_from_power
import pytest
# Tests for resampling at same frequency
def test_energy_from_power_calculation():
power_times = pd.date_range('2018-04-01 12:00', '2018-04-01 13:00', freq='15T')
result_times = power_times[1:]
power_series = pd.Series(data=4.0, index=power_times)
expected_energy_series = pd.Series(data=1.0, index=result_times)
expected_energy_series.name = 'energy_Wh'
result = energy_from_power(power_series, max_timedelta=pd.to_timedelta('15 minutes'))
pd.testing.assert_series_equal(result, expected_energy_series)
def test_energy_from_power_max_interval():
power_times = pd.date_range('2018-04-01 12:00', '2018-04-01 13:00', freq='15T')
result_times = power_times[1:]
power_series = pd.Series(data=4.0, index=power_times)
expected_energy_series = pd.Series(data=np.nan, index=result_times)
expected_energy_series.name = 'energy_Wh'
result = energy_from_power(power_series, max_timedelta=pd.to_timedelta('5 minutes'))
    # We expect a series of NaNs, because max_timedelta is smaller than the
    # time step of the power time series
pd.testing.assert_series_equal(result, expected_energy_series)
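# A minimal standalone sketch (an assumption for intuition only, not rdtools' actual implementation)
# of trapezoidal power-to-energy conversion, which explains the tests above: a constant 4 W series
# sampled every 15 minutes yields 4 W * 0.25 h = 1 Wh per interval.
def _sketch_energy_from_power(power_series):
    # interval lengths in hours, NaN for the first timestamp
    hours = power_series.index.to_series().diff().dt.total_seconds() / 3600.0
    # trapezoidal mean power over each interval
    avg_power = (power_series + power_series.shift()) / 2.0
    # energy per interval, dropping the undefined first entry
    return (avg_power * hours).iloc[1:].rename('energy_Wh')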
def test_energy_from_power_validation():
power_series = pd.Series(data=[4.0] * 4)
with pytest.raises(ValueError):
energy_from_power(power_series, max_timedelta=pd.to_timedelta('15 minutes'))
def test_energy_from_power_single_argument():
power_times = pd.date_range('2018-04-01 12:00', '2018-04-01 15:00', freq='15T')
result_times = power_times[1:]
power_series = pd.Series(data=4.0, index=power_times)
missing = pd.to_datetime('2018-04-01 13:00:00')
power_series = power_series.drop(missing)
expected_energy_series = pd.Series(data=1.0, index=result_times)
expected_nan = [missing]
expected_nan.append(pd.to_datetime('2018-04-01 13:15:00'))
expected_energy_series.loc[expected_nan] = np.nan
expected_energy_series.name = 'energy_Wh'
# Test that the result has the expected missing timestamp based on median timestep
result = energy_from_power(power_series)
pd.testing.assert_series_equal(result, expected_energy_series)
# Tests for downsampling
def test_energy_from_power_downsample():
times = pd.date_range('2018-04-01 12:00', '2018-04-01 13:00', freq='15T')
time_series = pd.Series(data=[1.0, 2.0, 3.0, 4.0, 5.0], index=times)
expected_energy_series = pd.Series(index=[pd.to_datetime('2018-04-01 13:00:00')],
data=3.0, name='energy_Wh')
expected_energy_series.index.freq = '60T'
result = energy_from_power(time_series, '60T')
pd.testing.assert_series_equal(result, expected_energy_series)
def test_energy_from_power_downsample_max_timedelta_exceeded():
times = pd.date_range('2018-04-01 12:00', '2018-04-01 13:00', freq='15T')
time_series = pd.Series(data=[1.0, 2.0, 3.0, 4.0, 5.0], index=times)
expected_energy_series = pd.Series(index=[pd.to_datetime('2018-04-01 13:00:00')],
data=1.5, name='energy_Wh')
expected_energy_series.index.freq = '60T'
result = energy_from_power(time_series.drop(time_series.index[2]), '60T', pd.to_timedelta('15 minutes'))
pd.testing.assert_series_equal(result, expected_energy_series)
def test_energy_from_power_downsample_max_timedelta_not_exceeded():
times = pd.date_range('2018-04-01 12:00', '2018-04-01 13:00', freq='15T')
time_series = pd.Series(data=[1.0, 2.0, 3.0, 4.0, 5.0], index=times)
expected_energy_series = pd.Series(index=[pd.to_datetime('2018-04-01 13:00:00')],
data=3.0, name='energy_Wh')
expected_energy_series.index.freq = '60T'
result = energy_from_power(time_series.drop(time_series.index[2]), '60T', pd.to_timedelta('60 minutes'))
|
pd.testing.assert_series_equal(result, expected_energy_series)
|
pandas.testing.assert_series_equal
|
from collections import defaultdict
from functools import partial
import itertools
import operator
import re
from typing import List, Optional, Sequence, Tuple, Union
import numpy as np
from pandas._libs import Timedelta, Timestamp, internals as libinternals, lib
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.cast import (
find_common_type,
infer_dtype_from_scalar,
maybe_convert_objects,
maybe_promote,
)
from pandas.core.dtypes.common import (
_NS_DTYPE,
is_extension_array_dtype,
is_list_like,
is_scalar,
is_sparse,
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import ABCExtensionArray, ABCSeries
from pandas.core.dtypes.missing import isna
import pandas.core.algorithms as algos
from pandas.core.base import PandasObject
from pandas.core.index import Index, MultiIndex, ensure_index
from pandas.core.indexers import maybe_convert_indices
from pandas.io.formats.printing import pprint_thing
from .blocks import (
Block,
CategoricalBlock,
DatetimeTZBlock,
ExtensionBlock,
ObjectValuesExtensionBlock,
_extend_blocks,
_merge_blocks,
_safe_reshape,
get_block_type,
make_block,
)
from .concat import ( # all for concatenate_block_managers
combine_concat_plans,
concatenate_join_units,
get_mgr_concatenation_plan,
is_uniform_join_units,
)
# TODO: flexible with index=None and/or items=None
class BlockManager(PandasObject):
"""
Core internal data structure to implement DataFrame, Series, etc.
Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a
lightweight blocked set of labeled data to be manipulated by the DataFrame
public API class
Attributes
----------
shape
ndim
axes
values
items
Methods
-------
set_axis(axis, new_labels)
copy(deep=True)
get_dtype_counts
get_ftype_counts
get_dtypes
get_ftypes
apply(func, axes, block_filter_fn)
get_bool_data
get_numeric_data
get_slice(slice_like, axis)
get(label)
iget(loc)
take(indexer, axis)
reindex_axis(new_labels, axis)
reindex_indexer(new_labels, indexer, axis)
delete(label)
insert(loc, label, value)
set(label, value)
Parameters
----------
Notes
-----
This is *not* a public API class
"""
__slots__ = [
"axes",
"blocks",
"_ndim",
"_shape",
"_known_consolidated",
"_is_consolidated",
"_blknos",
"_blklocs",
]
def __init__(
self,
blocks: Sequence[Block],
axes: Sequence[Index],
do_integrity_check: bool = True,
):
self.axes = [ensure_index(ax) for ax in axes]
self.blocks = tuple(blocks) # type: Tuple[Block, ...]
for block in blocks:
if self.ndim != block.ndim:
raise AssertionError(
"Number of Block dimensions ({block}) must equal "
"number of axes ({self})".format(block=block.ndim, self=self.ndim)
)
if do_integrity_check:
self._verify_integrity()
self._consolidate_check()
self._rebuild_blknos_and_blklocs()
def make_empty(self, axes=None):
""" return an empty BlockManager with the items axis of len 0 """
if axes is None:
axes = [ensure_index([])] + [ensure_index(a) for a in self.axes[1:]]
# preserve dtype if possible
if self.ndim == 1:
blocks = np.array([], dtype=self.array_dtype)
else:
blocks = []
return self.__class__(blocks, axes)
def __nonzero__(self):
return True
# Python3 compat
__bool__ = __nonzero__
@property
def shape(self):
return tuple(len(ax) for ax in self.axes)
@property
def ndim(self) -> int:
return len(self.axes)
def set_axis(self, axis, new_labels):
new_labels = ensure_index(new_labels)
old_len = len(self.axes[axis])
new_len = len(new_labels)
if new_len != old_len:
raise ValueError(
"Length mismatch: Expected axis has {old} elements, new "
"values have {new} elements".format(old=old_len, new=new_len)
)
self.axes[axis] = new_labels
def rename_axis(self, mapper, axis, copy=True, level=None):
"""
Rename one of axes.
Parameters
----------
mapper : unary callable
axis : int
copy : boolean, default True
level : int, default None
"""
obj = self.copy(deep=copy)
obj.set_axis(axis, _transform_index(self.axes[axis], mapper, level))
return obj
@property
def _is_single_block(self):
if self.ndim == 1:
return True
if len(self.blocks) != 1:
return False
blk = self.blocks[0]
return blk.mgr_locs.is_slice_like and blk.mgr_locs.as_slice == slice(
0, len(self), 1
)
def _rebuild_blknos_and_blklocs(self):
"""
Update mgr._blknos / mgr._blklocs.
"""
new_blknos = np.empty(self.shape[0], dtype=np.int64)
new_blklocs = np.empty(self.shape[0], dtype=np.int64)
new_blknos.fill(-1)
new_blklocs.fill(-1)
for blkno, blk in enumerate(self.blocks):
rl = blk.mgr_locs
new_blknos[rl.indexer] = blkno
new_blklocs[rl.indexer] = np.arange(len(rl))
if (new_blknos == -1).any():
raise AssertionError("Gaps in blk ref_locs")
self._blknos = new_blknos
self._blklocs = new_blklocs
@property
def items(self):
return self.axes[0]
def _get_counts(self, f):
""" return a dict of the counts of the function in BlockManager """
self._consolidate_inplace()
counts = dict()
for b in self.blocks:
v = f(b)
counts[v] = counts.get(v, 0) + b.shape[0]
return counts
def get_dtype_counts(self):
return self._get_counts(lambda b: b.dtype.name)
def get_ftype_counts(self):
return self._get_counts(lambda b: b.ftype)
def get_dtypes(self):
dtypes = np.array([blk.dtype for blk in self.blocks])
return algos.take_1d(dtypes, self._blknos, allow_fill=False)
def get_ftypes(self):
ftypes = np.array([blk.ftype for blk in self.blocks])
return algos.take_1d(ftypes, self._blknos, allow_fill=False)
def __getstate__(self):
block_values = [b.values for b in self.blocks]
block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks]
axes_array = list(self.axes)
extra_state = {
"0.14.1": {
"axes": axes_array,
"blocks": [
dict(values=b.values, mgr_locs=b.mgr_locs.indexer)
for b in self.blocks
],
}
}
# First three elements of the state are to maintain forward
# compatibility with 0.13.1.
return axes_array, block_values, block_items, extra_state
def __setstate__(self, state):
def unpickle_block(values, mgr_locs):
return make_block(values, placement=mgr_locs)
if isinstance(state, tuple) and len(state) >= 4 and "0.14.1" in state[3]:
state = state[3]["0.14.1"]
self.axes = [ensure_index(ax) for ax in state["axes"]]
self.blocks = tuple(
unpickle_block(b["values"], b["mgr_locs"]) for b in state["blocks"]
)
else:
# discard anything after 3rd, support beta pickling format for a
# little while longer
ax_arrays, bvalues, bitems = state[:3]
self.axes = [ensure_index(ax) for ax in ax_arrays]
if len(bitems) == 1 and self.axes[0].equals(bitems[0]):
# This is a workaround for pre-0.14.1 pickles that didn't
# support unpickling multi-block frames/panels with non-unique
# columns/items, because given a manager with items ["a", "b",
# "a"] there's no way of knowing which block's "a" is where.
#
# Single-block case can be supported under the assumption that
# block items corresponded to manager items 1-to-1.
all_mgr_locs = [slice(0, len(bitems[0]))]
else:
all_mgr_locs = [
self.axes[0].get_indexer(blk_items) for blk_items in bitems
]
self.blocks = tuple(
unpickle_block(values, mgr_locs)
for values, mgr_locs in zip(bvalues, all_mgr_locs)
)
self._post_setstate()
def _post_setstate(self):
self._is_consolidated = False
self._known_consolidated = False
self._rebuild_blknos_and_blklocs()
def __len__(self) -> int:
return len(self.items)
def __repr__(self) -> str:
output = pprint_thing(self.__class__.__name__)
for i, ax in enumerate(self.axes):
if i == 0:
output += "\nItems: {ax}".format(ax=ax)
else:
output += "\nAxis {i}: {ax}".format(i=i, ax=ax)
for block in self.blocks:
output += "\n{block}".format(block=pprint_thing(block))
return output
def _verify_integrity(self):
mgr_shape = self.shape
tot_items = sum(len(x.mgr_locs) for x in self.blocks)
for block in self.blocks:
if block._verify_integrity and block.shape[1:] != mgr_shape[1:]:
construction_error(tot_items, block.shape[1:], self.axes)
if len(self.items) != tot_items:
raise AssertionError(
"Number of manager items must equal union of "
"block items\n# manager items: {0}, # "
"tot_items: {1}".format(len(self.items), tot_items)
)
def apply(
self,
f,
axes=None,
filter=None,
do_integrity_check=False,
consolidate=True,
**kwargs,
):
"""
iterate over the blocks, collect and create a new block manager
Parameters
----------
f : the callable or function name to operate on at the block level
axes : optional (if not supplied, use self.axes)
filter : list, if supplied, only call the block if the filter is in
the block
do_integrity_check : boolean, default False. Do the block manager
integrity check
consolidate: boolean, default True. Join together blocks having same
dtype
Returns
-------
Block Manager (new object)
"""
result_blocks = []
# filter kwarg is used in replace-* family of methods
if filter is not None:
filter_locs = set(self.items.get_indexer_for(filter))
if len(filter_locs) == len(self.items):
# All items are included, as if there were no filtering
filter = None
else:
kwargs["filter"] = filter_locs
if consolidate:
self._consolidate_inplace()
if f == "where":
align_copy = True
if kwargs.get("align", True):
align_keys = ["other", "cond"]
else:
align_keys = ["cond"]
elif f == "putmask":
align_copy = False
if kwargs.get("align", True):
align_keys = ["new", "mask"]
else:
align_keys = ["mask"]
elif f == "fillna":
# fillna internally does putmask, maybe it's better to do this
# at mgr, not block level?
align_copy = False
align_keys = ["value"]
else:
align_keys = []
# TODO(EA): may interfere with ExtensionBlock.setitem for blocks
# with a .values attribute.
aligned_args = {
k: kwargs[k]
for k in align_keys
if not isinstance(kwargs[k], ABCExtensionArray)
and hasattr(kwargs[k], "values")
}
for b in self.blocks:
if filter is not None:
if not b.mgr_locs.isin(filter_locs).any():
result_blocks.append(b)
continue
if aligned_args:
b_items = self.items[b.mgr_locs.indexer]
for k, obj in aligned_args.items():
axis = obj._info_axis_number
kwargs[k] = obj.reindex(b_items, axis=axis, copy=align_copy)
applied = getattr(b, f)(**kwargs)
result_blocks = _extend_blocks(applied, result_blocks)
if len(result_blocks) == 0:
return self.make_empty(axes or self.axes)
bm = self.__class__(
result_blocks, axes or self.axes, do_integrity_check=do_integrity_check
)
bm._consolidate_inplace()
return bm
def quantile(
self,
axis=0,
consolidate=True,
transposed=False,
interpolation="linear",
qs=None,
numeric_only=None,
):
"""
Iterate over blocks applying quantile reduction.
This routine is intended for reduction type operations and
will do inference on the generated blocks.
Parameters
----------
axis: reduction axis, default 0
consolidate: boolean, default True. Join together blocks having same
dtype
transposed: boolean, default False
we are holding transposed data
interpolation : type of interpolation, default 'linear'
qs : a scalar or list of the quantiles to be computed
numeric_only : ignored
Returns
-------
Block Manager (new object)
"""
# Series dispatches to DataFrame for quantile, which allows us to
# simplify some of the code here and in the blocks
assert self.ndim >= 2
if consolidate:
self._consolidate_inplace()
def get_axe(block, qs, axes):
# Because Series dispatches to DataFrame, we will always have
# block.ndim == 2
from pandas import Float64Index
if is_list_like(qs):
ax = Float64Index(qs)
else:
ax = axes[0]
return ax
axes, blocks = [], []
for b in self.blocks:
block = b.quantile(axis=axis, qs=qs, interpolation=interpolation)
axe = get_axe(b, qs, axes=self.axes)
axes.append(axe)
blocks.append(block)
# note that some DatetimeTZ, Categorical are always ndim==1
ndim = {b.ndim for b in blocks}
assert 0 not in ndim, ndim
if 2 in ndim:
new_axes = list(self.axes)
# multiple blocks that are reduced
if len(blocks) > 1:
new_axes[1] = axes[0]
# reset the placement to the original
for b, sb in zip(blocks, self.blocks):
b.mgr_locs = sb.mgr_locs
else:
new_axes[axis] = Index(np.concatenate([ax.values for ax in axes]))
if transposed:
new_axes = new_axes[::-1]
blocks = [
b.make_block(b.values.T, placement=np.arange(b.shape[1]))
for b in blocks
]
return self.__class__(blocks, new_axes)
# single block, i.e. ndim == {1}
values = concat_compat([b.values for b in blocks])
# compute the orderings of our original data
if len(self.blocks) > 1:
indexer = np.empty(len(self.axes[0]), dtype=np.intp)
i = 0
for b in self.blocks:
for j in b.mgr_locs:
indexer[j] = i
i = i + 1
values = values.take(indexer)
return SingleBlockManager(
[make_block(values, ndim=1, placement=np.arange(len(values)))], axes[0]
)
def isna(self, func, **kwargs):
return self.apply("apply", func=func, **kwargs)
def where(self, **kwargs):
return self.apply("where", **kwargs)
def setitem(self, **kwargs):
return self.apply("setitem", **kwargs)
def putmask(self, **kwargs):
return self.apply("putmask", **kwargs)
def diff(self, **kwargs):
return self.apply("diff", **kwargs)
def interpolate(self, **kwargs):
return self.apply("interpolate", **kwargs)
def shift(self, **kwargs):
return self.apply("shift", **kwargs)
def fillna(self, **kwargs):
return self.apply("fillna", **kwargs)
def downcast(self, **kwargs):
return self.apply("downcast", **kwargs)
def astype(self, dtype, **kwargs):
return self.apply("astype", dtype=dtype, **kwargs)
def convert(self, **kwargs):
return self.apply("convert", **kwargs)
def replace(self, value, **kwargs):
assert np.ndim(value) == 0, value
return self.apply("replace", value=value, **kwargs)
def replace_list(self, src_list, dest_list, inplace=False, regex=False):
""" do a list replace """
inplace = validate_bool_kwarg(inplace, "inplace")
# figure out our mask a-priori to avoid repeated replacements
values = self.as_array()
def comp(s, regex=False):
"""
Generate a bool array by performing an equality check, or an
element-wise regular expression match
"""
if isna(s):
return isna(values)
if isinstance(s, (Timedelta, Timestamp)) and getattr(s, "tz", None) is None:
return _compare_or_regex_search(
maybe_convert_objects(values), s.asm8, regex
)
return _compare_or_regex_search(values, s, regex)
masks = [comp(s, regex) for i, s in enumerate(src_list)]
result_blocks = []
src_len = len(src_list) - 1
for blk in self.blocks:
# it's possible to get multiple result blocks here
# replace ALWAYS will return a list
rb = [blk if inplace else blk.copy()]
for i, (s, d) in enumerate(zip(src_list, dest_list)):
# TODO: assert/validate that `d` is always a scalar?
new_rb = []
for b in rb:
m = masks[i][b.mgr_locs.indexer]
convert = i == src_len
result = b._replace_coerce(
mask=m,
to_replace=s,
value=d,
inplace=inplace,
convert=convert,
regex=regex,
)
if m.any():
new_rb = _extend_blocks(result, new_rb)
else:
new_rb.append(b)
rb = new_rb
result_blocks.extend(rb)
bm = self.__class__(result_blocks, self.axes)
bm._consolidate_inplace()
return bm
def is_consolidated(self):
"""
Return True if more than one block with the same dtype
"""
if not self._known_consolidated:
self._consolidate_check()
return self._is_consolidated
def _consolidate_check(self):
ftypes = [blk.ftype for blk in self.blocks]
self._is_consolidated = len(ftypes) == len(set(ftypes))
self._known_consolidated = True
@property
def is_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return len(self.blocks) > 1
@property
def is_numeric_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return all(block.is_numeric for block in self.blocks)
@property
def is_datelike_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return any(block.is_datelike for block in self.blocks)
@property
def any_extension_types(self):
"""Whether any of the blocks in this manager are extension blocks"""
return any(block.is_extension for block in self.blocks)
@property
def is_view(self):
""" return a boolean if we are a single block and are a view """
if len(self.blocks) == 1:
return self.blocks[0].is_view
# It is technically possible to figure out which blocks are views
# e.g. [ b.values.base is not None for b in self.blocks ]
# but then we have the case of possibly some blocks being a view
# and some blocks not. setting in theory is possible on the non-view
# blocks w/o causing a SettingWithCopy raise/warn. But this is a bit
# complicated
return False
def get_bool_data(self, copy=False):
"""
Parameters
----------
copy : boolean, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_bool], copy)
def get_numeric_data(self, copy=False):
"""
Parameters
----------
copy : boolean, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_numeric], copy)
def combine(self, blocks, copy=True):
""" return a new manager with the blocks """
if len(blocks) == 0:
return self.make_empty()
# FIXME: optimization potential
indexer = np.sort(np.concatenate([b.mgr_locs.as_array for b in blocks]))
inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0])
new_blocks = []
for b in blocks:
b = b.copy(deep=copy)
b.mgr_locs = algos.take_1d(
inv_indexer, b.mgr_locs.as_array, axis=0, allow_fill=False
)
new_blocks.append(b)
axes = list(self.axes)
axes[0] = self.items.take(indexer)
return self.__class__(new_blocks, axes, do_integrity_check=False)
def get_slice(self, slobj, axis=0):
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(slobj)
else:
slicer = [slice(None)] * (axis + 1)
slicer[axis] = slobj
slicer = tuple(slicer)
new_blocks = [blk.getitem_block(slicer) for blk in self.blocks]
new_axes = list(self.axes)
new_axes[axis] = new_axes[axis][slobj]
bm = self.__class__(new_blocks, new_axes, do_integrity_check=False)
bm._consolidate_inplace()
return bm
def __contains__(self, item):
return item in self.items
@property
def nblocks(self):
return len(self.blocks)
def copy(self, deep=True):
"""
Make deep or shallow copy of BlockManager
Parameters
----------
deep : boolean or string, default True
If False, return shallow copy (do not copy data)
If 'all', copy data and a deep copy of the index
Returns
-------
copy : BlockManager
"""
# this preserves the notion of view copying of axes
if deep:
if deep == "all":
copy = lambda ax: ax.copy(deep=True)
else:
copy = lambda ax: ax.view()
new_axes = [copy(ax) for ax in self.axes]
else:
new_axes = list(self.axes)
return self.apply("copy", axes=new_axes, deep=deep, do_integrity_check=False)
def as_array(self, transpose=False, items=None):
"""Convert the blockmanager data into an numpy array.
Parameters
----------
transpose : boolean, default False
If True, transpose the return array
items : list of strings or None
Names of block items that will be included in the returned
array. ``None`` means that all block items will be used
Returns
-------
arr : ndarray
"""
if len(self.blocks) == 0:
arr = np.empty(self.shape, dtype=float)
return arr.transpose() if transpose else arr
if items is not None:
mgr = self.reindex_axis(items, axis=0)
else:
mgr = self
if self._is_single_block and mgr.blocks[0].is_datetimetz:
# TODO(Block.get_values): Make DatetimeTZBlock.get_values
# always be object dtype. Some callers seem to want the
# DatetimeArray (previously DTI)
arr = mgr.blocks[0].get_values(dtype=object)
elif self._is_single_block or not self.is_mixed_type:
arr = np.asarray(mgr.blocks[0].get_values())
else:
arr = mgr._interleave()
return arr.transpose() if transpose else arr
def _interleave(self):
"""
Return ndarray from blocks with specified item order
Items must be contained in the blocks
"""
dtype = _interleaved_dtype(self.blocks)
# TODO: https://github.com/pandas-dev/pandas/issues/22791
# Give EAs some input on what happens here. Sparse needs this.
if is_sparse(dtype):
dtype = dtype.subtype
elif is_extension_array_dtype(dtype):
dtype = "object"
result = np.empty(self.shape, dtype=dtype)
itemmask = np.zeros(self.shape[0])
for blk in self.blocks:
rl = blk.mgr_locs
result[rl.indexer] = blk.get_values(dtype)
itemmask[rl.indexer] = 1
if not itemmask.all():
raise AssertionError("Some items were not contained in blocks")
return result
def to_dict(self, copy=True):
"""
Return a dict of str(dtype) -> BlockManager
Parameters
----------
copy : boolean, default True
Returns
-------
values : a dict of dtype -> BlockManager
Notes
-----
This consolidates based on str(dtype)
"""
self._consolidate_inplace()
bd = {}
for b in self.blocks:
bd.setdefault(str(b.dtype), []).append(b)
return {dtype: self.combine(blocks, copy=copy) for dtype, blocks in bd.items()}
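# e.g. a manager holding int64 and float64 blocks returns
# {'int64': <BlockManager ...>, 'float64': <BlockManager ...>}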
def fast_xs(self, loc):
"""
get a cross sectional for a given location in the
items ; handle dups
return the result; it *could* be a view in the case of a
single block
"""
if len(self.blocks) == 1:
return self.blocks[0].iget((slice(None), loc))
items = self.items
# non-unique (GH4726)
if not items.is_unique:
result = self._interleave()
if self.ndim == 2:
result = result.T
return result[loc]
# unique
dtype = _interleaved_dtype(self.blocks)
n = len(items)
if is_extension_array_dtype(dtype):
# we'll eventually construct an ExtensionArray.
result = np.empty(n, dtype=object)
else:
result = np.empty(n, dtype=dtype)
for blk in self.blocks:
# Such assignment may incorrectly coerce NaT to None
# result[blk.mgr_locs] = blk._slice((slice(None), loc))
for i, rl in enumerate(blk.mgr_locs):
result[rl] = blk.iget((i, loc))
if is_extension_array_dtype(dtype):
result = dtype.construct_array_type()._from_sequence(result, dtype=dtype)
return result
def consolidate(self):
"""
Join together blocks having same dtype
Returns
-------
y : BlockManager
"""
if self.is_consolidated():
return self
bm = self.__class__(self.blocks, self.axes)
bm._is_consolidated = False
bm._consolidate_inplace()
return bm
def _consolidate_inplace(self):
if not self.is_consolidated():
self.blocks = tuple(_consolidate(self.blocks))
self._is_consolidated = True
self._known_consolidated = True
self._rebuild_blknos_and_blklocs()
def get(self, item):
"""
Return values for selected item (ndarray or BlockManager).
"""
if self.items.is_unique:
if not isna(item):
loc = self.items.get_loc(item)
else:
indexer = np.arange(len(self.items))[isna(self.items)]
# allow a single nan location indexer
if not is_scalar(indexer):
if len(indexer) == 1:
loc = indexer.item()
else:
raise ValueError("cannot label index with a null key")
return self.iget(loc)
else:
if isna(item):
raise TypeError("cannot label index with a null key")
indexer = self.items.get_indexer_for([item])
return self.reindex_indexer(
new_axis=self.items[indexer], indexer=indexer, axis=0, allow_dups=True
)
def iget(self, i):
"""
Return the data as a SingleBlockManager if possible
Otherwise return as a ndarray
"""
block = self.blocks[self._blknos[i]]
values = block.iget(self._blklocs[i])
# shortcut for select a single-dim from a 2-dim BM
return SingleBlockManager(
[
block.make_block_same_class(
values, placement=slice(0, len(values)), ndim=1
)
],
self.axes[1],
)
def delete(self, item):
"""
Delete selected item (items if non-unique) in-place.
"""
indexer = self.items.get_loc(item)
is_deleted = np.zeros(self.shape[0], dtype=np.bool_)
is_deleted[indexer] = True
ref_loc_offset = -is_deleted.cumsum()
is_blk_deleted = [False] * len(self.blocks)
if isinstance(indexer, int):
affected_start = indexer
else:
affected_start = is_deleted.nonzero()[0][0]
for blkno, _ in _fast_count_smallints(self._blknos[affected_start:]):
blk = self.blocks[blkno]
bml = blk.mgr_locs
blk_del = is_deleted[bml.indexer].nonzero()[0]
if len(blk_del) == len(bml):
is_blk_deleted[blkno] = True
continue
elif len(blk_del) != 0:
blk.delete(blk_del)
bml = blk.mgr_locs
blk.mgr_locs = bml.add(ref_loc_offset[bml.indexer])
# FIXME: use Index.delete as soon as it uses fastpath=True
self.axes[0] = self.items[~is_deleted]
self.blocks = tuple(
b for blkno, b in enumerate(self.blocks) if not is_blk_deleted[blkno]
)
self._shape = None
self._rebuild_blknos_and_blklocs()
def set(self, item, value):
"""
Set new item in-place. Does not consolidate. Adds new Block if not
contained in the current set of items
"""
# FIXME: refactor, clearly separate broadcasting & zip-like assignment
# can prob also fix the various if tests for sparse/categorical
value_is_extension_type = is_extension_array_dtype(value)
# categorical/sparse/datetimetz
if value_is_extension_type:
def value_getitem(placement):
return value
else:
if value.ndim == self.ndim - 1:
value = _safe_reshape(value, (1,) + value.shape)
def value_getitem(placement):
return value
else:
def value_getitem(placement):
return value[placement.indexer]
if value.shape[1:] != self.shape[1:]:
raise AssertionError(
"Shape of new values must be compatible with manager shape"
)
try:
loc = self.items.get_loc(item)
except KeyError:
# This item wasn't present, just insert at end
self.insert(len(self.items), item, value)
return
if isinstance(loc, int):
loc = [loc]
blknos = self._blknos[loc]
blklocs = self._blklocs[loc].copy()
unfit_mgr_locs = []
unfit_val_locs = []
removed_blknos = []
for blkno, val_locs in libinternals.get_blkno_placements(blknos, group=True):
blk = self.blocks[blkno]
blk_locs = blklocs[val_locs.indexer]
if blk.should_store(value):
blk.set(blk_locs, value_getitem(val_locs))
else:
unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs])
unfit_val_locs.append(val_locs)
# If all block items are unfit, schedule the block for removal.
if len(val_locs) == len(blk.mgr_locs):
removed_blknos.append(blkno)
else:
self._blklocs[blk.mgr_locs.indexer] = -1
blk.delete(blk_locs)
self._blklocs[blk.mgr_locs.indexer] = np.arange(len(blk))
if len(removed_blknos):
# Remove blocks & update blknos accordingly
is_deleted = np.zeros(self.nblocks, dtype=np.bool_)
is_deleted[removed_blknos] = True
new_blknos = np.empty(self.nblocks, dtype=np.int64)
new_blknos.fill(-1)
new_blknos[~is_deleted] = np.arange(self.nblocks - len(removed_blknos))
self._blknos = algos.take_1d(
new_blknos, self._blknos, axis=0, allow_fill=False
)
self.blocks = tuple(
blk for i, blk in enumerate(self.blocks) if i not in set(removed_blknos)
)
if unfit_val_locs:
unfit_mgr_locs = np.concatenate(unfit_mgr_locs)
unfit_count = len(unfit_mgr_locs)
new_blocks = []
if value_is_extension_type:
# This code (ab-)uses the fact that sparse blocks contain only
# one item.
new_blocks.extend(
make_block(
values=value.copy(),
ndim=self.ndim,
placement=slice(mgr_loc, mgr_loc + 1),
)
for mgr_loc in unfit_mgr_locs
)
self._blknos[unfit_mgr_locs] = np.arange(unfit_count) + len(self.blocks)
self._blklocs[unfit_mgr_locs] = 0
else:
# unfit_val_locs contains BlockPlacement objects
unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:])
new_blocks.append(
make_block(
values=value_getitem(unfit_val_items),
ndim=self.ndim,
placement=unfit_mgr_locs,
)
)
self._blknos[unfit_mgr_locs] = len(self.blocks)
self._blklocs[unfit_mgr_locs] = np.arange(unfit_count)
self.blocks += tuple(new_blocks)
# Newly created block's dtype may already be present.
self._known_consolidated = False
def insert(self, loc: int, item, value, allow_duplicates: bool = False):
"""
Insert item at selected position.
Parameters
----------
loc : int
item : hashable
value : array_like
allow_duplicates: bool
If False, trying to insert non-unique item will raise
"""
if not allow_duplicates and item in self.items:
# Should this be a different kind of error??
raise ValueError("cannot insert {}, already exists".format(item))
if not isinstance(loc, int):
raise TypeError("loc must be int")
# insert to the axis; this could possibly raise a TypeError
new_axis = self.items.insert(loc, item)
block = make_block(values=value, ndim=self.ndim, placement=slice(loc, loc + 1))
for blkno, count in _fast_count_smallints(self._blknos[loc:]):
blk = self.blocks[blkno]
if count == len(blk.mgr_locs):
blk.mgr_locs = blk.mgr_locs.add(1)
else:
new_mgr_locs = blk.mgr_locs.as_array.copy()
new_mgr_locs[new_mgr_locs >= loc] += 1
blk.mgr_locs = new_mgr_locs
if loc == self._blklocs.shape[0]:
# np.append is a lot faster, let's use it if we can.
self._blklocs = np.append(self._blklocs, 0)
self._blknos = np.append(self._blknos, len(self.blocks))
else:
self._blklocs = np.insert(self._blklocs, loc, 0)
self._blknos = np.insert(self._blknos, loc, len(self.blocks))
self.axes[0] = new_axis
self.blocks += (block,)
self._shape = None
self._known_consolidated = False
if len(self.blocks) > 100:
self._consolidate_inplace()
def reindex_axis(
self, new_index, axis, method=None, limit=None, fill_value=None, copy=True
):
"""
Conform block manager to new index.
"""
new_index = ensure_index(new_index)
new_index, indexer = self.axes[axis].reindex(
new_index, method=method, limit=limit
)
return self.reindex_indexer(
new_index, indexer, axis=axis, fill_value=fill_value, copy=copy
)
def reindex_indexer(
self, new_axis, indexer, axis, fill_value=None, allow_dups=False, copy=True
):
"""
Parameters
----------
new_axis : Index
indexer : ndarray of int64 or None
axis : int
fill_value : object
allow_dups : bool
``indexer`` is a pandas-style indexer, with -1 indicating entries to fill.
"""
if indexer is None:
if new_axis is self.axes[axis] and not copy:
return self
result = self.copy(deep=copy)
result.axes = list(self.axes)
result.axes[axis] = new_axis
return result
self._consolidate_inplace()
# some axes don't allow reindexing with dups
if not allow_dups:
self.axes[axis]._can_reindex(indexer)
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(indexer, fill_tuple=(fill_value,))
else:
new_blocks = [
blk.take_nd(
indexer,
axis=axis,
fill_tuple=(
fill_value if fill_value is not None else blk.fill_value,
),
)
for blk in self.blocks
]
new_axes = list(self.axes)
new_axes[axis] = new_axis
return self.__class__(new_blocks, new_axes)
def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None):
"""
Slice/take blocks along axis=0.
Overloaded for SingleBlock
Returns
-------
new_blocks : list of Block
"""
allow_fill = fill_tuple is not None
sl_type, slobj, sllen = _preprocess_slice_or_indexer(
slice_or_indexer, self.shape[0], allow_fill=allow_fill
)
if self._is_single_block:
blk = self.blocks[0]
if sl_type in ("slice", "mask"):
return [blk.getitem_block(slobj, new_mgr_locs=slice(0, sllen))]
elif not allow_fill or self.ndim == 1:
if allow_fill and fill_tuple[0] is None:
_, fill_value = maybe_promote(blk.dtype)
fill_tuple = (fill_value,)
return [
blk.take_nd(
slobj,
axis=0,
new_mgr_locs=slice(0, sllen),
fill_tuple=fill_tuple,
)
]
if sl_type in ("slice", "mask"):
blknos = self._blknos[slobj]
blklocs = self._blklocs[slobj]
else:
blknos = algos.take_1d(
self._blknos, slobj, fill_value=-1, allow_fill=allow_fill
)
blklocs = algos.take_1d(
self._blklocs, slobj, fill_value=-1, allow_fill=allow_fill
)
# When filling blknos, make sure blknos is updated before appending to
# blocks list, that way new blkno is exactly len(blocks).
#
# FIXME: mgr_groupby_blknos must return mgr_locs in ascending order,
# pytables serialization will break otherwise.
blocks = []
for blkno, mgr_locs in libinternals.get_blkno_placements(blknos, group=True):
if blkno == -1:
# If we've got here, fill_tuple was not None.
fill_value = fill_tuple[0]
blocks.append(
self._make_na_block(placement=mgr_locs, fill_value=fill_value)
)
else:
blk = self.blocks[blkno]
# Otherwise, slicing along items axis is necessary.
if not blk._can_consolidate:
# A non-consolidatable block, it's easy, because there's
# only one item and each mgr loc is a copy of that single
# item.
for mgr_loc in mgr_locs:
newblk = blk.copy(deep=True)
newblk.mgr_locs = slice(mgr_loc, mgr_loc + 1)
blocks.append(newblk)
else:
blocks.append(
blk.take_nd(
blklocs[mgr_locs.indexer],
axis=0,
new_mgr_locs=mgr_locs,
fill_tuple=None,
)
)
return blocks
def _make_na_block(self, placement, fill_value=None):
# TODO: infer dtypes other than float64 from fill_value
if fill_value is None:
fill_value = np.nan
block_shape = list(self.shape)
block_shape[0] = len(placement)
dtype, fill_value = infer_dtype_from_scalar(fill_value)
block_values = np.empty(block_shape, dtype=dtype)
block_values.fill(fill_value)
return make_block(block_values, placement=placement)
def take(self, indexer, axis=1, verify=True, convert=True):
"""
Take items along any axis.
"""
self._consolidate_inplace()
indexer = (
np.arange(indexer.start, indexer.stop, indexer.step, dtype="int64")
if isinstance(indexer, slice)
else np.asanyarray(indexer, dtype="int64")
)
n = self.shape[axis]
if convert:
indexer = maybe_convert_indices(indexer, n)
if verify:
if ((indexer == -1) | (indexer >= n)).any():
raise Exception("Indices must be nonzero and less than the axis length")
new_labels = self.axes[axis].take(indexer)
return self.reindex_indexer(
new_axis=new_labels, indexer=indexer, axis=axis, allow_dups=True
)
def equals(self, other):
self_axes, other_axes = self.axes, other.axes
if len(self_axes) != len(other_axes):
return False
if not all(ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)):
return False
self._consolidate_inplace()
other._consolidate_inplace()
if len(self.blocks) != len(other.blocks):
return False
# canonicalize block order, using a tuple combining the type
# name and then mgr_locs because there might be unconsolidated
# blocks (say, Categorical) which can only be distinguished by
# the iteration order
def canonicalize(block):
return (block.dtype.name, block.mgr_locs.as_array.tolist())
self_blocks = sorted(self.blocks, key=canonicalize)
other_blocks = sorted(other.blocks, key=canonicalize)
return all(
block.equals(oblock) for block, oblock in zip(self_blocks, other_blocks)
)
def unstack(self, unstacker_func, fill_value):
"""Return a blockmanager with all blocks unstacked.
Parameters
----------
unstacker_func : callable
A (partially-applied) ``pd.core.reshape._Unstacker`` class.
fill_value : Any
fill_value for newly introduced missing values.
Returns
-------
unstacked : BlockManager
"""
n_rows = self.shape[-1]
dummy = unstacker_func(np.empty((0, 0)), value_columns=self.items)
new_columns = dummy.get_new_columns()
new_index = dummy.get_new_index()
new_blocks = []
columns_mask = []
for blk in self.blocks:
blocks, mask = blk._unstack(
partial(unstacker_func, value_columns=self.items[blk.mgr_locs.indexer]),
new_columns,
n_rows,
fill_value,
)
new_blocks.extend(blocks)
columns_mask.extend(mask)
new_columns = new_columns[columns_mask]
bm = BlockManager(new_blocks, [new_columns, new_index])
return bm
class SingleBlockManager(BlockManager):
""" manage a single block with """
ndim = 1
_is_consolidated = True
_known_consolidated = True
__slots__ = ()
def __init__(
self,
block: Block,
axis: Union[Index, List[Index]],
do_integrity_check: bool = False,
fastpath: bool = False,
):
if isinstance(axis, list):
if len(axis) != 1:
raise ValueError(
"cannot create SingleBlockManager with more than 1 axis"
)
axis = axis[0]
# passed from constructor, single block, single axis
if fastpath:
self.axes = [axis]
if isinstance(block, list):
# empty block
if len(block) == 0:
block = [np.array([])]
elif len(block) != 1:
raise ValueError(
"Cannot create SingleBlockManager with more than 1 block"
)
block = block[0]
else:
self.axes = [ensure_index(axis)]
# create the block here
if isinstance(block, list):
# provide consolidation to the interleaved_dtype
if len(block) > 1:
dtype = _interleaved_dtype(block)
block = [b.astype(dtype) for b in block]
block = _consolidate(block)
if len(block) != 1:
raise ValueError(
"Cannot create SingleBlockManager with more than 1 block"
)
block = block[0]
if not isinstance(block, Block):
block = make_block(block, placement=slice(0, len(axis)), ndim=1)
self.blocks = tuple([block])
def _post_setstate(self):
pass
@property
def _block(self):
return self.blocks[0]
@property
def _values(self):
return self._block.values
@property
def _blknos(self):
""" compat with BlockManager """
return None
@property
def _blklocs(self):
""" compat with BlockManager """
return None
def get_slice(self, slobj, axis=0):
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
return self.__class__(
self._block._slice(slobj), self.index[slobj], fastpath=True
)
@property
def index(self):
return self.axes[0]
def convert(self, **kwargs):
""" convert the whole block as one """
return self.apply("convert", **kwargs)
@property
def dtype(self):
return self._block.dtype
@property
def array_dtype(self):
return self._block.array_dtype
@property
def ftype(self):
return self._block.ftype
def get_dtype_counts(self):
return {self.dtype.name: 1}
def get_ftype_counts(self):
return {self.ftype: 1}
def get_dtypes(self):
return np.array([self._block.dtype])
def get_ftypes(self):
return np.array([self._block.ftype])
def external_values(self):
return self._block.external_values()
def internal_values(self):
return self._block.internal_values()
def get_values(self):
""" return a dense type view """
return np.array(self._block.to_dense(), copy=False)
@property
def _can_hold_na(self):
return self._block._can_hold_na
def is_consolidated(self):
return True
def _consolidate_check(self):
pass
def _consolidate_inplace(self):
pass
def delete(self, item):
"""
Delete single item from SingleBlockManager.
Ensures that self.blocks doesn't become empty.
"""
loc = self.items.get_loc(item)
self._block.delete(loc)
self.axes[0] = self.axes[0].delete(loc)
def fast_xs(self, loc):
"""
fast path for getting a cross-section
return a view of the data
"""
return self._block.values[loc]
def concat(self, to_concat, new_axis):
"""
Concatenate a list of SingleBlockManagers into a single
SingleBlockManager.
Used for pd.concat of Series objects with axis=0.
Parameters
----------
to_concat : list of SingleBlockManagers
new_axis : Index of the result
Returns
-------
SingleBlockManager
"""
non_empties = [x for x in to_concat if len(x) > 0]
# check if all series are of the same block type:
if len(non_empties) > 0:
blocks = [obj.blocks[0] for obj in non_empties]
if len({b.dtype for b in blocks}) == 1:
new_block = blocks[0].concat_same_type(blocks)
else:
values = [x.values for x in blocks]
values = concat_compat(values)
new_block = make_block(values, placement=slice(0, len(values), 1))
else:
values = [x._block.values for x in to_concat]
values = concat_compat(values)
new_block = make_block(values, placement=slice(0, len(values), 1))
mgr = SingleBlockManager(new_block, new_axis)
return mgr
# --------------------------------------------------------------------
# Constructor Helpers
def create_block_manager_from_blocks(blocks, axes):
try:
if len(blocks) == 1 and not isinstance(blocks[0], Block):
# if blocks[0] is of length 0, return empty blocks
if not len(blocks[0]):
blocks = []
else:
# It's OK if a single block is passed as values, its placement
# is basically "all items", but if there're many, don't bother
# converting, it's an error anyway.
blocks = [
make_block(values=blocks[0], placement=slice(0, len(axes[0])))
]
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except ValueError as e:
blocks = [getattr(b, "values", b) for b in blocks]
tot_items = sum(b.shape[0] for b in blocks)
construction_error(tot_items, blocks[0].shape[1:], axes, e)
def create_block_manager_from_arrays(arrays, names, axes):
try:
blocks = form_blocks(arrays, names, axes)
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except ValueError as e:
construction_error(len(arrays), arrays[0].shape, axes, e)
def construction_error(tot_items, block_shape, axes, e=None):
""" raise a helpful message about our construction """
passed = tuple(map(int, [tot_items] + list(block_shape)))
# Correcting the user facing error message during dataframe construction
if len(passed) <= 2:
passed = passed[::-1]
implied = tuple(len(ax) for ax in axes)
# Correcting the user facing error message during dataframe construction
if len(implied) <= 2:
implied = implied[::-1]
if passed == implied and e is not None:
raise e
if block_shape[0] == 0:
raise ValueError("Empty data passed with indices specified.")
raise ValueError(
"Shape of passed values is {0}, indices imply {1}".format(passed, implied)
)
# -----------------------------------------------------------------------
def form_blocks(arrays, names, axes):
# put "leftover" items in float bucket, where else?
# generalize?
items_dict = defaultdict(list)
extra_locs = []
names_idx = ensure_index(names)
if names_idx.equals(axes[0]):
names_indexer = np.arange(len(names_idx))
else:
assert names_idx.intersection(axes[0]).is_unique
names_indexer = names_idx.get_indexer_for(axes[0])
for i, name_idx in enumerate(names_indexer):
if name_idx == -1:
extra_locs.append(i)
continue
k = names[name_idx]
v = arrays[name_idx]
block_type = get_block_type(v)
items_dict[block_type.__name__].append((i, k, v))
blocks = []
if len(items_dict["FloatBlock"]):
float_blocks = _multi_blockify(items_dict["FloatBlock"])
blocks.extend(float_blocks)
if len(items_dict["ComplexBlock"]):
complex_blocks = _multi_blockify(items_dict["ComplexBlock"])
blocks.extend(complex_blocks)
if len(items_dict["TimeDeltaBlock"]):
timedelta_blocks = _multi_blockify(items_dict["TimeDeltaBlock"])
blocks.extend(timedelta_blocks)
if len(items_dict["IntBlock"]):
int_blocks = _multi_blockify(items_dict["IntBlock"])
blocks.extend(int_blocks)
if len(items_dict["DatetimeBlock"]):
datetime_blocks = _simple_blockify(items_dict["DatetimeBlock"], _NS_DTYPE)
blocks.extend(datetime_blocks)
if len(items_dict["DatetimeTZBlock"]):
dttz_blocks = [
make_block(array, klass=DatetimeTZBlock, placement=[i])
for i, _, array in items_dict["DatetimeTZBlock"]
]
blocks.extend(dttz_blocks)
if len(items_dict["BoolBlock"]):
bool_blocks = _simple_blockify(items_dict["BoolBlock"], np.bool_)
blocks.extend(bool_blocks)
if len(items_dict["ObjectBlock"]) > 0:
object_blocks = _simple_blockify(items_dict["ObjectBlock"], np.object_)
blocks.extend(object_blocks)
if len(items_dict["CategoricalBlock"]) > 0:
cat_blocks = [
make_block(array, klass=CategoricalBlock, placement=[i])
for i, _, array in items_dict["CategoricalBlock"]
]
blocks.extend(cat_blocks)
if len(items_dict["ExtensionBlock"]):
external_blocks = [
make_block(array, klass=ExtensionBlock, placement=[i])
for i, _, array in items_dict["ExtensionBlock"]
]
blocks.extend(external_blocks)
if len(items_dict["ObjectValuesExtensionBlock"]):
external_blocks = [
make_block(array, klass=ObjectValuesExtensionBlock, placement=[i])
for i, _, array in items_dict["ObjectValuesExtensionBlock"]
]
blocks.extend(external_blocks)
if len(extra_locs):
shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:])
# empty items -> dtype object
block_values = np.empty(shape, dtype=object)
block_values.fill(np.nan)
na_block = make_block(block_values, placement=extra_locs)
blocks.append(na_block)
return blocks
def _simple_blockify(tuples, dtype):
""" return a single array of a block that has a single dtype; if dtype is
not None, coerce to this dtype
"""
values, placement = _stack_arrays(tuples, dtype)
# TODO: CHECK DTYPE?
if dtype is not None and values.dtype != dtype: # pragma: no cover
values = values.astype(dtype)
block = make_block(values, placement=placement)
return [block]
def _multi_blockify(tuples, dtype=None):
""" return an array of blocks that potentially have different dtypes """
# group by dtype
grouper = itertools.groupby(tuples, lambda x: x[2].dtype)
new_blocks = []
for dtype, tup_block in grouper:
values, placement = _stack_arrays(list(tup_block), dtype)
block = make_block(values, placement=placement)
new_blocks.append(block)
return new_blocks
def _stack_arrays(tuples, dtype):
# stack the input arrays into a single 2D ndarray of the given dtype
def _asarray_compat(x):
if isinstance(x, ABCSeries):
return x._values
else:
return np.asarray(x)
def _shape_compat(x):
if isinstance(x, ABCSeries):
return (len(x),)
else:
return x.shape
placement, names, arrays = zip(*tuples)
first = arrays[0]
shape = (len(arrays),) + _shape_compat(first)
stacked = np.empty(shape, dtype=dtype)
for i, arr in enumerate(arrays):
stacked[i] = _asarray_compat(arr)
return stacked, placement
def _interleaved_dtype(
blocks: List[Block],
) -> Optional[Union[np.dtype, ExtensionDtype]]:
"""Find the common dtype for `blocks`.
Parameters
----------
blocks : List[Block]
Returns
-------
dtype : Optional[Union[np.dtype, ExtensionDtype]]
None is returned when `blocks` is empty.
"""
if not len(blocks):
return None
return find_common_type([b.dtype for b in blocks])
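# e.g. int64 and float64 blocks give float64; mixing in an object block gives object.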
def _consolidate(blocks):
"""
Merge blocks having same dtype, exclude non-consolidating blocks
"""
# sort by _can_consolidate, dtype
gkey = lambda x: x._consolidate_key
grouper = itertools.groupby(sorted(blocks, key=gkey), gkey)
new_blocks = []
for (_can_consolidate, dtype), group_blocks in grouper:
merged_blocks = _merge_blocks(
list(group_blocks), dtype=dtype, _can_consolidate=_can_consolidate
)
new_blocks = _extend_blocks(merged_blocks, new_blocks)
return new_blocks
def _compare_or_regex_search(a, b, regex=False):
"""
Compare two array_like inputs of the same shape or two scalar values
Calls operator.eq or re.search, depending on regex argument. If regex is
True, perform an element-wise regex matching.
Parameters
----------
a : array_like or scalar
b : array_like or scalar
regex : bool, default False
Returns
-------
mask : array_like of bool
"""
if not regex:
op = lambda x: operator.eq(x, b)
else:
op = np.vectorize(
lambda x: bool(re.search(b, x)) if isinstance(x, str) else False
)
is_a_array = isinstance(a, np.ndarray)
is_b_array = isinstance(b, np.ndarray)
result = op(a)
if is_scalar(result) and (is_a_array or is_b_array):
type_names = [type(a).__name__, type(b).__name__]
if is_a_array:
type_names[0] = "ndarray(dtype={dtype})".format(dtype=a.dtype)
if is_b_array:
type_names[1] = "ndarray(dtype={dtype})".format(dtype=b.dtype)
raise TypeError(
"Cannot compare types {a!r} and {b!r}".format(
a=type_names[0], b=type_names[1]
)
)
return result
def _transform_index(index, func, level=None):
"""
Apply function to all values found in index.
This includes transforming multiindex entries separately.
Only apply function to one level of the MultiIndex if level is specified.
"""
if isinstance(index, MultiIndex):
if level is not None:
items = [
tuple(func(y) if i == level else y for i, y in enumerate(x))
for x in index
]
else:
items = [tuple(func(y) for y in x) for x in index]
return MultiIndex.from_tuples(items, names=index.names)
else:
items = [func(x) for x in index]
return Index(items, name=index.name, tupleize_cols=False)
def _fast_count_smallints(arr):
"""Faster version of set(arr) for sequences of small numbers."""
counts = np.bincount(arr.astype(np.int_))
nz = counts.nonzero()[0]
return np.c_[nz, counts[nz]]
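# e.g. _fast_count_smallints(np.array([0, 2, 2, 5])) -> array([[0, 1], [2, 2], [5, 1]])  (value, count pairs)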
def _preprocess_slice_or_indexer(slice_or_indexer, length, allow_fill):
if isinstance(slice_or_indexer, slice):
return (
"slice",
slice_or_indexer,
|
libinternals.slice_len(slice_or_indexer, length)
|
pandas._libs.internals.slice_len
|
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import seaborn as sns
import tqdm
def load_data(index=0):
""" 0: C7
1: C8
2: C9
3: C11
4: C13
5: C14
6: C15
7: C16
Note that C7 and C13 included a short break
(about 100 timestamps long)
between the two procedures.
"""
fp = os.path.dirname(__file__)
if index == 0:
df = pd.read_csv(fp + '/C7-1.csv.gz')
df = pd.concat([df, pd.read_csv(fp + '/C7-2.csv.gz')])
df = df.reset_index(drop=True)
df.Timestamp = df.index.values
return df
elif index == 1:
return pd.read_csv(fp + '/C8.csv.gz')
elif index == 2:
return pd.read_csv(fp + '/C9.csv.gz')
elif index == 3:
return pd.read_csv(fp + '/C11.csv.gz')
elif index == 4:
df = pd.read_csv(fp + '/C13-1.csv.gz')
df = pd.concat([df, pd.read_csv(fp + '/C13-2.csv.gz')])
df = df.reset_index(drop=True)
df.Timestamp = df.index.values
return df
elif index == 5:
return pd.read_csv(fp + '/C14.csv.gz')
elif index == 6:
return
|
pd.read_csv(fp + '/C15.csv.gz')
|
pandas.read_csv
|
import re
import pandas as pd
import nltk
import multiprocessing as mp
from nltk.tokenize import word_tokenize
from nltk.stem.porter import *
from nltk.corpus import stopwords
from tqdm import tqdm
import sys
# User-defined imports: extend sys.path so modules from ../Preprocess can be imported
sys.path.append('../Preprocess')
from dataJoin import joinData
from parallelLoad import parallelLoad
def tokenize(df):
# To measure the progress of our lambda apply functions
# Need to specify since they will be running on separate processes
tqdm.pandas()
# Do a tokenization by row
#print('Tokenizing text...')
#print(df.loc[69377:69380,['text']]) # This will have NA as text
    # Drop the NA tweet texts so we don't have problems with our tokenizers
df = df.dropna(subset=['text'])
# Do the apply method
#df['tokenized_text'] = df.progress_apply(lambda row: word_tokenize(row['text']), axis=1)
df['tokenized_text'] = df.apply(lambda row: word_tokenize(row['text']), axis=1)
# Return df
return df
def wordCount(df):
    # Also store the length of the token list (could be useful?)
#print('Getting number of words...')
#df['tweets_length'] = df.progress_apply(lambda row: len(row['tokenized_text']), axis=1)
df['tweets_length'] = df.apply(lambda row: len(row['tokenized_text']), axis=1)
# Return the new df
return df
def steem(df):
#print('Stemming Words...')
# Create an instance of the porter stemmer
stemmer = PorterStemmer()
    # Stem the words
#df['stemmed_tweets'] = df['tokenized_text'].progress_apply(lambda words:[stemmer.stem(word) for word in words])
df['stemmed_tweets'] = df['tokenized_text'].apply(lambda words:[stemmer.stem(word) for word in words])
# Return the new stemmed df
return df
def removeUrls(df):
#print('Removing Urls...')
# Remove the urls/# etc
#df['stemmed_tweets'] = df['stemmed_tweets'].progress_apply(lambda words:[ re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", "", word) for word in words])
df['stemmed_tweets'] = df['stemmed_tweets'].apply(lambda words:[ re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", "", word) for word in words])
# Return the df without URLs
return df
def removeStopWords(df):
# Set-up remove of stop words
stop_words = set(stopwords.words('english'))
# Get the multidimensional array
stemmedWords = df['stemmed_tweets'].values.reshape(-1,).tolist()
# Flatten to 1d array
#print('Flattening the array...')
flattenedStemmedWords = [x for sublist in stemmedWords for x in sublist]
#print('The flattened stemmed words are: \n', flattenedStemmedWords[:10])
# Cleanup of the stemmed words because they are dirty :O
cleanedStemmedWords = []
#print('Removing stop words and punctuation...')
for word in flattenedStemmedWords:
        # Skip punctuation tokens (commas, periods, etc.), very short tokens and stop words
if word not in [
",",
".",
"``",
"''",
";",
"?",
"--",
")",
"(",
":",
"!",
"...",
"http",
"u2013"
] and len(word) > 2 and word not in stop_words:
cleanedStemmedWords.append(word.lower())
#print('The cleaned Stemmed Words are: \n',cleanedStemmedWords[:30])
return cleanedStemmedWords
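# --- Illustrative end-to-end sketch (not part of the original pipeline) --------------
# Chains the helpers above on a tiny in-memory DataFrame. Assumes the NLTK 'punkt'
# and 'stopwords' corpora have already been downloaded; the sample tweets are invented.
def _demo_preprocess_pipeline():
    sample = pd.DataFrame({'text': ["Bots love to retweet http://spam.example now", None],
                           'bot': [1, 0]})
    sample = tokenize(sample)            # adds 'tokenized_text', drops the NA row
    sample = wordCount(sample)           # adds 'tweets_length'
    sample = steem(sample)               # adds 'stemmed_tweets'
    sample = removeUrls(sample)          # strips URLs/mentions from the stems
    return removeStopWords(sample)       # flattened, lower-cased content words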
if __name__ =='__main__':
# To measure the progress of our lambda apply functions
tqdm.pandas()
print('Loading data...')
    # Start data loading using the parallelLoad(route_to_files) parallelization function!
filesRoute = '../data/traditionalSpamBotsChunks1/'
botData = parallelLoad(filesRoute)
filesRoute = '../data/genuineTweetsChunks/'
genuineData = parallelLoad(filesRoute)
print('Joining data...')
df = joinData(botData.head(50000), genuineData.head(5000))
# Drop all columns but the one containing the tweets text
df = df[['text','bot']]
# Divide data into chunks
n = 1000 #chunk row size
list_df = [df[i:i+n] for i in range(0,df.shape[0],n)]
    # Use 8 worker processes
    pool = mp.Pool(8)  # one pool shared by all the parallel steps below
print('Tokenizing text...')
# Create a list of async functions
funclist = []
for df in list_df:
        # Process each df using an async function
f = pool.apply_async(tokenize, [df])
# Append it to a list of async functions
funclist.append(f)
result = []
for f in tqdm(funclist):
# Timeout in 2 mins
# Use the get method on the f object generated by apply_async
        # to retrieve the result once the process is finished
result.append(f.get(timeout=120))
# Concat results
df = pd.concat(result)
# Divide data into chunks for parallel processing
n = 1000 #chunk row size
list_df = [df[i:i+n] for i in range(0,df.shape[0],n)]
print('Counting number of words...')
# Create a list of async functions
funclist = []
for df in list_df:
        # Process each df using an async function
f = pool.apply_async(wordCount, [df])
# Append it to a list of async functions
funclist.append(f)
result = []
for f in tqdm(funclist):
# Timeout in 2 mins
# Use the get method on the f object generated by apply_async
        # to retrieve the result once the process is finished
result.append(f.get(timeout=120))
# Concat results
df = pd.concat(result)
print('Stemming...')
# Divide data into chunks for parallel processing
n = 1000 #chunk row size
list_df = [df[i:i+n] for i in range(0,df.shape[0],n)]
# Create a list of async functions
funclist = []
for df in list_df:
        # Process each df using an async function
f = pool.apply_async(steem, [df])
# Append it to a list of async functions
funclist.append(f)
result = []
for f in tqdm(funclist):
# Timeout in 2 mins
# Use the get method on the f object generated by apply_async
        # to retrieve the result once the process is finished
result.append(f.get(timeout=120))
# Concat results
df = pd.concat(result)
print('Removing Urls...')
# Divide data into chunks for parallel processing
n = 1000 #chunk row size
list_df = [df[i:i+n] for i in range(0,df.shape[0],n)]
# Create a list of async functions
funclist = []
for df in list_df:
        # Process each df using an async function
f = pool.apply_async(removeUrls, [df])
# Append it to a list of async functions
funclist.append(f)
result = []
for f in tqdm(funclist):
# Timeout in 2 mins
# Use the get method on the f object generated by apply_async
        # to retrieve the result once the process is finished
result.append(f.get(timeout=120))
# Concat results
df =
|
pd.concat(result)
|
pandas.concat
|
import pendulum as pdl
import sys
sys.path.append(".")
# the memoization-related library
import loguru
import itertools
import portion
import klepto.keymaps
import CacheIntervals as ci
from CacheIntervals.utils import flatten
from CacheIntervals.utils import pdl2pd, pd2pdl
from CacheIntervals.utils import Timer
from CacheIntervals.Intervals import pd2po, po2pd
from CacheIntervals.RecordInterval import RecordIntervals, RecordIntervalsPandas
class QueryRecorder:
'''
A helper class
'''
pass
class MemoizationWithIntervals(object):
'''
The purpose of this class is to optimise
    the number of calls to a function retrieving
    possibly disjoint intervals:
    - do standard caching for a given function
    - additionally, a call for a date range extending one
      already cached is expected to yield a pandas
      DataFrame which can be obtained by concatenating
      the cached result and a -- hopefully much --
      smaller query
Maintains a list of intervals that have been
called.
With a new interval:
-
'''
keymapper = klepto.keymaps.stringmap(typed=False, flat=False)
def __init__(self,
pos_args=None,
names_kwarg=None,
classrecorder=RecordIntervalsPandas,
aggregation=lambda listdfs: pd.concat(listdfs, axis=0),
debug=False,
# memoization=klepto.lru_cache(
# cache=klepto.archives.hdf_archive(
# f'{pdl.today().to_date_string()}_memoization.hdf5'),
# keymap=keymapper),
memoization=klepto.lru_cache(
cache=klepto.archives.dict_archive(),
keymap=keymapper),
**kwargs):
'''
:param pos_args: the indices of the positional
arguments that will be handled as intervals
:param names_kwarg: the name of the named parameters
that will be handled as intervals
:param classrecorder: the interval recorder type
we want to use
:param memoization: a memoization algorithm
'''
# A dictionary of positional arguments indices
# that are intervals
self.argsi = {}
self.kwargsi = {}
# if pos_args is not None:
# for posarg in pos_args:
# self.argsi[posarg] = classrecorder(**kwargs)
self.pos_args_itvl = pos_args if pos_args is not None else []
#print(self.args)
# if names_kwarg is not None:
# for namedarg in names_kwarg:
# self.kwargsi[namedarg] = classrecorder(**kwargs)
self.names_kwargs_itvl = names_kwarg if names_kwarg is not None else {}
#print(self.kwargs)
self.memoization = memoization
self.aggregation = aggregation
self.debugQ = debug
self.argsdflt = None
self.kwargsdflt = None
self.time_last_call = pdl.today()
self.classrecorder = classrecorder
self.kwargsrecorder = kwargs
self.argssolver = None
self.query_recorder = QueryRecorder()
def __call__(self, f):
'''
The interval memoization leads to several calls to the
standard memoised function and generates a list of return values.
        The aggregation is needed for the doubly lazy
        function to have the same signature as the original function.
        To access the underlying memoized function, pass
        get_function_cachedQ=True in the kwargs of the
        overloaded call (not of this function).
:param f: the function to memoize
:return: the wrapper to the memoized function
'''
if self.argssolver is None:
self.argssolver = ci.Functions.ArgsSolver(f, split_args_kwargsQ=True)
@self.memoization
def f_cached(*args, **kwargs):
'''
The cached function is used for a double purpose:
1. for standard calls, will act as the memoised function in a traditional way
            2. Additionally, when passed parameters of type QueryRecorder, it will create
or retrieve the interval recorders associated with the values of
non-interval parameters.
In this context, we use the cached function as we would a dictionary.
'''
QueryRecorderQ = False
args_new = []
kwargs_new = {}
'''
check whether this is a standard call to the user function
or a request for the interval recorders
'''
for i,arg in enumerate(args):
if isinstance(arg, QueryRecorder):
args_new.append(self.classrecorder(**self.kwargsrecorder))
QueryRecorderQ = True
else:
args_new.append(args[i])
for name in kwargs:
if isinstance(kwargs[name], QueryRecorder):
kwargs_new[name] = self.classrecorder(**self.kwargsrecorder)
QueryRecorderQ = True
else:
kwargs_new[name] = kwargs[name]
if QueryRecorderQ:
return args_new, kwargs_new
return f(*args, **kwargs)
def wrapper(*args, **kwargs):
if kwargs.get('get_function_cachedQ', False):
return f_cached
#loguru.logger.debug(f'function passed: {f_cached}')
loguru.logger.debug(f'args passed: {args}')
loguru.logger.debug(f'kwargs passed: {kwargs}')
# First pass: resolve the recorders
dargs_exp, kwargs_exp = self.argssolver(*args, **kwargs)
# Intervals are identified by position and keyword name
# 1. First get the interval recorders
args_exp = list(dargs_exp.values())
args_exp_copy = args_exp.copy()
kwargs_exp_copy = kwargs_exp.copy()
for i in self.pos_args_itvl:
args_exp_copy[i] = self.query_recorder
for name in self.names_kwargs_itvl:
kwargs_exp_copy[name] = self.query_recorder
args_with_ri, kwargs_with_ri = f_cached(*args_exp_copy, **kwargs_exp_copy)
            # 2. Now get the actual list of intervals
for i in self.pos_args_itvl:
# reuse args_exp_copy to store the list
args_exp_copy[i] = args_with_ri[i](args_exp[i])
for name in self.names_kwargs_itvl:
# reuse kwargs_exp_copy to store the list
kwargs_exp_copy[name] = kwargs_with_ri[name](kwargs_exp[name])
            '''3. Then generate all combinations of parameters
3.a - args'''
ns_args = range(len(args_exp))
lists_possible_args = [[args_exp[i]] if i not in self.pos_args_itvl else args_exp_copy[i] for i in ns_args]
# Take the cartesian product of these
calls_args = list( map(list,itertools.product(*lists_possible_args)))
'''3.b kwargs'''
#kwargs_exp_vals = kwargs_exp_copy.values()
names_kwargs = list(kwargs_exp_copy.keys())
lists_possible_kwargs = [[kwargs_exp[name]] if name not in self.names_kwargs_itvl
else kwargs_exp_copy[name] for name in names_kwargs]
calls_kwargs = list(map(lambda l: dict(zip(names_kwargs,l)), itertools.product(*lists_possible_kwargs)))
calls = list(itertools.product(calls_args, calls_kwargs))
if self.debugQ:
results = []
for call in calls:
with Timer() as timer:
results.append(f_cached(*call[0], **call[1]) )
print('Timer to demonstrate caching:')
timer.display(printQ=True)
else:
results = [f_cached(*call[0], **call[1]) for call in calls]
result = self.aggregation(results)
return result
return wrapper
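# --- Illustrative sketch -------------------------------------------------------------
# Minimal use of the decorator (the fuller demonstrations follow in __main__ below):
# 'load_interval' is a made-up loader whose 'interval' keyword is treated as a pandas
# Interval, so overlapping requests are split into cached and new sub-intervals.
# Everything is wrapped in a function so nothing runs at import time.
def _demo_memoization_with_intervals():
    import pandas as pd
    @MemoizationWithIntervals(None, ['interval'], aggregation=list)
    def load_interval(tag, interval=None):
        return [(tag, interval)]
    first = load_interval('demo', interval=pd.Interval(pd.Timestamp('2021-01-01'),
                                                       pd.Timestamp('2021-01-02')))
    second = load_interval('demo', interval=pd.Interval(pd.Timestamp('2020-12-31'),
                                                        pd.Timestamp('2021-01-02')))
    # the second call should only have to compute the piece not already cached
    return first, second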
if __name__ == "__main__":
import logging
import daiquiri
import pandas as pd
import time
daiquiri.setup(logging.DEBUG)
logging.getLogger('OneTick64').setLevel(logging.WARNING)
logging.getLogger('databnpp.ODCB').setLevel(logging.WARNING)
logging.getLogger('requests_kerberos').setLevel(logging.WARNING)
pd.set_option('display.max_rows', 200)
pd.set_option('display.width', 600)
pd.set_option('display.max_columns', 200)
tssixdaysago = pdl2pd(pdl.yesterday('UTC').add(days=-5))
tsfivedaysago = pdl2pd(pdl.yesterday('UTC').add(days=-4))
tsfourdaysago = pdl2pd(pdl.yesterday('UTC').add(days=-3))
tsthreedaysago = pdl2pd(pdl.yesterday('UTC').add(days=-2))
tstwodaysago = pdl2pd(pdl.yesterday('UTC').add(days=-1))
tsyesterday = pdl2pd(pdl.yesterday('UTC'))
tstoday = pdl2pd(pdl.today('UTC'))
tstomorrow = pdl2pd(pdl.tomorrow('UTC'))
tsintwodays = pdl2pd(pdl.tomorrow('UTC').add(days=1))
tsinthreedays = pdl2pd(pdl.tomorrow('UTC').add(days=2))
def print_calls(calls):
print( list( map( lambda i: (i.left, i.right), calls)))
def print_calls_dates(calls):
print( list( map( lambda i:
(pd2pdl(i.left).to_date_string(), pd2pdl(i.right).to_date_string()),
calls)))
def display_calls(calls):
loguru.logger.info( list( map( lambda i:
(pd2pdl(i.left).to_date_string(), pd2pdl(i.right).to_date_string()),
calls)))
# Testing record intervals -> ok
if True:
itvals = RecordIntervals()
calls = itvals(portion.closed(pdl.yesterday(), pdl.today()))
print(list(map( lambda i: (i.lower.to_date_string(), i.upper.to_date_string()), calls)))
print(list(map(lambda i: type(i), calls)))
calls = itvals( portion.closed(pdl.yesterday().add(days=-1), pdl.today().add(days=1)))
#print(calls)
print( list( map( lambda i: (i.lower.to_date_string(), i.upper.to_date_string()),
calls)))
# Testing record intervals pandas -> ok
if True:
itvals = RecordIntervalsPandas()
# yesterday -> today
calls = itvals(pd.Interval(pdl2pd(pdl.yesterday()), pdl2pd(pdl.today()), closed='left'))
print( list( map( lambda i: (pd2pdl(i.left).to_date_string(), pd2pdl(i.right).to_date_string()), calls)))
# day before yesterday -> tomorrow: should yield 3 intervals
calls = itvals(pd.Interval(pdl2pd(pdl.yesterday().add(days=-1)), pdl2pd(pdl.today().add(days=1))))
print( list( map( lambda i: (pd2pdl(i.left).to_date_string(), pd2pdl(i.right).to_date_string()), calls)))
# day before yesterday -> day after tomorrow: should yield 4 intervals
calls = itvals(
pd.Interval(pdl2pd(pdl.yesterday().add(days=-1)),
pdl2pd(pdl.tomorrow().add(days=1))))
print(
list(
map(
lambda i:
(pd2pdl(i.left).to_date_string(), pd2pdl(i.right).to_date_string()),
calls)))
# 2 days before yesterday -> 2day after tomorrow: should yield 6 intervals
calls = itvals(
pd.Interval(pdl2pd(pdl.yesterday().add(days=-2)),
pdl2pd(pdl.tomorrow().add(days=2))))
print(list(map( lambda i:
(pd2pdl(i.left).to_date_string(), pd2pdl(i.right).to_date_string()),
calls)))
# Further tests on record intervals pandas
if False:
itvals = RecordIntervalsPandas()
calls = itvals(pd.Interval(tstwodaysago, tstomorrow, closed='left'))
display_calls(calls)
calls = itvals( pd.Interval(tstwodaysago, tsyesterday))
display_calls(calls)
calls = itvals(
pd.Interval(tstwodaysago, tsintwodays))
display_calls(calls)
calls = itvals(
pd.Interval(pdl2pd(pdl.yesterday().add(days=-2)),
pdl2pd(pdl.tomorrow().add(days=2))))
display_calls(calls)
# proof-of_concept of decorator to modify function parameters
if False:
class dector_arg:
# a toy model
def __init__(self,
pos_arg=None,
f_arg=None,
name_kwarg=None,
f_kwarg=None):
'''
:param pos_arg: the positional argument
:param f_arg: the function to apply to the positional argument
:param name_kwarg: the keyword argument
:param f_kwarg: the function to apply to the keyword argument
'''
self.args = {}
self.kwargs = {}
if pos_arg:
self.args[pos_arg] = f_arg
print(self.args)
if name_kwarg:
self.kwargs[name_kwarg] = f_kwarg
print(self.kwargs)
def __call__(self, f):
'''
the decorator action
:param f: the function to decorate
:return: a function whose arguments
have the function f_args and f_kwargs
pre-applied.
'''
self.f = f
def inner_func(*args, **kwargs):
print(f'function passed: {self.f}')
print(f'args passed: {args}')
print(f'kwargs passed: {kwargs}')
largs = list(args)
for i, f in self.args.items():
print(i)
print(args[i])
largs[i] = f(args[i])
for name, f in self.kwargs.items():
kwargs[name] = f(kwargs[name])
return self.f(*largs, **kwargs)
return inner_func
dec = dector_arg(pos_arg=0,
f_arg=lambda x: x + 1,
name_kwarg='z',
f_kwarg=lambda x: x + 1)
@dector_arg(1, lambda x: x + 1, 'z', lambda x: x + 1)
def g(x, y, z=3):
'''
            The decorated function should add one to the second
            positional argument and to the keyword argument z.
:param x:
:param y:
:param z:
:return:
'''
print(f'x->{x}')
print(f'y->{y}')
print(f'z->{z}')
g(1, 10, z=100)
if False:
memo = MemoizationWithIntervals()
# testing MemoizationWithIntervals
# typical mechanism
if False:
@MemoizationWithIntervals(
None, ['interval'],
aggregation=list,
debug=True,
memoization=klepto.lru_cache(
maxsize=200,
cache=klepto.archives.hdf_archive(
f'{pdl.today().to_date_string()}_memoisation.hdf5'),
keymap=klepto.keymaps.stringmap(typed=False, flat=False)))
def function_with_interval_param(dummy1,dummy2, kdummy=1,
interval=pd.Interval(tstwodaysago, tstomorrow)):
time.sleep(1)
print('****')
print(f'dummy1: {dummy1}, dummy2: {dummy2}')
print(f'kdummy: {kdummy}')
print(f'interval: {interval}')
return [dummy1, dummy2, kdummy, interval]
print('=*=*=*=* MECHANISM DEMONSTRATION =*=*=*=*')
print('==== First pass ===')
print("initialisation with an interval from yesterday to today")
# function_with_interval_params(pd.Interval(pdl.yesterday(), pdl.today(),closed='left'),
# interval1 = pd.Interval(pdl.yesterday().add(days=0),
# pdl.today(), closed='both')
# )
print( f'Final result:\n{function_with_interval_param(0, 1, interval=pd.Interval(tsyesterday, tstoday))}')
print('==== Second pass ===')
print("request for data from the day before yesterday to today")
print("expected split in two intervals with results from yesterday to today being cached")
print(
f'Final result: {function_with_interval_param(0,1, interval=pd.Interval(tstwodaysago, tstoday))}'
)
print('==== 3rd pass ===')
print("request for data from three days to yesterday")
print("expected split in two intervals")
print(f'Final result:\n {function_with_interval_param(0,1, interval=pd.Interval(tsthreedaysago, tsyesterday))}' )
print('==== 4th pass ===')
print("request for data from three days to tomorrow")
print("expected split in three intervals")
print(f'Final result:\n\
{function_with_interval_param(0,1, interval1= pd.Interval(tsthreedaysago, tstomorrow))}' )
print('==== 5th pass ===')
print("request for data from two days ago to today with different first argument")
print("No caching expected and one interval")
print( f'Final result:\n{function_with_interval_param(1, 1, interval=pd.Interval(tstwodaysago, tstoday))}' )
print('==== 6th pass ===')
print("request for data from three days ago to today with different first argument")
print("Two intervals expected")
print( f'Final result: {function_with_interval_param(1, 1, interval=pd.Interval(tsthreedaysago, tstoday))}' )
# Testing with an interval as position argument and one interval as keyword argument
if False:
@MemoizationWithIntervals(
[0], ['interval1'],
aggregation=list,
debug=True,
memoization=klepto.lru_cache(
maxsize=200,
cache=klepto.archives.hdf_archive(
f'{pdl.today().to_date_string()}_memoisation.hdf5'),
keymap=klepto.keymaps.stringmap(typed=False, flat=False)))
def function_with_interval_params(interval0,
interval1=pd.Interval(tstwodaysago, tstomorrow)):
time.sleep(1)
print('***')
print(f'interval0: {interval0}')
print(f'interval1: {interval1}')
return (interval0, interval1)
print('=*=*=*=* DEMONSTRATION WITH TWO INTERVAL PARAMETERS =*=*=*=*')
print('==== First pass ===')
        print(f'Initialisation: first interval:\nyesterday to today - second interval: two days ago to tomorrow')
print(f'Final result:\n{function_with_interval_params(pd.Interval(tsyesterday, tstoday))}')
print('==== Second pass ===')
        print(f'Call with first interval:\n3 days ago to today - second interval: unchanged')
print('Expected caching and split of first interval in two')
print( f'Final result: {function_with_interval_params(pd.Interval(tsthreedaysago, tstoday))}' )
print('==== 3rd pass ===')
print(f'Call with first interval:\nunchanged - second interval: yest to today')
print('Expected only cached results and previous split of first interval')
print(f'Final result:\n {function_with_interval_params(pd.Interval(tsthreedaysago, tstoday), interval1 =
|
pd.Interval(tsyesterday, tstoday)
|
pandas.Interval
|
"""
Testing the ``modelchain`` module.
SPDX-FileCopyrightText: 2019 oemof developer group <<EMAIL>>
SPDX-License-Identifier: MIT
"""
import pandas as pd
import numpy as np
import pytest
from pandas.util.testing import assert_series_equal
import windpowerlib.wind_turbine as wt
import windpowerlib.modelchain as mc
class TestModelChain:
@classmethod
def setup_class(self):
"""Setup default values"""
self.test_turbine = {'hub_height': 100,
'turbine_type': 'E-126/4200',
'power_curve': pd.DataFrame(
data={'value': [0.0, 4200 * 1000],
'wind_speed': [0.0, 25.0]})}
temperature_2m = np.array([[267], [268]])
temperature_10m = np.array([[267], [266]])
pressure_0m = np.array([[101125], [101000]])
wind_speed_8m = np.array([[4.0], [5.0]])
wind_speed_10m = np.array([[5.0], [6.5]])
roughness_length = np.array([[0.15], [0.15]])
self.weather_df = pd.DataFrame(
np.hstack((temperature_2m, temperature_10m, pressure_0m,
wind_speed_8m, wind_speed_10m, roughness_length)),
index=[0, 1],
columns=[np.array(['temperature', 'temperature', 'pressure',
'wind_speed', 'wind_speed',
'roughness_length']),
np.array([2, 10, 0, 8, 10, 0])])
def test_temperature_hub(self):
# Test modelchain with temperature_model='linear_gradient'
test_mc = mc.ModelChain(wt.WindTurbine(**self.test_turbine))
# Test modelchain with temperature_model='interpolation_extrapolation'
test_mc_2 = mc.ModelChain(
wt.WindTurbine(**self.test_turbine),
temperature_model='interpolation_extrapolation')
# Parameters for tests
temperature_2m = np.array([[267], [268]])
temperature_10m = np.array([[267], [266]])
weather_df = pd.DataFrame(np.hstack((temperature_2m,
temperature_10m)),
index=[0, 1],
columns=[np.array(['temperature',
'temperature']),
np.array([2, 10])])
# temperature_10m is closer to hub height than temperature_2m
temp_exp = pd.Series(data=[266.415, 265.415], name=10)
assert_series_equal(test_mc.temperature_hub(weather_df), temp_exp)
temp_exp = pd.Series(data=[267.0, 243.5])
assert_series_equal(test_mc_2.temperature_hub(weather_df), temp_exp)
# change heights of temperatures so that old temperature_2m is now used
weather_df.columns = [np.array(['temperature', 'temperature']),
np.array([10, 200])]
temp_exp = pd.Series(data=[266.415, 267.415], name=10)
assert_series_equal(test_mc.temperature_hub(weather_df), temp_exp)
temp_exp =
|
pd.Series(data=[267.0, 267.052632])
|
pandas.Series
|
"""Visualizes burst data."""
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
def to_pandas(ebursts, offsets, svo, unit='s'):
"""Exports burst and offset data to dataframes for a single term.
ebursts is an edgebust dict from the SVO object
offsets is an offsets dict from the SVO object
"""
svos = " | ".join(svo)
bdf = pd.DataFrame(ebursts)
bdf[1] = pd.to_datetime(bdf[1], unit=unit)
bdf[2] = pd.to_datetime(bdf[2], unit=unit)
bdf.columns = ['level', 'start', 'end']
bdf['svo'] = svos
odf = pd.DataFrame()
i = pd.to_datetime(offsets, unit='s')
odf['Date'], odf['Year'], odf['Month'], odf[
'Day'] = i.date, i.year, i.month, i.day
odf = odf.set_index(i)
odf['svo'] = svos
return bdf, odf
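# --- Illustrative usage sketch (values invented, not from real burst data) -----------
# Each ebursts row is (level, start_epoch_s, end_epoch_s); offsets are raw epoch
# timestamps in seconds; svo is the subject-verb-object triple being labelled.
def _demo_to_pandas():
    ebursts = [(1, 1577836800, 1577923200), (2, 1577850000, 1577900000)]
    offsets = [1577840000, 1577841000, 1577920000]
    return to_pandas(ebursts, offsets, svo=("cat", "chases", "mouse"))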
def plot_bursts(odf,
bdf,
lowest_level=0,
title=True,
daterange=None,
xrangeoffsets=3,
s=None,
gamma=None):
"""Plots burst and offset data.
odf = an offsets dataframe
bdf = an edgeburst dataframe
lowest_level = subset the burst dataframe with bursts greater than or equal to the specified level
daterange = a tuple with two elements: a start date and end date as *strings*. format is 'year-month-day'
xrangeoffsets = the number of days to add before and after the min and max x dates
"""
svo_title = str(set(bdf['svo']).pop())
fig, (axa, axb) = plt.subplots(2, sharey=False, sharex=True)
fig.set_figwidth(10)
fig.set_figheight(6)
formatter = mdates.DateFormatter("%b %d\n%Y")
axb.xaxis.set_major_formatter(formatter)
# offsets plot
day_freq = odf.resample('D').size()
axa.plot(day_freq, color='#32363A')
axa.xaxis.set_major_formatter(formatter)
axa.xaxis_date()
axa.tick_params(axis='both', which='both', length=0)
axa.set_ylabel('Daily offsets')
if daterange:
axa.set_xlim(pd.Timestamp(daterange[0]), pd.Timestamp(daterange[1]))
# bursts plot
days = [day_freq.index[0]]
levels = [0]
for i in range(1, len(day_freq.index)):
period_start = odf.resample('D').size().index[i - 1]
period_end = odf.resample('D').size().index[i]
max_burst = set()
days.append(period_end)
for j in range(len(bdf)):
burst_start = bdf['start'][j]
burst_end = bdf['end'][j]
level = bdf['level'][j]
if burst_end < period_start or period_end < burst_start :
pass
else:
max_burst.add(level)
levels.append(max(max_burst))
finaldf = pd.DataFrame({"start": days, "level": levels})
if lowest_level > 0:
bdf = bdf[bdf['level'] >= lowest_level]
xmin = min(bdf['start'])
xmax = max(bdf['start'])
if xmin == xmax:
raise Exception("There must be at least two bursts at or above the specified level. Try reducing the `lowest_level` parameter.")
daterange = ((xmin +
|
pd.DateOffset(days=2)
|
pandas.DateOffset
|
"""pytest unit tests for the feature_grouper module"""
import numpy as np
import pandas as pd
from scipy.linalg import cholesky
from scipy.stats import norm
import feature_grouper
def get_test_features(covariance_matrix, num_samples):
"""
    Generate num_samples samples drawn from a multivariate normal
    distribution with the given square covariance matrix.
"""
# Ensure the covariance matrix is square
shape = covariance_matrix.shape
assert len(shape) == 2
assert shape[0] == shape[1]
c = cholesky(covariance_matrix, lower=True)
xr = np.random.RandomState(11).normal(size=(shape[0], num_samples))
X = np.dot(c, xr)
return X.T
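# --- Illustrative sanity sketch (not one of the tests) --------------------------------
# Draws correlated samples from an invented 2x2 covariance matrix using the same
# Cholesky construction as above and returns the empirical correlation, which should
# sit close to the specified +0.8.
def _cholesky_demo():
    cov2 = np.array([[1.0, 0.8], [0.8, 1.0]])
    samples = get_test_features(cov2, 500)
    return np.corrcoef(samples.T)[0, 1]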
def test_version():
assert feature_grouper.__version__ == "0.1.0"
def test_cluster():
"""Test that the function finds expected clusters given example data"""
# Features 1 and 2 have correlation of 0.6, both are negatively
# correlated with Feature 0
cov = np.array([[3.4, -2.75, -2.0], [-2.75, 5.5, 1.5], [-2.0, 1.5, 1.25]])
features = get_test_features(cov, 30)
clusters = feature_grouper.cluster(features, 0.1)
assert np.array_equal(clusters, np.array([1, 0, 0]))
clusters = feature_grouper.cluster(features, 0.5)
assert np.array_equal(clusters, np.array([1, 0, 0]))
clusters = feature_grouper.cluster(features, 0.7)
assert np.array_equal(clusters, np.array([2, 0, 1]))
def test_make_loadings():
"""Test that the expected loading matrix is given from the example data"""
cov = np.array([[3.4, -2.75, -2.0], [-2.75, 5.5, 1.5], [-2.0, 1.5, 1.25]])
features = get_test_features(cov, 30)
threshold = 0.5
clusters = feature_grouper.cluster(features, threshold)
load_matrix = feature_grouper.make_loadings(clusters, threshold)
expected = np.array([[0.0, 0.70710678, 0.70710678], [1.0, 0.0, 0.0]])
assert np.allclose(load_matrix, expected, rtol=0.0001)
def test_fit_X():
"""Test that FeatureGrouper.fit() results in the correct loading matrix."""
cov = np.array([[3.4, -2.75, -2.0], [-2.75, 5.5, 1.5], [-2.0, 1.5, 1.25]])
features = get_test_features(cov, 30)
threshold = 0.5
fg = feature_grouper.FeatureGrouper(0.5)
fg.fit(features)
expected = np.array([[0.0, 0.70710678, 0.70710678], [1.0, 0.0, 0.0]])
assert np.allclose(fg.components_, expected, rtol=0.0001)
def test_fit_transform_X():
cov = np.array([[3.4, -2.75, -2.0], [-2.75, 5.5, 1.5], [-2.0, 1.5, 1.25]])
features = get_test_features(cov, 10)
threshold = 0.5
fg = feature_grouper.FeatureGrouper(0.5)
transformed = fg.fit_transform(features)
expected = np.array(
[
[-4.07622073, 3.22583515],
[-0.1235076, -0.52749254],
[1.86870475, -0.89349396],
[5.81390654, -4.89247768],
[-1.47844261, -0.0152761],
[-0.78533797, -0.58937111],
[2.02293676, -0.98949565],
[1.24869368, 0.58157378],
[-0.838455, 0.77637916],
[0.99094234, -1.96487481],
]
)
assert np.allclose(expected, transformed, rtol=0.0001)
def test_inverse_transform():
cov = np.array([[3.4, -2.75, -2.0], [-2.75, 5.5, 1.5], [-2.0, 1.5, 1.25]])
features = get_test_features(cov, 10)
threshold = 0.5
fg = feature_grouper.FeatureGrouper(0.5)
transformed = fg.fit_transform(features)
inversed = fg.inverse_transform(transformed)
transformed2 = fg.transform(inversed)
assert np.allclose(transformed, transformed2, rtol=0.0001)
def test_pandas():
"""Test that the transformations work with a pandas.DataFrame input"""
cov = np.array([[3.4, -2.75, -2.0], [-2.75, 5.5, 1.5], [-2.0, 1.5, 1.25]])
colnames = ["foo", "bar", "baz"]
df = pd.DataFrame(get_test_features(cov, 30), columns=colnames)
fg = feature_grouper.FeatureGrouper(0.5)
df_comp =
|
pd.DataFrame(fg.components_, columns=colnames, index=["C1", "C2"])
|
pandas.DataFrame
|
import pandas as pd
import warnings
import os
import logging
import logzero
from logzero import logger
from datetime import datetime
import numpy as np
warnings.filterwarnings('ignore')
MILL_SECS_TO_SECS = 1000
class SparkLogger():
def __init__(self, filepath, txt_filename):
self.filepath = filepath
self.txt_filename = txt_filename
self.log_df = pd.read_json(self.filepath, lines=True)
def filter_df(self, event_types, log_df):
df_list = []
for event_type in event_types:
df = log_df[log_df['Event'] == event_type]
df.dropna(axis=1, how='all', inplace=True)
df_list.append(df)
return df_list
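    # Illustrative convenience method (not part of the original class). The event names
    # below are standard Spark listener event types assumed to appear in the JSON event log.
    def filter_job_and_stage_events(self):
        return self.filter_df(
            ['SparkListenerJobStart', 'SparkListenerStageCompleted'], self.log_df)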
def calculate_time_duration(self, df, column_start='Submission Time', column_end='Completion Time'):
df['Duration'] = (df[column_end] - df[column_start]) / MILL_SECS_TO_SECS
df[column_end] = df[column_end].apply(lambda x: datetime.utcfromtimestamp(x/MILL_SECS_TO_SECS).strftime('%Y-%m-%d %H:%M:%S.%f'))
df[column_start] = df[column_start].apply(lambda x: datetime.utcfromtimestamp(x/MILL_SECS_TO_SECS).strftime('%Y-%m-%d %H:%M:%S.%f'))
df[column_end] =
|
pd.to_datetime(df[column_end], format='%Y-%m-%d %H:%M:%S.%f')
|
pandas.to_datetime
|
import pytest
import mock
from freezegun import freeze_time
from datetime import date, datetime
import pandas as pd
from delphi_google_symptoms.pull import (
pull_gs_data, preprocess, format_dates_for_query, pull_gs_data_one_geolevel, get_date_range)
from delphi_google_symptoms.constants import METRICS, COMBINED_METRIC
good_input = {
"state": "test_data/small_states_daily.csv",
"county": "test_data/small_counties_daily.csv"
}
bad_input = {
"missing_cols": "test_data/bad_state_missing_cols.csv",
"invalid_fips": "test_data/bad_county_invalid_fips.csv"
}
symptom_names = ["symptom_" +
metric.replace(" ", "_") for metric in METRICS]
keep_cols = ["open_covid_region_code", "date"] + symptom_names
new_keep_cols = ["geo_id", "timestamp"] + METRICS + [COMBINED_METRIC]
class TestPullGoogleSymptoms:
@freeze_time("2021-01-05")
@mock.patch("pandas_gbq.read_gbq")
@mock.patch("delphi_google_symptoms.pull.initialize_credentials")
def test_good_file(self, mock_credentials, mock_read_gbq):
# Set up fake data.
state_data = pd.read_csv(
good_input["state"], parse_dates=["date"])[keep_cols]
county_data = pd.read_csv(
good_input["county"], parse_dates=["date"])[keep_cols]
# Mocks
mock_read_gbq.side_effect = [state_data, county_data]
mock_credentials.return_value = None
dfs = pull_gs_data("", datetime.strptime(
"20201230", "%Y%m%d"), datetime.combine(date.today(), datetime.min.time()), 0)
for level in ["county", "state"]:
df = dfs[level]
assert (
df.columns.values
== ["geo_id", "timestamp"] + METRICS + [COMBINED_METRIC]
).all()
# combined_symptoms is nan when both Anosmia and Ageusia are nan
assert sum(~df.loc[
(df[METRICS[0]].isnull())
& (df[METRICS[1]].isnull()), COMBINED_METRIC].isnull()) == 0
# combined_symptoms is not nan when either Anosmia or Ageusia isn't nan
assert sum(df.loc[
(~df[METRICS[0]].isnull())
& (df[METRICS[1]].isnull()), COMBINED_METRIC].isnull()) == 0
assert sum(df.loc[
(df[METRICS[0]].isnull())
& (~df[METRICS[1]].isnull()), COMBINED_METRIC].isnull()) == 0
def test_missing_cols(self):
df = pd.read_csv(bad_input["missing_cols"])
with pytest.raises(KeyError):
preprocess(df, "state")
def test_invalid_fips(self):
df = pd.read_csv(bad_input["invalid_fips"])
with pytest.raises(AssertionError):
preprocess(df, "county")
class TestPullHelperFuncs:
@freeze_time("2021-01-05")
def test_get_date_range_recent_export_start_date(self):
output = get_date_range(
datetime.strptime("20201230", "%Y%m%d"),
datetime.combine(date.today(), datetime.min.time()),
14
)
expected = [datetime(2020, 12, 24),
datetime(2021, 1, 5)]
assert set(output) == set(expected)
@freeze_time("2021-01-05")
def test_get_date_range(self):
output = get_date_range(
datetime.strptime("20200201", "%Y%m%d"),
datetime.combine(date.today(), datetime.min.time()),
14
)
expected = [datetime(2020, 12, 16),
datetime(2021, 1, 5)]
assert set(output) == set(expected)
def test_format_dates_for_query(self):
date_list = [datetime(2016, 12, 30), datetime(2021, 1, 5)]
output = format_dates_for_query(date_list)
expected = ["2016-12-30", "2021-01-05"]
assert output == expected
@mock.patch("pandas_gbq.read_gbq")
def test_pull_one_gs_no_dates(self, mock_read_gbq):
mock_read_gbq.return_value =
|
pd.DataFrame()
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
#The MIT License (MIT)
#
#Copyright (c) 2014 <NAME>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
'Orbitals class'
import numpy as np
import pandas as pd
from chemtools.calculators.gamessus import GamessLogParser, GamessDatParser
from chemtools.calculators.gamessreader import DictionaryFile, tri2full
class Orbitals(pd.DataFrame):
'''
A convenience class for handling GAMESS(US) orbitals.
'''
def __init__(self, *args, **kwargs):
super(Orbitals, self).__init__(*args, **kwargs)
@classmethod
def from_files(cls, name=None, logfile=None, dictfile=None, datfile=None):
'''
Initialize the `Orbitals` instance based on orbital information parsed from the `logfile`
and read from the `dictfile`.
Args:
name : str
One of `hf` or `ci`
logfile : str
Name of the GAMESS(US) log file
dictfile : str
Name of the GAMESS(US) dictionary file .F10
            datfile : str
Name of the GAMESS(US) dat file
'''
if name == 'hf':
evec_record = 15
eval_record = 17
syml_record = 255
elif name == 'ci':
evec_record = 19
eval_record = 21
syml_record = 256
elif name == 'local':
pass
else:
raise ValueError('name should be one either "hf", "ci" or "local"')
if name in ['hf', 'ci']:
# parse the number of aos and mos
glp = GamessLogParser(logfile)
nao = glp.get_number_of_aos()
nmo = glp.get_number_of_mos()
# read the relevant record from the dictfile
df = DictionaryFile(dictfile)
# read the orbitals
vector = df.read_record(evec_record)
vecs = vector[:nao*nmo].reshape((nao, nmo), order='F')
            # read the eigenvalues
evals = df.read_record(eval_record)
evals = evals[:nmo]
# read symmetry labels
symlab = df.read_record(syml_record)
symlab = symlab[:nmo]
data = dict([
('symlabels', [s.strip() for s in symlab]),
('eigenvals', evals),
('gindex', range(1, nmo + 1)),
])
elif name == 'local':
gdp = GamessDatParser(datfile)
vecs = gdp.get_orbitals(name)
nao, nmo = vecs.shape
data = dict([('gindex', range(1, nmo + 1)),])
dataframe = cls(data=data)
dataframe.nao = nao
dataframe.nmo = nmo
dataframe.name = name
dataframe.logfile = logfile
dataframe.dictfile = dictfile
dataframe.datfile = datfile
dataframe.coeffs =
|
pd.DataFrame(data=vecs)
|
pandas.DataFrame
|
from pymoab import core, types
from pymoab.rng import Range
import dagmc_stats.DagmcFile as df
import dagmc_stats.DagmcQuery as dq
import pandas as pd
import numpy as np
import warnings
import pytest
test_env = {'three_vols': 'tests/3vols.h5m',
'single_cube': 'tests/single-cube.h5m', 'pyramid': 'tests/pyramid.h5m'}
def test_pandas_data_frame():
"""Tests the initialization of pandas data frames
"""
single_cube = df.DagmcFile(test_env['single_cube'])
single_cube_query = dq.DagmcQuery(single_cube)
exp_vert_data = pd.DataFrame()
assert(single_cube_query._vert_data.equals(exp_vert_data))
exp_tri_data =
|
pd.DataFrame()
|
pandas.DataFrame
|
import matplotlib.pyplot as plt
import re
import pandas as pd
import matplotlib
matplotlib.use('TkAgg')
def exercici_1(path):
"""Mostra el nombre de occurrències de la paraula Huffington Post i
urls acabades en .pdf o .pdf/.
Parameters:
path: Ruta del fitxer font que conté les dades de covid_approval_polls.csv.
"""
with open(path, "r") as file1:
file1 = file1.read()
    # Count the number of occurrences of Huffington Post
counter1 = file1.count('Huffington Post')
    # Count the number of occurrences of URLs ending in .pdf or .pdf/
counter2 = re.findall(r"(https?://.*\.pdf/?$)", file1, flags=re.MULTILINE)
print('The pattern Huffington_Post appears', counter1, "times")
print('The pattern url_pdf appears', len(counter2), "times")
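# Quick self-contained check (not part of the exercise) of the PDF-URL pattern used in
# exercici_1; the sample line is invented.
def _demo_pdf_pattern():
    sample = "see https://example.org/report.pdf"
    return re.findall(r"(https?://.*\.pdf/?$)", sample, flags=re.MULTILINE)  # one match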
def exercici_2(path, path2, path3):
"""Retorna el df approval_polls, concern_polls, pollster_ratings filtrat amb gent no banejada
i en mostra les seves dimensions.
Parameters:
path: Ruta del fitxer font que conté les dades de covid_approval_polls.csv.
path2: Ruta del fitxer font que conté les dades de covid_concern_polls.csv
path3: Ruta del fitxer font que conté les dades de pollster_ratings.xlsx.
"""
    # Create the dataframes from the source files
df1 =
|
pd.read_csv(path)
|
pandas.read_csv
|
from __future__ import division
"""Class definitions."""
import os, warnings
from os.path import join
import numpy as np
import pandas as pd
from pandas import DataFrame, Series, Index
import six
import abc
from copy import deepcopy
from functools import reduce
from nltools.data import Adjacency, design_matrix
from nltools.stats import (downsample,
upsample,
transform_pairwise)
from nltools.utils import (set_decomposition_algorithm)
from sklearn.metrics.pairwise import pairwise_distances, cosine_similarity
from sklearn.utils import check_random_state
from feat.utils import read_feat, read_affectiva, read_facet, read_openface, wavelet, calc_hist_auc, load_h5, get_resource_path
from feat.plotting import plot_face
from nilearn.signal import clean
from scipy.signal import convolve
class FexSeries(Series):
"""
    This is a sub-class of pandas Series. It has no additional methods
    of its own; it exists to retain normal slicing functionality for the
    Fex class, i.e. how slicing is typically handled in pandas.
All methods should be called on Fex below.
"""
_metadata = ['au_columns', 'emotion_columns', 'facebox_columns', 'landmark_columns', 'facepose_columns', 'gaze_columns', 'time_columns', 'design_columns', 'fex_columns', 'filename', 'sampling_freq', 'features', 'sessions', 'detector']
def __init__(self, *args, **kwargs):
self.sampling_freq = kwargs.pop('sampling_freq', None)
self.sessions = kwargs.pop('sessions', None)
super().__init__(*args, **kwargs)
@property
def _constructor(self):
return FexSeries
@property
def _constructor_expanddim(self):
return Fex
def __finalize__(self, other, method=None, **kwargs):
""" propagate metadata from other to self """
# NOTE: backported from pandas master (upcoming v0.13)
for name in self._metadata:
object.__setattr__(self, name, getattr(other, name, None))
return self
class Fex(DataFrame):
"""Fex is a class to represent facial expression (Fex) data. It is essentially
an enhanced pandas df, with extra attributes and methods. Methods
    always return a new Fex instance.
Args:
filename: (str, optional) path to file
detector: (str, optional) name of software used to extract Fex. (Feat, FACET, OpenFace, or Affectiva)
sampling_freq (float, optional): sampling rate of each row in Hz;
defaults to None
        features (pd.DataFrame, optional): features that correspond to each
Fex row
sessions: Unique values indicating rows associated with a specific
session (e.g., trial, subject, etc). Must be a 1D array of
n_samples elements; defaults to None
"""
# __metaclass__ = abc.ABCMeta
# Need to specify attributes for pandas.
_metadata = ['au_columns', 'emotion_columns', 'facebox_columns', 'landmark_columns', 'facepose_columns', 'gaze_columns', 'time_columns', 'design_columns', 'fex_columns', 'filename', 'sampling_freq', 'features', 'sessions', 'detector']
def __finalize__(self, other, method=None, **kwargs):
"""propagate metadata from other to self """
self = super().__finalize__(other, method=method, **kwargs)
# merge operation: using metadata of the left object
if method == "merge":
for name in self._metadata:
print("self", name, self.au_columns, other.left.au_columns)
object.__setattr__(self, name, getattr(other.left, name, None))
# concat operation: using metadata of the first object
elif method == "concat":
for name in self._metadata:
object.__setattr__(self, name, getattr(other.objs[0], name, None))
return self
def __init__(self, *args, **kwargs):
### Columns ###
self.au_columns = kwargs.pop('au_columns', None)
self.emotion_columns = kwargs.pop('emotion_columns', None)
self.facebox_columns = kwargs.pop('facebox_columns', None)
self.landmark_columns = kwargs.pop('landmark_columns', None)
self.facepose_columns = kwargs.pop('facepose_columns', None)
self.gaze_columns = kwargs.pop('gaze_columns', None)
self.time_columns = kwargs.pop('time_columns', None)
self.design_columns = kwargs.pop('design_columns', None)
### Meta data ###
self.filename = kwargs.pop('filename', None)
self.sampling_freq = kwargs.pop('sampling_freq', None)
self.detector = kwargs.pop('detector', None)
self.features = kwargs.pop('features', None)
self.sessions = kwargs.pop('sessions', None)
super().__init__(*args, **kwargs)
if self.sessions is not None:
if not len(self.sessions) == len(self):
raise ValueError('Make sure sessions is same length as data.')
self.sessions = np.array(self.sessions)
# if (self.fex_columns is None) and (not self._metadata):
# try:
# self.fex_columns = self._metadata
# except:
# print('Failed to import _metadata to fex_columns')
# Set _metadata attributes on series: Kludgy solution
for k in self:
self[k].sampling_freq = self.sampling_freq
self[k].sessions = self.sessions
@property
def _constructor(self):
return Fex
@property
def _constructor_sliced(self):
return FexSeries
def _ixs(self, i, axis=0):
""" Override indexing to ensure Fex._metadata is propogated correctly
when integer indexing
i : int, slice, or sequence of integers
axis : int
"""
result = super()._ixs(i, axis=axis)
# Override columns
if axis == 1:
"""
Notes
-----
If slice passed, the resulting data will be a view
"""
label = self.columns[i]
if isinstance(i, slice):
# need to return view
lab_slice = slice(label[0], label[-1])
return self.loc[:, lab_slice]
else:
if isinstance(label, Index):
return self._take(i, axis=1, convert=True)
index_len = len(self.index)
# if the values returned are not the same length
# as the index (iow a not found value), iget returns
# a 0-len ndarray. This is effectively catching
# a numpy error (as numpy should really raise)
values = self._data.iget(i)
if index_len and not len(values):
values = np.array([np.nan] * index_len, dtype=object)
result = self._constructor_sliced(
values, index=self.index, name=label, fastpath=True,
sampling_freq=self.sampling_freq, sessions=self.sessions)
# this is a cached value, mark it so
result._set_as_cached(label, self)
return result
def aus(self):
"""Returns the Action Units data
Returns:
DataFrame: Action Units data
"""
return self[self.au_columns]
def emotions(self):
"""Returns the emotion data
Returns:
DataFrame: emotion data
"""
return self[self.emotion_columns]
def landmark(self):
"""Returns the landmark data
Returns:
DataFrame: landmark data
"""
return self[self.landmark_columns]
def landmark_x(self):
"""Returns the x landmarks.
Returns:
DataFrame: x landmarks.
"""
######## TODO: NATSORT columns before returning #######
x_cols = [col for col in self.landmark_columns if 'x' in col]
return self[x_cols]
def landmark_y(self):
"""Returns the y landmarks.
Returns:
DataFrame: y landmarks.
"""
y_cols = [col for col in self.landmark_columns if 'y' in col]
return self[y_cols]
def facebox(self):
"""Returns the facebox data
Returns:
DataFrame: facebox data
"""
return self[self.facebox_columns]
def time(self):
"""Returns the time data
Returns:
DataFrame: time data
"""
return self[self.time_columns]
def design(self):
"""Returns the design data
Returns:
DataFrame: time data
"""
return self[self.design_columns]
def read_file(self, *args, **kwargs):
"""Loads file into FEX class
Returns:
DataFrame: Fex class
"""
if self.detector=='FACET':
return self.read_facet(self.filename)
elif self.detector=='OpenFace':
return self.read_openface(self.filename)
elif self.detector=='Affectiva':
return self.read_affectiva(self.filename)
elif self.detector=='Feat':
return self.read_feat(self.filename)
else:
print("Must specifiy which detector [Feat, FACET, OpenFace, or Affectiva]")
def info(self):
"""Print class meta data.
"""
attr_list = []
for name in self._metadata:
attr_list.append(name +": "+ str(getattr(self, name, None))+'\n')
print(f"{self.__class__}\n" + "".join(attr_list))
### Class Methods ###
def read_feat(self, filename=None, *args, **kwargs):
# Check if filename exists in metadata.
if not filename:
try:
filename = self.filename
except:
print("filename must be specified.")
result = read_feat(filename, *args, **kwargs)
return result
def read_facet(self, filename=None, *args, **kwargs):
# Check if filename exists in metadata.
if not filename:
try:
filename = self.filename
except:
print("filename must be specified.")
result = read_facet(filename, *args, **kwargs)
for name in self._metadata:
attr_value = getattr(self, name, None)
            if attr_value and getattr(result, name, None) is None:
setattr(result, name, attr_value)
return result
def read_openface(self, filename=None, *args, **kwargs):
if not filename:
try:
filename = self.filename
except:
print("filename must be specified.")
result = read_openface(filename, *args, **kwargs)
for name in self._metadata:
attr_value = getattr(self, name, None)
            if attr_value and getattr(result, name, None) is None:
setattr(result, name, attr_value)
return result
def read_affectiva(self, filename=None, *args, **kwargs):
if not filename:
try:
filename = self.filename
except:
print("filename must be specified.")
result = read_affectiva(filename, *args, **kwargs)
for name in self._metadata:
attr_value = getattr(self, name, None)
            if attr_value and getattr(result, name, None) is None:
setattr(result, name, attr_value)
return result
def itersessions(self):
''' Iterate over Fex sessions as (session, series) pairs.
Returns:
it: a generator that iterates over the sessions of the fex instance
'''
for x in np.unique(self.sessions):
yield x, self.loc[self.sessions==x, :]
def append(self, data, session_id=None, axis=0):
''' Append a new Fex object to an existing object
Args:
data: (Fex) Fex instance to append
session_id: session label
axis: ([0,1]) Axis to append. Rows=0, Cols=1
Returns:
Fex instance
'''
if not isinstance(data, self.__class__):
raise ValueError('Make sure data is a Fex instance.')
if self.empty:
out = data.copy()
if session_id is not None:
out.sessions = np.repeat(session_id, len(data))
else:
if self.sampling_freq != data.sampling_freq:
raise ValueError('Make sure Fex objects have the same '
'sampling frequency')
if axis==0:
out = self.__class__(pd.concat([self, data],
axis=axis,
ignore_index=True),
sampling_freq=self.sampling_freq)
if session_id is not None:
out.sessions = np.hstack([self.sessions, np.repeat(session_id, len(data))])
if self.features is not None:
if data.features is not None:
if self.features.shape[1]==data.features.shape[1]:
out.features = self.features.append(data.features, ignore_index=True)
else:
raise ValueError('Different number of features in new dataset.')
else:
out.features = self.features
elif data.features is not None:
out = data.features
elif axis==1:
out = self.__class__(pd.concat([self, data], axis=axis),
sampling_freq=self.sampling_freq)
if self.sessions is not None:
if data.sessions is not None:
if np.array_equal(self.sessions, data.sessions):
out.sessions = self.sessions
else:
raise ValueError('Both sessions must be identical.')
else:
out.sessions = self.sessions
elif data.sessions is not None:
out.sessions = data.sessions
if self.features is not None:
out.features = self.features
if data.features is not None:
out.features.append(data.features, axis=1, ignore_index=True)
elif data.features is not None:
out.features = data.features
else:
raise ValueError('Axis must be 1 or 0.')
return out
def regress(self):
NotImplemented
def ttest(self, threshold_dict=None):
NotImplemented
def predict(self, *args, **kwargs):
NotImplemented
def downsample(self, target, **kwargs):
""" Downsample Fex columns. Relies on nltools.stats.downsample,
but ensures that returned object is a Fex object.
Args:
target(float): downsampling target, typically in samples not
seconds
kwargs: additional inputs to nltools.stats.downsample
"""
df_ds = downsample(self, sampling_freq=self.sampling_freq,
target=target, **kwargs)
if self.features is not None:
ds_features = downsample(self.features,
sampling_freq=self.sampling_freq,
target=target, **kwargs)
else:
ds_features = self.features
return self.__class__(df_ds, sampling_freq=target, features=ds_features)
def upsample(self, target, target_type='hz', **kwargs):
""" Upsample Fex columns. Relies on nltools.stats.upsample,
but ensures that returned object is a Fex object.
Args:
target(float): upsampling target, default 'hz' (also 'samples',
'seconds')
kwargs: additional inputs to nltools.stats.upsample
"""
df_us = upsample(self, sampling_freq=self.sampling_freq,
target=target, target_type=target_type, **kwargs)
if self.features is not None:
us_features = upsample(self.features,
sampling_freq=self.sampling_freq,
target=target, target_type=target_type,
**kwargs)
else:
us_features = self.features
return self.__class__(df_us, sampling_freq=target, features=us_features)
def distance(self, method='euclidean', **kwargs):
""" Calculate distance between rows within a Fex() instance.
Args:
method: type of distance metric (can use any scikit learn or
sciypy metric)
Returns:
dist: Outputs a 2D distance matrix.
"""
return Adjacency(pairwise_distances(self, metric=method, **kwargs),
matrix_type='Distance')
def rectification(self, std=3):
""" Removes time points when the face position moved
more than N standard deviations from the mean.
Args:
std (default 3): standard deviation from mean to remove outlier face locations
Returns:
data: cleaned FEX object
"""
#### TODO: CHECK IF FACET OR FIND WAY TO DO WITH OTHER ONES TOO #####
if self.facebox_columns and self.au_columns and self.emotion_columns:
cleaned = deepcopy(self)
face_columns = self.facebox_columns
x_m = self.FaceRectX.mean()
x_std = self.FaceRectX.std()
y_m = self.FaceRectY.mean()
y_std = self.FaceRectY.std()
x_bool = (self.FaceRectX>std*x_std+x_m) | (self.FaceRectX<x_m-std*x_std)
y_bool = (self.FaceRectY>std*y_std+y_m) | (self.FaceRectY<y_m-std*y_std)
xy_bool = x_bool | y_bool
cleaned.loc[xy_bool, face_columns + self.au_columns + self.emotion_columns] = np.nan
return cleaned
else:
raise ValueError("Facebox columns need to be defined.")
def baseline(self, baseline='median', normalize=None,
ignore_sessions=False):
''' Reference a Fex object to a baseline.
Args:
            baseline: {'median', 'mean', 'begin', FexSeries instance}. Will subtract baseline
from Fex object (e.g., mean, median). If passing a Fex
object, it will treat that as the baseline.
normalize: (str). Can normalize results of baseline.
Values can be [None, 'db','pct']; default None.
ignore_sessions: (bool) If True, will ignore Fex.sessions
information. Otherwise, method will be applied
separately to each unique session.
Returns:
Fex object
'''
if self.sessions is None or ignore_sessions:
out = self.copy()
            if baseline == 'median':
                baseline = out.median()
            elif baseline == 'mean':
                baseline = out.mean()
            elif baseline == 'begin':
                baseline = out.iloc[0,:]
elif isinstance(baseline, (Series, FexSeries)):
baseline = baseline
elif isinstance(baseline, (Fex, DataFrame)):
                raise ValueError('Must pass in a FexSeries, not a Fex or DataFrame instance.')
else:
raise ValueError('%s is not implemented please use {mean, median, Fex}' % baseline)
            if normalize == 'db':
                out = 10*np.log10(out - baseline)/baseline
            elif normalize == 'pct':
                out = 100*(out - baseline)/baseline
            else:
                out = out - baseline
else:
out = self.__class__(sampling_freq=self.sampling_freq)
for k,v in self.itersessions():
                if baseline == 'median':
                    baseline = v.median()
                elif baseline == 'mean':
                    baseline = v.mean()
                elif baseline == 'begin':
                    baseline = v.iloc[0,:]
elif isinstance(baseline, (Series, FexSeries)):
baseline = baseline
elif isinstance(baseline, (Fex, DataFrame)):
                    raise ValueError('Must pass in a FexSeries, not a Fex or DataFrame instance.')
else:
raise ValueError('%s is not implemented please use {mean, median, Fex}' % baseline)
                if normalize == 'db':
                    out = out.append(10*np.log10(v-baseline)/baseline, session_id=k)
                elif normalize == 'pct':
                    out = out.append(100*(v-baseline)/baseline, session_id=k)
                else:
                    out = out.append(v-baseline, session_id=k)
return self.__class__(out, sampling_freq=self.sampling_freq,
features=self.features, sessions=self.sessions)
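    # Worked example for the normalize options above (illustrative numbers): with a
    # baseline of 5, a value of 6 under normalize='pct' becomes 100*(6-5)/5 = 20
    # (percent signal change), while normalize=None simply yields 6 - 5 = 1.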
def clean(self, detrend=True, standardize=True, confounds=None,
low_pass=None, high_pass=None, ensure_finite=False,
ignore_sessions=False, *args, **kwargs):
""" Clean Time Series signal
This function wraps nilearn functionality and can filter, denoise,
detrend, etc.
See http://nilearn.github.io/modules/generated/nilearn.signal.clean.html
This function can do several things on the input signals, in
the following order:
- detrend
- standardize
- remove confounds
- low- and high-pass filter
If Fex.sessions is not None, sessions will be cleaned separately.
Args:
confounds: (numpy.ndarray, str or list of Confounds timeseries)
Shape must be (instant number, confound number),
or just (instant number,). The number of time
instants in signals and confounds must be identical
(i.e. signals.shape[0] == confounds.shape[0]). If a
string is provided, it is assumed to be the name of
a csv file containing signals as columns, with an
optional one-line header. If a list is provided,
all confounds are removed from the input signal,
as if all were in the same array.
low_pass: (float) low pass cutoff frequencies in Hz.
high_pass: (float) high pass cutoff frequencies in Hz.
detrend: (bool) If detrending should be applied on timeseries
(before confound removal)
standardize: (bool) If True, returned signals are set to unit
variance.
ensure_finite: (bool) If True, the non-finite values
(NANs and infs) found in the data will be
replaced by zeros.
ignore_sessions: (bool) If True, will ignore Fex.sessions
information. Otherwise, method will be applied
separately to each unique session.
Returns:
cleaned Fex instance
"""
if self.sessions is not None:
if ignore_sessions:
sessions = None
else:
sessions = self.sessions
else:
sessions = None
return self.__class__(pd.DataFrame(clean(self.values, detrend=detrend,
standardize=standardize,
confounds=confounds,
low_pass=low_pass,
high_pass=high_pass,
ensure_finite=ensure_finite,
t_r=1./np.float(self.sampling_freq),
sessions=sessions,
*args, **kwargs),
columns=self.columns),
sampling_freq=self.sampling_freq,
features=self.features,
sessions=self.sessions)
def decompose(self, algorithm='pca', axis=1, n_components=None,
*args, **kwargs):
''' Decompose Fex instance
Args:
algorithm: (str) Algorithm to perform decomposition
types=['pca','ica','nnmf','fa']
axis: dimension to decompose [0,1]
n_components: (int) number of components. If None then retain
as many as possible.
Returns:
output: a dictionary of decomposition parameters
'''
out = {}
out['decomposition_object'] = set_decomposition_algorithm(
algorithm=algorithm,
n_components=n_components,
*args, **kwargs)
if axis == 0:
    out['decomposition_object'].fit(self.T)
    transformed = out['decomposition_object'].transform(self.T)
    # Derive component names from the fitted model so n_components=None also works.
    com_names = ['c%s' % str(x+1) for x in range(transformed.shape[1])]
    out['components'] = self.__class__(pd.DataFrame(transformed, index=self.columns, columns=com_names), sampling_freq=None)
    out['weights'] = self.__class__(pd.DataFrame(out['decomposition_object'].components_.T,
                                                 index=self.index, columns=com_names),
                                    sampling_freq=self.sampling_freq,
                                    features=self.features,
                                    sessions=self.sessions)
elif axis == 1:
    out['decomposition_object'].fit(self)
    transformed = out['decomposition_object'].transform(self)
    com_names = ['c%s' % str(x+1) for x in range(transformed.shape[1])]
    out['components'] = self.__class__(pd.DataFrame(transformed, columns=com_names),
                                       sampling_freq=self.sampling_freq,
                                       features=self.features,
                                       sessions=self.sessions)
    out['weights'] = self.__class__(pd.DataFrame(out['decomposition_object'].components_, index=com_names, columns=self.columns), sampling_freq=None).T
else:
    raise ValueError('axis must be 0 or 1')
return out
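# A minimal usage sketch (not from the original source): decomposing the feature
# columns of a hypothetical Fex instance `fex` into 5 PCA components. The returned
# dict holds the fitted sklearn object plus component/weight frames.
#
#     out = fex.decompose(algorithm='pca', axis=1, n_components=5)
#     print(out['components'].shape)   # (n_samples, 5)
#     print(out['weights'].shape)      # (n_features, 5) after the transpose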
def extract_mean(self, ignore_sessions=False, *args, **kwargs):
""" Extract mean of each feature
Args:
ignore_sessions: (bool) ignore sessions or extract separately
by sessions if available.
Returns:
Fex: mean values for each feature
"""
if self.sessions is None or ignore_sessions:
feats = pd.DataFrame(self.mean()).T
feats.columns = 'mean_' + feats.columns
return self.__class__(feats, sampling_freq=self.sampling_freq)
else:
feats = pd.DataFrame()
for k,v in self.itersessions():
feats = feats.append(pd.Series(v.mean(), name=k))
feats.columns = 'mean_' + feats.columns
return self.__class__(feats, sampling_freq=self.sampling_freq,
sessions=np.unique(self.sessions))
def extract_min(self, ignore_sessions=False, *args, **kwargs):
""" Extract minimum of each feature
Args:
ignore_sessions: (bool) ignore sessions or extract separately
by sessions if available.
Returns:
Fex: (Fex) minimum values for each feature
"""
if self.sessions is None or ignore_sessions:
feats = pd.DataFrame(self.min()).T
feats.columns = 'min_' + feats.columns
return self.__class__(feats, sampling_freq=self.sampling_freq)
else:
feats = pd.DataFrame()
for k,v in self.itersessions():
feats = feats.append(pd.Series(v.min(), name=k))
feats.columns = 'min_' + feats.columns
return self.__class__(feats, sampling_freq=self.sampling_freq,
sessions=np.unique(self.sessions))
def extract_max(self, ignore_sessions=False, *args, **kwargs):
""" Extract maximum of each feature
Args:
ignore_sessions: (bool) ignore sessions or extract separately
by sessions if available.
Returns:
fex: (Fex) maximum values for each feature
"""
if self.sessions is None or ignore_sessions:
feats = pd.DataFrame(self.max()).T
feats.columns = 'max_' + feats.columns
return self.__class__(feats, sampling_freq=self.sampling_freq)
else:
feats =
|
pd.DataFrame()
|
pandas.DataFrame
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""
Helper module for visualizations
"""
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.metrics import (
precision_recall_curve,
average_precision_score,
roc_curve,
auc,
)
from sklearn.preprocessing import label_binarize
from torch import Tensor
from typing import Callable
def plot_thresholds(
metric_function: Callable[[Tensor, Tensor, float], Tensor],
y_pred: Tensor,
y_true: Tensor,
samples: int = 21,
figsize: tuple = (12, 6),
) -> None:
""" Plot the evaluation metric of the model at different thresholds.
By default (samples=21) the metric is evaluated at 0.05 increments of the
threshold between 0 and 1.
Args:
metric_function: The metric function
y_pred: predicted probabilities.
y_true: True class indices.
samples: Number of threshold samples
figsize: Figure size (w, h)
"""
metric_name = metric_function.__name__
metrics = []
for threshold in np.linspace(0, 1, samples):
metric = metric_function(y_pred, y_true, threshold=threshold)
metrics.append(metric)
ax =
|
pd.DataFrame(metrics)
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import csv
import os
import linreg # src/linreg.py
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold, train_test_split
from sklearn.metrics import r2_score
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import Normalizer
from sklearn.cluster import KMeans
def pca(i, X_train, Y_train, component):
'''
Fit a PCA model with the given number of components and return it together with
the encoded (transformed) training data
@params:
    i: Number of components
    X_train: Training data to fit and transform
    Y_train, component: Unused here; kept so the call signature matches the rest of the script
'''
# Define the model and fit the trained embedding
pca = PCA(n_components=i)
encoded_X_train = pca.fit_transform(X_train)
return pca, encoded_X_train
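# A minimal sketch of how this helper is used elsewhere in this script (variable
# names are illustrative): fit a 10-component PCA on the training split, then
# encode a held-out split with the same fitted model.
#
#     model, encoded_train = pca(i=10, X_train=X_train, Y_train=Y_train, component='co')
#     encoded_test = model.transform(X_test)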
def pca_train_test(dims, x, y, folds, component):
'''
Run PCA using k-fold cross validation strategy
@params:
dims: Number of dimensions to train
x, y: Independent/dependent data used for k-fold split
folds: Number of folds to iterate through
component: The name of the gas/particulate
'''
# Metrics file
file_name = f'/home/nick/github_repos/Pollution-Autoencoders/data/model_metrics/pca/{component}_metrics'
# Headers
test_metrics_list = ['fold', 'dim', 'variance', 'r2']
# Write header
with open(file_name,'w', newline='') as f:
writer = csv.writer(f)
writer.writerow(test_metrics_list)
f.close()
# k-fold cross validation; any cross-validation technique can be substituted here
kfold = KFold(n_splits=folds, shuffle=True)
# Number of features to compare
num_of_comp=list(range(1,dims+1))
fold_count=0
print(f'---------- Beginning PCA for gas {component} ----------')
# Loop through train/test data and save the best data with highest R2 scores
for training_index, test_index in kfold.split(x):
# Split X and Y train and test data
X_train, X_test = x[training_index, :], x[test_index, :]
Y_train, Y_test = y[training_index], y[test_index]
fold_count+=1
# Train PCA and save a list of metrics
for i in num_of_comp:
# Train pca model
model, encoded_train_data = pca(
i=i,
X_train=X_train,
Y_train=Y_train,
component=component)
# Create the test embedding
encoded_test_data = model.transform(X_test)
# Perform linear regression
variance, r2 = linreg.regression(encoded_train_data, encoded_test_data, Y_train, Y_test)
# Print result
print(f'fold {fold_count} || dim {i} || variance {variance} || r2 {r2} \n')
# Update test metrics file
test_metrics_list = [fold_count, i, variance, r2]
with open(file_name,'a', newline='') as f:
writer = csv.writer(f)
writer.writerow(test_metrics_list)
f.close()
def interpolate(dims, component):
''' Function to derive the best and worst fold for a given dimension '''
df = pd.read_csv(f'/home/nick/github_repos/Pollution-Autoencoders/data/model_metrics/pca/{component}_metrics')
# Outputs
best_metrics = f'/home/nick/github_repos/Pollution-Autoencoders/data/model_metrics/pca/best_worst/{component}_best_metrics.csv'
worst_metrics = f'/home/nick/github_repos/Pollution-Autoencoders/data/model_metrics/pca/best_worst/{component}_worst_metrics.csv'
# Lists to write to file
best_list=worst_list=['dim', 'variance', 'r2']
# Debug
#print('dim 1 ', df['variance'][0])
#print('dim 190 ', df['variance'][189])
# Write headers for best and worst metrics
with open(best_metrics,'w', newline='') as f:
writer = csv.writer(f)
writer.writerow(best_list)
f.close()
with open(worst_metrics,'w', newline='') as f:
writer = csv.writer(f)
writer.writerow(worst_list)
f.close()
# Fold range that describes indexes of all dimensions
for i in range(dims):
# Reset best and worst variance and index for every dim
best_var, worst_var = 0, 100
best_idx=worst_idx=0
# Retrieve a dict of each fold's variance for a given dimension
fold_var = { i : df['variance'][i], i+dims : df['variance'][i+dims], i+(dims*2) : df['variance'][i+(dims*2)],
i+(dims*3) : df['variance'][i+(dims*3)], i+(dims*4): df['variance'][i+(dims*4)] }
# Search through the values for each given dim
for key, val in fold_var.items():
if val > best_var:
best_var=val
best_idx=key
if val < worst_var:
worst_var=val
worst_idx=key
# Save dim, variance, and r2 scores
best_list = [i+1, df['variance'][best_idx], df['r2'][best_idx]]
worst_list = [i+1, df['variance'][worst_idx], df['r2'][worst_idx]]
print(f'Best fold for dim {i+1} has a score of {best_var}')
print(f'Worst fold for dim {i+1} has a score of {worst_var}\n')
# Write values
with open(best_metrics,'a', newline='') as f:
writer = csv.writer(f)
writer.writerow(best_list)
f.close()
with open(worst_metrics,'a', newline='') as f:
writer = csv.writer(f)
writer.writerow(worst_list)
f.close()
def pca_run(dim, X, X_train, Y_train, component, cities):
'''
Run PCA using k-fold cross validation strategy
@params:
dim: Dimension to embed
X: Normalized data to encode
X_train, Y_train: Train data used in model creation
component: Gas/particulate
cities: List of cities to append to the embedding
'''
vec_file = f'/home/nick/github_repos/Pollution-Autoencoders/data/vec/pca/{component}_vec.csv'
# Train pca model
model, encoded_train_data = pca(
i=dim,
X_train=X_train,
Y_train=Y_train,
component=component)
# Create full embedding
encoded_data = model.transform(X)
# Add city labels and save encoded data
vec_labels = [f'dim_{i}' for i in range(1, dim+1)]
vector_data = pd.DataFrame(data=encoded_data, columns=vec_labels)
vector_data.insert(0, 'city', cities)
vector_data.to_csv(path_or_buf=vec_file, index=None)
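# A hedged usage sketch (the variables mirror the commented-out setup in main()
# below and are assumptions here): embed the full normalized matrix into `dims`
# components and write the city-labelled embedding to data/vec/pca/<component>_vec.csv.
#
#     pca_run(dim=dims, X=X, X_train=X_train, Y_train=Y_train,
#             component='co', cities=cities)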
def main():
''' Set up soures and constants, call functions as needed '''
### Constants ###
component_names = ['co','no', 'no2', 'o3', 'so2', 'pm2_5', 'pm10', 'nh3']
component_test = 'co'
# Starting dimensions
dims = 190
# K-fold folds
folds = 5
### Input files ###
# Open normalized data and dependent, non-normalized data
#dfx = pd.read_csv(f"{os.environ['HOME']}/github_repos/Pollution-Autoencoders/data/data_norm/co_data_norm.csv")
#dfy = pd.read_csv(f"{os.environ['HOME']}/github_repos/Pollution-Autoencoders/data/data_clean/co_data_clean.csv")
# City names to append
#cities = dfy['city'].values
# Set x as the normalized values, y (non-normalized) as the daily average of final day
#X = dfx.values
#Y = dfy.loc[:, ['co_2021_06_06']].values
# Split into train/test data
#X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.20, random_state=40)
### Function calls ###
#interpolate(dims, component_test)
#for component in component_names:
# interpolate(dims, component)
# pca_train_test(dims, X, Y, folds, component)
#pca_run(dims, X, X_train, Y_train, component_test, cities)
for component in component_names:
# Open normalized data and dependent, non-normalized data
dfx =
|
pd.read_csv(f"{os.environ['HOME']}/github_repos/Pollution-Autoencoders/data/data_norm/{component}_data_norm.csv")
|
pandas.read_csv
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.exists(os.path.abspath(inputFileName))
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
return (lambda x: datetime.datetime.strptime(x, formatString))  # 2020-06-09 19:14:00.000
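# A minimal sketch (not part of the original file; the file name is hypothetical)
# of how the returned parser could be passed to pandas when loading a CSV.
#
#     parser = TransformMetaData.getDateParser("%Y-%m-%d %H:%M:%S.%f")
#     frame = pandas.read_csv('time-series.csv', parse_dates=['timestamp'],
#                             date_parser=parser)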
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
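# A small usage sketch (hypothetical file name): read only the header row of a CSV
# and get back the column names plus a name -> pandas.StringDtype() mapping.
#
#     tm = TransformMetaData()
#     names, dtypes = tm.getHeaderFromFile('time-series.csv', method=1)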
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
if pandas.api.types.is_string_dtype(analysisFrame[columnName]):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean in a multiplication method since division produces an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
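# Illustration (added note, not in the original): numpy.average with per-element
# weights 1/n computes sum(x * 1/n), i.e. the ordinary mean, without an explicit
# division by x.size.
#
#     x = numpy.float128([2.0, 4.0, 6.0])
#     numpy.average(x, weights=numpy.ones_like(x) / x.size)   # -> 4.0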
def _calculateStd(self, data):
"""
Calculates the standard deviation in a multiplication method since division produces an infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
sd = numpy.float128(mean)
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
# Clean out anomalies due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
if numpy.isnan(meanValue):
    meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
if numpy.isnan(sigmaValue):
    sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma)  # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7%
sigmaRangeValue = (sigmaValue * multiplier)
if numpy.isnan(sigmaRangeValue):
    sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
# Clean out anomalies due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
logicVector = (dataAnalysisFrame[columnName] >= 1)
dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.inf, -numpy.inf, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": pandas.StringDtype(),
"BitErrorsHost44": pandas.StringDtype(),
"BitErrorsHost45": pandas.StringDtype(),
"BitErrorsHost46": pandas.StringDtype(),
"BitErrorsHost47": pandas.StringDtype(),
"BitErrorsHost48": pandas.StringDtype(),
"BitErrorsHost49": pandas.StringDtype(),
"BitErrorsHost50": pandas.StringDtype(),
"BitErrorsHost51": pandas.StringDtype(),
"BitErrorsHost52": pandas.StringDtype(),
"BitErrorsHost53": pandas.StringDtype(),
"BitErrorsHost54": pandas.StringDtype(),
"BitErrorsHost55": pandas.StringDtype(),
"BitErrorsHost56": pandas.StringDtype(),
"mrrNearMiss": pandas.StringDtype(),
"mrrRereadAvg": pandas.StringDtype(),
"readDisturbEvictions": pandas.StringDtype(),
"L1L2ParityError": pandas.StringDtype(),
"pageDefects": pandas.StringDtype(),
"pageProvisionalTotal": pandas.StringDtype(),
"ASICTemp": pandas.StringDtype(),
"PMICTemp": pandas.StringDtype(),
"size": pandas.StringDtype(),
"lastWrite": pandas.StringDtype(),
"timesWritten": pandas.StringDtype(),
"maxNumContextBands": pandas.StringDtype(),
"blankCount": pandas.StringDtype(),
"cleanBands": pandas.StringDtype(),
"avgTprog": pandas.StringDtype(),
"avgEraseCount": pandas.StringDtype(),
"edtcHandledBandCnt": pandas.StringDtype(),
"bandReloForNLBA": pandas.StringDtype(),
"bandCrossingDuringPliCount": pandas.StringDtype(),
"bitErrBucketNum": pandas.StringDtype(),
"sramCorrectablesTotal": pandas.StringDtype(),
"l1SramCorrErrCnt": pandas.StringDtype(),
"l2SramCorrErrCnt": pandas.StringDtype(),
"parityErrorValue": pandas.StringDtype(),
"parityErrorType": pandas.StringDtype(),
"mrr_LutValidDataSize": pandas.StringDtype(),
"pageProvisionalDefects": pandas.StringDtype(),
"plisWithErasesInProgress": pandas.StringDtype(),
"lastReplayDebug": pandas.StringDtype(),
"externalPreReadFatals": pandas.StringDtype(),
"hostReadCmd": pandas.StringDtype(),
"hostWriteCmd": pandas.StringDtype(),
"trimmedSectors": pandas.StringDtype(),
"trimTokens": pandas.StringDtype(),
"mrrEventsInCodewords": pandas.StringDtype(),
"mrrEventsInSectors": pandas.StringDtype(),
"powerOnMicroseconds": pandas.StringDtype(),
"mrrInXorRecEvents": pandas.StringDtype(),
"mrrFailInXorRecEvents": pandas.StringDtype(),
"mrrUpperpageEvents": pandas.StringDtype(),
"mrrLowerpageEvents": pandas.StringDtype(),
"mrrSlcpageEvents": pandas.StringDtype(),
"mrrReReadTotal": pandas.StringDtype(),
"powerOnResets": pandas.StringDtype(),
"powerOnMinutes": pandas.StringDtype(),
"throttleOnMilliseconds": pandas.StringDtype(),
"ctxTailMagic": pandas.StringDtype(),
"contextDropCount": pandas.StringDtype(),
"lastCtxSequenceId": pandas.StringDtype(),
"currCtxSequenceId": pandas.StringDtype(),
"mbliEraseCount": pandas.StringDtype(),
"pageAverageProgramCount": pandas.StringDtype(),
"bandAverageEraseCount": pandas.StringDtype(),
"bandTotalEraseCount": pandas.StringDtype(),
"bandReloForXorRebuildFail": pandas.StringDtype(),
"defragSpeculativeMiss": pandas.StringDtype(),
"uncorrectableBackgroundScan": pandas.StringDtype(),
"BitErrorsHost57":
|
pandas.StringDtype()
|
pandas.StringDtype
|
# Copyright 2020 Babylon Partners. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Creation of dissimilar term pairs"""
import random
import os
import statistics
import csv
import operator
from collections import OrderedDict
from tqdm import tqdm
from Levenshtein import distance as levenshtein_distance
import pandas as pd
def is_existing_pair(existing_pairs, label1, label2):
return (not existing_pairs.loc[
(existing_pairs['source'] == label1) & (existing_pairs['target'] == label2)].empty or \
not existing_pairs.loc[
(existing_pairs['target'] == label1) & (existing_pairs['source'] == label2)].empty)
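# A minimal sketch (added for illustration; the frame below is hypothetical) of the
# duplicate check: a pair counts as existing in either orientation.
#
#     pairs = pd.DataFrame({'source': ['headache'], 'target': ['cephalalgia']})
#     is_existing_pair(pairs, 'cephalalgia', 'headache')   # True
#     is_existing_pair(pairs, 'headache', 'nausea')        # False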
# write some statistics about the negative instances (mean, max, min Levenshtein distance)
def write_statistics_to_file(statistics_filename,
distances,
no_of_positive_instances,
dataset_name):
with open(statistics_filename, 'a') as stats:
stats.write(dataset_name + "\n")
stats.write("Number of positive instances: " + str(no_of_positive_instances) + "\n")
stats.write("Mean Levenshtein Distance: " + str(statistics.mean(distances)) + "\n")
stats.write("Median Levenshtein Distance: " + str(statistics.median(distances)) + "\n")
stats.write("Max Levenshtein Distance: " + str(max(distances)) + "\n")
stats.write("Min Levenshtein Distance: " + str(min(distances)) + "\n")
stats.write("\n")
##################################################################
# Random strategy for negative sampling
##################################################################
def create_random_pairs(positive_instances,
positive_pairs_all_datasets,
existing_negatives):
random.seed(42)
# holds the Levenshtein distance of each concept pair
distances = []
# tracks already created negative pairs as tuples, i.e. (l1,l2), to avoid duplicate creation
new_negative_pairs = []
for i, row in tqdm(positive_instances.iterrows(), total=positive_instances.shape[0]):
label1 = row['source']
# initialise random index
random_index = i
# make sure that no term pair duplicates or reverse duplicates are created
# comparing to both positive and negative concept pairs
while random_index == i or\
is_existing_pair(positive_pairs_all_datasets, label1, label2) or\
is_existing_pair(existing_negatives, label1, label2) or\
(label1, label2) in new_negative_pairs or (label2, label1) in new_negative_pairs\
or label1.lower() == label2.lower():
# choose a new random index and source vs target and get a new pairing term
random_index = random.randint(0, positive_instances.shape[0]-1)
source_or_target = random.choice(['source', 'target'])
label2 = positive_instances.loc[random_index][source_or_target]
distances.append(levenshtein_distance(label1.lower(), label2.lower()))
new_negative_pairs.append((label1, label2))
return new_negative_pairs, distances
##################################################################
# Levenshtein strategy for negative sampling
##################################################################
def create_minimal_distance_pairs(positive_instances,
positive_pairs_all_datasets,
existing_negatives):
random.seed(42)
# holds the Levenshtein distance of each concept pair
distances = []
# tracks already created negative pairs as tuples, i.e. (l1,l2), to avoid duplicate creation
new_negative_pairs = []
# find all instances of each source concept
unique_source_concepts = positive_instances.groupby('source')
# for each concept, create a list of usable concepts that are not positive similarity instances
# and choose the ones with smallest Levenshtein distance as a difficult negative sample
for label1, group in tqdm(unique_source_concepts, total=unique_source_concepts.ngroups):
possible_targets = get_possible_targets(group, new_negative_pairs, positive_instances)
distances_possible_terms, possible_targets = \
get_levenshtein_possible_targets(possible_targets, label1)
# find the N minimal distances (for N positive pairs of the concept)
# and the respective pairing concept with this minimal distance
sorted_targets_and_distances = \
[(label, d) for d, label in sorted(zip(distances_possible_terms, possible_targets),
key=operator.itemgetter(0))]
min_dist_tuples = []
for i in range(0, len(group)):
# get the smallest Levenshtein distance
if not min_dist_tuples:
min_dist_tuples, sorted_targets_and_distances = \
get_min_distance_tuples(sorted_targets_and_distances)
# choose a random term with minimal distance
label2, distance = min_dist_tuples.pop(random.randint(0, len(min_dist_tuples)-1))
while is_existing_pair(positive_pairs_all_datasets, label1, label2) or \
is_existing_pair(existing_negatives, label1, label2):
if not min_dist_tuples:
min_dist_tuples, sorted_targets_and_distances = \
get_min_distance_tuples(sorted_targets_and_distances)
label2, distance = min_dist_tuples.pop(random.randint(0, len(min_dist_tuples) - 1))
new_negative_pairs.append((label1, label2))
distances.append(distance)
return new_negative_pairs, distances
def get_min_distance_tuples(sorted_targets_and_distances):
min_dist_tuples = []
min_label, min_distance = sorted_targets_and_distances.pop(0)
min_dist_tuples.append((min_label, min_distance))
# find all terms with the same minimal distance
while sorted_targets_and_distances and sorted_targets_and_distances[0][1] == min_distance:
min_dist_tuples.append(sorted_targets_and_distances.pop(0))
return min_dist_tuples, sorted_targets_and_distances
def get_possible_targets(group, new_negative_pairs, positive_instances):
# exclude the similarity pairs of this concept from table to be used to create negative pair
usable_labels = positive_instances.drop(group.index)
# all targets of the current concept are synonyms
# that should not be paired with the current concept,
# so is of course the current concept itself
synonyms = group['target'].tolist()
label1 = positive_instances.loc[group.index.tolist()[0], 'source']
synonyms.append(label1)
# find all concepts that are paired with the synonyms (as source or target)
concepts_to_exclude = \
usable_labels[usable_labels.target.isin(synonyms)]['source'].tolist()
concepts_to_exclude = \
concepts_to_exclude + usable_labels[usable_labels.source.isin(synonyms)]['target'].tolist()
# exclude all concept pairs containing a concept that's also paired with a synonym
usable_labels = usable_labels[
~usable_labels.source.isin(concepts_to_exclude)]
usable_labels = usable_labels[~usable_labels.target.isin(concepts_to_exclude)]
# the sources and targets of the remaining pairs can be paired with the current concept
usable_list = \
usable_labels['source'].unique().tolist() + usable_labels['target'].unique().tolist()
usable_list = list(OrderedDict.fromkeys(usable_list))
# make sure no reverse duplicates are created,
# i.e. if (X, lab1) already occurs in the negative instances,
# exclude X - note that (lab1, X) won't occur in the neg samples
# since same concepts are handled together
labels_from_existing_negative_instances = \
[lab for (lab, l) in new_negative_pairs if l == label1]
usable_list_final = \
[x for x in usable_list if x not in labels_from_existing_negative_instances]
return usable_list_final
# for each potential pairing of terms, compute their Levenshtein distance and store it in a list
# record labels that have Levenshtein distance 0 (i.e. only the casing of the concepts is different)
# to exclude them later
def get_levenshtein_possible_targets(possible_targets, label1):
distances_possible_terms = []
distance0 = []
for i, label2 in enumerate(possible_targets):
d = levenshtein_distance(label2.lower(), label1.lower())
if d == 0:
distance0.append(i)
else:
distances_possible_terms.append(d)
new_possible_targets = [x for i, x in enumerate(possible_targets) if i not in distance0]
return distances_possible_terms, new_possible_targets
##################################################################
def negative_sampling(strategy,
full_new_dataset_path,
positive_instances,
statistics_path,
positive_pairs_all_datasets,
existing_negatives):
# create negative instances according to chosen strategy
if strategy == 'simple':
new_negative_pairs, distances =\
create_random_pairs(positive_instances, positive_pairs_all_datasets, existing_negatives)
elif strategy == 'advanced':
new_negative_pairs, distances = \
create_minimal_distance_pairs(positive_instances,
positive_pairs_all_datasets,
existing_negatives)
else:
raise Exception('Unknown negative sampling strategy chosen!')
# positive instances
positive_pairs_with_scores = []
for i, row in positive_instances.iterrows():
positive_pairs_with_scores.append(row['source'] + "\t" + row['target'] + "\t1\n")
# negative instances
new_negative_pairs_with_scores = \
[label1 + "\t" + label2 + "\t0\n" for (label1, label2) in new_negative_pairs]
new_dataset_with_scores = positive_pairs_with_scores + new_negative_pairs_with_scores
random.shuffle(new_dataset_with_scores)
# save newly created dataset
with open(full_new_dataset_path + '_' + strategy + '.txt', "w") as output:
output.writelines(new_dataset_with_scores)
# save statistics about new negative instances
write_statistics_to_file(statistics_path + '_' + strategy + '.txt',
distances, positive_instances.shape[0],
full_new_dataset_path + '_' + strategy)
return new_negative_pairs
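# A hedged usage sketch (file names are assumptions based on the dataset list in
# negative_instances below): create 'simple' negative pairs for one positive dataset
# and write <dataset>_simple.txt plus sampling statistics.
#
#     positives = pd.read_csv('SYN_SYN_easy_distance5.tsv', sep='\t',
#                             names=['source', 'target'])
#     negative_sampling('simple', 'SYN_SYN_easy_distance5', positives,
#                       'negative_sampling_statistics', positives,
#                       pd.DataFrame(columns=['source', 'target']))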
def read_existing_positive_instances(positive_instance_datasets, dataset_path):
# get all positive instances
li = []
for f in positive_instance_datasets:
# all positive instances in the FSN_SYN datasets are also in the SYN_SYN datasets,
# so no need to load them
if "FSN_SYN" in f or f.startswith('._'):
continue
df = pd.read_csv(os.path.join(dataset_path, f), sep="\t",
quoting=csv.QUOTE_NONE, keep_default_na=False,
header=0, names=['source', 'target'])
li.append(df)
return pd.concat(li, axis=0, ignore_index=True)
##################################################################
# MAIN
##################################################################
def negative_instances(dataset_path, strategies):
# path to save statistics
statistics_path = dataset_path + "negative_sampling_statistics"
# ORDER MATTERS!
positive_instance_datasets = [
'possibly_equivalent_to_easy_distance5.tsv',
'possibly_equivalent_to_hard_distance5.tsv',
'replaced_by_easy_distance5.tsv',
'replaced_by_hard_distance5.tsv',
'same_as_easy_distance5.tsv',
'same_as_hard_distance5.tsv',
'FSN_SYN_easy_distance5.tsv',
'FSN_SYN_hard_distance5.tsv',
'SYN_SYN_easy_distance5.tsv',
'SYN_SYN_hard_distance5.tsv'
]
positive_pairs_all_datasets = read_existing_positive_instances(positive_instance_datasets,
dataset_path)
# consider the random and advanced strategy separately
# as negative instances are considered separately
for strategy in strategies:
# dataframes to keep track of already created negative instances (to prevent duplicates)
existing_negatives_to_consider = \
|
pd.DataFrame(columns=['source', 'target', 'trueScore'])
|
pandas.DataFrame
|
import numpy as np
from scipy import stats
import pandas as pd
import matplotlib.pyplot as plt
class EventMatrix(object):
def __init__(self, datetimes, symbols):
'''
:param datetimes:
:param symbols:
Constructs A pandas dataframe indexed by datetimes and with columns for each symbol.
The constructor fills this with all NANs and an abstract base method exists to be customized.
'''
# Build an empty event matrix with an index of all the datetimes and columns for each symbol.
# Fill with NANs
self.event_matrix = pd.DataFrame({'Date': datetimes})
self.event_matrix = self.event_matrix.set_index('Date')
self.event_matrix = self.event_matrix.tz_localize(tz='America/New_York')
self.event_matrix = self.event_matrix.sort_index()
self.event_matrix = self.event_matrix.loc[~self.event_matrix.index.duplicated(keep='first')]
# Prints True if the index is sorted
#print(self.event_matrix.index.is_monotonic)
self.symbols = symbols
for symbol in self.symbols:
self.event_matrix[symbol] = np.nan
def build_event_matrix(self, start_date, end_date):
'''
Implement this method in a derived class.
:param start_date:
:param end_date:
:return: Fill up the event matrix with 1's in the row/column for which there was an event.
'''
raise NotImplementedError("Please implement this method in a derived class")
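# A hedged example (not from the original source) of a concrete subclass: mark an
# event whenever a hypothetical `announcements` dict says a symbol had news that day.
#
#     class NewsEventMatrix(EventMatrix):
#         def __init__(self, datetimes, symbols, announcements):
#             super().__init__(datetimes, symbols)
#             self.announcements = announcements   # {symbol: [dates]}
#
#         def build_event_matrix(self, start_date, end_date):
#             for symbol, dates in self.announcements.items():
#                 for d in dates:
#                     if start_date <= d <= end_date and d in self.event_matrix.index:
#                         self.event_matrix.at[d, symbol] = 1
#             return self.event_matrix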
class CarsCavcsResult(object):
def __init__(self, num_events,
cars, cars_std_err, cars_t_test, cars_significant, cars_positive, cars_num_stocks_positive,
cars_num_stocks_negative,
cavcs, cavcs_std_err, cavcs_t_test, cavcs_significant, cavcs_positive, cavcs_num_stocks_positive,
cavcs_num_stocks_negative):
"""
:param num_events: the number of events in the matrix
:param cars: time series of Cumulative Abnormal Return
:param cars_std_err: std error of the CARs
:param cars_t_test: t-test statistic that checks whether the CARs of all stock are significantly different from 0
:param cars_significant: True if the CARs of all stocks are significant
:param cars_positive: True if the CAR is positive
:param cars_num_stocks_positive: The number of stocks for which the CAR was significantly positive
:param cars_num_stocks_negative: The number of stocks for which the CAR was significantly negative
:param cavcs: time series of Cumulative Abnormal Volume Changes
:param cavcs_std_err: std error of the CAVCs
:param cavcs_t_test: t-test statistic that checks whether the CAVCs of all stock are significantly different from 0
:param cavcs_significant: True if the CAVCs of all stocks are significant
:param cavcs_positive: True if the CAVC is positive
:param cavcs_num_stocks_positive: The number of stocks for which the CAVC was significantly positive
:param cavcs_num_stocks_negative: The number of stocks for which the CAVC was significantly negative
All of the above t-tests are significant when they are in the 95% confidence levels
"""
self.num_events = num_events
self.cars = cars
self.cars_std_err = cars_std_err
self.cars_t_test = cars_t_test
self.cars_significant = cars_significant
self.cars_positive = cars_positive
self.cars_num_stocks_positive = cars_num_stocks_positive
self.cars_num_stocks_negative = cars_num_stocks_negative
self.cavcs = cavcs
self.cavcs_std_err = cavcs_std_err
self.cavcs_t_test = cavcs_t_test
self.cavcs_significant = cavcs_significant
self.cavcs_positive = cavcs_positive
self.cavcs_num_stocks_positive = cavcs_num_stocks_positive
self.cavcs_num_stocks_negative = cavcs_num_stocks_negative
def results_as_string(self):
result_string = 'Number of events processed: ' + str(self.num_events) + '\n'
result_string += 'CARS Results' + '\n'
result_string += ' Number of stocks with +CARS: ' + str(self.cars_num_stocks_positive) + '\n'
result_string += ' Number of stocks with -CARS: ' + str(self.cars_num_stocks_negative) + '\n'
result_string += ' CARS t-test value: ' + str(self.cars_t_test) + '\n'
result_string += ' CARS significant: ' + str(self.cars_significant) + '\n'
result_string += ' CARS positive: ' + str(self.cars_positive) + '\n'
result_string += 'CAVCS Results' + '\n'
result_string += ' Number of stocks with +CAVCS: ' + str(self.cavcs_num_stocks_positive) + '\n'
result_string += ' Number of stocks with -CAVCS: ' + str(self.cavcs_num_stocks_negative) + '\n'
result_string += ' CAVCS full t-test value: ' + str(self.cavcs_t_test) + '\n'
result_string += ' CAVCS significant: ' + str(self.cavcs_significant) + '\n'
result_string += ' CAVCS positive: ' + str(self.cavcs_positive) + '\n'
return result_string
class Calculator(object):
def __init__(self):
pass
def calculate_using_naive_benchmark(self, event_matrix, stock_data, market_symbol, look_back, look_forward):
"""
:param event_matrix:
:param stock_data:
:param market_symbol:
:param look_back:
:param look_forward:
:return car: time series of Cumulative Abnormal Return
:return std_err: the standard error
:return num_events: the number of events in the matrix
Most of the code was from here:
https://github.com/brettelliot/QuantSoftwareToolkit/blob/master/QSTK/qstkstudy/EventProfiler.py
"""
# Copy the stock prices into a new dataframe which will become filled with the returns
#import pdb;
#pdb.set_trace()
try:
# For IB
daily_returns = stock_data['Close'].copy()
volumes = stock_data['Volume'].copy()
except KeyError:
# For AV
daily_returns = stock_data['adjusted_close'].copy()
volumes = stock_data['volume'].copy()
# Convert prices into daily returns.
# This is the amount that the specific stock increased or decreased in value for one day.
daily_returns = daily_returns.pct_change().fillna(0)
mypct = lambda x: x[-1] - np.mean(x[:-1])
vlm_changes = volumes.rolling(5, 5).apply(mypct).fillna(0)
# Subtract the market returns from all of the stock's returns. The result is the abnormal return.
# beta = get_beta()
beta = 1.0 # deal with beta later
symbols = daily_returns.index.get_level_values(0).unique()
abnormal_returns = daily_returns.copy()
ex_vols = vlm_changes.copy()
#import pdb;
#pdb.set_trace()
for sym in symbols:
abnormal_returns.loc[sym, slice(None)] -= beta * daily_returns.loc[market_symbol, slice(None)].values
ex_vols.loc[sym, slice(None)] -= beta * vlm_changes.loc[market_symbol, slice(None)].values
#import pdb;
#pdb.set_trace()
# remove the market symbol from the returns and event matrix. It's no longer needed.
del daily_returns[market_symbol]
del vlm_changes[market_symbol]
del abnormal_returns[market_symbol]
del ex_vols[market_symbol]
try:
del event_matrix[market_symbol]
except KeyError as e:
pass
starting_event_num = len(event_matrix[(event_matrix == 1.0).any(axis=1)])
print("Starting number of events: {}".format(starting_event_num))
# The event matrix has a row for every data in the stock data.
# Zero (NaN) out any events in the rows at the beginning and end that would
# not have data.
event_matrix.values[0:look_back, :] = np.NaN
event_matrix.values[-look_forward:, :] = np.NaN
ending_event_num = len(event_matrix[(event_matrix == 1.0).any(axis=1)])
print("Ending number of events: {}".format(ending_event_num))
if (starting_event_num != ending_event_num):
print("{} events were dropped because they require data outside the data range.".format(starting_event_num -
ending_event_num))
# Number of events
i_no_events = int(np.logical_not(np.isnan(event_matrix.values)).sum())
assert i_no_events > 0, "Zero events in the event matrix"
na_all_rets = "False"
na_all_vlms = "False"
# import pdb; pdb.set_trace()
results = pd.DataFrame(index=symbols, columns=['pos', 'neg', 'vpos', 'vneg'])
# Looking for the events and pushing them to a matrix
#print(event_matrix.columns)
#print(symbols)
try:
#for i, s_sym in enumerate(event_matrix.columns):
for s_sym in symbols:
if s_sym == market_symbol:
continue
na_stock_rets = "False"
na_stock_vlms = "False"
for j, dt_date in enumerate(event_matrix.index):
if event_matrix[s_sym][dt_date] == 1:
na_ret = abnormal_returns[s_sym][j - look_back:j + 1 + look_forward]
na_vls = ex_vols[s_sym][j - look_back:j + 1 + look_forward]
if type(na_stock_rets) == type(""):
na_stock_rets = na_ret
na_stock_vlms = na_vls
else:
na_stock_rets = np.vstack((na_stock_rets, na_ret))
na_stock_vlms = np.vstack((na_stock_vlms, na_vls))
# reurns/vols for a particular stock analyze here
# then append to all rets
#import pdb;
#pdb.set_trace()
if type(na_stock_rets) == type("") or type(na_stock_vlms) == type(""):
continue
if (np.mean(na_stock_rets) > 0):
results.loc[s_sym, 'pos'] = True
#print(s_sym)
else:
results.loc[s_sym, 'neg'] = True
if (np.mean(na_stock_vlms) > 0):
results.loc[s_sym, 'vpos'] = True
else:
results.loc[s_sym, 'vneg'] = True
if type(na_all_rets) == type(""):
na_all_rets = na_stock_rets
na_all_vlms = na_stock_vlms
else:
na_all_rets = np.vstack((na_all_rets, na_stock_rets))
na_all_vlms = np.vstack((na_all_vlms, na_stock_vlms))
except Exception as e:
#import pdb;
#pdb.set_trace()
#print(e)
raise e
#import pdb;
#pdb.set_trace()
if len(na_all_rets.shape) == 1:
na_all_rets = np.expand_dims(na_all_rets, axis=0)
# Compute average abnormal returns/volume changes across events and their standard errors
num_events = len(na_all_rets)
cars = np.mean(na_all_rets, axis=0)
cavs = np.mean(na_all_vlms, axis=0)
cars_std_err = np.std(na_all_rets, axis=0)
cavs_std_err = np.std(na_all_vlms, axis=0)
na_cum_rets = np.cumprod(na_all_rets + 1, axis=1)
na_cum_rets = (na_cum_rets.T / na_cum_rets[:, look_back]).T
na_cum_vlms = np.cumsum(na_all_vlms, axis=1)
cars_cum = np.mean(na_cum_rets, axis=0)
cavs_cum = np.mean(na_cum_vlms, axis=0)
if (cars_cum[-1] - 1) > 0:
cars_positive = True
else:
cars_positive = False
if (cavs_cum[-1]) > 0:
cavs_positive = True
else:
cavs_positive = False
cars_num_stocks_positive = results['pos'].sum()
cars_num_stocks_negative = results['neg'].sum()
cavs_num_stocks_positive = results['vpos'].sum()
cavs_num_stocks_negative = results['vneg'].sum()
std1 = np.std(cars)
cars_t_test = np.mean(cars) / std1 * np.sqrt(len(cars))
std2 = np.std(cavs)
cavs_t_test = np.mean(cavs) / std2 * np.sqrt(len(cavs))
#import pdb;
#pdb.set_trace()
from scipy import stats
# pval1 = 1 - stats.t.cdf(cars_t_test,df=len(cars))
pval1 = 2 * (1 - stats.t.cdf(abs(cars_t_test), df=num_events))
# pvalues = 2*(1-tcdf(abs(t),n-v))
pval2 = 2 * (1 - stats.t.cdf(abs(cavs_t_test), df=num_events))
if (pval1 < .05):
cars_significant = True
else:
cars_significant = False
if (pval2 < .05):
cavs_significant = True
else:
cavs_significant = False
#import pdb;
#pdb.set_trace()
ccr = CarsCavcsResult(num_events,
cars_cum, cars_std_err, cars_t_test, cars_significant,
cars_positive, cars_num_stocks_positive, cars_num_stocks_negative,
cavs_cum, cavs_std_err, cavs_t_test, cavs_significant,
cavs_positive, cavs_num_stocks_positive, cavs_num_stocks_negative)
return ccr
#import pdb;
#pdb.set_trace()
def calculate_using_single_factor_benchmark(self, event_matrix, stock_data, market_symbol, estimation_window=200,
buffer=5,
pre_event_window=10, post_event_window=10):
'''
:param event_matrix:
:param stock_data:
:param market_symbol:
:param estimation_window:
:param buffer:
:param pre_event_window:
:param post_event_window:
:return cars_cavcs_result: An instance of CarsCavcsResult containing the results.
Modeled after http://arno.uvt.nl/show.cgi?fid=129765
'''
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
# The event matrix has a row for every data in the stock data.
# Zero (NaN) out any events in the rows at the beginning and end that would
# not have data.
starting_event_num = len(event_matrix[(event_matrix == 1.0).any(axis=1)])
print("Starting number of events: {}".format(starting_event_num))
event_matrix.values[0:estimation_window + buffer + pre_event_window + post_event_window, :] = np.NaN
event_matrix.values[-estimation_window - buffer - pre_event_window - post_event_window:, :] = np.NaN
ending_event_num = len(event_matrix[(event_matrix == 1.0).any(axis=1)])
print("Ending number of events: {}".format(ending_event_num))
if(starting_event_num != ending_event_num):
print("{} events were dropped because they require data outside the data range.".format(starting_event_num -
ending_event_num))
events = event_matrix[(event_matrix == 1.0).any(axis=1)]
dates = stock_data.loc[market_symbol, slice(None)].index
date1 = events.index[0]
index1 = dates.tolist().index(date1)
wtf = dates[index1]
date11 = dates[index1 - buffer]
date12 = dates[index1 - (buffer + estimation_window)]
#import pdb;
#pdb.set_trace()
# check remove duplicates
stock_data.index.value_counts()
stock_data.drop_duplicates(inplace=True)
# import pdb; pdb.set_trace()
try:
# For IB
closing_prices = stock_data['Close']
volumes = stock_data['Volume']
except KeyError:
# For AV
closing_prices = stock_data['adjusted_close']
volumes = stock_data['volume']
# check for duplicates
closing_prices.index.value_counts()
'''(RGR, 2005-12-30 00:00:00) 2
(SPY, 2000-12-29 00:00:00) 2
(RGR, 2006-12-29 00:00:00) 2'''
# removing duplicates
# now we are ready to do analysis
stock_ret = closing_prices.copy()
symbols = stock_data.index.get_level_values(0).unique().tolist()
mypct = lambda x: x[-1] - np.mean(x[:-1])
stock_ret = closing_prices.pct_change().fillna(0)
vlm_changes = volumes.rolling(5, 5).apply(mypct).fillna(0)
# do regression
pre_stock_returns = stock_ret[
(stock_data.index.get_level_values(1) > date12) & (stock_data.index.get_level_values(1) <= date11)]
pre_stock_vlms = vlm_changes[
(stock_data.index.get_level_values(1) > date12) & (stock_data.index.get_level_values(1) <= date11)]
# **************
# First compute cars ******
# ***************
#import pdb;
#pdb.set_trace()
dates = stock_data.index.get_level_values(1).unique().tolist()
if (market_symbol in symbols):
stocks = [x for x in symbols if x != market_symbol]
else:
raise ValueError('calculate_using_single_factor_benchmark: market_symbol not found in data')
ar1 = ['cars', 'cavs'];
ar2 = ['slope', 'intercept']
from itertools import product
tuples = [(i, j) for i, j in product(ar1, ar2)] # tuples = list(zip(*arrays))
index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second'])
df_regress = pd.DataFrame(0.0, index=index, columns=symbols)
# import pdb; pdb.set_trace()
for stock in stocks:
# set up data
x1 = pre_stock_returns[market_symbol]
y1 = pre_stock_returns[stock]
slope1, intercept1, cars0 = regress_vals(x1, y1)
cars = np.cumprod(cars0 + 1, axis=0)
# plot if you need
#plot_regressvals(x1, y1, slope1, intercept1, cars, stock)
# the same for cvals
x2 = pre_stock_vlms[market_symbol]
y2 = pre_stock_vlms[stock]
# y2.argsort()[::-1][:n]
# import pdb; pdb.set_trace()
slope2, intercept2, cavs0 = regress_vals(x2, y2)
cavs = np.cumsum(cavs0)
#plot_regressvals(x2, y2, slope2, intercept2, cavs, stock)
# store the regresion values
df_regress.loc[('cars', 'slope'), stock] = slope1
df_regress.loc[('cars', 'intercept'), stock] = intercept1
df_regress.loc[('cavs', 'slope'), stock] = slope2
df_regress.loc[('cavs', 'intercept'), stock] = intercept2
# do the same for volumes
# ***************
# now the event cars and cavs computations
ar11 = stocks
ar12 = ['cars', 'cavs']
tuples2 = [(i, j) for i, j in product(ar11, ar12)] # tuples = list(zip(*arrays))
index2 = pd.MultiIndex.from_tuples(tuples2, names=['first', 'second'])
df_results = pd.DataFrame(0.0, index=index2, columns=['positive', 'significant'])
# Long Author List formatting tool
# <NAME> (<EMAIL> 2020)
# Usage: python3 lal.py
# Input: lal_data2.txt with one author per row and up to 5 affiliations
# <First>;<Last>;<Email>;<Group1>;<Group2>;<Group3>;<Group4>;<Group5>
# Example: Heiko;Goelzer;<EMAIL>;IMAU,UU;ULB;nil;nil;nil
# Use 'nil','nan','0' or '-' to fill unused affiliations
# Output: lal_inout2.txt when saving the modified listing, can be used as
# input the next time
# Parsed: lal_parsed.txt when parsed to insert in a manuscript
# Selected lines and selected blocks can be rearranged by dragging, sorted by last name and deleted.
# 'Save' will write the updated list to a file that can be reused later
# 'Parse' will write formatted output that can be copy-pasted
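# Expected input layout (sketch; the exact read call is an assumption, but the column
# names match what the methods below access):
#   import pandas as pd
#   df = pd.read_csv('lal_data2.txt', sep=';', header=None,
#                    names=['FirstName', 'LastName', 'Email',
#                           'Group1', 'Group2', 'Group3', 'Group4', 'Group5'])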
import tkinter as tk;
# Listbox for ordering
class ReorderableListbox(tk.Listbox):
""" A Tkinter listbox with drag & drop reordering of lines """
def __init__(self, master, **kw):
kw['selectmode'] = tk.EXTENDED
tk.Listbox.__init__(self, master, kw)
self.bind('<Button-1>', self.setCurrent)
self.bind('<Control-1>', self.toggleSelection)
self.bind('<B1-Motion>', self.shiftSelection)
self.bind('<Leave>', self.onLeave)
self.bind('<Enter>', self.onEnter)
self.selectionClicked = False
self.left = False
self.unlockShifting()
self.ctrlClicked = False
def orderChangedEventHandler(self):
pass
def onLeave(self, event):
# prevents changing selection when dragging
# already selected items beyond the edge of the listbox
if self.selectionClicked:
self.left = True
return 'break'
def onEnter(self, event):
#TODO
self.left = False
def setCurrent(self, event):
self.ctrlClicked = False
i = self.nearest(event.y)
self.selectionClicked = self.selection_includes(i)
if (self.selectionClicked):
return 'break'
def toggleSelection(self, event):
self.ctrlClicked = True
def moveElement(self, source, target):
if not self.ctrlClicked:
element = self.get(source)
self.delete(source)
self.insert(target, element)
def unlockShifting(self):
self.shifting = False
def lockShifting(self):
# prevent moving processes from disturbing each other
# and prevent scrolling too fast
# when dragged to the top/bottom of visible area
self.shifting = True
def shiftSelection(self, event):
if self.ctrlClicked:
return
selection = self.curselection()
if not self.selectionClicked or len(selection) == 0:
return
selectionRange = range(min(selection), max(selection))
currentIndex = self.nearest(event.y)
if self.shifting:
return 'break'
lineHeight = 12
bottomY = self.winfo_height()
if event.y >= bottomY - lineHeight:
self.lockShifting()
self.see(self.nearest(bottomY - lineHeight) + 1)
self.master.after(500, self.unlockShifting)
if event.y <= lineHeight:
self.lockShifting()
self.see(self.nearest(lineHeight) - 1)
self.master.after(500, self.unlockShifting)
if currentIndex < min(selection):
self.lockShifting()
notInSelectionIndex = 0
for i in selectionRange[::-1]:
if not self.selection_includes(i):
self.moveElement(i, max(selection)-notInSelectionIndex)
notInSelectionIndex += 1
currentIndex = min(selection)-1
self.moveElement(currentIndex, currentIndex + len(selection))
self.orderChangedEventHandler()
elif currentIndex > max(selection):
self.lockShifting()
notInSelectionIndex = 0
for i in selectionRange:
if not self.selection_includes(i):
self.moveElement(i, min(selection)+notInSelectionIndex)
notInSelectionIndex += 1
currentIndex = max(selection)+1
self.moveElement(currentIndex, currentIndex - len(selection))
self.orderChangedEventHandler()
self.unlockShifting()
return 'break'
def deleteSelection(self):
# delete selected items
if len(self.curselection()) == 0:
return
self.delete(min(self.curselection()),max(self.curselection()))
def sortAll(self):
# sort all items alphabetically
temp_list = list(self.get(0, tk.END))
temp_list.sort(key=str.lower)
# delete contents of present listbox
self.delete(0, tk.END)
# load listbox with sorted data
for item in temp_list:
self.insert(tk.END, item)
def sortSelection(self):
# sort selected items alphabetically
if len(self.curselection()) == 0:
return
mmax = max(self.curselection())
mmin = min(self.curselection())
temp_list = list(self.get(mmin,mmax))
#print(temp_list)
# Sort reverse because pushed back in reverse order
temp_list.sort(key=str.lower,reverse=True)
# delete contents of present listbox
self.delete(mmin,mmax)
# load listbox with sorted data
for item in temp_list:
self.insert(mmin, item)
def save(self,df):
# save current list
temp_list = list(self.get(0, tk.END))
# create output df
dfout = pd.DataFrame()
for item in temp_list:
items = item.split(",")
matchl = (df["LastName"].isin([items[0]]))
matchf = (df["FirstName"].isin([items[1]]))
matche = (df["Email"].isin([items[2]]))
dfout = dfout.append(df[matchf & matchl])
dfout.to_csv('lal_inout2.txt', sep=';', header=None, index=None)
print("File saved!")
def parse_word(self,df):
# save current list
temp_list = list(self.get(0, tk.END))
# create output df
dfout = pd.DataFrame()
for item in temp_list:
items = item.split(",")
matchl = (df["LastName"].isin([items[0]]))
matchf = (df["FirstName"].isin([items[1]]))
dfout = dfout.append(df[matchf & matchl])
# parse
first = dfout["FirstName"]
last = dfout["LastName"]
grp = dfout[["Group1","Group2","Group3","Group4","Group5"]]
unique_groups = []
group_ids = []
k = 0
# collect unique groups and indices
for i in range(0,dfout.shape[0]):
groups = []
# loop through max 5 groups
for j in range(0,5):
# Exclude some common dummy place holders
if (grp.iloc[i,j] not in ['nil','nan','0','-']):
if (grp.iloc[i,j] not in unique_groups):
unique_groups.append(grp.iloc[i,j])
k = k + 1
groups.append(k)
else:
ix = unique_groups.index(grp.iloc[i,j])+1
groups.append(ix)
# Add author group ids
group_ids.append(groups)
#print(group_ids)
#print(unique_groups)
# Compose text
with open("lal_parsed_word.txt", "w") as text_file:
# write out names
for i in range(0,dfout.shape[0]):
print(first.iloc[i].strip(), end =" ", file=text_file)
print(last.iloc[i].strip(), end ="", file=text_file)
for j in range(0,len(group_ids[i])):
if j < len(group_ids[i])-1:
print(str(group_ids[i][j]), end =",", file=text_file)
else:
print(str(group_ids[i][j]), end ="", file=text_file)
#print(" ", end ="", file=text_file)
if (i < dfout.shape[0]-1):
# comma and space before next name
print(", ", end ="", file=text_file)
# Add some space between names and affiliations
print("\n\n", file=text_file)
# Write out affiliations
for i in range(0,len(unique_groups)):
print("(", end ="", file=text_file)
print(str(i+1), end ="", file=text_file)
print(")", end =" ", file=text_file)
print(unique_groups[i], end ="\n", file=text_file)
print("File lal_parsed_word.txt written")
# Parse tex \author and \affil
def parse_tex(self,df):
# save current list
temp_list = list(self.get(0, tk.END))
# create output df
dfout = pd.DataFrame()
for item in temp_list:
items = item.split(",")
matchl = (df["LastName"].isin([items[0]]))
matchf = (df["FirstName"].isin([items[1]]))
dfout = dfout.append(df[matchf & matchl])
# parse
first = dfout["FirstName"]
last = dfout["LastName"]
grp = dfout[["Group1","Group2","Group3","Group4","Group5"]]
unique_groups = []
group_ids = []
k = 0
# collect unique groups and indices
for i in range(0,dfout.shape[0]):
groups = []
# loop through max 5 groups
for j in range(0,5):
# Exclude some common dummy place holders
if (grp.iloc[i,j] not in ['nil','nan','0','-']):
if (grp.iloc[i,j] not in unique_groups):
unique_groups.append(grp.iloc[i,j])
k = k + 1
groups.append(k)
else:
ix = unique_groups.index(grp.iloc[i,j])+1
groups.append(ix)
# Add author group ids
group_ids.append(groups)
#print(group_ids)
#print(unique_groups)
# Compose text
with open("lal_parsed_tex.txt", "w") as text_file:
# write out names
for i in range(0,dfout.shape[0]):
print("\\Author[", end ="", file=text_file)
for j in range(0,len(group_ids[i])):
if j < len(group_ids[i])-1:
print(str(group_ids[i][j]), end =",", file=text_file)
else:
print(str(group_ids[i][j]), end ="]", file=text_file)
print("{", end ="", file=text_file)
print(first.iloc[i].strip(), end ="", file=text_file)
print("}{", end ="", file=text_file)
print(last.iloc[i].strip(), end ="", file=text_file)
print("}", end ="\n", file=text_file)
# Add some space between names and affiliations
print("\n", file=text_file)
# Write out affiliations
for i in range(0,len(unique_groups)):
print("\\affil", end ="", file=text_file)
print("[", end ="", file=text_file)
print(str(i+1), end ="", file=text_file)
print("]", end ="", file=text_file)
print("{", end ="", file=text_file)
print(unique_groups[i], end ="}\n", file=text_file)
print("File lal_parsed_tex.txt written")
# Parse simple list of names
def parse_list(self,df):
# save current list
temp_list = list(self.get(0, tk.END))
# create output df
dfout = pd.DataFrame()
for item in temp_list:
items = item.split(",")
matchl = (df["LastName"].isin([items[0]]))
matchf = (df["FirstName"].isin([items[1]]))
dfout = dfout.append(df[matchf & matchl])
# parse
first = dfout["FirstName"]
last = dfout["LastName"]
#print(group_ids)
#print(unique_groups)
# Compose text
with open("lal_parsed_list.txt", "w") as text_file:
# write out names
for i in range(0,dfout.shape[0]):
print(first.iloc[i].strip(), end =" ", file=text_file)
print(last.iloc[i].strip(), end ="", file=text_file)
print("", file=text_file)
print("File lal_parsed_list.txt written!")
# Parse list of names and emails
def parse_email(self,df):
# save current list
temp_list = list(self.get(0, tk.END))
# create output df
dfout = pd.DataFrame()
import sys
import numpy as np
import pandas as pd
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import seaborn as sns
from genome_utils import *
#############################
## ##
## General use gene drawer ##
## ##
#############################
def draw_genes_from_gff(ax, gff, promoter_params=[1000,0.2,1000], gene_ylim=[-7.0,-4.5]):
"""
Draw genes on your plot.
Inputs
----------
ax: pyplot.Axes class
An Axes class that contains the plot you want to draw
genes on.
gff: DataFrame
A pandas DataFrame of gene definitions with 'exonStarts',
'exonEnds' and 'strand' columns.
promoter_params: list
Three parameters that affect how promoters are drawn for
each gene. First is the horizontal length of the promoter
arrow in number of nucleotides. Second is arrow width in
terms of the y-axis variable being plotted. Third is arrow
head length in number of nucleotides.
Returns
-------
None
"""
assert gene_ylim[0] <= gene_ylim[1], "Provide gene_ylim as [lower_val, higher_val]"
gene_block = [gene_ylim[0], (0.8 * gene_ylim[1]) + (0.2 * gene_ylim[0])]
prom_height= [gene_block[1], gene_ylim[1]]
for i, line in gff.iterrows():
exonStarts = [int(x) for x in line['exonStarts'].split(',')]
exonEnds = [int(x) for x in line['exonEnds'].split(',')]
direction = 2*int(line['strand'] == '+') - 1
for start, end in zip(exonStarts, exonEnds):
ax.fill_betweenx(gene_block,[start,start],[end,end],facecolor='black')
ax.hlines(y=np.mean(gene_block),xmin=exonStarts[0],xmax=exonEnds[-1])
if direction == 1:
x_loc = exonStarts[0]
else:
x_loc = exonEnds[-1]
ax.vlines(x=x_loc,ymin=prom_height[0],ymax=prom_height[1])
ax.arrow(x=x_loc,y=prom_height[1],dx=direction*promoter_params[0],dy=0,
head_width=promoter_params[1], head_length=promoter_params[2], fc='k', ec='k')
return None
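# Illustrative usage (sketch; the GFF-like layout is assumed from what the loop above
# reads: comma-separated 'exonStarts'/'exonEnds' strings and a 'strand' column):
#   gff = pd.DataFrame({'exonStarts': ['1000,2500'], 'exonEnds': ['1800,3200'], 'strand': ['+']})
#   fig, ax = plt.subplots()
#   draw_genes_from_gff(ax, gff, promoter_params=[1000, 0.2, 1000], gene_ylim=[-7.0, -4.5])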
################################
## ##
## Guide and Peak combo plots ##
## ##
################################
def plot_hff_cutsites(plot_interval, cutsite_data, peak_data, plot_ids, get_chrom=None):
"""
Plot guide-wise assay summaries for individual replicates at the
guide cut-sites. Include a track of significant peaks above the
guide score plot. Important: before using this function, cutsite_data and
peak_data should be filtered to a single chromosome.
Inputs
----------
plot_interval: list
A two integer list that defines the genomic field of view.
cutsite_data: DataFrame
A pandas DataFrame containing guide-wise activity scores for
the various replicates to be tested.
peak_data: DataFrame
A pandas DataFrame similar to a BED format file. It specifies
the significant peaks that are called in each replicate.
plot_ids: list
A list of IDs that correspond to column names in cutsite_data and
to 'exp_id' values in peak_data. These IDs are used to extract the relevant data.
Returns
-------
(fig, ax): tuple of pyplot objects
A tuple containing a pyplot.Figure class and a pyplot.Axis class.
"""
if get_chrom is not None:
cutsite_data = cutsite_data[ cutsite_data.index.str.contains(get_chrom+":") ]
peak_data = peak_data[ peak_data['chr'] == get_chrom ]
# Subset cutsite scores
plot_id_slicer = [an_id for an_id in plot_ids if an_id in cutsite_data.columns]
sub_cuts = cutsite_data.loc[:,plot_id_slicer]
sub_cuts['cutsite'] = [ int(coord.split(':')[1].split('-')[1]) - 4
if coord.split(':')[-1] == '+'
else
int(coord.split(':')[1].split('-')[0]) + 3
for coord in sub_cuts.index ]
slice_cuts = check_overlap(plot_interval,np.vstack([cutsite_data['cutsite'].values,
(cutsite_data['cutsite']+1).values]).T)
sub_cuts = sub_cuts.loc[slice_cuts, :]
# Subset peak intervals
sub_peaks= check_overlap(plot_interval, peak_data.loc[:,('start','end')].values)
sub_peaks= peak_data.loc[sub_peaks,:]
sub_peaks= sub_peaks.loc[ sub_peaks['exp_id'].isin(plot_ids) ]
cut_types = np.unique(sub_cuts.columns)
peak_types = np.unique(sub_peaks['exp_id'])
color_cycle= plt.rcParams['axes.prop_cycle'].by_key()['color']
cycle_len = len(color_cycle)
col_dict = { exp_id: color_cycle[idx % cycle_len]
for idx, exp_id
in enumerate(plot_ids) }
score_max = np.nanmax(sub_cuts.loc[:, plot_id_slicer].values)
score_min = np.nanmin(sub_cuts.loc[:, plot_id_slicer].values)
score_gap = score_max - \
score_min
fig = plt.figure(figsize=(12,6))
ax = plt.subplot(111)
for i, exp_id in enumerate(col_dict.keys()):
sub_sub_peaks = sub_peaks.loc[ sub_peaks['exp_id'] == exp_id, : ]
peak_position = score_max + ( ( 0.2+(i*0.05) ) * score_gap )
for j, row in sub_sub_peaks.iterrows():
ax.hlines(y=peak_position,
xmin=row['start'], xmax=row['end'],
color=col_dict[exp_id])
if exp_id in cut_types:
null_filter = sub_cuts[exp_id].isnull()
ax.scatter(sub_cuts.loc[~null_filter,'cutsite'].values,
sub_cuts.loc[~null_filter,exp_id].values,
color=col_dict[exp_id],s=8,alpha=0.5)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_xlim(*plot_interval)
custom_lines = [ Line2D([0], [0], color=col_dict[color]) for color in plot_ids ]
ax.legend(custom_lines, plot_ids,bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.tight_layout()
return fig, ax
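# Illustrative usage (sketch; argument shapes are assumptions inferred from the slicing
# above): cutsite_data is indexed by guide coordinates such as 'chr8:128745000-128745020:+'
# with one score column per replicate id, and peak_data is BED-like with 'chr', 'start',
# 'end' and 'exp_id' columns.
#   fig, ax = plot_hff_cutsites([128700000, 128800000], cutsite_data, peak_data,
#                               plot_ids=['rep1', 'rep2'], get_chrom='chr8')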
def plot_combined_cutsites(plot_interval, cutsite_data, peak_data, plot_ids, merge_style='replicate', get_chrom=None):
# Figure out how many reps we have for each assay
uniq_assays = list(peak_data['assay'].unique())
assay_reps = [ peak_data.loc[peak_data['assay'] == assay,'replicate'] \
.unique()
for assay in uniq_assays ]
assay_count = [ len(rep_list) for rep_list in assay_reps ]
assay2thresh= { assay: thresh for assay, thresh in zip(uniq_assays, assay_count) }
if get_chrom is not None:
cutsite_data = cutsite_data[ cutsite_data.index.str.contains(get_chrom+":") ]
peak_data = peak_data[ peak_data['chr'] == get_chrom ]
else:
pass
# Subset cutsite scores
plot_id_slicer = [an_id for an_id in plot_ids if an_id in cutsite_data.columns]
sub_cuts = cutsite_data.loc[:,plot_id_slicer]
sub_cuts['cutsite'] = [ int(coord.split(':')[1].split('-')[1]) - 4
if coord.split(':')[-1] == '+'
else
int(coord.split(':')[1].split('-')[0]) + 3
for coord in sub_cuts.index ]
slice_cuts = check_overlap(plot_interval,np.vstack([cutsite_data['cutsite'].values,
(cutsite_data['cutsite']+1).values]).T)
sub_cuts = sub_cuts.loc[slice_cuts, :]
# Subset peak intervals
sub_peaks= check_overlap(plot_interval, peak_data.loc[:,('start','end')].values)
sub_peaks= peak_data.loc[sub_peaks,:]
sub_peaks= sub_peaks.loc[ sub_peaks['exp_id'].isin(plot_ids) ]
cut_types = np.unique(sub_cuts.columns)
peak_types = np.unique(sub_peaks['exp_id'])
score_max = np.nanmax(sub_cuts.loc[:, plot_id_slicer].values)
score_min = np.nanmin(sub_cuts.loc[:, plot_id_slicer].values)
score_gap = score_max - \
score_min
fig = plt.figure(figsize=(12,6))
ax = plt.subplot(111)
avail_data = peak_data.loc[peak_data['exp_id'].isin(plot_ids),('exp_id','assay','replicate')].drop_duplicates()
exp2assay = {}
assay2exp = {}
for row in avail_data.iterrows():
exp2assay[row[1]['exp_id']] = row[1]['assay']
try:
assay2exp[row[1]['assay']].append(row[1]['exp_id'])
except:
assay2exp[row[1]['assay']] = [row[1]['exp_id']]
for i, assay in enumerate(avail_data['assay'].unique()):
color = plt.rcParams['axes.prop_cycle'].by_key()['color'][i]
peak_position = score_max + ( ( 0.2+(i*0.05) ) * score_gap )
if merge_style == 'overlap':
for exp_id in assay2exp[assay]:
sub_sub_peaks = sub_peaks.loc[ sub_peaks['exp_id'] == exp_id, : ]
for j, row in sub_sub_peaks.iterrows():
ax.hlines(y=peak_position,
xmin=row['start'], xmax=row['end'],
color=color)
elif merge_style == 'replicate':
collect_merge = []
for exp_id in assay2exp[assay]:
exp_data = sub_peaks.loc[ sub_peaks['exp_id'] == exp_id, : ]
if exp_data.shape[0] > 0:
sub_merge = merge_bed(exp_data)
collect_merge.append( sub_merge )
merge_reps = pd.DataFrame()
if assay2thresh[assay] == 1:
if len(collect_merge) == 1:
merge_reps = collect_merge[0]
else:
pass
elif len(collect_merge) > 1:
collect_merge = pd.concat(collect_merge, axis=0)
import requests
from bs4 import BeautifulSoup
from datetime import date, datetime, timedelta
import pandas as pd
import re
def blue_gym():
i = date.today()
zacatek = []
konec = []
trener = []
obsazenost = []
nazev = []
odkaz = []
a = 0
while a <= 1:
day = i.strftime("%d-%m-%Y")
url = 'http://cz.boofit.net/bluegym/rozvrh-a-rezervace/aktualni-rozvrh/1119/' + str(day)
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
dany_den = soup.find_all('div', class_ = 'col7-sm-7')
for den in dany_den:
datum = den.find('dt')
datum = '.'.join(re.findall(r'\d+', datum.text))
cviceni = den.find_all('p', class_ = 'lesson')
for lekce in cviceni:
zacatek.append(datetime.strptime(datum + ' ' + lekce.text.split()[0],'%d.%m.%Y %H:%M').isoformat())
konec.append(datetime.strptime(datum + ' ' + lekce.text.split()[2],'%d.%m.%Y %H:%M').isoformat())
trener.append(lekce.text.split()[-2])
obsazenost.append(lekce.text.split()[-1])
nazev.append(' '.join(lekce.text.split()[3:-2]))
odkaz.append(url)
i = i + timedelta(days = 7)
a+=1
nadpisy = ['nazev', 'zacatek', 'konec', 'obsazenost', 'trener', 'url']
df = pd.DataFrame(data = (nazev, zacatek, konec, obsazenost, trener, odkaz), index = nadpisy)
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2020/11/8 17:28
Desc: Sina Finance - bonds - SH/SZ bonds - real-time and historical quote data
http://vip.stock.finance.sina.com.cn/mkt/#hs_z
"""
import datetime
import re
from mssdk.utils import demjson
import pandas as pd
import requests
from py_mini_racer import py_mini_racer
from tqdm import tqdm
from mssdk.bond.cons import (
zh_sina_bond_hs_count_url,
zh_sina_bond_hs_payload,
zh_sina_bond_hs_url,
zh_sina_bond_hs_hist_url,
)
from mssdk.stock.cons import hk_js_decode
def get_zh_bond_hs_page_count() -> int:
"""
Total page count for SH/SZ bonds on the Sina quote-center front page
http://vip.stock.finance.sina.com.cn/mkt/#hs_z
:return: total number of pages
:rtype: int
"""
params = {
"node": "hs_z",
}
res = requests.get(zh_sina_bond_hs_count_url, params=params)
total = int(re.findall(re.compile(r"\d+"), res.text)[0])
# results are paginated 80 items per page; round up when there is a partial last page
page_count, remainder = divmod(total, 80)
return page_count if remainder == 0 else page_count + 1
def bond_zh_hs_spot() -> pd.DataFrame:
"""
Sina Finance - bonds - SH/SZ bonds - real-time quotes; heavy scraping may get your IP banned
http://vip.stock.finance.sina.com.cn/mkt/#hs_z
:return: real-time quotes for all SH/SZ bonds at the current moment
:rtype: pandas.DataFrame
"""
big_df = pd.DataFrame()
page_count = get_zh_bond_hs_page_count()
zh_sina_bond_hs_payload_copy = zh_sina_bond_hs_payload.copy()
for page in tqdm(range(1, page_count + 1), leave=False):
zh_sina_bond_hs_payload_copy.update({"page": page})
res = requests.get(zh_sina_bond_hs_url, params=zh_sina_bond_hs_payload_copy)
data_json = demjson.decode(res.text)
big_df = big_df.append(pd.DataFrame(data_json), ignore_index=True)
big_df.columns = [
'代码',
'-',
'名称',
'最新价',
'涨跌额',
'涨跌幅',
'买入',
'卖出',
'昨收',
'今开',
'最高',
'最低',
'成交量',
'成交额',
'-',
'-',
'-',
'-',
'-',
'-',
]
big_df = big_df[[
'代码',
'名称',
'最新价',
'涨跌额',
'涨跌幅',
'买入',
'卖出',
'昨收',
'今开',
'最高',
'最低',
'成交量',
'成交额',
]]
big_df['最新价'] = pd.to_numeric(big_df['最新价'])
big_df['买入'] = pd.to_numeric(big_df['买入'])
big_df['卖出'] = pd.to_numeric(big_df['卖出'])
big_df['昨收'] = pd.to_numeric(big_df['昨收'])
big_df['今开'] = pd.to_numeric(big_df['今开'])
big_df['最高'] = pd.to_numeric(big_df['最高'])
big_df['最低'] = pd.to_numeric(big_df['最低'])
return big_df
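# Note: big_df keeps the Chinese column labels used above (代码, 名称, 最新价, ...);
# bond_zh_hs_daily() below returns the daily K-line history for a single bond code.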
def bond_zh_hs_daily(symbol: str = "sh010107") -> pd.DataFrame:
"""
Sina Finance - bonds - SH/SZ bonds - historical quotes; heavy scraping may get your IP banned
http://vip.stock.finance.sina.com.cn/mkt/#hs_z
:param symbol: SH/SZ bond code; e.g., sh010107
:type symbol: str
:return: daily K-line data for the specified bond code
:rtype: pandas.DataFrame
"""
res = requests.get(
zh_sina_bond_hs_hist_url.format(
symbol, datetime.datetime.now().strftime("%Y_%m_%d")
)
)
js_code = py_mini_racer.MiniRacer()
js_code.eval(hk_js_decode)
dict_list = js_code.call(
"d", res.text.split("=")[1].split(";")[0].replace('"', "")
) # run the JS decryption routine
data_df = pd.DataFrame(dict_list)
data_df["date"] = pd.to_datetime(data_df["date"]).dt.date
data_df['open'] = pd.to_numeric(data_df['open'])
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.preprocessing.text import Tokenizer, text_to_word_sequence
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing import text, sequence
import os
import logging
import gensim
from gensim.models import FastText, Word2Vec
from tqdm.autonotebook import *
from sklearn.externals import joblib
def select_admission(admission, BASIC_INFO):
ATIME = pd.to_datetime(admission['ADMITTIME'])
ayear = ATIME.dt.year
amonth = ATIME.dt.month
aday = ATIME.dt.day
ahour = ATIME.dt.hour
admission['AYEAR'] = ayear
admission['AMONTH'] = amonth
admission['ADAY'] = aday
admission['AHOUR'] = ahour
ad = pd.DataFrame()
for col in BASIC_INFO:
ad[col] = admission[col].copy()
ad.fillna('nan',inplace=True)
sparse_col = BASIC_INFO[6:]
for col in sparse_col:
lbe = LabelEncoder()
ad[col] = list(lbe.fit_transform(ad[col].values))
return ad
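# Note on select_admission above (an assumption): BASIC_INFO is expected to list the
# numeric/identifier fields first, since only the columns in BASIC_INFO[6:] are label-encoded.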
def cal_age(ad, patients):
birth = pd.DataFrame({'SUBJECT_ID':patients['SUBJECT_ID'], 'DOB':patients['DOB']})
ad = pd.merge(ad,birth, how='left', on='SUBJECT_ID')
ayear= ad['AYEAR'].values
btime = pd.to_datetime(ad['DOB'])
age = ayear-btime.dt.year
ad['AGE'] = age
return ad
def diag_ccs(diagnose, ccs):
ccs = ccs.applymap(lambda x: x.replace('\'', ''))
ccs2 = pd.DataFrame({'ICD9_CODE':ccs['\'ICD-9-CM CODE\''], 'LV1':ccs['\'CCS LVL 1\''], 'LV2':ccs['\'CCS LVL 2\''], 'LV3':ccs['\'CCS LVL 3\''], 'LV4':ccs['\'CCS LVL 4\''],})
diagnose = pd.merge(diagnose,ccs2,how='left',on='ICD9_CODE')
diagnose[diagnose==' '] = np.NaN
return diagnose
def group_by(data, col, key):
data[col] = data[col].astype(str)
col_list = data.groupby(key)[col].apply(list).reset_index()
return col_list
def add_labels(ad, diag):
diag_drop = diag[pd.notnull(diag['LV1'])]
# add lv1(label) to ad
lv1_list = group_by(diag_drop, 'LV1', 'HADM_ID')
lv1_list['LV1'] = lv1_list['LV1'].apply(lambda x: list(set(x)))
ad = pd.merge(ad, lv1_list, how='left', on='HADM_ID')
return ad
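# Helper below: deduplicate a list while preserving first-occurrence order.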
def set_sort(x):
x2 = list(set(x))
x2.sort(key=x.index)
return x2
def add_mm(ad, multimodal, mm_names):
for i in range(len(multimodal)):
modal = multimodal[i]
col = mm_names[i]
filename = './data_raw/'+modal+'.csv'
#print(filename,col)
data = pd.read_csv(filename)
data.sort_values('HADM_ID', inplace=True)
data[col] = data[col].astype(str)
data_list = data.groupby(['HADM_ID'])[col].apply(list).reset_index()
data_list.rename(columns={col:modal},inplace=True)
temp = data_list[modal].values
temp = list(map(list,map(set_sort,temp)))
data_list[modal] = temp
ad = pd.merge(ad,data_list,how='left',on='HADM_ID')
return ad
def add_mm2(ad, modal):
filename = './data_raw/'+modal+'.csv'
data = pd.read_csv(filename)
data.sort_values('HADM_ID',inplace=True)
data.fillna(0, inplace=True)
temp1 = data['SPEC_ITEMID'].values
temp2 = data['ORG_ITEMID'].values
temp3 = []
for i in range(len(data)):
x1 = []
if temp1[i]!=0:
x1.append(str(int(temp1[i])))
if temp2[i]!=0:
x1.append(str(int(temp2[i])))
temp3.append(x1)
data[modal] = temp3
micro = data.groupby(['HADM_ID'])[modal].apply(list).reset_index()
micro[modal] = micro[modal].apply(lambda x: [j for i in x for j in i])
temp = micro[modal].values
temp = list(map(list,map(set_sort,temp)))
micro[modal] = temp
ad = pd.merge(ad,micro, how='left',on='HADM_ID' )
return ad
def group_patients(ad, col_sets, key):
ad_list = group_by(ad, col_sets[0], key)
for col in col_sets[1:]:
temp = group_by(ad, col, key)
ad_list = pd.merge(ad_list, temp, how='left', on=key)
return ad_list
def group_mm_res(ad_list, ad, multimodal):
last = ad.drop_duplicates(subset='SUBJECT_ID',keep='last')
last2 = pd.DataFrame({'SUBJECT_ID':last['SUBJECT_ID'].copy()})
for col in multimodal:
last2[col] = last[col].copy()
ad_list = pd.merge(ad_list, last2, how='left', on='SUBJECT_ID')
return ad_list
def add_labels_res(ad_list, ad):
last = ad.drop_duplicates(subset='SUBJECT_ID',keep='last')
last2 = pd.DataFrame({'SUBJECT_ID':last['SUBJECT_ID'].copy(), 'LV1':last['LV1'].copy()})
ad_list = pd.merge(ad_list, last2, how='left', on='SUBJECT_ID')
import sys
sys.path.insert(0,'../../functions')
import pandas as pd
import numpy as np
from my_functions import fit_area_richness
# parameters
# ---
# where to find the island area and richness data
fname_area = '../../data/processed/island_area.csv' # island_name,area_sq_m,area_sq_km
fname_rich = '../../data/processed/island_richness.csv' # island_name,richness
# which island subsets to fit - correspond to file names
subsets = ['all', 'peninsula_only', 'riau_only', 'survey_only']
dirname_subsets = '../../data/processed/island_subsets/'
# which rho values to assume
rhos = [2115, 1259, 1700] # these are density birds per km^2
# 2115:
# Estimated by taking the average density in Sheldon et al (2011)
# of the primary rainforest habitats (albizia softwoods, logged forest softwoods, peatswamp softwoods).
# https://lkcnhm.nus.edu.sg/app/uploads/2017/04/59rbz295-309.pdf
# 1259
# Estimated from Figure 3 of Castelletta et al (2005) of resident birds in Old Secondary Forests.
# https://www.sciencedirect.com/science/article/abs/pii/S0006320704001740
# range of K values to fit
Ks = [ i for i in range(1, 15) ] # just a bit of a guess
# create a dataframe: island_name, area, richness
# ---
df_area = pd.read_csv(fname_area)
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if its not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if its not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
# this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning):
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
# this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
# dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
# see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
not is_platform_little_endian(), reason="reason platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
# nan some entire rows (string are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
# test a different ordering but with more fields (like an invalid
# combination)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
# store multiple additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
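# the itemsize is fixed by the longest string written on the first append (15 chars)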
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[1:4, "string"] = np.nan
df["string2"] = "bar"
df.loc[4:8, "string2"] = np.nan
df["string3"] = "bah"
df.loc[1:, "string3"] = np.nan
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
df = DataFrame(dict(A="foo", B="bar"), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, "df")
store.append("df", df, min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["A"]
# a min_itemsize that creates an additional data_column alongside the explicit one
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["B", "A"]
# a min_itemsize keyed on 'values' sizes both the data_column and the values block
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
check_col("df", "B", 200)
check_col("df", "values_block_0", 200)
assert store.get_storer("df").data_columns == ["B"]
# infer the .typ on subsequent appends
_maybe_remove(store, "df")
store.append("df", df[:5], min_itemsize=200)
store.append("df", df[5:], min_itemsize=200)
tm.assert_frame_equal(store["df"], df)
# invalid min_itemsize keys
df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
_maybe_remove(store, "df")
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
def test_append_with_empty_string(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
store.append("df", df[:-1], min_itemsize={"x": 1})
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
def test_to_hdf_with_min_itemsize(self, setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(
pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]])
)
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"]
)
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_append_with_data_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
store.append("df", df[2:])
tm.assert_frame_equal(store["df"], df)
# check that we have indices created
assert store._handle.root.df.table.cols.index.is_indexed is True
assert store._handle.root.df.table.cols.B.is_indexed is True
# data column searching
result = store.select("df", "B>0")
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select("df", "B>0 and index>df.index[3]")
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new["string"] = "foo"
df_new.loc[1:4, "string"] = np.nan
df_new.loc[5:6, "string"] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"])
result = store.select("df", "string='foo'")
expected = df_new[df_new.string == "foo"]
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"], min_itemsize=30)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"values": 30}
)
check_col("df", "string", 30)
with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
_maybe_remove(store, "df")
store.append(
"df",
df_new,
data_columns=["string", "string2"],
min_itemsize={"string": 30, "string2": 40, "values": 50},
)
check_col("df", "string", 30)
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
df_new["string"] = "foo"
sl = df_new.columns.get_loc("string")
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = "bar"
df_new["string2"] = "foo"
sl = df_new.columns.get_loc("string2")
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
result = store.select(
"df", "string='foo' and string2='foo' and A>0 and B<0"
)
expected = df_new[
(df_new.string == "foo")
& (df_new.string2 == "foo")
& (df_new.A > 0)
& (df_new.B < 0)
]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select("df", "string='foo' and string2='cool'")
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc["string2"] = "cool"
df_dc["datetime"] = Timestamp("20010102")
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan
_maybe_remove(store, "df_dc")
store.append(
"df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
)
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
df_dc = DataFrame(
np.random.randn(8, 3), index=index, columns=["A", "B", "C"]
)
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs()
df_dc["string2"] = "cool"
# on-disk operations
store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected)
def test_create_table_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append(
"f2", df, index=["string"], data_columns=["string", "string2"]
)
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
with pytest.raises(TypeError):
store.create_table_index("f2")
def test_append_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.append("mi", df)
result = store.select("mi")
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select("mi", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
with ensure_clean_path("test.hdf") as path:
df.to_hdf(path, "df", format="table")
result = read_hdf(path, "df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self, setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
store["df"], expected, check_index_type=True, check_column_type=True
)
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
with pytest.raises(ValueError):
store.put("df2", df, format="table", data_columns=["A"])
with pytest.raises(ValueError):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
tm.assert_frame_equal(store["df2"], concat((df, df)))
# non_index_axes name
df = DataFrame(
np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")
)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
def test_store_multiindex(self, setup_path):
# validate multi-index names
# GH 5527
with ensure_clean_store(setup_path) as store:
def make_index(names=None):
return MultiIndex.from_tuples(
[
(datetime.datetime(2013, 12, d), s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)
],
names=names,
)
# no names
_maybe_remove(store, "df")
df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# partial names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", None, None]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# series
_maybe_remove(store, "s")
s = Series(np.zeros(12), index=make_index(["date", None, None]))
store.append("s", s)
xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
tm.assert_series_equal(store.select("s"), xp)
# dup with column
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "a", "t"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# dup within level
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "date", "date"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# fully names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "s", "t"]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
def test_select_columns_in_where(self, setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_mi_data_columns(self, setup_path):
# GH 14435
idx = pd.MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self, setup_path):
df = tm.makeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df)
with pytest.raises(TypeError):
store.select("df", columns=["A"])
with pytest.raises(TypeError):
store.select("df", where=[("columns=A")])
@td.xfail_non_writeable
def test_append_misc(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df, chunksize=1)
result = store.select("df")
tm.assert_frame_equal(result, df)
store.append("df1", df, expectedrows=10)
result = store.select("df1")
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(setup_path, mode="w") as store:
store.append("obj", obj, chunksize=c)
result = store.select("obj")
comparator(result, obj)
df = tm.makeDataFrame()
df["string"] = "foo"
df["float322"] = 1.0
df["float322"] = df["float322"].astype("float32")
df["bool"] = df["float322"] > 0
df["time1"] = Timestamp("20130101")
df["time2"] = Timestamp("20130102")
check(df, tm.assert_frame_equal)
# empty frame, GH4273
with ensure_clean_store(setup_path) as store:
# 0 len
df_empty = DataFrame(columns=list("ABC"))
store.append("df", df_empty)
with pytest.raises(KeyError, match="'No object named df in the file'"):
store.select("df")
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list("ABC"))
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
store.append("df", df_empty)
tm.assert_frame_equal(store.select("df"), df)
# store
df = DataFrame(columns=list("ABC"))
store.put("df2", df)
tm.assert_frame_equal(store.select("df2"), df)
def test_append_raise(self, setup_path):
with ensure_clean_store(setup_path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# multiple invalid columns
df["invalid2"] = [["a"]] * len(df)
df["invalid3"] = [["a"]] * len(df)
with pytest.raises(TypeError):
store.append("df", df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df["invalid"] = s
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# directly ndarray
with pytest.raises(TypeError):
store.append("df", np.arange(10))
# series directly
with pytest.raises(TypeError):
store.append("df", Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append("df", df)
df["foo"] = "foo"
with pytest.raises(ValueError):
store.append("df", df)
def test_table_index_incompatible_dtypes(self, setup_path):
df1 = DataFrame({"a": [1, 2, 3]})
df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
with ensure_clean_store(setup_path) as store:
store.put("frame", df1, format="table")
with pytest.raises(TypeError):
store.put("frame", df2, format="table", append=True)
def test_table_values_dtypes_roundtrip(self, setup_path):
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
store.append("df_i8", df2)
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
# incompatible dtype
with pytest.raises(ValueError):
store.append("df_i8", df1)
# check creation/storage/retrieval of float32 (a bit hacky to
# actually create them though)
df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
store.append("df_f4", df1)
tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
assert df1.dtypes[0] == "float32"
# check with mixed dtypes
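# np.random.randint(5) is a scalar here, so each column is a length-1 Series of the given dtype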
df1 = DataFrame(
{
c: Series(np.random.randint(5), dtype=c)
for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
}
)
df1["string"] = "foo"
df1["float322"] = 1.0
df1["float322"] = df1["float322"].astype("float32")
df1["bool"] = df1["float32"] > 0
df1["time1"] = Timestamp("20130101")
df1["time2"] = Timestamp("20130102")
store.append("df_mixed_dtypes1", df1)
result = store.select("df_mixed_dtypes1").dtypes.value_counts()
result.index = [str(i) for i in result.index]
expected = Series(
{
"float32": 2,
"float64": 1,
"int32": 1,
"bool": 1,
"int16": 1,
"int8": 1,
"int64": 1,
"object": 1,
"datetime64[ns]": 2,
}
)
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self, setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_unimplemented_dtypes_table_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
dtypes = [("date", datetime.date(2001, 1, 2))]
# currently not supported dtypes ####
for n, f in dtypes:
df = tm.makeDataFrame()
df[n] = f
with pytest.raises(TypeError):
store.append("df1_{n}".format(n=n), df)
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["datetime1"] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
# this fails because we have a date in the object block
with pytest.raises(TypeError):
store.append("df_unimplemented", df)
@td.xfail_non_writeable
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion("1.15.0"),
reason=(
"Skipping pytables test when numpy version is "
"exactly equal to 1.15.0: gh-22098"
),
)
def test_calendar_roundtrip_issue(self, setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
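# map integer weekday codes (Mon=0 .. Sun=6) to their names via a positional lookup Series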
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self, setup_path):
# GH 17618
time = pd.Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="fixed")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
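# 2000-01-01 01:00 US/Eastern == 2000-01-01 06:00 UTC == 946706400000000000 ns since the epoch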
assert recons.index[0].value == 946706400000000000
def test_append_with_timedelta(self, setup_path):
# GH 3577
# append timedelta
df = DataFrame(
dict(
A=Timestamp("20130101"),
B=[
Timestamp("20130101") + timedelta(days=i, seconds=10)
for i in range(10)
],
)
)
df["C"] = df["A"] - df["B"]
df.loc[3:5, "C"] = np.nan
with ensure_clean_store(setup_path) as store:
# table
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<100000")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<pd.Timedelta('-3D')")
tm.assert_frame_equal(result, df.iloc[3:])
result = store.select("df", "C<'-3D'")
tm.assert_frame_equal(result, df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
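# C = -(i days + 10 s), so C < -500000 s keeps rows 6-9 once the NaT rows (3-5) are dropped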
result = store.select("df", "C<'-500000s'")
result = result.dropna(subset=["C"])
tm.assert_frame_equal(result, df.iloc[6:])
result = store.select("df", "C<'-3.5D'")
result = result.iloc[1:]
tm.assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, "df2")
store.put("df2", df)
result = store.select("df2")
tm.assert_frame_equal(result, df)
def test_remove(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
_maybe_remove(store, "b")
assert len(store) == 0
# nonexistence
with pytest.raises(
KeyError, match="'No object named a_nonexistent_store in the file'"
):
store.remove("a_nonexistent_store")
# pathing
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "foo")
_maybe_remove(store, "b/foo")
assert len(store) == 1
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "b")
assert len(store) == 1
# __delitem__
store["a"] = ts
store["b"] = df
del store["a"]
del store["b"]
assert len(store) == 0
def test_invalid_terms(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[0:4, "string"] = "bar"
store.put("df", df, format="table")
# some invalid terms
with pytest.raises(TypeError):
Term()
# more invalid
with pytest.raises(ValueError):
store.select("df", "df.index[3]")
with pytest.raises(SyntaxError):
store.select("df", "index>")
# from the docs
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table", data_columns=True)
# check ok
read_hdf(
path, "dfq", where="index>Timestamp('20130104') & columns=['A', 'B']"
)
read_hdf(path, "dfq", where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table")
with pytest.raises(ValueError):
read_hdf(path, "dfq", where="A>0 or C>0")
def test_same_name_scoping(self, setup_path):
with ensure_clean_store(setup_path) as store:
import pandas as pd
df = DataFrame(
np.random.randn(20, 2), index=pd.date_range("20130101", periods=20)
)
store.put("df", df, format="table")
expected = df[df.index > pd.Timestamp("20130105")]
import datetime # noqa
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
from datetime import datetime # noqa
# technically an error, but allow it
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
result = store.select("df", "index>datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
def test_series(self, setup_path):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal, path=setup_path)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object))
self._check_roundtrip(
ts3, tm.assert_series_equal, path=setup_path, check_index_type=False
)
def test_float_index(self, setup_path):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
@td.xfail_non_writeable
def test_tuple_index(self, setup_path):
# GH #492
col = np.arange(10)
idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
self._check_roundtrip(DF, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(self, setup_path):
with catch_warnings(record=True):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(
l, r, check_dtype=True, check_index_type=True, check_series_type=True
)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1.23, "b"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(
values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)]
)
self._check_roundtrip(ser, func, path=setup_path)
def test_timeseries_preepoch(self, setup_path):
dr = bdate_range("1/1/1940", "1/1/1960")
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
except OverflowError:
pytest.skip("known failer on some windows platforms")
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_frame(self, compression, setup_path):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
self._check_roundtrip(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(
tdf, tm.assert_frame_equal, path=setup_path, compression=compression
)
with ensure_clean_store(setup_path) as store:
# not consolidated
df["foo"] = np.random.randn(len(df))
store["df"] = df
recons = store["df"]
assert recons._data.is_consolidated()
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
def test_empty_series_frame(self, setup_path):
s0 = Series(dtype=object)
s1 = Series(name="myseries", dtype=object)
df0 = DataFrame()
df1 = DataFrame(index=["a", "b", "c"])
df2 = DataFrame(columns=["d", "e", "f"])
self._check_roundtrip(s0, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(s1, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(df0, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"dtype", [np.int64, np.float64, np.object, "m8[ns]", "M8[ns]"]
)
def test_empty_series(self, dtype, setup_path):
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_can_serialize_dates(self, setup_path):
rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
def test_store_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
frame = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path)
# check that the names are stored
with ensure_clean_store(setup_path) as store:
store["frame"] = frame
recons = store["frame"]
tm.assert_frame_equal(recons, frame)
def test_store_index_name(self, setup_path):
df = tm.makeDataFrame()
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store["frame"] = df
recons = store["frame"]
tm.assert_frame_equal(recons, df)
def test_store_index_name_with_tz(self, setup_path):
# GH 13884
df = pd.DataFrame({"A": [1, 2]})
df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788])
df.index = df.index.tz_localize("UTC")
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize("table_format", ["table", "fixed"])
def test_store_index_name_numpy_str(self, table_format, setup_path):
# GH #13492
idx = pd.Index(
pd.to_datetime([datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)]),
name="cols\u05d2",
)
idx1 = pd.Index(
pd.to_datetime([datetime.date(2010, 1, 1), datetime.date(2010, 1, 2)]),
name="rows\u05d0",
)
df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format=table_format)
df2 = read_hdf(path, "df")
tm.assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == str
assert type(df2.columns.name) == str
def test_store_series_name(self, setup_path):
df = tm.makeDataFrame()
series = df["A"]
with ensure_clean_store(setup_path) as store:
store["series"] = series
recons = store["series"]
tm.assert_series_equal(recons, series)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_store_mixed(self, compression, setup_path):
def _make_one():
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["int1"] = 1
df["int2"] = 2
return df._consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
with ensure_clean_store(setup_path) as store:
store["obj"] = df1
tm.assert_frame_equal(store["obj"], df1)
store["obj"] = df2
tm.assert_frame_equal(store["obj"], df2)
# check that can store Series of all of these types
self._check_roundtrip(
df1["obj1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["bool1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["int1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
@pytest.mark.filterwarnings(
"ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning"
)
def test_select_with_dups(self, setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_overwrite_node(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store["a"] = ts
tm.assert_series_equal(store["a"], ts)
def test_select(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# all a data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
dict(ts=bdate_range("2012-01-01", periods=300), A=np.random.randn(300))
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
dict(
A=np.random.rand(20),
B=np.random.rand(20),
index=np.arange(20, dtype="f8"),
)
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# not in first position float with NaN ok too
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
dict(
ts=bdate_range("2012-01-01", periods=300),
A=np.random.randn(300),
B=range(300),
users=["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ ["a{i:03d}".format(i=i) for i in range(100)],
)
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select(
"df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']"
)
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + ["a{i:03d}".format(i=i) for i in range(60)]
result = store.select(
"df", "ts>=Timestamp('2012-02-01') and users=selector"
)
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(self, setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = "index <= '{end_dt}'".format(end_dt=end_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = list(store.select("df", chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
|
_maybe_remove(store, "df")
|
pandas.tests.io.pytables.common._maybe_remove
|
from datetime import (
datetime,
timedelta,
)
from importlib import reload
import string
import sys
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
import pandas.util._test_decorators as td
from pandas import (
NA,
Categorical,
CategoricalDtype,
Index,
Interval,
NaT,
Series,
Timedelta,
Timestamp,
cut,
date_range,
)
import pandas._testing as tm
class TestAstypeAPI:
def test_arg_for_errors_in_astype(self):
# see GH#14878
ser = Series([1, 2, 3])
msg = (
r"Expected value of kwarg 'errors' to be one of \['raise', "
r"'ignore'\]\. Supplied value is 'False'"
)
with pytest.raises(ValueError, match=msg):
ser.astype(np.float64, errors=False)
ser.astype(np.int8, errors="raise")
@pytest.mark.parametrize("dtype_class", [dict, Series])
def test_astype_dict_like(self, dtype_class):
# see GH#7271
ser = Series(range(0, 10, 2), name="abc")
dt1 = dtype_class({"abc": str})
result = ser.astype(dt1)
expected = Series(["0", "2", "4", "6", "8"], name="abc")
tm.assert_series_equal(result, expected)
dt2 = dtype_class({"abc": "float64"})
result = ser.astype(dt2)
expected = Series([0.0, 2.0, 4.0, 6.0, 8.0], dtype="float64", name="abc")
tm.assert_series_equal(result, expected)
dt3 = dtype_class({"abc": str, "def": str})
msg = (
"Only the Series name can be used for the key in Series dtype "
r"mappings\."
)
with pytest.raises(KeyError, match=msg):
ser.astype(dt3)
dt4 = dtype_class({0: str})
with pytest.raises(KeyError, match=msg):
ser.astype(dt4)
# GH#16717
# if dtypes provided is empty, it should error
if dtype_class is Series:
dt5 = dtype_class({}, dtype=object)
else:
dt5 = dtype_class({})
with pytest.raises(KeyError, match=msg):
ser.astype(dt5)
class TestAstype:
@pytest.mark.parametrize("dtype", np.typecodes["All"])
def test_astype_empty_constructor_equality(self, dtype):
# see GH#15524
if dtype not in (
"S",
"V", # poor support (if any) currently
"M",
"m", # Generic timestamps raise a ValueError. Already tested.
):
init_empty = Series([], dtype=dtype)
with tm.assert_produces_warning(DeprecationWarning):
as_type_empty = Series([]).astype(dtype)
tm.assert_series_equal(init_empty, as_type_empty)
@pytest.mark.parametrize("dtype", [str, np.str_])
@pytest.mark.parametrize(
"series",
[
Series([string.digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
Series([string.digits * 10, tm.rands(63), tm.rands(64), np.nan, 1.0]),
],
)
def test_astype_str_map(self, dtype, series):
# see GH#4405
result = series.astype(dtype)
expected = series.map(str)
tm.assert_series_equal(result, expected)
def test_astype_float_to_period(self):
result = Series([np.nan]).astype("period[D]")
expected = Series([NaT], dtype="period[D]")
tm.assert_series_equal(result, expected)
def test_astype_no_pandas_dtype(self):
# https://github.com/pandas-dev/pandas/pull/24866
ser = Series([1, 2], dtype="int64")
# Don't have PandasDtype in the public API, so we use `.array.dtype`,
# which is a PandasDtype.
result = ser.astype(ser.array.dtype)
tm.assert_series_equal(result, ser)
@pytest.mark.parametrize("dtype", [np.datetime64, np.timedelta64])
def test_astype_generic_timestamp_no_frequency(self, dtype, request):
# see GH#15524, GH#15987
data = [1]
s = Series(data)
if np.dtype(dtype).name not in ["timedelta64", "datetime64"]:
mark = pytest.mark.xfail(reason="GH#33890 Is assigned ns unit")
request.node.add_marker(mark)
msg = (
fr"The '{dtype.__name__}' dtype has no unit\. "
fr"Please pass in '{dtype.__name__}\[ns\]' instead."
)
with pytest.raises(ValueError, match=msg):
s.astype(dtype)
def test_astype_dt64_to_str(self):
# GH#10442 : testing astype(str) is correct for Series/DatetimeIndex
dti = date_range("2012-01-01", periods=3)
result = Series(dti).astype(str)
expected = Series(["2012-01-01", "2012-01-02", "2012-01-03"], dtype=object)
tm.assert_series_equal(result, expected)
def test_astype_dt64tz_to_str(self):
# GH#10442 : testing astype(str) is correct for Series/DatetimeIndex
dti_tz = date_range("2012-01-01", periods=3, tz="US/Eastern")
result = Series(dti_tz).astype(str)
expected = Series(
[
"2012-01-01 00:00:00-05:00",
"2012-01-02 00:00:00-05:00",
"2012-01-03 00:00:00-05:00",
],
dtype=object,
)
tm.assert_series_equal(result, expected)
def test_astype_datetime(self):
s = Series(iNaT, dtype="M8[ns]", index=range(5))
s = s.astype("O")
assert s.dtype == np.object_
s = Series([datetime(2001, 1, 2, 0, 0)])
s = s.astype("O")
assert s.dtype == np.object_
s = Series([datetime(2001, 1, 2, 0, 0) for i in range(3)])
s[1] = np.nan
assert s.dtype == "M8[ns]"
s = s.astype("O")
assert s.dtype == np.object_
def test_astype_datetime64tz(self):
s = Series(date_range("20130101", periods=3, tz="US/Eastern"))
# astype
result = s.astype(object)
expected = Series(s.astype(object), dtype=object)
tm.assert_series_equal(result, expected)
result = Series(s.values).dt.tz_localize("UTC").dt.tz_convert(s.dt.tz)
tm.assert_series_equal(result, s)
# astype - object, preserves on construction
result = Series(s.astype(object))
expected = s.astype(object)
tm.assert_series_equal(result, expected)
# astype - datetime64[ns, tz]
with tm.assert_produces_warning(FutureWarning):
# dt64->dt64tz astype deprecated
result = Series(s.values).astype("datetime64[ns, US/Eastern]")
tm.assert_series_equal(result, s)
with tm.assert_produces_warning(FutureWarning):
# dt64->dt64tz astype deprecated
result = Series(s.values).astype(s.dtype)
tm.assert_series_equal(result, s)
result = s.astype("datetime64[ns, CET]")
expected = Series(date_range("20130101 06:00:00", periods=3, tz="CET"))
tm.assert_series_equal(result, expected)
def test_astype_str_cast_dt64(self):
# see GH#9757
ts = Series([Timestamp("2010-01-04 00:00:00")])
s = ts.astype(str)
expected = Series(["2010-01-04"])
tm.assert_series_equal(s, expected)
ts = Series([Timestamp("2010-01-04 00:00:00", tz="US/Eastern")])
s = ts.astype(str)
expected = Series(["2010-01-04 00:00:00-05:00"])
tm.assert_series_equal(s, expected)
def test_astype_str_cast_td64(self):
# see GH#9757
td = Series([Timedelta(1, unit="d")])
ser = td.astype(str)
expected = Series(["1 days"])
tm.assert_series_equal(ser, expected)
def test_dt64_series_astype_object(self):
dt64ser = Series(date_range("20130101", periods=3))
result = dt64ser.astype(object)
assert isinstance(result.iloc[0], datetime)
assert result.dtype == np.object_
def test_td64_series_astype_object(self):
tdser = Series(["59 Days", "59 Days", "NaT"], dtype="timedelta64[ns]")
result = tdser.astype(object)
assert isinstance(result.iloc[0], timedelta)
assert result.dtype == np.object_
@pytest.mark.parametrize(
"data, dtype",
[
(["x", "y", "z"], "string"),
pytest.param(
["x", "y", "z"],
"arrow_string",
marks=td.skip_if_no("pyarrow", min_version="1.0.0"),
),
(["x", "y", "z"], "category"),
(3 * [Timestamp("2020-01-01", tz="UTC")], None),
(3 * [Interval(0, 1)], None),
],
)
@pytest.mark.parametrize("errors", ["raise", "ignore"])
def test_astype_ignores_errors_for_extension_dtypes(self, data, dtype, errors):
# https://github.com/pandas-dev/pandas/issues/35471
from pandas.core.arrays.string_arrow import ArrowStringDtype # noqa: F401
ser = Series(data, dtype=dtype)
if errors == "ignore":
expected = ser
result = ser.astype(float, errors="ignore")
tm.assert_series_equal(result, expected)
else:
msg = "(Cannot cast)|(could not convert)"
with pytest.raises((ValueError, TypeError), match=msg):
ser.astype(float, errors=errors)
@pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64])
def test_astype_from_float_to_str(self, dtype):
# https://github.com/pandas-dev/pandas/issues/36451
s = Series([0.1], dtype=dtype)
result = s.astype(str)
expected = Series(["0.1"])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"value, string_value",
[
(None, "None"),
(np.nan, "nan"),
(NA, "<NA>"),
],
)
def test_astype_to_str_preserves_na(self, value, string_value):
# https://github.com/pandas-dev/pandas/issues/36904
s = Series(["a", "b", value], dtype=object)
result = s.astype(str)
expected = Series(["a", "b", string_value], dtype=object)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["float32", "float64", "int64", "int32"])
def test_astype(self, dtype):
s = Series(np.random.randn(5), name="foo")
as_typed = s.astype(dtype)
assert as_typed.dtype == dtype
assert as_typed.name == s.name
@pytest.mark.parametrize("value", [np.nan, np.inf])
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
def test_astype_cast_nan_inf_int(self, dtype, value):
# gh-14265: check NaN and inf raise error when converting to int
msg = "Cannot convert non-finite values \\(NA or inf\\) to integer"
s = Series([value])
with pytest.raises(ValueError, match=msg):
s.astype(dtype)
@pytest.mark.parametrize("dtype", [int, np.int8, np.int64])
def test_astype_cast_object_int_fail(self, dtype):
arr = Series(["car", "house", "tree", "1"])
msg = r"invalid literal for int\(\) with base 10: 'car'"
with pytest.raises(ValueError, match=msg):
arr.astype(dtype)
def test_astype_cast_object_int(self):
arr = Series(["1", "2", "3", "4"], dtype=object)
result = arr.astype(int)
tm.assert_series_equal(result, Series(np.arange(1, 5)))
def test_astype_unicode(self):
# see GH#7758: A bit of magic is required to set
# default encoding to utf-8
digits = string.digits
test_series = [
Series([digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
Series(["データーサイエンス、お前はもう死んでいる"]),
]
former_encoding = None
if sys.getdefaultencoding() == "utf-8":
test_series.append(Series(["野菜食べないとやばい".encode()]))
for s in test_series:
res = s.astype("unicode")
expec = s.map(str)
tm.assert_series_equal(res, expec)
# Restore the former encoding
if former_encoding is not None and former_encoding != "utf-8":
reload(sys)
sys.setdefaultencoding(former_encoding)
def test_astype_bytes(self):
# GH#39474
result = Series(["foo", "bar", "baz"]).astype(bytes)
assert result.dtypes == np.dtype("S3")
class TestAstypeString:
@pytest.mark.parametrize(
"data, dtype",
[
([True, NA], "boolean"),
(["A", NA], "category"),
(["2020-10-10", "2020-10-10"], "datetime64[ns]"),
(["2020-10-10", "2020-10-10", NaT], "datetime64[ns]"),
(
["2012-01-01 00:00:00-05:00", NaT],
"datetime64[ns, US/Eastern]",
),
([1, None], "UInt16"),
(["1/1/2021", "2/1/2021"], "period[M]"),
(["1/1/2021", "2/1/2021", NaT], "period[M]"),
(["1 Day", "59 Days", NaT], "timedelta64[ns]"),
# currently no way to parse IntervalArray from a list of strings
],
)
def test_astype_string_to_extension_dtype_roundtrip(self, data, dtype, request):
if dtype == "boolean" or (
dtype in ("period[M]", "datetime64[ns]", "timedelta64[ns]") and NaT in data
):
mark = pytest.mark.xfail(
reason="TODO StringArray.astype() with missing values #GH40566"
)
request.node.add_marker(mark)
# GH-40351
s = Series(data, dtype=dtype)
tm.assert_series_equal(s, s.astype("string").astype(dtype))
class TestAstypeCategorical:
def test_astype_categorical_to_other(self):
cat = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)])
ser = Series(np.random.RandomState(0).randint(0, 10000, 100)).sort_values()
ser = cut(ser, range(0, 10500, 500), right=False, labels=cat)
expected = ser
tm.assert_series_equal(ser.astype("category"), expected)
tm.assert_series_equal(ser.astype(CategoricalDtype()), expected)
msg = r"Cannot cast object dtype to float64"
with pytest.raises(ValueError, match=msg):
ser.astype("float64")
cat = Series(Categorical(["a", "b", "b", "a", "a", "c", "c", "c"]))
exp = Series(["a", "b", "b", "a", "a", "c", "c", "c"])
tm.assert_series_equal(cat.astype("str"), exp)
s2 = Series(Categorical(["1", "2", "3", "4"]))
exp2 = Series([1, 2, 3, 4]).astype("int")
tm.assert_series_equal(s2.astype("int"), exp2)
        # objects don't sort correctly, so just compare that we have the same
# values
def cmp(a, b):
tm.assert_almost_equal(np.sort(np.unique(a)), np.sort(np.unique(b)))
expected = Series(np.array(ser.values), name="value_group")
cmp(ser.astype("object"), expected)
cmp(ser.astype(np.object_), expected)
# array conversion
tm.assert_almost_equal(np.array(ser), np.array(ser.values))
tm.assert_series_equal(ser.astype("category"), ser)
tm.assert_series_equal(ser.astype(CategoricalDtype()), ser)
roundtrip_expected = ser.cat.set_categories(
ser.cat.categories.sort_values()
).cat.remove_unused_categories()
result = ser.astype("object").astype("category")
tm.assert_series_equal(result, roundtrip_expected)
result = ser.astype("object").astype(CategoricalDtype())
tm.assert_series_equal(result, roundtrip_expected)
def test_astype_categorical_invalid_conversions(self):
# invalid conversion (these are NOT a dtype)
cat = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)])
ser = Series(np.random.randint(0, 10000, 100)).sort_values()
ser = cut(ser, range(0, 10500, 500), right=False, labels=cat)
msg = (
"dtype '<class 'pandas.core.arrays.categorical.Categorical'>' "
"not understood"
)
with pytest.raises(TypeError, match=msg):
ser.astype(Categorical)
with pytest.raises(TypeError, match=msg):
ser.astype("object").astype(Categorical)
def test_astype_categoricaldtype(self):
s = Series(["a", "b", "a"])
result = s.astype(CategoricalDtype(["a", "b"], ordered=True))
expected = Series(Categorical(["a", "b", "a"], ordered=True))
tm.assert_series_equal(result, expected)
result = s.astype(CategoricalDtype(["a", "b"], ordered=False))
expected = Series(Categorical(["a", "b", "a"], ordered=False))
tm.assert_series_equal(result, expected)
result = s.astype(CategoricalDtype(["a", "b", "c"], ordered=False))
expected = Series(
Categorical(["a", "b", "a"], categories=["a", "b", "c"], ordered=False)
)
tm.assert_series_equal(result, expected)
tm.assert_index_equal(result.cat.categories, Index(["a", "b", "c"]))
@pytest.mark.parametrize("name", [None, "foo"])
@pytest.mark.parametrize("dtype_ordered", [True, False])
@pytest.mark.parametrize("series_ordered", [True, False])
def test_astype_categorical_to_categorical(
self, name, dtype_ordered, series_ordered
):
# GH#10696, GH#18593
s_data = list("abcaacbab")
s_dtype = CategoricalDtype(list("bac"), ordered=series_ordered)
s = Series(s_data, dtype=s_dtype, name=name)
# unspecified categories
dtype = CategoricalDtype(ordered=dtype_ordered)
result = s.astype(dtype)
exp_dtype = CategoricalDtype(s_dtype.categories, dtype_ordered)
expected = Series(s_data, name=name, dtype=exp_dtype)
tm.assert_series_equal(result, expected)
# different categories
dtype = CategoricalDtype(list("adc"), dtype_ordered)
result = s.astype(dtype)
expected = Series(s_data, name=name, dtype=dtype)
tm.assert_series_equal(result, expected)
if dtype_ordered is False:
# not specifying ordered, so only test once
expected = s
result = s.astype("category")
tm.assert_series_equal(result, expected)
def test_astype_bool_missing_to_categorical(self):
# GH-19182
s = Series([True, False, np.nan])
assert s.dtypes == np.object_
result = s.astype(CategoricalDtype(categories=[True, False]))
expected = Series(Categorical([True, False, np.nan], categories=[True, False]))
tm.assert_series_equal(result, expected)
def test_astype_categories_raises(self):
# deprecated GH#17636, removed in GH#27141
s = Series(["a", "b", "a"])
with pytest.raises(TypeError, match="got an unexpected"):
s.astype("category", categories=["a", "b"], ordered=True)
@pytest.mark.parametrize("items", [["a", "b", "c", "a"], [1, 2, 3, 1]])
def test_astype_from_categorical(self, items):
ser =
|
Series(items)
|
pandas.Series
|
import json
import numpy as np
from collections import OrderedDict
from src.evaluation.summary_loader import load_processed_dataset
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
sns.set()
sns.set_style("darkgrid")
n_videos = 50
videos = {}
n_splits = 5
x_axis = []
y_axis = []
df = pd.DataFrame(columns=['Videos', 'F1-scores', 'Split Type'])
# original splits
for split in range(n_splits):
path = '../results/TVSum/video_scores/original splits/video_scores{}.txt'.format(split)
print(path)
with open(path, 'r') as infile:
videos = json.load(infile)
print(videos.keys())
for key in videos.keys():
# d = {'Videos': key, 'F1-scores': videos[key]}
d =
|
pd.Series({'Videos': key, 'F1-scores': videos[key], 'Split Type': 'Original splits'})
|
pandas.Series
|
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
from sklearn.model_selection import train_test_split
################Stage-1: Sentence Level Classification
df_train = pd.read_csv('ngramsTrain.csv',header=None)
df_test = pd.read_csv('ngramsTest.csv',header=None)
#Encoding 9 classes for classification
mapping = {"bn_en_":0,"en_":1,"gu_en_":2,"hi_en_":3,"kn_en_":4,"ml_en_":5,"mr_en_":6,"ta_en_":7,"te_en_":8}
classes = ["bn_en_","en_","gu_en_","hi_en_","kn_en_","ml_en_","mr_en_","ta_en_","te_en_"]
languages = ["bengali","english","gujarati","hindi","kannada","malayalam","marathi","tamil","telugu"]
print("Building Sentence Level Classifier..........")
df_train = df_train.replace(mapping)
df_test = df_test.replace(mapping)
y_train = df_train[0]
x_train = df_train[1]
y_test = df_test[0]
x_test = df_test[1]
cv = CountVectorizer()
cv.fit(x_train)
new_x = cv.transform(x_train)
train_dataset = new_x.toarray()
######Naive Bayes Classifier
nb = MultinomialNB()
nb.fit(train_dataset,y_train)
######MaxEntropy i.e., Multi Class Logistic Regression
lg = LogisticRegression(random_state=0)
lg.fit(train_dataset,y_train)
new_x_test = cv.transform(x_test)
y_pred = nb.predict(new_x_test)
y1_pred = lg.predict(new_x_test)
print("F1 Score of Naive bayes for sentence classifier is ",metrics.accuracy_score(y_test,y_pred))
print("F1 Score of Logistic Regression for sentence classifier is ",metrics.accuracy_score(y_test,y1_pred))
print("Successfully built sentence level classifier......")
####################Stage 2: Building Binary Classifiers
print("\nBuilding Binary Classifiers........")
def ngram_generator(n,word):
i=0
n_grams=''
j=1
while(j<=n):
i=0
while(i<=len(word)-j):
n_grams+=word[i:i+j]+' '
i+=1
j+=1
return n_grams
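# For reference: ngram_generator(3, "cat") returns "c a t ca at cat " --
# every character n-gram of length 1..n, space-separated, with a trailing space.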
#Constructing 9 dataframes from given language files
en_df = pd.read_csv('eng2.txt',header=None)
lis = []
for i in range(len(en_df)):
lis.append(1)
t = en_df[0]
en_df[0] = lis
en_df[1] = t
te_df = pd.read_csv('telugu.txt',header=None)
for i in range(len(te_df)):
te_df[0][i] = ngram_generator(5,te_df[0][i])
lis = []
for i in range(len(te_df)):
lis.append(8)
t = te_df[0]
te_df[0] = lis
te_df[1] = t
hi_df = pd.read_csv('hindiW.txt',header=None)
for i in range(len(hi_df)):
hi_df[0][i] = ngram_generator(5,hi_df[0][i])
lis = []
for i in range(len(hi_df)):
lis.append(3)
t = hi_df[0]
hi_df[0] = lis
hi_df[1] = t
ta_df = pd.read_csv('tamil.txt',header=None)
for i in range(len(ta_df)):
ta_df[0][i] = ngram_generator(5,ta_df[0][i])
lis = []
for i in range(len(ta_df)):
lis.append(7)
t = ta_df[0]
ta_df[0] = lis
ta_df[1] = t
mr_df = pd.read_csv('marati.txt',header=None)
for i in range(len(mr_df)):
mr_df[0][i] = ngram_generator(5,mr_df[0][i])
lis = []
for i in range(len(mr_df)):
lis.append(6)
t = mr_df[0]
mr_df[0] = lis
mr_df[1] = t
kn_df =
|
pd.read_csv('kannadaW.txt',header=None)
|
pandas.read_csv
|
import joblib
import pandas as pd
import numpy as np
import xgboost
from sklearn.metrics import roc_curve, auc
from src.config import config
from sklearn.model_selection import StratifiedKFold
from src.tuning.utils import find_optimal_cutoff_auc
from src.utils.model import Model
from src import __version__ as _version
class XgbClassifier(Model):
"""Model xgboost
"""
def __init__(self, tag=None):
if tag not in [None, 'training']:
raise Exception("tag must be None or 'training'")
self.model = None
self.threshold = 0.5 # For future prediction
self.roc_auc = None
self.num_boost_round = None
# Parameter of the model
# ==============================
self.params = {
'learning_rate': (0.01, 0.1),
'min_child_weight': (1, 20),
'gamma': (0, 5),
'subsample': (0.8, 1),
'colsample_bytree': (0.3, 0.8),
'max_depth': (2, 8)
}
self.num_boost_round = None
# Defined splitter of dataset for evaluate and bayes_evaluate function
# ==============================
self.kfold = 5
self.skf = StratifiedKFold(n_splits=self.kfold, random_state=42, shuffle=True)
# Variable
# ==============================
self.X_train = None
self.y_train = None
self.X_test = None
self.y_test = None
if tag == "training":
self.load_data()
def load_data(self):
# Load processed DataFrame
# ==============================
train = pd.read_csv(f"{config.DATASET_DIR}/{config.TRAINING_DATA_FILE}", sep=";")
test = pd.read_csv(f"{config.DATASET_DIR}/{config.TESTING_DATA_FILE}", sep=";")
# get columns name
features_col = [col for col in train.columns if col not in config.TARGET]
target_col = config.TARGET
# Dataset
self.X_train = train[features_col]
self.y_train = train[target_col]
self.X_test = test[features_col]
self.y_test = test[target_col]
def fit(self):
X =
|
pd.concat([self.X_train, self.X_test], axis=0)
|
pandas.concat
|
#!/usr/bin/env python
# coding: utf-8
import sys
sys.path.append("../")
import pandas as pd
import numpy as np
import pathlib
import pickle
import os
import itertools
import argparse
import logging
from sklearn.preprocessing import MultiLabelBinarizer
import helpers.feature_helpers as fh
OUTPUT_DF_TR = 'df_steps_tr.csv'
OUTPUT_DF_VAL = 'df_steps_val.csv'
OUTPUT_DF_TRAIN = 'df_steps_train.csv'
OUTPUT_DF_TEST = 'df_steps_test.csv'
OUTPUT_DF_SESSIONS = 'df_sessions.csv'
OUTPUT_ENCODING_DICT = 'enc_dicts_v02.pkl'
OUTPUT_CONFIG = 'config.pkl'
OUTPUT_NORMLIZATIONS_VAL = 'normalizations_val.pkl'
OUTPUT_NORMLIZATIONS_SUBM = 'normalizations_submission.pkl'
DEFAULT_FEATURES_DIR_NAME = 'nn_vnormal'
DEFAULT_PREPROC_DIR_NAME = 'data_processed_vnormal'
DEFAULT_SPLIT = 'normal'
def setup_args_parser():
parser = argparse.ArgumentParser(description='Create cv features')
parser.add_argument('--processed_data_dir_name', help='path to preprocessed data', default=DEFAULT_PREPROC_DIR_NAME)
parser.add_argument('--features_dir_name', help='features directory name', default=DEFAULT_FEATURES_DIR_NAME)
parser.add_argument('--split_option', help='split type. Options: normal, future', default=DEFAULT_SPLIT)
parser.add_argument('--debug', help='debug mode (verbose output and no saving)', action='store_true')
return parser
def setup_logger(debug):
logger = logging.getLogger()
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)
if debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
return logger
def main():
parser = setup_args_parser()
args = parser.parse_args()
logger = setup_logger(args.debug)
logger.info(100*'-')
logger.info('Running 014_Features_General_01.py')
logger.info(100*'-')
logger.info('split option: %s' % args.split_option)
logger.info('processed data directory name: %s' % args.processed_data_dir_name)
logger.info('features directory name: %s' % args.features_dir_name)
#Set up arguments
# # split_option
# if args.split_option=='normal':
# SPLIT_OPTION = 'normal'
# elif args.split_option=='future':
# SPLIT_OPTION = 'leave_out_only_clickout_with_nans'
is_normal = args.split_option=='normal'
# processed data path
DATA_PATH = '../data/' + args.processed_data_dir_name + '/'
#os.makedirs(DATA_PATH) if not os.path.exists(DATA_PATH) else None
logger.info('processed data path: %s' % DATA_PATH)
# features data path
FEATURES_PATH = '../features/' + args.features_dir_name + '/'
#os.makedirs(FEATURES_PATH) if not os.path.exists(FEATURES_PATH) else None
logger.info('features path: %s' % FEATURES_PATH)
# End of set up arguments
config = pickle.load(open(DATA_PATH+OUTPUT_CONFIG, "rb" ))
df_steps_tr = pd.read_csv(DATA_PATH+OUTPUT_DF_TR)
df_steps_val = pd.read_csv(DATA_PATH+OUTPUT_DF_VAL)
df_steps_train = pd.read_csv(DATA_PATH+OUTPUT_DF_TRAIN)
df_steps_test = pd.read_csv(DATA_PATH+OUTPUT_DF_TEST)
df_sessions = pd.read_csv(DATA_PATH+OUTPUT_DF_SESSIONS)
enc_dict = pickle.load(open(DATA_PATH+OUTPUT_ENCODING_DICT, "rb" ))
df_items =
|
pd.read_csv(FEATURES_PATH+'Item_Features.csv', index_col=['item_id_enc'])
|
pandas.read_csv
|
import os, sys
sys.path.insert(0, os.path.abspath(".."))
from features.get_temporal import get_temporal
from features.helper import helper
import seaborn as sn
import matplotlib.pyplot as plt
import pandas as pd
def plot_temporal(k):
    ## get churn and non-churn user ids and build a labelled dataframe
labels=helper()
x,y=labels.get_user_id(k)
c_id=x
nc_id=y
print(len(c_id),len(nc_id))
c_label=[1 for i in c_id]
nc_label=[0 for i in nc_id]
c_df=pd.DataFrame({'OwnerUserId': c_id,'label': c_label})
nc_df=pd.DataFrame({'OwnerUserId': nc_id,'label': nc_label})
fin_ldf=c_df.append(nc_df,ignore_index=True)
print(fin_ldf.shape)
##Get our features
t_df=get_temporal(k)
print(t_df.shape)
result_df =
|
pd.merge(t_df, fin_ldf, on='OwnerUserId')
|
pandas.merge
|
"""
python 3.8
portions of code and/or methodology based on https://github.com/thinkingmachines/ph-poverty-mapping
Extract features from OSM data
download OSM data from
http://download.geofabrik.de/asia/philippines.html#
buildings (polygons)
types : residential, damaged, commercial, industrial, education, health
For each type, we calculated
- the total number of buildings (count poly features intersecting with buffer)
- the total area of buildings (sum of area of poly features which intersect with buffer)
- the mean area of buildings (avg area of poly features which intersect with buffer)
- the proportion of the cluster area occupied by the buildings (ratio of total area of buildings which intersect with buffer to buffer area)
pois (points)
types: 100+ different types
For each type, we calculated
- the total number of each POI in the vicinity of the area (point in poly)
roads (lines)
types: primary, trunk, paved, unpaved, intersection
for each type of road, we calculated
- the distance to the closest road (point to line vertex dist)
- total number of roads (count line features which intersect with buffer)
- total road length (length of lines which intersect with buffer)
"""
import os
import math
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point
from shapely.ops import nearest_points
from sklearn.neighbors import BallTree
import numpy as np
project_dir = "/Users/sasanfaraj/Desktop/folders/AidData/PHL_WORK"
data_dir = os.path.join(project_dir, 'data')
date = "210101"
# >>>>>>>>>>>>>>>>>>>>
# DHS CLUSTERS
geom_label = "dhs-buffers"
geom_path = os.path.join(data_dir, 'dhs_buffers.geojson')
geom_id = "DHSID"
# load buffers/geom created during data prep
buffers_gdf = gpd.read_file(geom_path)
# calculate area of each buffer
# convert to UTM 51N (meters) first, then back to WGS84 (degrees)
buffers_gdf = buffers_gdf.to_crs("EPSG:32651") # UTM 51N
buffers_gdf["buffer_area"] = buffers_gdf.area
buffers_gdf = buffers_gdf.to_crs("EPSG:4326") # WGS84
# >>>>>>>>>>>>>>>>>>>>
# KC CLUSTERS
# geom_label = "kc-5km-buffers"
# geom_path = os.path.join(data_dir, 'kc_clusters_5km-buffer.geojson')
# geom_id = "cluster_name"
# # load point geom created during prep
# buffers_gdf = gpd.read_file(geom_path)
# buffers_gdf.columns = [i if i != "cluster" else "cluster_name" for i in buffers_gdf.columns]
# # calculate area of each buffer
# # convert to UTM 51N (meters) first, then back to WGS84 (degrees)
# buffers_gdf = buffers_gdf.to_crs("EPSG:32651") # UTM 51N
# buffers_gdf["buffer_area"] = buffers_gdf.area
# buffers_gdf = buffers_gdf.to_crs("EPSG:4326") # WGS84
# >>>>>>>>>>>>>>>>>>>>
# OSM PLACES
# geom_label = "osm-places-3km-buffers"
# geom_path = os.path.join(data_dir, 'osm-places_3km-buffer_{}.geojson'.format(date))
# geom_id = "osm_id"
# # load buffers/geom created during data prep
# buffers_gdf = gpd.read_file(geom_path)
# # calculate area of each buffer
# # convert to UTM 51N (meters) first, then back to WGS84 (degrees)
# buffers_gdf = buffers_gdf.to_crs("EPSG:32651") # UTM 51N
# buffers_gdf["buffer_area"] = buffers_gdf.area
# buffers_gdf = buffers_gdf.to_crs("EPSG:4326") # WGS84
# >>>>>>>>>>>>>>>>>>>>
# ---------------------------------------------------------
# pois
# count of each type of pois (100+) in each buffer
print("Running pois...")
osm_pois_shp_path = os.path.join(data_dir, 'osm/philippines-{}-free.shp/gis_osm_pois_free_1.shp'.format(date))
osm_pois_a_shp_path = os.path.join(data_dir, 'osm/philippines-{}-free.shp/gis_osm_pois_a_free_1.shp'.format(date))
raw_pois_geo = gpd.read_file(osm_pois_shp_path)
raw_pois_a_geo = gpd.read_file(osm_pois_a_shp_path)
pois_geo = pd.concat([raw_pois_geo, raw_pois_a_geo])
"""
# manually generate crosswalk
# first prep CSV with all types - can combine multiple OSM timesteps (see below)
# then in Excel/whatever, assign group to each type/fclass
type_df = pd.DataFrame({"type": list(set(pois_geo["fclass"]))})
type_df["group"]= 0
type_df.to_csv(os.path.join(project_dir, "OSM/crosswalks/pois_type_crosswalk.csv"), index=False, encoding="utf-8")
"""
# load crosswalk for types and assign any not grouped to "other"
pois_type_crosswalk_path = os.path.join(project_dir, 'OSM/osm_code/crosswalks/pois_type_crosswalk.csv')
pois_type_crosswalk_df = pd.read_csv(pois_type_crosswalk_path)
pois_type_crosswalk_df.loc[pois_type_crosswalk_df["group"] == "0", "group"] = "other"
# merge new classification and assign any features without a type to unclassified
pois_geo = pois_geo.merge(pois_type_crosswalk_df, left_on="fclass", right_on="type", how="left")
pois_geo.loc[pois_geo["fclass"].isna(), "group"] = "unclassified"
# show breakdown of groups
print(pois_geo.group.value_counts())
# group_field = "fclass"
group_field = "group"
# split by group
# pois_group_list = ["all"] + [i for i in set(pois_geo[group_field])]
pois_group_list = [i for i in set(pois_geo[group_field])]
# copy of buffers gdf to use for output
buffers_gdf_pois = buffers_gdf.copy(deep=True)
for group in pois_group_list:
print(group)
    # subset by group
if group == "all":
        pois_geo_subset = pois_geo.copy(deep=True)
else:
pois_geo_subset = pois_geo.loc[pois_geo[group_field] == group].reset_index().copy(deep=True)
# query to find pois in each buffer
bquery = pois_geo_subset.sindex.query_bulk(buffers_gdf.geometry)
    # pois dataframe where each row pairs a cluster with one poi intersecting it (a cluster can appear in multiple rows)
bquery_df = pd.DataFrame({"cluster": bquery[0], "pois": bquery[1]})
# add pois data to spatial query dataframe
bquery_full = bquery_df.merge(pois_geo_subset, left_on="pois", right_index=True, how="left")
# aggregate spatial query df with pois info, by cluster
bquery_agg = bquery_full.groupby("cluster").agg({"pois": "count"})
bquery_agg.columns = [group + "_pois_count"]
    # join the aggregated counts for this pois group back onto the original buffers dataframe
z1 = buffers_gdf.merge(bquery_agg, left_index=True, right_on="cluster", how="left")
# not each cluster will have relevant pois, set those to zero
z1.fillna(0, inplace=True)
# set index and drop unnecessary columns
if z1.index.name != "cluster": z1.set_index("cluster", inplace=True)
z2 = z1[group + "_pois_count"]
# merge group columns back to main cluster dataframe
buffers_gdf_pois = buffers_gdf_pois.merge(z2, left_index=True, right_index=True)
# output final features
pois_feature_cols = [geom_id] + [i for i in buffers_gdf_pois.columns if "_pois_" in i]
pois_features = buffers_gdf_pois[pois_feature_cols].copy(deep=True)
pois_features_path = os.path.join(data_dir, 'osm/features/{}_pois_{}.csv'.format(geom_label, date))
pois_features.to_csv(pois_features_path, index=False, encoding="utf-8")
# ---------------------------------------------------------
# traffic
# count of each type of traffic item in each buffer
print("Running traffic...")
osm_traffic_shp_path = os.path.join(data_dir, 'osm/philippines-{}-free.shp/gis_osm_traffic_free_1.shp'.format(date))
osm_traffic_a_shp_path = os.path.join(data_dir, 'osm/philippines-{}-free.shp/gis_osm_traffic_a_free_1.shp'.format(date))
raw_traffic_geo = gpd.read_file(osm_traffic_shp_path)
raw_traffic_a_geo = gpd.read_file(osm_traffic_a_shp_path)
traffic_geo = pd.concat([raw_traffic_geo, raw_traffic_a_geo])
"""
# manually generate crosswalk
# first prep CSV with all types - can combine multiple OSM timesteps (see below)
# then in Excel/whatever, assign group to each type/fclass
type_df = pd.DataFrame({"type": list(set(traffic_geo["fclass"]))})
type_df["group"]= 0
type_df.to_csv(os.path.join(project_dir, "OSM/crosswalks/traffic_type_crosswalk.csv"), index=False, encoding="utf-8")
"""
# load crosswalk for types and assign any not grouped to "other"
traffic_type_crosswalk_path = os.path.join(project_dir, 'OSM/osm_code/crosswalks/traffic_type_crosswalk.csv')
traffic_type_crosswalk_df = pd.read_csv(traffic_type_crosswalk_path)
traffic_type_crosswalk_df.loc[traffic_type_crosswalk_df["group"] == "0", "group"] = "other"
# merge new classification and assign any features without a type to unclassified
traffic_geo = traffic_geo.merge(traffic_type_crosswalk_df, left_on="fclass", right_on="type", how="left")
traffic_geo.loc[traffic_geo["fclass"].isna(), "group"] = "unclassified"
# show breakdown of groups
print(traffic_geo.group.value_counts())
# group_field = "fclass"
group_field = "group"
# split by group
# traffic_group_list = ["all"] + [i for i in set(traffic_geo[group_field])]
traffic_group_list = [i for i in set(traffic_geo[group_field])]
# copy of buffers gdf to use for output
buffers_gdf_traffic = buffers_gdf.copy(deep=True)
for group in traffic_group_list:
print(group)
    # subset by group
if group == "all":
traffic_geo_subset = traffic_geo.copy(deep=True)
else:
traffic_geo_subset = traffic_geo.loc[traffic_geo[group_field] == group].reset_index().copy(deep=True)
# query to find traffic in each buffer
bquery = traffic_geo_subset.sindex.query_bulk(buffers_gdf.geometry)
    # traffic dataframe where each row pairs a cluster with one traffic feature intersecting it (a cluster can appear in multiple rows)
bquery_df = pd.DataFrame({"cluster": bquery[0], "traffic": bquery[1]})
# add traffic data to spatial query dataframe
bquery_full = bquery_df.merge(traffic_geo_subset, left_on="traffic", right_index=True, how="left")
# aggregate spatial query df with traffic info, by cluster
bquery_agg = bquery_full.groupby("cluster").agg({"traffic": "count"})
bquery_agg.columns = [group + "_traffic_count"]
    # join the aggregated counts for this traffic group back onto the original buffers dataframe
z1 = buffers_gdf.merge(bquery_agg, left_index=True, right_on="cluster", how="left")
# not each cluster will have relevant traffic, set those to zero
z1.fillna(0, inplace=True)
# set index and drop unnecessary columns
if z1.index.name != "cluster": z1.set_index("cluster", inplace=True)
z2 = z1[group + "_traffic_count"]
# merge group columns back to main cluster dataframe
buffers_gdf_traffic = buffers_gdf_traffic.merge(z2, left_index=True, right_index=True)
# output final features
traffic_feature_cols = [geom_id] + [i for i in buffers_gdf_traffic.columns if "_traffic_" in i]
traffic_features = buffers_gdf_traffic[traffic_feature_cols].copy(deep=True)
traffic_features_path = os.path.join(data_dir, 'osm/features/{}_traffic_{}.csv'.format(geom_label, date))
traffic_features.to_csv(traffic_features_path, index=False, encoding="utf-8")
# ---------------------------------------------------------
# transport
# count of each type of transport item in each buffer
print("Running transport...")
osm_transport_shp_path = os.path.join(data_dir, 'osm/philippines-{}-free.shp/gis_osm_transport_free_1.shp'.format(date))
osm_transport_a_shp_path = os.path.join(data_dir, 'osm/philippines-{}-free.shp/gis_osm_transport_a_free_1.shp'.format(date))
raw_transport_geo = gpd.read_file(osm_transport_shp_path)
raw_transport_a_geo = gpd.read_file(osm_transport_a_shp_path)
transport_geo = pd.concat([raw_transport_geo, raw_transport_a_geo])
"""
manually generate crosswalk
first prep CSV with all types - can combine multiple OSM timesteps (see below)
then in Excel/whatever, assign group to each type/fclass
type_df = pd.DataFrame({"type": list(set(transport_geo["fclass"]))})
type_df["group"]= 0
type_df.to_csv(os.path.join(project_dir, "OSM/crosswalks/transport_type_crosswalk.csv"), index=False, encoding="utf-8")
"""
# load crosswalk for types and assign any not grouped to "other"
transport_type_crosswalk_path = os.path.join(project_dir, 'OSM/osm_code/crosswalks/transport_type_crosswalk.csv')
transport_type_crosswalk_df = pd.read_csv(transport_type_crosswalk_path)
transport_type_crosswalk_df.loc[transport_type_crosswalk_df["group"] == "0", "group"] = "other"
# merge new classification and assign any features without a type to unclassified
transport_geo = transport_geo.merge(transport_type_crosswalk_df, left_on="fclass", right_on="type", how="left")
transport_geo.loc[transport_geo["fclass"].isna(), "group"] = "unclassified"
# show breakdown of groups
print(transport_geo.group.value_counts())
# group_field = "fclass"
group_field = "group"
# split by group
# transport_group_list = ["all"] + [i for i in set(transport_geo[group_field])]
transport_group_list = [i for i in set(transport_geo[group_field])]
# copy of buffers gdf to use for output
buffers_gdf_transport = buffers_gdf.copy(deep=True)
for group in transport_group_list:
print(group)
    # subset by group
if group == "all":
transport_geo_subset = transport_geo.copy(deep=True)
else:
transport_geo_subset = transport_geo.loc[transport_geo[group_field] == group].reset_index().copy(deep=True)
# query to find transport in each buffer
bquery = transport_geo_subset.sindex.query_bulk(buffers_gdf.geometry)
    # transport dataframe where each row pairs a cluster with one transport feature intersecting it (a cluster can appear in multiple rows)
bquery_df = pd.DataFrame({"cluster": bquery[0], "transport": bquery[1]})
# add transport data to spatial query dataframe
bquery_full = bquery_df.merge(transport_geo_subset, left_on="transport", right_index=True, how="left")
# aggregate spatial query df with transport info, by cluster
bquery_agg = bquery_full.groupby("cluster").agg({"transport": "count"})
bquery_agg.columns = [group + "_transport_count"]
    # join the aggregated counts for this transport group back onto the original buffers dataframe
z1 = buffers_gdf.merge(bquery_agg, left_index=True, right_on="cluster", how="left")
# not each cluster will have relevant transport, set those to zero
z1.fillna(0, inplace=True)
# set index and drop unnecessary columns
if z1.index.name != "cluster": z1.set_index("cluster", inplace=True)
z2 = z1[group + "_transport_count"]
# merge group columns back to main cluster dataframe
buffers_gdf_transport = buffers_gdf_transport.merge(z2, left_index=True, right_index=True)
# output final features
transport_feature_cols = [geom_id] + [i for i in buffers_gdf_transport.columns if "_transport_" in i]
transport_features = buffers_gdf_transport[transport_feature_cols].copy(deep=True)
transport_features_path = os.path.join(data_dir, 'osm/features/{}_transport_{}.csv'.format(geom_label, date))
transport_features.to_csv(transport_features_path, index=False, encoding="utf-8")
# ---------------------------------------------------------
# # buildings
# # for each type of building (and all buildings combined)
# # count of buildings in each buffer, average areas of buildings in each buffer, total area of building in each buffer, ratio of building area to total area of buffer
print("Running buildings...")
osm_buildings_shp_path = os.path.join(data_dir, 'osm/philippines-{}-free.shp/gis_osm_buildings_a_free_1.shp'.format(date))
buildings_geo_raw = gpd.read_file(osm_buildings_shp_path)
"""
manually generate crosswalk
first prep CSV with all types - can combine multiple OSM timesteps (see below)
then in Excel/whatever, assign group to each type/fclass
type_df = pd.DataFrame({"type": list(set(buildings_geo["type"]))})
type_df["group"]= 0
type_df.to_csv(os.path.join(project_dir, "OSM/crosswalks/building_type_crosswalk.csv"), index=False, encoding="utf-8")
"""
# # load crosswalk for building types and assign any not grouped to "other"
building_type_crosswalk_path = os.path.join(project_dir, 'OSM/osm_code/osm_code/crosswalks/building_type_crosswalk.csv')
building_type_crosswalk_df = pd.read_csv(building_type_crosswalk_path)
building_type_crosswalk_df.loc[building_type_crosswalk_df["group"] == "0", "group"] = "other"
# # merge new classification and assign any buildings without a type to unclassified
buildings_geo_raw = buildings_geo_raw.merge(building_type_crosswalk_df, on="type", how="left")
buildings_geo_raw.loc[buildings_geo_raw["type"].isna(), "group"] = "unclassified"
group_field = "group"
# # show breakdown of groups
print(buildings_geo_raw.group.value_counts())
buildings_geo = buildings_geo_raw.copy(deep=True)
# # split by building types
# # group_list = ["residential"]
# # group_list = ["all"] + [i for i in set(buildings_geo["group"]) if i not in ["other", "unclassified"]]
buildings_group_list = [i for i in set(buildings_geo["group"]) if i not in ["other", "unclassified"]]
buildings_group_list = [i for i in buildings_group_list if str(i) != 'nan'] #removes nan from building_group_list - Sasan
buildings_group_list = buildings_group_list + ['all'] # add 'all' (all buildings combined) to the group list
if "all" not in buildings_group_list:
buildings_geo = buildings_geo.loc[buildings_geo["group"].isin(buildings_group_list)]
# calculate area of each building
# convert to UTM 51N (meters) first, then back to WGS84 (degrees)
buildings_geo = buildings_geo.to_crs("EPSG:32651") # UTM 51N
buildings_geo["area"] = buildings_geo.area
buildings_geo = buildings_geo.to_crs("EPSG:4326") # WGS84
# copy of buffers gdf to use for output
buffers_gdf_buildings = buffers_gdf.copy(deep=True)
for group in buildings_group_list:
print(group)
    # subset by group
if group == "all":
buildings_geo_subset = buildings_geo.copy(deep=True)
else:
buildings_geo_subset = buildings_geo.loc[buildings_geo[group_field] == group].reset_index().copy(deep=True)
# query to find buildings in each buffer
bquery = buildings_geo_subset.sindex.query_bulk(buffers_gdf.geometry)
    # building dataframe where each row pairs a cluster with one building intersecting it (a cluster can appear in multiple rows)
bquery_df = pd.DataFrame({"cluster": bquery[0], "building": bquery[1]})
# add building data to spatial query dataframe
bquery_full = bquery_df.merge(buildings_geo_subset, left_on="building", right_index=True, how="left")
# aggregate spatial query df with building info, by cluster
bquery_agg = bquery_full.groupby("cluster").agg({
"area": ["count", "mean", "sum"]
})
# rename agg df
basic_building_cols = ["buildings_count", "buildings_avgarea", "buildings_totalarea"]
bquery_agg.columns = ["{}_{}".format(group, i) for i in basic_building_cols]
# join cluster back to original buffer_geo dataframe with columns for specific building type queries
z1 = buffers_gdf.merge(bquery_agg, left_index=True, right_on="cluster", how="left")
# not each cluster will have relevant buildings, set those to zero
z1.fillna(0, inplace=True)
# calculate ratio for building type
z1["{}_buildings_ratio".format(group)] = z1["{}_buildings_totalarea".format(group)] / z1["buffer_area"]
# set index and drop unnecessary columns
if z1.index.name != "cluster": z1.set_index("cluster", inplace=True)
z2 = z1[bquery_agg.columns.to_list() + ["{}_buildings_ratio".format(group)]]
# merge group columns back to main cluster dataframe
buffers_gdf_buildings = buffers_gdf_buildings.merge(z2, left_index=True, right_index=True)
# output final features
buildings_feature_cols = [geom_id] + [i for i in buffers_gdf_buildings.columns if "_buildings_" in i]
buildings_features = buffers_gdf_buildings[buildings_feature_cols].copy(deep=True)
buildings_features_path = os.path.join(data_dir, 'osm/features/{}_buildings_{}.csv'.format(geom_label, date))
buildings_features.to_csv(buildings_features_path, index=False, encoding="utf-8")
# ---------------------------------------------------------
# roads
# for each type of road
# distance to closest road from cluster centroid, total number of roads in each cluster, and total length of roads in each cluster
print("Running roads...")
osm_roads_shp_path = os.path.join(data_dir, 'osm/philippines-{}-free.shp/gis_osm_roads_free_1.shp'.format(date))
roads_geo = gpd.read_file(osm_roads_shp_path)
# get each road length
# convert to UTM 51N (meters) first, then back to WGS84 (degrees)
roads_geo = roads_geo.to_crs("EPSG:32651") # UTM 51N
roads_geo["road_length"] = roads_geo.geometry.length
roads_geo = roads_geo.to_crs("EPSG:4326") # WGS84
"""
# manually generate crosswalk
# first prep CSV with all types - can combine multiple OSM timesteps (see below)
# then in Excel/whatever, assign group to each type/fclass
type_df = pd.DataFrame({"type": list(set(roads_geo["fclass"]))})
type_df["group"]= 0
type_df.to_csv(os.path.join(project_dir, "OSM/crosswalks/roads_type_crosswalk.csv"), index=False, encoding="utf-8")
"""
# load crosswalk for types and assign any not grouped to "other"
roads_type_crosswalk_path = os.path.join(project_dir, 'OSM/osm_code/crosswalks/roads_type_crosswalk.csv')
roads_type_crosswalk_df =
|
pd.read_csv(roads_type_crosswalk_path)
|
pandas.read_csv
|
from typing import Dict, Union
import numpy as np
from keras import Input, Model
from keras.layers import Flatten, Dense, Dropout
from numpy import ndarray
from pandas import DataFrame
from sklearn.base import ClassifierMixin
from src.encoding.encoding_parser import EncodingParser
from src.predictive_model.models import PredictiveModels
class NNClassifier(ClassifierMixin):
"""
Neural Network classifier, implements the same methods as the sklearn models to make it simple to add
"""
# noinspection PyTypeChecker
def __init__(self, **kwargs: Dict[str, Union[int, str, float]]):
"""initializes the Neural Network classifier
:param kwargs: configuration containing the predictive_model parameters, encoding and training parameters
"""
self._n_hidden_layers = int(kwargs['n_hidden_layers'])
self._n_hidden_units = int(kwargs['n_hidden_units'])
self._activation = str(kwargs['activation'])
self._n_epochs = int(kwargs['n_epochs'])
self._encoding = str(kwargs['encoding'])
self._dropout_rate = float(kwargs['dropout_rate'])
self._is_binary_classifier = bool(kwargs['is_binary_classifier'])
self._encoding_parser = EncodingParser(self._encoding, self._is_binary_classifier,
task=PredictiveModels.CLASSIFICATION.value)
self._model = None
self.classes_ = None # this is set for compatibility reasons
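    # Hypothetical usage sketch (values are illustrative assumptions, not taken from
    # this project's configuration):
    #   clf = NNClassifier(n_hidden_layers=2, n_hidden_units=64, activation='relu',
    #                      n_epochs=10, encoding='simpleIndex', dropout_rate=0.2,
    #                      is_binary_classifier=True)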
def fit(self, train_data: DataFrame, targets: ndarray) -> None:
"""creates and fits the predictive_model
first the encoded data is parsed, then the predictive_model created and then trained
:param train_data: encoded training dataset
:param targets: encoded target dataset
"""
targets =
|
DataFrame(targets, columns=['label'])
|
pandas.DataFrame
|
# %%
import datetime
import pandas
import altair
from plot_shared import plot_points_average_and_trend
# %%
df = pandas.read_csv('https://api.coronavirus.data.gov.uk/v2/data?areaType=nation&metric=cumPeopleVaccinatedFirstDoseByPublishDate&metric=cumPeopleVaccinatedSecondDoseByPublishDate&format=csv')
df.rename(columns={
'cumPeopleVaccinatedFirstDoseByPublishDate': 'First',
'cumPeopleVaccinatedSecondDoseByPublishDate': 'Second',
'areaName': 'Nation',
'date': 'Publication Date'
}, inplace=True)
df = df.drop(columns=['areaCode','areaType']).melt(id_vars=['Publication Date','Nation'], var_name='Dose', value_name='People')
# %%
ni = pandas.read_csv('../sam/doses.csv')
ni['Dose'] = ni['Dose'].str.replace('Dose 1', 'First')
ni['Dose'] = ni['Dose'].str.replace('Dose 2', 'Second')
ni['Dose'] = ni['Dose'].str.replace('Dose 3', 'Third')
# %%
history = df[df['Nation']=='Northern Ireland'][['Publication Date','Dose','People']]
ni.rename(columns={'Date':'Publication Date','Total':'People'}, inplace=True)
all = history.merge(ni, on=['Publication Date','Dose'], how='outer', suffixes=('','_bot'))
all['People'] = all['People'].fillna(all['People_bot'])
all = all[['Publication Date','Dose','People']]
# %%
boosters = all[all['Dose']=='Booster'][['Publication Date','People']]
boosters['Publication Date'] = pandas.to_datetime(boosters['Publication Date'])
dose2s = all[all['Dose']=='Second'][['Publication Date','People']]
dose2s['Publication Date'] = pandas.to_datetime(dose2s['Publication Date'])
dose2s['Booster Target Date 6M'] = pandas.to_datetime(dose2s['Publication Date']) + pandas.to_timedelta(183, unit='d')
dose2s['Booster Target Date 7M'] = pandas.to_datetime(dose2s['Publication Date']) + pandas.to_timedelta(183+31, unit='d')
dose2s['Booster Target Date 8M'] =
|
pandas.to_datetime(dose2s['Publication Date'])
|
pandas.to_datetime
|
import pandas as pd
import pymmwr as pm
import datetime
import warnings
import io
import requests
warnings.simplefilter(action='ignore')
def read_fips_codes(filepath):
# read file
fips_codes = pd.read_csv(filepath)
# take state code from all fips codes
fips_codes['state_abbr'] = fips_codes['location'].str[:2]
# match state abbrevaition with state fips code
fips_codes['state_abbr'] = fips_codes['state_abbr'].apply(lambda x: fips_codes[fips_codes.location ==x].abbreviation.tolist()[0] if str(x) in fips_codes['location'].tolist() else 'NA')
# only output "location (fips code)","location_name","(state) abbreviation"
fips_codes = fips_codes.drop('abbreviation',axis=1)
fips_codes.rename({'state_abbr': 'abbreviation'}, axis=1, inplace=True)
return fips_codes
def get_epi_data(date):
# The format
format_str = '%m/%d/%y'
dt = datetime.datetime.strptime(date, format_str).date()
epi = pm.date_to_epiweek(dt)
return epi.year, epi.week, epi.day
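# For example, get_epi_data('01/05/21') should yield (2021, 1, 3): 2021-01-05 falls in
# MMWR epi week 1 (the week starting Sunday 2021-01-03), and Sunday counts as day 1,
# so Tuesday is day 3.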
def pre_process (df):
# convert matrix to repeating row format
df_truth = df.unstack()
df_truth = df_truth.reset_index()
# get epi data from date
df_truth['year'], df_truth['week'], df_truth['day'] = \
zip(*df_truth['level_0'].map(get_epi_data))
return df_truth
def get_byday (df_truth):
# only output "location", "epiweek", "value"
df_truth = df_truth.drop(['location_long'], axis=1)
df_byday = df_truth.rename(columns={"level_0": "date"})
# select columns
df_byday = df_byday[["date", "location", "location_name", "value"]]
# ensure value column is integer
df_byday['value'] = df_byday['value'].astype(int)
# change to yyyy/mm/dd format
df_byday['date'] = pd.to_datetime(df_byday['date'])
return df_byday
def configure_JHU_data(county_truth, state_nat_truth, target):
# pre process both county truth and state_nat_truth
county_truth = pre_process(county_truth)
state_nat_truth = pre_process(state_nat_truth)
# rename columns
county_truth = county_truth.rename(columns={0: "value","FIPS": "location_long"})
state_nat_truth = state_nat_truth.rename(columns={0: "value","level_1": "location_long"})
# Get state IDs
county_truth = county_truth.merge(fips_codes, left_on='location_long', right_on='location', how='left')
state_nat_truth = state_nat_truth.merge(fips_codes, left_on='location_long', right_on='location_name', how='left')
# Only keeps counties in the US
county_truth = county_truth[county_truth.location.notnull()]
# Drop NAs
county_truth = county_truth.dropna(subset=['location', 'value'])
state_nat_truth = state_nat_truth.dropna(subset=['location', 'value'])
# add leading zeros to state code
state_nat_truth['location'] = state_nat_truth['location'].apply(lambda x: '{0:0>2}'.format(x))
county_truth['location'] = county_truth['location'].apply(lambda x: '{0:0>2}'.format(x))
'''
####################################
# Daily truth data output for reference
####################################
'''
county_truth_byday = get_byday(county_truth)
state_nat_truth_byday = get_byday(state_nat_truth)
df_byday = state_nat_truth_byday.append(county_truth_byday)
file_path = '../data-truth/truth-' + target + '.csv'
df_byday.to_csv(file_path, index=False)
'''
####################################
# Truth data output for visualization
####################################
'''
# Only visualize certain states, not county truths
states = ['US', 'Alabama', 'Alaska', 'Arizona', 'Arkansas', 'California', 'Colorado', 'Connecticut',
'Delaware', 'Florida', 'Georgia', 'Hawaii', 'Idaho', 'Illinois', 'Indiana', 'Iowa', 'Kansas', 'Kentucky',
'Louisiana', 'Maine', 'Maryland', 'Massachusetts', 'Michigan', 'Minnesota', 'Mississippi', 'Missouri',
'Montana', 'Nebraska', 'Nevada', 'New Hampshire', 'New Jersey', 'New Mexico', 'New York',
'North Carolina', 'North Dakota', 'Ohio', 'Oklahoma', 'Oregon', 'Pennsylvania', 'Rhode Island',
'South Carolina', 'South Dakota', 'Tennessee', 'Texas', 'Utah', 'Vermont', 'Virginia', 'Washington',
'West Virginia', 'Wisconsin', 'Wyoming', 'District of Columbia']
state_nat_truth = state_nat_truth.drop(['location_name'], axis=1)
state_nat_truth = state_nat_truth[state_nat_truth["location_long"].isin(states)]
df_truth = state_nat_truth
# Observed data on the seventh day
# or group by week for incident deaths
if target in ('Incident Deaths','Incident Cases'):
df_vis = df_truth.groupby(['week', 'location_long'], as_index=False).agg({'level_0': 'last',
'value': 'sum',
'year': 'last',
'day': 'last',
'location': 'last',
'abbreviation': 'last'})
df_vis = df_vis[df_vis['day'] == 7]
else:
df_vis = df_truth[df_truth['day'] == 7]
# shift epiweek on axis
df_vis['week'] = df_vis['week'] + 1
# add leading zeros to epi week
df_vis['week'] = df_vis['week'].apply(lambda x: '{0:0>2}'.format(x))
# define epiweek
df_vis['epiweek'] = df_vis['year'].astype(str) + df_vis['week']
# Replace US with "nat" this is NECESSARY for visualization code!
df_vis.loc[df_vis["location_long"] == "US", "abbreviation"] = "nat"
# only output "location", "epiweek", "value"
df_truth_short = df_vis[["abbreviation", "epiweek", "value"]]
df_truth_short = df_truth_short.rename(columns={"abbreviation": "location"})
df_truth_short["value"].replace({0: 0.1}, inplace=True)
file_path = '../visualization/vis-master/covid-csv-tools/dist/truth/' + target + '.json'
# write to json
with open(file_path, 'w') as f:
f.write(df_truth_short.to_json(orient='records'))
def get_county_truth(df):
county = df[
|
pd.notnull(df.FIPS)
|
pandas.notnull
|
import os
from io import StringIO
from pathlib import Path
import pandas as pd
import pandas._testing as pt
import pytest
from pyplotutil.datautil import Data, DataSet
csv_dir_path = os.path.join(os.path.dirname(__file__), "data")
test_data = """\
a,b,c,d,e
1,0.01,10.0,3.5,100
2,0.02,20.0,7.5,200
3,0.03,30.0,9.5,300
4,0.04,40.0,11.5,400
"""
test_dataset = """\
tag,a,b,c,d,e
tag01,0,1,2,3,4
tag01,5,6,7,8,9
tag01,10,11,12,13,14
tag01,15,16,17,18,19
tag01,20,21,22,23,24
tag01,25,26,27,28,29
tag02,10,11,12,13,14
tag02,15,16,17,18,19
tag02,110,111,112,113,114
tag02,115,116,117,118,119
tag02,120,121,122,123,124
tag02,125,126,127,128,129
tag03,20,21,22,23,24
tag03,25,26,27,28,29
tag03,210,211,212,213,214
tag03,215,216,217,218,219
tag03,220,221,222,223,224
tag03,225,226,227,228,229
"""
@pytest.mark.parametrize("cls", [str, Path])
def test_data_init_path(cls) -> None:
csv_path = os.path.join(csv_dir_path, "test.csv")
path = cls(csv_path)
expected_df = pd.read_csv(csv_path)
data = Data(path)
assert data.datapath == Path(csv_path)
pt.assert_frame_equal(data.dataframe, expected_df)
def test_data_init_StringIO() -> None:
csv_path = os.path.join(csv_dir_path, "test.csv")
expected_df = pd.read_csv(csv_path)
data = Data(StringIO(test_data))
assert data.datapath is None
pt.assert_frame_equal(data.dataframe, expected_df)
def test_data_init_DataFrame() -> None:
csv_path = os.path.join(csv_dir_path, "test.csv")
expected_df = pd.read_csv(csv_path)
if isinstance(expected_df, pd.DataFrame):
data = Data(expected_df)
assert data.datapath is None
pt.assert_frame_equal(data.dataframe, expected_df)
else:
pytest.skip(f"Expected DataFram type: {type(expected_df)}")
def test_data_init_kwds() -> None:
csv_path = os.path.join(csv_dir_path, "test.csv")
expected_df = pd.read_csv(csv_path, usecols=[0, 1])
data = Data(csv_path, usecols=[0, 1])
assert len(data.dataframe.columns) == 2
pt.assert_frame_equal(data.dataframe, expected_df)
def test_data_getitem() -> None:
df = pd.DataFrame([[0, 1, 2], [3, 4, 5], [6, 7, 8]], columns=["a", "b", "c"])
data = Data(df)
pt.assert_series_equal(data["a"], df.a) # type: ignore
pt.assert_series_equal(data["b"], df.b) # type: ignore
pt.assert_series_equal(data["c"], df.c) # type: ignore
def test_data_getitem_no_header() -> None:
df = pd.DataFrame([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
data = Data(df)
pt.assert_series_equal(data[0], df[0]) # type: ignore
pt.assert_series_equal(data[1], df[1]) # type: ignore
pt.assert_series_equal(data[2], df[2]) # type: ignore
def test_data_len() -> None:
df = pd.DataFrame([[0, 1, 2], [3, 4, 5], [6, 7, 8]], columns=["a", "b", "c"])
data = Data(df)
assert len(data) == len(df)
def test_data_getattr() -> None:
df = pd.DataFrame([[0, 1, 2], [3, 4, 5], [6, 7, 8]], columns=["a", "b", "c"])
data = Data(df)
pt.assert_index_equal(data.columns, pd.Index(["a", "b", "c"]))
assert data.shape == (3, 3)
assert data.to_csv() == ",a,b,c\n0,0,1,2\n1,3,4,5\n2,6,7,8\n"
assert data.iat[1, 2] == 5
assert data.at[2, "a"] == 6
def test_data_attributes() -> None:
df = pd.DataFrame([[0, 1, 2], [3, 4, 5], [6, 7, 8]], columns=["a", "b", "c"])
data = Data(df)
pt.assert_series_equal(data.a, df.a) # type: ignore
pt.assert_series_equal(data.b, df.b) # type: ignore
pt.assert_series_equal(data.c, df.c) # type: ignore
def test_data_param() -> None:
csv_path = os.path.join(csv_dir_path, "test.csv")
data = Data(csv_path)
assert data.param("b") == 0.01
def test_data_param_list() -> None:
csv_path = os.path.join(csv_dir_path, "test.csv")
data = Data(csv_path)
assert data.param(["c", "e"]) == [10.0, 100]
@pytest.mark.parametrize("cls", [str, Path])
def test_dataset_init_path(cls) -> None:
csv_path = os.path.join(csv_dir_path, "test_dataset.csv")
path = cls(csv_path)
raw_df = pd.read_csv(csv_path)
dataset = DataSet(path)
assert dataset.datapath == Path(csv_path)
pt.assert_frame_equal(dataset.dataframe, raw_df)
if isinstance(raw_df, pd.DataFrame):
groups = raw_df.groupby("tag")
datadict = dataset._datadict
pt.assert_frame_equal(
datadict["tag01"].dataframe,
groups.get_group("tag01").reset_index(drop=True),
)
pt.assert_frame_equal(
datadict["tag02"].dataframe,
groups.get_group("tag02").reset_index(drop=True),
)
pt.assert_frame_equal(
datadict["tag03"].dataframe,
groups.get_group("tag03").reset_index(drop=True),
)
else:
pytest.skip(f"Expected DataFram type: {type(raw_df)}")
def test_dataset_init_StringIO() -> None:
csv_path = os.path.join(csv_dir_path, "test_dataset.csv")
raw_df = pd.read_csv(csv_path)
dataset = DataSet(StringIO(test_dataset))
assert dataset.datapath is None
pt.assert_frame_equal(dataset.dataframe, raw_df)
if isinstance(raw_df, pd.DataFrame):
groups = raw_df.groupby("tag")
datadict = dataset._datadict
pt.assert_frame_equal(
datadict["tag01"].dataframe,
groups.get_group("tag01").reset_index(drop=True),
)
pt.assert_frame_equal(
datadict["tag02"].dataframe,
groups.get_group("tag02").reset_index(drop=True),
)
pt.assert_frame_equal(
datadict["tag03"].dataframe,
groups.get_group("tag03").reset_index(drop=True),
)
else:
pytest.skip(f"Expected DataFram type: {type(raw_df)}")
def test_dataset_init_DataFrame() -> None:
csv_path = os.path.join(csv_dir_path, "test_dataset.csv")
raw_df = pd.read_csv(csv_path)
if isinstance(raw_df, pd.DataFrame):
dataset = DataSet(raw_df)
groups = raw_df.groupby("tag")
assert dataset.datapath is None
pt.assert_frame_equal(dataset.dataframe, raw_df)
pt.assert_frame_equal(
dataset._datadict["tag01"].dataframe,
groups.get_group("tag01").reset_index(drop=True),
)
pt.assert_frame_equal(
dataset._datadict["tag02"].dataframe,
groups.get_group("tag02").reset_index(drop=True),
)
pt.assert_frame_equal(
dataset._datadict["tag03"].dataframe,
groups.get_group("tag03").reset_index(drop=True),
)
else:
pytest.skip(f"Expected DataFram type: {type(raw_df)}")
def test_dataset_non_default_tag() -> None:
csv_path = os.path.join(csv_dir_path, "test_dataset_label.csv")
raw_df = pd.read_csv(csv_path)
dataset = DataSet(csv_path, by="label")
assert dataset.datapath == Path(csv_path)
pt.assert_frame_equal(dataset.dataframe, raw_df)
if isinstance(raw_df, pd.DataFrame):
groups = raw_df.groupby("label")
pt.assert_frame_equal(
dataset._datadict["label01"].dataframe,
groups.get_group("label01").reset_index(drop=True),
)
pt.assert_frame_equal(
dataset._datadict["label02"].dataframe,
groups.get_group("label02").reset_index(drop=True),
)
pt.assert_frame_equal(
dataset._datadict["label03"].dataframe,
groups.get_group("label03").reset_index(drop=True),
)
else:
pytest.skip(f"Expected DataFram type: {type(raw_df)}")
def test_dataset_no_tag() -> None:
csv_path = os.path.join(csv_dir_path, "test.csv")
raw_df = pd.read_csv(csv_path)
dataset = DataSet(csv_path)
pt.assert_frame_equal(dataset.dataframe, raw_df)
if isinstance(raw_df, pd.DataFrame):
assert len(dataset.datadict) == 1
|
pt.assert_frame_equal(dataset.datadict["0"].dataframe, raw_df)
|
pandas._testing.assert_frame_equal
|
import json
import logging
import os
import pathlib
import sys
from collections import OrderedDict
from datetime import datetime
import click
import humanfriendly
import pandas
__version__ = '1.1.5'
logger = logging.getLogger()
@click.group()
@click.option('--debug', is_flag=True)
@click.pass_context
def cli(ctx, debug):
"""
This is a tool to generate an excel file based on a provided source excel and transformation mapping
"""
log_format = '%(asctime)s|%(levelname)s|%(name)s|(%(funcName)s):-%(message)s'
logging.basicConfig(level=logging.DEBUG if debug else logging.INFO, stream=sys.stdout, format=log_format)
if ctx.invoked_subcommand not in ['version']:
logger.info(f'{"-" * 20} Starting Logging for {ctx.invoked_subcommand} (v{__version__}) {"-" * 20}')
def process_column_mappings(source_df, column_mappings):
out_df = source_df.copy(deep=True)
name_map = {}
exclude_columns = []
pending_columns = False
for x in column_mappings:
if x[0][:3] == '[-]':
exclude_columns.append(x[0][3:])
elif x[0] == '*':
pending_columns = True
else:
name_map.update({x[0]: x[1] if x[1] != '_' else x[0]})
index_map = {'_': []}
for mapping in column_mappings:
index = mapping[2]
value = mapping[0] if mapping[1] == '_' else mapping[1]
if index == '_':
if value != '*' and value[:3] != '[-]':
index_map['_'].append(value)
continue
if index not in index_map:
index_map[index] = value
exclude_columns.append(value)
else:
raise Exception(f'Cannot have same column index for multiple columns, please check your column mapping\n'
f'{index=}, {mapping=}')
out_df = out_df.rename(columns=name_map)
pending_columns_list = list(set(out_df.columns).difference(exclude_columns)) if pending_columns else []
return {'df': out_df, 'index_map': index_map, 'pending_columns': pending_columns_list}
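# Illustrative sketch (not part of the original source): a hypothetical
# column_mappings value following the conventions handled above, where each
# entry is [source_name, dest_name, dest_index], '_' means "keep as is",
# a '[-]' prefix excludes a column and '*' passes the remaining columns through.
#   example_mappings = [
#       ['old_name', 'new_name', '0'],   # rename and pin to output index 0
#       ['kept_column', '_', '_'],       # keep name, no fixed index
#       ['[-]internal_id', '_', '_'],    # drop this column
#       ['*', '_', '_'],                 # pass through all remaining columns
#   ]
#   result = process_column_mappings(source_df, example_mappings)
#   # result['df'], result['index_map'], result['pending_columns']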
def process_mappings(source_df_dict, mappings):
worksheets_dict = {}
for mapping in mappings:
count = -1
for sheet_identifier, sheet_mapping in mapping.items():
count += 1
entry = get_dict_entry(count, sheet_identifier, source_df_dict)
sheet_name = entry.get('name')
if sheet_name not in worksheets_dict:
# noinspection PyArgumentList
worksheets_dict.update({sheet_name: {
'source': entry.get('item').copy(deep=True),
'dest': {}
}})
dest_sheet_name = sheet_mapping.get('dest_worksheet_name') or sheet_name
dest_sheet_name = sheet_name if dest_sheet_name == '_' else dest_sheet_name
mapping_processed = process_column_mappings(worksheets_dict.get(sheet_name).get('source'),
sheet_mapping.get('columns'))
mapping_processed.update({'merge_columns': sheet_mapping.get('merge_columns')})
worksheets_dict[sheet_name]['dest'].update({dest_sheet_name: mapping_processed})
return worksheets_dict
@cli.command()
@click.argument('source', nargs=-1)
@click.argument('mapping')
@click.option('-o', '--output', help='relative or absolute path to output file')
@click.pass_context
def transform(ctx, **kwargs):
transform_spreadsheets(**kwargs)
def transform_spreadsheets(source, mapping, output):
"""Produces a new spreadsheet with transformation mapping applied"""
s_time = datetime.now()
try:
source_paths = [get_path(x) for x in source]
mapping_path = get_path(mapping, make_dir=False)
output_path = get_path(output or 'excel_transform_output.xlsx', make_dir=True)
source_dfs = OrderedDict()
try:
logger.info('processing mappings file')
with open(mapping_path) as f:
mappings = json.load(f)
except Exception as e:
logger.critical(f'Encountered error trying to read the mapping file:\n{e}')
sys.exit()
logger.info('processing source files')
for source_path in source_paths:
try:
source_dfs.update({source_path.stem: pandas.read_excel(source_path, sheet_name=None)})
except Exception as e:
logger.critical(f'Encountered error processing source file: {source_path}\n{e}')
sys.exit()
count = -1
processed_source = {}
for identifier, mapping in mappings.items():
if '__' == identifier[:2]:
continue
count += 1
entry = get_dict_entry(count, identifier, source_dfs)
logger.info(f'processing mappings for: {entry.get("name")}')
processed_source.update({entry.get('name'): process_mappings(entry.get("item"), mapping)})
logger.info('grouping processed source data by destination worksheet')
dest_worksheet_dict = {}
for worksheets in processed_source.values():
for data in worksheets.values():
for dest_worksheet_name, dest_data in data['dest'].items():
if dest_worksheet_name not in dest_worksheet_dict:
dest_worksheet_dict[dest_worksheet_name] = []
dest_worksheet_dict[dest_worksheet_name].append(dest_data)
logger.info('merging destination worksheet data')
out_dict = {}
for dest_worksheet_name, data_list in dest_worksheet_dict.items():
temp_df =
|
pandas.DataFrame()
|
pandas.DataFrame
|
import pandas as pd
from simple_ts_forecast.utils import transform_date_start
class Model:
def __init__(self, df, n=14, verbose=False, column_name='price', **params):
self.n = n
self.verbose = verbose
self.column_name = column_name
self.params = params
def fit(self, df, verbose=False):
pass
def predict(self, df):
raise NotImplementedError()
def predict_for_report(self, df, date_start, date_end):
dates = pd.date_range(date_start, date_end)
preds = {}
for pivot in dates:
signal = df.loc[:pivot].dropna()
pred = self.__class__(signal, n=self.n, column_name=self.column_name, **self.params).predict(signal)
self.insert_to_dict(preds, [self.column_name], pred)
date_start = transform_date_start(date_start, self.n)
date_end = transform_date_start(date_end, self.n)
dates =
|
pd.date_range(date_start, date_end)
|
pandas.date_range
|
import sys
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
import matplotlib
import matplotlib.pyplot as plt
if len(sys.argv) != 3:
print ("Usage: ", sys.argv[0], " <input CSV> <output CSV>")
print (" got:", sys.argv)
sys.exit(2)
else :
InputFile=sys.argv[1]
OutputFile=sys.argv[2]
#if
# Import the file and drop the last column
df =
|
pd.read_csv(InputFile, delimiter=',')
|
pandas.read_csv
|
import numpy as np
from nose.tools import raises
import pandas as pds
import pysat
class TestBasics():
def setup(self):
"""Runs before every method to create a clean testing setup."""
self.testInst = pysat.Instrument('pysat', 'testing', tag='10',
clean_level='clean')
def teardown(self):
"""Runs after every method to clean up previous testing."""
del self.testInst
def add(self, function, kind='add', at_pos='end', *args, **kwargs):
'''Adds a function to the object's custom queue'''
self.testInst.custom.add(function, kind, at_pos, *args, **kwargs)
@raises(ValueError)
def test_single_modifying_custom_function(self):
"""Test if custom function works correctly. Modify function that
returns pandas object. Modify function returns an object which will
produce an Error.
"""
def custom1(inst):
inst.data['doubleMLT'] = 2.0 * inst.data.mlt
return 5.0 * inst.data['mlt']
self.testInst.custom.add(custom1, 'modify')
self.testInst.load(2009, 1)
def test_single_adding_custom_function(self):
"""Test if custom function works correctly. Add function that returns
pandas object.
"""
def custom1(inst):
d = 2.0 * inst['mlt']
d.name = 'doubleMLT'
return d
self.add(custom1, 'add')
self.testInst.load(2009, 1)
assert (self.testInst['doubleMLT'].values == 2.0 *
self.testInst['mlt'].values).all()
def test_single_adding_custom_function_wrong_times(self):
"""Only the data at the correct time should be accepted, otherwise it
returns nan
"""
def custom1(inst):
new_index = inst.index+
|
pds.DateOffset(milliseconds=500)
|
pandas.DateOffset
|
import pull_mdsplus as pull
import pandas as pd
import numpy as np
import meas_locations as geo
import MDSplus as mds
import itertools
from scipy import interpolate
def load_gfile_mds(shot, time, tree="EFIT01", exact=False, connection=None, tunnel=True):
"""
This is scavenged from the load_gfile_d3d script on the EFIT repository,
except updated to run on python3.
shot: Shot to get gfile for.
time: Time of the shot to load gfile for, in ms.
tree: One of the EFIT trees to get the data from.
exact: If True will raise error if time does not exactly match any gfile
times. False will grab the closest time.
connection: An MDSplus connection to atlas.
tunnel: Set to True if accessing outside DIII-D network.
returns: The requested gfile as a dictionary.
"""
# Connect to server, open tree and go to g-file
if connection is None:
if tunnel is True:
connection = mds.Connection("localhost")
else:
connection = mds.Connection('atlas.gat.com')
connection.openTree(tree, shot)
base = 'RESULTS:GEQDSK:'
# get time slice
print("\nLoading gfile:")
print(" Shot: " + str(shot))
print(" Tree: " + tree)
print(" Time: " + str(time))
signal = 'GTIME'
k = np.argmin(np.abs(connection.get(base + signal).data() - time))
time0 = int(connection.get(base + signal).data()[k])
if (time != time0):
if exact:
raise RuntimeError(tree + ' does not exactly contain time %.2f' %time + ' -> Abort')
else:
print('Warning: ' + tree + ' does not exactly contain time %.2f' %time + ' the closest time is ' + str(time0))
print('Fetching time slice ' + str(time0))
time = time0
# store data in dictionary
g = {'shot': shot, 'time': time}
# get header line
header = connection.get(base + 'ECASE').data()[k]
# get all signals, use same names as in read_g_file
translate = {'MW': 'NR', 'MH': 'NZ', 'XDIM': 'Xdim', 'ZDIM': 'Zdim', 'RZERO': 'R0',
'RMAXIS': 'RmAxis', 'ZMAXIS': 'ZmAxis', 'SSIMAG': 'psiAxis', 'SSIBRY': 'psiSep',
'BCENTR': 'Bt0', 'CPASMA': 'Ip', 'FPOL': 'Fpol', 'PRES': 'Pres',
'FFPRIM': 'FFprime', 'PPRIME': 'Pprime', 'PSIRZ': 'psiRZ', 'QPSI': 'qpsi',
'NBBBS': 'Nlcfs', 'LIMITR': 'Nwall'}
for signal in translate:
g[translate[signal]] = connection.get(base + signal).data()[k]
g['R1'] = connection.get(base + 'RGRID').data()[0]
g['Zmid'] = 0.0
RLIM = connection.get(base + 'LIM').data()[:, 0]
ZLIM = connection.get(base + 'LIM').data()[:, 1]
g['wall'] = np.vstack((RLIM, ZLIM)).T
RBBBS = connection.get(base + 'RBBBS').data()[k][:int(g['Nlcfs'])]
ZBBBS = connection.get(base + 'ZBBBS').data()[k][:int(g['Nlcfs'])]
g['lcfs'] = np.vstack((RBBBS, ZBBBS)).T
KVTOR = 0
RVTOR = 1.7
NMASS = 0
RHOVN = connection.get(base + 'RHOVN').data()[k]
# convert floats to integers
for item in ['NR', 'NZ', 'Nlcfs', 'Nwall']:
g[item] = int(g[item])
# convert single (float32) to double (float64) and round
for item in ['Xdim', 'Zdim', 'R0', 'R1', 'RmAxis', 'ZmAxis', 'psiAxis', 'psiSep', 'Bt0', 'Ip']:
g[item] = np.round(np.float64(g[item]), 7)
# convert single arrays (float32) to double arrays (float64)
for item in ['Fpol', 'Pres', 'FFprime', 'Pprime', 'psiRZ', 'qpsi', 'lcfs', 'wall']:
g[item] = np.array(g[item], dtype=np.float64)
# Construct (R,Z) grid for psiRZ
g['dR'] = g['Xdim']/(g['NR'] - 1)
g['R'] = g['R1'] + np.arange(g['NR'])*g['dR']
g['dZ'] = g['Zdim']/(g['NZ'] - 1)
NZ2 = int(np.floor(0.5*g['NZ']))
g['Z'] = g['Zmid'] + np.arange(-NZ2, NZ2+1)*g['dZ']
# normalize psiRZ
g['psiRZn'] = (g['psiRZ'] - g['psiAxis']) / (g['psiSep'] - g['psiAxis'])
return g
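# Illustrative usage sketch (shot number and time below are hypothetical, not
# taken from the original source); requires an MDSplus connection to atlas, or
# an ssh tunnel to localhost when outside the DIII-D network:
#   conn = mds.Connection("localhost")
#   g = load_gfile_mds(167196, 3000, tree="EFIT01", connection=conn, tunnel=True)
#   g['NR'], g['NZ'], g['psiRZn'].shape   # grid size and normalized psi map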
def rbs_into_df(number, probe, conn, start=2500, end=5000, step=500, verbal=False):
"""
Pulls RBS data from the MDSplus tree 'dp_probes' and puts it into a
DataFrame ready for analysis. Require ssh to r2d2 if remote.
number: Probe number.
probe: One of A, B or C.
conn: An MDSplus Connection returned via the pull.thin_connect function.
start: Start of time that will be analyzed (i.e. the first gfile loaded).
end: End of time for analysis (i.e. the last gfile loaded).
step: Time step for the above.
returns: A DataFrame formatted and ready to be filled with data (R-Rsep,
R-Rsep_omp, etc.)
"""
# Create array of times to be sampled.
times = np.arange(start, end, step)
# Get shots probe was in for and Rprobe. Same for U and D sides, obviously.
shots = pull.pull_shots(conn, probe + 'U', verbal=verbal)
rprobe = pull.pull_rprobe(conn, probe + 'U', probe_corr=True, verbal=verbal)
print("Shots to be analyzed: " + str(shots))
# Then pull the RBS data.
print('\nLoading ' + probe + 'U' + str(number) + ' data...')
rbs_dict_U = pull.pull_all_rbs(conn, number, probe + 'U', verbal=verbal)
print('\nLoading ' + probe + 'D' + str(number) + ' data...')
rbs_dict_D = pull.pull_all_rbs(conn, number, probe + 'D', verbal=verbal)
# Now prepare the DataFrame. Will have set of data at each time, at each
# shot. So essentially len(times)*len(shots) DataFrames stacked together.
rbs_df_U = pd.DataFrame(rbs_dict_U)
rbs_df_D = pd.DataFrame(rbs_dict_D)
# Want 'locs' as an index.
rbs_df_U.set_index('locs', inplace=True)
rbs_df_D.set_index('locs', inplace=True)
# Create set of DataFrames, len(times) of them, to be 'stacked' on top of each other.
rbs_df_U = pd.concat(list(itertools.repeat(rbs_df_U, len(times))), keys=times, names=['times'])
rbs_df_D = pd.concat(list(itertools.repeat(rbs_df_D, len(times))), keys=times, names=['times'])
# Now do it again, except with shots.
rbs_df_U = pd.concat(list(itertools.repeat(rbs_df_U, len(shots))), keys=shots, names=['shots'])
rbs_df_D = pd.concat(list(itertools.repeat(rbs_df_D, len(shots))), keys=shots, names=['shots'])
return rbs_df_U, rbs_df_D, rprobe
def fill_in_rbs_df(rbs_df_U, rbs_df_D, probe, rprobe, conn, verbal=False):
"""
Takes the rbs_df from above and fill it in with R-Rsep, R-Rsep_omp, etc. It
returns all if it, so that it may then be averaged and get the std. dev. of
after all the data colloction has taken place. Requires ssh to atlas if remote.
rbs_df_U: The DataFrame returned from rbs_into_df. Likewise for D.
probe: One of A, B or C.
rprobe: Radial position of probe tip returned from rbs_into_df.
conn: An MDSplus Connection object from the mds.Connection function (different
procedure compared to connecting to r2d2).
returns: Filled in rbs_df.
"""
if verbal:
print("Analyzing atlas relevant data...")
# Get the shots, times and locs from the rbs_df index. np.unique will sort
# the locs (don't want), so returning the indices and reordering will fix this.
shots = np.unique(rbs_df_U.index.get_level_values('shots').values)
times = np.unique(rbs_df_U.index.get_level_values('times').values)
locs_U, order_U = np.unique(rbs_df_U.index.get_level_values('locs').values, return_index=True)
locs_D, order_D = np.unique(rbs_df_D.index.get_level_values('locs').values, return_index=True)
locs_U = locs_U[order_U]
locs_D = locs_D[order_D]
# Extra columns to be filled out.
rbs_df_U['R-Rsep (cm)'] = pd.Series(); rbs_df_D['R-Rsep (cm)'] = pd.Series()
rbs_df_U['R-Rsep omp (cm)'] = pd.Series(); rbs_df_D['R-Rsep omp (cm)'] = pd.Series()
rbs_df_U['Psin'] = pd.Series(); rbs_df_D['Psin'] = pd.Series()
rbs_df_U['R (cm)'] = pd.Series(); rbs_df_D['R (cm)'] = pd.Series()
# Establish the Z to be used depending on the probe.
if probe == 'A': Z_probe = -0.188
elif probe == 'B': Z_probe = -0.1546
elif probe == 'C': Z_probe = -0.2054
else: print("Error in probe entry.")
for shot in shots:
for time in times:
try:
# Load gfile.
gfile = load_gfile_mds(shot, time, connection=conn, tunnel=True)
# Create grid of R's and Z's.
Rs, Zs = np.meshgrid(gfile['R'], gfile['Z'])
# Z and R of magnetic axis (where omp is), in m.
Z_axis = gfile['ZmAxis']
R_axis = gfile['RmAxis']
# Z's and R's of the separatrix, in m.
Zes = np.copy(gfile['lcfs'][:, 1][13:-17])
Res = np.copy(gfile['lcfs'][:, 0][13:-17])
# Only want right half of everything.
Rs_trunc = Rs > R_axis
# Interpolation functions of psin(R, Z) and R(psin, Z).
f_psin = interpolate.Rbf(Rs[Rs_trunc], Zs[Rs_trunc], gfile['psiRZn'][Rs_trunc])
f_Romp = interpolate.Rbf(gfile['psiRZn'][Rs_trunc], Zs[Rs_trunc], Rs[Rs_trunc], epsilon=0.00001)
f_Rs = interpolate.interp1d(Zes, Res, assume_sorted=False)
# R of the separatrix at each probe Z in cm.
Rsep = f_Rs(Z_probe) * 100.0
Rsep_omp = f_Rs(Z_axis) * 100.0
# Get R of each location along the probe in cm, then R-Rsep.
R_locs_U = geo.calc_R_meas(rprobe, locs_U, probe + 'U')
RminRsep_U = R_locs_U - Rsep
R_locs_D = geo.calc_R_meas(rprobe, locs_D, probe + 'D')
RminRsep_D = R_locs_D - Rsep
# Get the corresponding psins of each location along the probe.
psin_locs_U = f_psin(R_locs_U / 100.0, np.full((len(R_locs_U),), Z_probe))
psin_locs_D = f_psin(R_locs_D / 100.0, np.full((len(R_locs_D),), Z_probe))
# Calculate R_loc at the omp, then R-Rsep omp.
R_locs_omp_U = f_Romp(psin_locs_U, np.full((len(psin_locs_U),), Z_axis)) * 100.0
RminRsep_omp_U = R_locs_omp_U - Rsep_omp
R_locs_omp_D = f_Romp(psin_locs_D, np.full((len(psin_locs_D),), Z_axis)) * 100.0
RminRsep_omp_D = R_locs_omp_D - Rsep_omp
except:
print("Error loading this time.")
# Finally store all these in the corresponding part of the DataFrame.
rbs_df_U.loc[shot].loc[time]['R-Rsep (cm)'] = pd.Series(RminRsep_U, index=rbs_df_U.loc[shot].loc[time].index)
rbs_df_U.loc[shot].loc[time]['R-Rsep omp (cm)'] = pd.Series(RminRsep_omp_U, index=rbs_df_U.loc[shot].loc[time].index)
rbs_df_U.loc[shot].loc[time]['Psin'] = pd.Series(psin_locs_U, index=rbs_df_U.loc[shot].loc[time].index)
rbs_df_U.loc[shot].loc[time]['R (cm)'] = pd.Series(R_locs_U, index=rbs_df_U.loc[shot].loc[time].index)
rbs_df_D.loc[shot].loc[time]['R-Rsep (cm)'] = pd.Series(RminRsep_D, index=rbs_df_D.loc[shot].loc[time].index)
rbs_df_D.loc[shot].loc[time]['R-Rsep omp (cm)'] = pd.Series(RminRsep_omp_D, index=rbs_df_D.loc[shot].loc[time].index)
rbs_df_D.loc[shot].loc[time]['Psin'] = pd.Series(psin_locs_D, index=rbs_df_D.loc[shot].loc[time].index)
rbs_df_D.loc[shot].loc[time]['R (cm)'] = pd.Series(R_locs_D, index=rbs_df_D.loc[shot].loc[time].index)
return rbs_df_U, rbs_df_D
def rbs_df_stats(rbs_df, U_or_D, verbal=False):
"""
Computes the average of each data point at each location along the probe.
rbs_df: DataFrame returned from the above 'fill_in_rbs_df'.
returns: DataFrame of averages at each location for each time during each
shot.
"""
if verbal:
print("Aggregating statistics over all shots and times...")
# First get how many locations there are. np.unique will sort them (don't want),
# so return index will give the indices to preserve order.
locs, order = np.unique(rbs_df.index.get_level_values('locs').values, return_index=True)
locs = locs[order]
nlocs = locs.size
# The DataFrames that will hold our results.
rbs_stat_df = pd.DataFrame()
err_df = pd.DataFrame()
# To understand the indexing here, it'd be best to get this DataFrame into
# a terminal and see how it works in there. It shows the beauty of pandas.
for idx in range(0, nlocs):
# Get the mean values at each location.
rbs_stat_df = rbs_stat_df.append(rbs_df[idx::nlocs].mean(axis=0), ignore_index=True)
# Get the standard deviations at each location.
err_df = err_df.append(rbs_df[idx::nlocs].std(axis=0), ignore_index=True)
# Rename columns to appropriate names. The last two are already errors in rbs_stat_df,
# so std. dev. of them isn't really a thing. Trash them.
suf = U_or_D.upper()
err_df.columns = ['Psin Error ' + suf, 'trash1', 'R-Rsep Error ' + suf + ' (cm)',
'R-Rsep omp Error ' + suf + ' (cm)', 'trash2', 'trash3']
err_df.drop(['trash1', 'trash2', 'trash3'], axis=1, inplace=True)
# Put into one DataFrame to return.
rbs_stat_df = rbs_stat_df.join(err_df)
# Add locs in just because.
rbs_stat_df['Distance from Tip ' + suf + ' (cm)'] = locs
# Fix a couple column names. Sort Columns.
rbs_stat_df.rename(columns={'areal':'W Areal Density ' + suf + ' (1e15 W/cm2)',
'areal_err':'W Areal Density Error ' + suf + ' (1e15 W/cm2)',
'Psin': 'Psin ' + suf,
'R (cm)': 'R ' + suf + ' (cm)',
'R-Rsep (cm)': 'R-Rsep ' + suf + ' (cm)',
'R-Rsep omp (cm)':'R-Rsep omp ' + suf + ' (cm)'},
inplace=True)
# Put data in meaningful order.
rbs_stat_df = rbs_stat_df[['Distance from Tip ' + suf + ' (cm)',
'R ' + suf + ' (cm)',
'R-Rsep ' + suf + ' (cm)',
'R-Rsep Error ' + suf + ' (cm)',
'R-Rsep omp ' + suf + ' (cm)',
'R-Rsep omp Error ' + suf + ' (cm)',
'Psin ' + suf,
'Psin Error ' + suf,
'W Areal Density ' + suf + ' (1e15 W/cm2)',
'W Areal Density Error ' + suf + ' (1e15 W/cm2)']]
#rbs_stat_df = rbs_stat_df.sort_index(axis=1)
return rbs_stat_df
def get_lams(number, probe, conn, verbal):
"""
This functions pulls in the LAMS data and returns it as a Dataframe.
number: The probe number.
probe: One of A, B or C.
conn: An MDSplus connection to r2d2 returned via the pull.thin_connect function.
verbal: Set to True if you want feedback as the program runs.
"""
print("")
# Pull the LAMS data and put it into a dataframe.
lams_dict_U = pull.pull_lams(conn, number, probe + 'U', verbal=True)
lams_dict_D = pull.pull_lams(conn, number, probe + 'D', verbal=True)
lams_df_U = pd.DataFrame(lams_dict_U)
lams_df_D = pd.DataFrame(lams_dict_D)
lams_df =
|
pd.concat((lams_df_U, lams_df_D), axis=1)
|
pandas.concat
|
"""
MIT License
Copyright (c) 2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import time
from time import strptime, mktime
from pandas import Timestamp, DateOffset, to_datetime, Series, NaT, isnull
import calendar
from calendar import timegm
from datetime import datetime, timedelta
import pytz
from pytz import timezone
from stocklook.config import config
import logging as lg
log = lg.getLogger(__name__)
# Time-related helper methods
TZ = 'PYTZ_TIMEZONE'
GLOBAL_TIMEOUT_MAP = dict()
def timestamp_to_local(dt):
"""
Convert nearly any time object to local time.
:param dt:
The following objects are tested:
- utc integer/float/numeric string
- datetime.datetime
- pandas.Timestamp
- date or datetime string coercible by pandas.Timestamp algos
-
:return:
"""
try:
return localize_utc_int(dt)
except:
if not dt:
return None
if isinstance(dt, str):
# convert a string-ish object to a
# pandas.Timestamp (way smarter than datetime)
utc_dt =
|
Timestamp(dt)
|
pandas.Timestamp
|
import gc
import pickle
import gensim
import pandas as pd
from pandarallel import pandarallel
pandarallel.initialize(nb_workers=4)
from gensim.models import Word2Vec
from sklearn.preprocessing import StandardScaler
def get_agg_features(dfs, f1, f2, agg, log):
# Handle special-case arguments
if type(f1) == str:
f1 = [f1]
if agg != 'size':
data = log[f1 + [f2]]
else:
data = log[f1]
f_name = '_'.join(f1) + "_" + f2 + "_" + agg
# Aggregation
if agg == "size":
tmp = pd.DataFrame(data.groupby(f1).size()).reset_index()
elif agg == "count":
tmp = pd.DataFrame(data.groupby(f1)[f2].count()).reset_index()
elif agg == "mean":
tmp = pd.DataFrame(data.groupby(f1)[f2].mean()).reset_index()
elif agg == "unique":
tmp = pd.DataFrame(data.groupby(f1)[f2].nunique()).reset_index()
elif agg == "max":
tmp = pd.DataFrame(data.groupby(f1)[f2].max()).reset_index()
elif agg == "min":
tmp = pd.DataFrame(data.groupby(f1)[f2].min()).reset_index()
elif agg == "sum":
tmp = pd.DataFrame(data.groupby(f1)[f2].sum()).reset_index()
elif agg == "std":
tmp = pd.DataFrame(data.groupby(f1)[f2].std()).reset_index()
elif agg == "median":
tmp = pd.DataFrame(data.groupby(f1)[f2].median()).reset_index()
else:
raise Exception("agg error")
# Assign the aggregated feature to each dataframe
for df in dfs:
try:
del df[f_name]
except:
pass
tmp.columns = f1 + [f_name]
df[f_name] = df.merge(tmp, on=f1, how='left')[f_name]
del tmp
del data
gc.collect()
return [f_name]
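# Illustrative sketch (hypothetical frames/columns, not from the original):
# add the per-user click count from the log onto both train and test frames.
#   get_agg_features([train_df, test_df], 'user_id', 'creative_id', 'count', click_log)
#   # creates a 'user_id_creative_id_count' column on each frame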
def sequence_text(dfs, f1, f2, log):
f_name = 'sequence_text_' + f1 + '_' + f2
print(f_name)
# Iterate over the log to collect each user's click sequence
dic, items = {}, []
for item in log[[f1, f2]].values:
try:
dic[item[0]].append(str(item[1]))
except:
dic[item[0]] = [str(item[1])]
for key in dic:
items.append([key, ' '.join(dic[key])])
# Assign the sequence feature to each dataframe
temp = pd.DataFrame(items)
temp.columns = [f1, f_name]
temp = temp.drop_duplicates(f1)
for df in dfs:
try:
del df[f_name]
except:
pass
temp.columns = [f1] + [f_name]
df[f_name] = df.merge(temp, on=f1, how='left')[f_name]
gc.collect()
del temp
del items
del dic
return [f_name]
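# Illustrative sketch (hypothetical frames, not from the original): collect the
# space-joined click sequence per user and merge it back onto the frames.
#   log = pd.DataFrame({'user_id': [1, 1, 2], 'creative_id': [10, 11, 12]})
#   sequence_text([train_df, test_df], 'user_id', 'creative_id', log)
#   # train_df['sequence_text_user_id_creative_id'] is '10 11' for user 1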
def kfold(train_df, test_df, log_data, pivot):
# First compute k-fold statistics on the log: the gender/age distribution of the pivot feature for each record
kfold_features = ['age_{}'.format(i) for i in range(10)] + ['gender_{}'.format(i) for i in range(2)]
log = log_data[kfold_features + ['user_id', pivot, 'fold']]
tmps = []
for fold in range(6):
tmp = pd.DataFrame(
log[(log['fold'] != fold) & (log['fold'] != 5)].groupby(pivot)[kfold_features].mean()).reset_index()
tmp.columns = [pivot] + kfold_features
tmp['fold'] = fold
tmps.append(tmp)
tmp = pd.concat(tmps, axis=0).reset_index()
tmp = log[['user_id', pivot, 'fold']].merge(tmp, on=[pivot, 'fold'], how='left')
del log
del tmps
gc.collect()
# Average gender/age distribution over all records clicked by the user
tmp_mean = pd.DataFrame(tmp.groupby('user_id')[kfold_features].mean()).reset_index()
tmp_mean.columns = ['user_id'] + [f + '_' + pivot + '_mean' for f in kfold_features]
for df in [train_df, test_df]:
temp = df.merge(tmp_mean, on='user_id', how='left')
temp = temp.fillna(-1)
for f1 in [f + '_' + pivot + '_mean' for f in kfold_features]:
df[f1] = temp[f1]
del temp
gc.collect()
del tmp
del tmp_mean
gc.collect()
def kfold_sequence(train_df, test_df, log_data, pivot):
# First compute k-fold statistics on the log: the gender/age distribution of the pivot feature for each record
kfold_features = ['age_{}'.format(i) for i in range(10)] + ['gender_{}'.format(i) for i in range(2)]
log = log_data[kfold_features + [pivot, 'fold', 'user_id']]
tmps = []
for fold in range(6):
tmp = pd.DataFrame(
log[(log['fold'] != fold) & (log['fold'] != 5)].groupby(pivot)[kfold_features].mean()).reset_index()
tmp.columns = [pivot] + kfold_features
tmp['fold'] = fold
tmps.append(tmp)
tmp = pd.concat(tmps, axis=0).reset_index()
tmp = log[[pivot, 'fold', 'user_id']].merge(tmp, on=[pivot, 'fold'], how='left')
tmp = tmp.fillna(-1)
tmp[pivot + '_fold'] = tmp[pivot] * 10 + tmp['fold']
del log
del tmps
gc.collect()
# Build the sequence of age/gender distributions for the user's click records
tmp[pivot + '_fold'] = tmp[pivot + '_fold'].astype(int)
kfold_sequence_features = sequence_text([train_df, test_df], 'user_id', pivot + '_fold', tmp)
tmp = tmp.drop_duplicates([pivot + '_fold']).reset_index(drop=True)
# Standardize the age/gender distribution of each record
kfold_features = ['age_{}'.format(i) for i in range(10)] + ['gender_{}'.format(i) for i in range(2)]
ss = StandardScaler()
ss.fit(tmp[kfold_features])
tmp[kfold_features] = ss.transform(tmp[kfold_features])
for f in kfold_features:
tmp[f] = tmp[f].apply(lambda x: round(x, 4))
# Write each record's age/gender distribution as a word2vec-format file
with open('data/sequence_text_user_id_' + pivot + '_fold' + ".{}d".format(12), 'w') as f:
f.write(str(len(tmp)) + ' ' + '12' + '\n')
for item in tmp[[pivot + '_fold'] + kfold_features].values:
f.write(' '.join([str(int(item[0]))] + [str(x) for x in item[1:]]) + '\n')
tmp = gensim.models.KeyedVectors.load_word2vec_format(
'data/sequence_text_user_id_' + pivot + '_fold' + ".{}d".format(12), binary=False)
pickle.dump(tmp, open('data/sequence_text_user_id_' + pivot + '_fold' + ".{}d".format(12), 'wb'))
del tmp
gc.collect()
return kfold_sequence_features
if __name__ == "__main__":
# Read data
cl = pd.DataFrame([])
click_log = pd.read_pickle('data/click.pkl')
train_df =
|
pd.read_pickle('data/train_user.pkl')
|
pandas.read_pickle
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : utils.py
@Desc    :   Utility module
@Project : orfd-platform
@Contact : <EMAIL>
@License : (C)Copyright 2018-2019, TheFreer.NET
@WebSite : www.thefreer.net
@Modify Time @Author @Version
------------ ------- --------
2019/05/29 0:44 the freer 2.1
'''
import re
import pandas as pd
from collections import Counter
from setting import PATTERNS, AVG_SEGMENT_LENGTH, AVG_SEGMENT_NUMBER, AVG_DOC_LENGTH
# def is_valid_email(string):
# if re.match(PATTERNS["email"], string):
# return 1
# return 0
#
# def is_valid_contact(string):
# if re.match(PATTERNS["contact"], string):
# return 1
# return 0
#
# def is_valid_url(string):
# if re.match(PATTERNS["url"], string):
# return 1
# return 0
#
# def is_valid_time(string):
# if re.match(PATTERNS["work_time"], string):
# return 1
# return 0
#
def is_fresh(string):
'''
Check whether the input job requirement includes "fresh graduates accepted".
:param string: input requirement string
:return: 1 if it does, 0 otherwise
'''
if len(re.split(",", string)) > 1:
return 1
return 0
def split_require(input_list):
'''
Split job requirements into education and work-experience parts.
:param input_list: list of requirement strings
:return: education requirement list and work-experience requirement list
'''
edu_requires = []
work_requires = []
for inp in input_list:
try:
inp = re.sub(r",.*", "", inp)
r_list = re.split("_", inp)
edu_requires.append(r_list[0])
work_requires.append(r_list[1])
except:
edu_requires.append(inp)
work_requires.append(inp)
return edu_requires, work_requires
def split_welfare(string):
'''
Split the welfare text; scraped welfare info is stored in the unified format w1_w2_w3.
:param string: input welfare string
:return: list of welfare items
'''
try:
tmp_list = re.split(",", string)
welfare = re.split(r"_", tmp_list[0])
except:
welfare = ["None"]
return welfare
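# Example (illustrative, not from the original data): only the part before the
# first comma is kept, then split on '_':
#   split_welfare('insurance_paid-leave,extra')  -> ['insurance', 'paid-leave']
#   split_welfare(None)                          -> ['None']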
def welfare_map(w_list, dic):
'''
Map input welfare categories to labels.
:param w_list: list of categories
:param dic: category-to-label dictionary
:return: list of encoded labels
'''
new_welfare = []
for w in w_list:
if w in dic.keys():
new_welfare.append(dic[w])
else:
new_welfare.append(dic["others"])
return new_welfare
def welfare_count(input_list):
'''
Count category frequencies over the input category lists.
:param input_list: list of category lists
:return: Counter object holding the sorted category frequencies
'''
welfare_list = []
for inp in input_list:
welfare_list += inp
return Counter(welfare_list)
def split_doc(doc):
'''
Process the input paragraph and output a segment of length < 168.
:param doc: input paragraph
:return: segment text
'''
seg_list = re.split(PATTERNS["segment"], doc)
segment = ""
for seg in seg_list:
if len(seg) > AVG_SEGMENT_LENGTH:
segment += seg
if len(segment) > AVG_DOC_LENGTH:
segment = segment[:AVG_DOC_LENGTH]
if len(segment) < AVG_DOC_LENGTH and len(seg_list) < AVG_SEGMENT_NUMBER:
segment = "".join(seg_list)
if len(segment) < AVG_SEGMENT_LENGTH:
segment = "".join(seg_list)
print(len(segment))
return segment
def split_doc_2(doc):
'''
Return the length of the input paragraph after segmentation and filtering.
:param doc: input paragraph
:return: length after processing
'''
seg_list = re.split(PATTERNS["segment"], doc)
segment = ""
for seg in seg_list:
if len(seg) > AVG_SEGMENT_LENGTH:
segment += seg
return len(segment)
def split_dataset(ori, tri, tes, frac=0.9216):
'''
Split the original dataset into training and test sets.
:param ori: path to the original dataset
:param tri: output path for the training set
:param tes: output path for the test set
:param frac: split ratio, tes:tri
:return:
'''
origin_data = pd.read_csv(ori) # frac=0.9216
fake = origin_data[origin_data[list(origin_data.columns)[-1]] == 0].sample(frac=frac, random_state=0, axis=0)
real = origin_data[origin_data[list(origin_data.columns)[-1]] == 1].sample(len(fake), random_state=0, axis=0)
train_data =
|
pd.concat([fake, real], axis=0, join="outer")
|
pandas.concat
|
from flask import Flask, g, jsonify, json, request
from flask_cors import CORS
import numpy as np
import os
import pandas as pd
import pysam
from scipy.cluster.hierarchy import linkage, to_tree
import zipfile
def genotype(gt: tuple) -> int:
"""Convert genotype tuple to dosage (0/1/2)"""
return None if gt == (None, None) else gt[0] + gt[1]
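# Example (illustrative): the dosage encoding used here.
#   genotype((0, 0))       -> 0 (homozygous reference)
#   genotype((0, 1))       -> 1 (heterozygous)
#   genotype((1, 1))       -> 2 (homozygous alternate)
#   genotype((None, None)) -> None (missing call)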
def variant_record(variant_id, vcf):
"""Get record for one variant from VCF"""
chrom, pos = variant_id.split(":")
chrom = chrom.replace("chr", "")
pos = int(pos)
recs = list(vcf.fetch(chrom, pos - 1, pos, reopen=True))
assert len(recs) == 1, f"Genotype retrieval error: {variant_id}"
return recs[0]
def geno_matrix(ids, vcf):
"""Get genotype matrix for a list of SNPs
Assumes SNPs are in close proximity on a chromosome, e.g. in a cis-window.
"""
chrom = ids[0].split(":")[0].replace("chr", "")
pos = [int(x.split(":")[1]) for x in ids]
genos = {}
for rec in vcf.fetch(chrom, min(pos) - 1, max(pos) + 1):
if rec.id in ids:
genos[rec.id] = [genotype(rec.samples[s]["GT"]) for s in vcf.header.samples]
mat = np.array([genos[id] if id in genos else [None] * len(vcf.header.samples) for id in ids])
return mat
def get_newick(node, newick, parentdist, leaf_names):
"""Save dendrogram in Newick format
from https://stackoverflow.com/questions/28222179/save-dendrogram-to-newick-format/31878514#31878514
"""
if node.is_leaf():
return "%s:%g%s" % (leaf_names[node.id], parentdist - node.dist, newick)
if len(newick) > 0:
newick = "):%g%s" % (parentdist - node.dist, newick)
else:
newick = ");"
newick = get_newick(node.get_left(), newick, node.dist, leaf_names)
newick = get_newick(node.get_right(), ",%s" % (newick), node.dist, leaf_names)
newick = "(%s" % (newick)
return newick
def row_tree(d):
"""Get Newick representation of matrix for clustering"""
clust = linkage(d, method="average", optimal_ordering=True)
tree = to_tree(clust)
return get_newick(tree, "", tree.dist, d.index)
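# Illustrative sketch (hypothetical matrix, not from the original data):
#   d = pd.DataFrame([[1.0, 2.0], [1.1, 2.1], [5.0, 6.0]],
#                    index=["g1", "g2", "g3"])
#   row_tree(d)   # Newick string such as "((g1:...,g2:...):...,g3:...);"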
def validate_genes(ids, genes):
"""Return valid gene IDs for a list of gene IDs/names"""
valid = []
for id in ids:
if id in genes.index:
valid.append(id)
else:
x = list(genes.loc[genes["geneSymbol"] == id, :].index)
if len(x) > 0:
valid.append(x[0])
else:
id2 = id[0].upper() + id[1:].lower()
x = list(genes.loc[genes["geneSymbol"] == id2, :].index)
if len(x) > 0:
valid.append(x[0])
return valid
def format_per_tissue_gene_info(info: list, tissues: list):
"""Collect per-tissue expression and eQTL indicators into a list"""
for gene in info:
gene["statusInTissue"] = []
for tissue in tissues:
item = {
"tissueSiteDetailId": tissue,
"expressed": gene["expr_" + tissue],
"tested": gene["tested_" + tissue],
"eqtl": gene["eqtl_" + tissue],
}
gene["statusInTissue"].append(item)
del gene["expr_" + tissue]
del gene["tested_" + tissue]
del gene["eqtl_" + tissue]
# def load_tpm(path):
# tpm = {}
# expr = pd.read_csv(path, sep="\t")
# samples = pd.read_csv("../data/ref/metadata.csv")
# samples = samples.loc[samples["QC_pass"] == "pass", :]
# expr = expr.loc[:, expr.columns.isin(samples["library"])]
# tis_conv = {"Acbc": "NAcc", "IL": "IL", "LHB": "LHb", "PL": "PL", "VoLo": "OFC"}
# tis = pd.Series([tis_conv[x.split("_")[1]] for x in expr.columns])
# for tissue in tis.unique():
# tpm[tissue] = expr.loc[:, list(tis == tissue)]
# return tpm
def cis_pval(tissue, gene, variant):
"""Return nominal p-value for a given cis-window variant"""
with zipfile.ZipFile(f"../data/cis_pvals/{tissue}.zip", "r") as archive:
fname = f"{tissue}/{gene}.txt"
if fname in archive.namelist():
df = pd.read_csv(archive.open(fname), sep="\t", index_col="variant_id")
if variant in df.index:
return df.loc[variant, "pval_nominal"]
return None
def single_tissue(gene):
"""Return table of significant cis-eSNPs for a gene"""
with zipfile.ZipFile(f"../data/singleTissueEqtl.zip", "r") as archive:
fname = f"singleTissueEqtl/{gene}.txt"
if fname in archive.namelist():
d = pd.read_csv(archive.open(fname), sep="\t", dtype={"chromosome": str})
d["geneId"] = gene
return d
return None
tissueInfo = pd.read_csv("../data/tissueInfo.txt", sep="\t")
tissueInfo = tissueInfo.to_dict(orient="records")
topExpr = pd.read_csv("../data/topExpressedGene.txt", sep="\t")
genes = pd.read_csv("../data/gene.txt", sep="\t", index_col="geneId").fillna("")
tissues = [tissue["tissueSiteDetailId"] for tissue in tissueInfo]
dataset = {tissue["tissueSiteDetailId"]: tissue["dataset"] for tissue in tissueInfo}
med_expr = pd.read_csv(
"../data/medianGeneExpression.txt.gz", sep="\t", index_col="geneId"
)
tpm = {}
for tissue in tissues:
tpm_file = f"../data/expr/{tissue}.expr.tpm.bed.gz"
tpm[tissue] = pd.read_csv(
tpm_file, sep="\t", dtype={"#chr": str}, index_col="gene_id"
)
tpm[tissue].drop(columns=["#chr", "start", "end"], inplace=True)
iqn = {}
for tissue in tissues:
iqn_file = f"../data/expr/{tissue}.expr.iqn.bed.gz"
iqn[tissue] = pd.read_csv(
iqn_file, sep="\t", dtype={"#chr": str}, index_col="gene_id"
)
iqn[tissue].drop(columns=["#chr", "start", "end"], inplace=True)
# vcf = pysam.VariantFile("../data/ratgtex.vcf.gz")
vcf = {}
for dset in set(dataset.values()):
vcf[dset] = pysam.VariantFile(f"../data/geno/{dset}.vcf.gz")
ref_vcf = vcf["BLA_NAcc2_PL2"]
exons =
|
pd.read_csv("../data/exon.txt", sep="\t", dtype={"chromosome": str})
|
pandas.read_csv
|
import numpy as np
import pandas as pd
from sklearn import *
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from matplotlib import pyplot
import time
import os
showPlot=True
#prepare data
data_file_name = "../data3.csv"
print('***** Linear Regression Model without CountTransaction Feature *****')
#read and prepare data from datafile
data_csv = pd.read_csv(data_file_name, delimiter = ';',header=None, usecols=[2,5,6,7,8,9,10,11,12,13,14])
# Read rows, skipping the header line
data = data_csv[1:]
# Rename the columns
data.columns = ['SumRetrait','ConsommationHier','MSemaineDernier','MSemaine7','ConsoMmJrAnP','ConsoMmJrMP',
'ConsoMMJrSmDer','MoyenneMoisPrec','MoyenneMMSAnPrec','MoyenneMMmAnPrec','ConsommationMaxMDer']
# print (data.head(10))
# pd.options.display.float_format = '{:,.0f}'.format
# Drop rows containing at least one null value
data = data.dropna ()
# Output y with its type (float)
y=data['SumRetrait'].astype(float)
cols=['ConsommationHier','MSemaineDernier','MSemaine7','ConsoMmJrAnP','ConsoMmJrMP','ConsoMMJrSmDer','MoyenneMoisPrec','MoyenneMMSAnPrec','MoyenneMMmAnPrec','ConsommationMaxMDer']
x=data[cols].astype(float)
x_train ,x_test ,y_train ,y_test = train_test_split( x,y, test_size=0.2 , random_state=1116)
print(type(y_test))
#Design the Regression Model
regressor =LinearRegression()
##training
regressor.fit(x_train,y_train)
#Make prediction
y_pred =regressor.predict(x_test)
# print (y_pred)
# print("---- test----")
# print(y_test)
# for i in range(len(y_pred)):
# print("Real = %s , Predicted = %s" % (y_test[i], y_pred[i]))
YArray = y_test.to_numpy()
#print(YArray)
testData = pd.DataFrame(YArray)
preddData = pd.DataFrame(y_pred)
meanError = np.abs((YArray - y_pred)/YArray)*100
meanError2 = np.abs((YArray - y_pred))
print("mean: %s", meanError.mean()," - ", meanError2.mean())
dataF =
|
pd.concat([testData,preddData], axis=1)
|
pandas.concat
|
# -*- coding: utf-8 -*-
"""
@created on: 2/17/20,
@author: <NAME>,
@version: v0.0.1
@system name: badgod
Description:
..todo::
"""
import os
import subprocess
import glob
import librosa
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle
import pysptk
from math import pi
import torch
import wavio
from joblib import Parallel, delayed
from pyannote.audio.utils.signal import Binarize
from pyts.image import GramianAngularField
from tqdm import tqdm
from collections import defaultdict
from scipy.fftpack import fft, hilbert
import urllib
from covid_19.datagen.vggish import vggish_input
from covid_19.datagen.vggish import vggish_params
from covid_19.datagen.vggish import vggish_slim
import tensorflow as tf
import sys
import pathlib
from covid_19.utils.file_utils import delete_file
import json
sys.path.append(str(pathlib.Path(__file__).parent.absolute()) + '/vggish')
tf.compat.v1.disable_v2_behavior()
SR = 22050
FRAME_LEN = int(SR / 10) # 100 ms
HOP = int(FRAME_LEN / 2)  # 50% overlap, i.e. a 50 ms hop length
MFCC_dim = 13 # the MFCC dimension
SR_VGG = 16000
# Vggish
def download(url, dst_dir):
"""Download file.
If the file not exist then download it.
Args:url: Web location of the file.
Returns: path to downloaded file.
"""
filename = url.split('/')[-1]
filepath = os.path.join(dst_dir, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' %
(filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(url, filepath, _progress)
statinfo = os.stat(filepath)
print('Successfully downloaded:', filename, statinfo.st_size, 'bytes.')
return filepath
def sta_fun_2(npdata):  # 2D np array (frames x dims)
"""Extract various statistical features from the numpy array provided as input.
:param np_data: the numpy array to extract the features from
:type np_data: numpy.ndarray
:return: The extracted features as a vector
:rtype: numpy.ndarray
"""
# perform a sanity check
if npdata is None:
raise ValueError("Input array cannot be None")
# perform the feature extraction
Mean = np.mean(npdata, axis=0)
Std = np.std(npdata, axis=0)
# finally return the features in a concatenated array (as a vector)
return np.concatenate((Mean, Std), axis=0).reshape(1, -1)
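# Example (illustrative): for a (frames x dims) feature matrix the result is
# the per-dimension mean and std concatenated into a (1, 2 * dims) row vector.
#   feats = np.arange(12, dtype=float).reshape(4, 3)
#   sta_fun_2(feats).shape   # -> (1, 6)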
print("\nTesting your install of VGGish\n")
# Paths to downloaded VGGish files.
checkpoint_path = str(pathlib.Path(__file__).parent.absolute()) + "/vggish/vggish_model.ckpt"
if not os.path.exists(checkpoint_path): # automatically download the checkpoint if not exist.
url = 'https://storage.googleapis.com/audioset/vggish_model.ckpt'
download(url, str(pathlib.Path(__file__).parent.absolute()) + '/vggish')
sess = tf.compat.v1.Session()
vggish_slim.define_vggish_slim()
vggish_slim.load_vggish_slim_checkpoint(sess, checkpoint_path)
features_tensor = sess.graph.get_tensor_by_name(vggish_params.INPUT_TENSOR_NAME)
embedding_tensor = sess.graph.get_tensor_by_name(
vggish_params.OUTPUT_TENSOR_NAME
)
def tensorflow_close():
if sess is not None:
sess.close()
def vggish_features(signal):
input_batch = vggish_input.waveform_to_examples(
signal, SR_VGG
) # ?x96x64 --> ?x128
[features] = sess.run(
[embedding_tensor], feed_dict={features_tensor: input_batch}
)
features = sta_fun_2(features)
return features
def mfcc_features(audio, sampling_rate, normalise=False):
mfcc = librosa.feature.mfcc(y=audio, n_mfcc=40, sr=sampling_rate)
if normalise:
mfcc_norm = np.mean(mfcc.T, axis=0)
return mfcc_norm
else:
return mfcc
def mel_filters(audio, sampling_rate, normalise=False):
mel_spec = librosa.feature.melspectrogram(y=audio, n_mels=40, sr=sampling_rate)
if normalise:
return np.mean(librosa.power_to_db(mel_spec, ref=np.max).T)
else:
return librosa.power_to_db(mel_spec, ref=np.max)
def cut_audio(audio, sampling_rate, sample_size_in_seconds, overlap):
"""
Method to split a audio signal into pieces based on `sample_size_in_seconds` and `overlap` parameters
:param audio: The main audio signal to be split
:param sampling_rate: The rate at which audio is sampled
:param sample_size_in_seconds: number of seconds in each split
:param overlap: in seconds, how much of overlap is required within splits
:return: List of splits
"""
if overlap >= sample_size_in_seconds:
raise Exception("Please maintain this condition: sample_size_in_seconds > overlap")
def add_to_audio_list(y):
if len(y) / sampling_rate < sample_size_in_seconds:
raise Exception(
f'Length of audio lesser than `sampling size in seconds` - {len(y) / sampling_rate} seconds, required {sample_size_in_seconds} seconds')
y = y[:required_length]
audio_list.append(y)
audio_list = []
required_length = sample_size_in_seconds * sampling_rate
audio_in_seconds = len(audio) // sampling_rate
# Check if the main audio file is larger than the required number of seconds
if audio_in_seconds >= sample_size_in_seconds:
start = 0
end = sample_size_in_seconds
left_out = None
# Until highest multiple of sample_size_in_seconds is reached, ofcourse, wrt audio_in_seconds, run this loop
while end <= audio_in_seconds:
index_at_start, index_at_end = start * sampling_rate, end * sampling_rate
one_audio_sample = audio[index_at_start:index_at_end]
add_to_audio_list(one_audio_sample)
left_out = audio_in_seconds - end
start = (start - overlap) + sample_size_in_seconds
end = (end - overlap) + sample_size_in_seconds
# Whatever is left out after the iteration, just include that to the final list.
# Eg: if 3 seconds is left out and sample_size_in_seconds is 5 seconds, then cut the last 5 seconds of the audio
# and append to final list.
if left_out > 0:
one_audio_sample = audio[-sample_size_in_seconds * sampling_rate:]
add_to_audio_list(one_audio_sample)
# Else, just repeat the required number of seconds at the end. The repeated audio is taken from the start
else:
less_by = sample_size_in_seconds - audio_in_seconds
excess_needed = less_by * sampling_rate
one_audio_sample = np.append(audio, audio[-excess_needed:])
# This condition is for samples which are too small and need to be repeated
# multiple times to satisfy the `sample_size_in_seconds` parameter
while len(one_audio_sample) < (sampling_rate * sample_size_in_seconds):
one_audio_sample = np.hstack((one_audio_sample, one_audio_sample))
add_to_audio_list(one_audio_sample)
return audio_list
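# Illustrative sketch (hypothetical signal, not from the original): an 8 second
# signal cut into 3 second windows with 1 second overlap yields windows at
# 0-3 s, 2-5 s and 4-7 s, plus one final window taken from the last 3 seconds
# to cover the leftover second.
#   audio = np.zeros(8 * SR)
#   chunks = cut_audio(audio, SR, sample_size_in_seconds=3, overlap=1)
#   len(chunks)   # -> 4, each of length 3 * SR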
def envelope(y, rate, threshold):
mask = []
y = pd.Series(y).apply(np.abs)
y_mean = y.rolling(window=int(rate / 10), min_periods=1, center=True).mean()
for e, mean in enumerate(y_mean):
# print('Mean - ', e, int(e/rate) ,mean) if e%500==0 else None
if mean > threshold:
mask.append(True)
else:
mask.append(False)
return mask
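# Illustrative sketch (hypothetical values, not from the original): keep only
# the parts of a signal whose rolling mean amplitude exceeds the threshold.
#   y, rate = librosa.load('sample.wav', sr=SR)   # hypothetical file
#   mask = envelope(y, rate, threshold=0.005)
#   y_trimmed = y[np.array(mask)]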
def get_shimmer_jitter_from_opensmile(audio, index, sr):
wavio.write(f'temp_{str(index)}.wav', audio, sr, sampwidth=3)
subprocess.call(
["SMILExtract", "-C", os.environ['OPENSMILE_CONFIG_DIR'] + "/IS10_paraling.conf", "-I",
f"temp_{str(index)}.wav", "-O",
f"temp_{str(index)}.arff"])
# Read file and extract shimmer and jitter features from the generated arff file
file = open(f"temp_{str(index)}.arff", "r")
data = file.readlines()
# First 3 values are title, empty line and name | Last 5 values are numeric data,
# and bunch of empty lines and unwanted text
# headers = data[3:-5]
headers = data[3:data.index('@data\n')]
headers = headers[:headers.index('@attribute class numeric\n')]
# Last line of data is where the actual numeric data is. It is in comma separated string format. After splitting,
# remove the first value which is name and the last value which is class
numeric_data = data[-1].split(',')[1:-1]
assert len(headers) == len(numeric_data), "Features generated from opensmile are not matching with its headers"
# data_needed = {x.strip(): float(numeric_data[e]) for e, x in enumerate(headers) if 'jitter' in x or 'shimmer' in x}
data_needed = [float(numeric_data[e]) for e, x in enumerate(headers) if 'jitter' in x or 'shimmer' in x]
# clean up all files
delete_file(f'temp_{str(index)}.wav')
delete_file(f'temp_{str(index)}.arff')
return data_needed
def sta_fun(np_data):
"""Extract various statistical features from the numpy array provided as input.
:param np_data: the numpy array to extract the features from
:type np_data: numpy.ndarray
:return: The extracted features as a vector
:rtype: numpy.ndarray
"""
# perform a sanity check
if np_data is None:
raise ValueError("Input array cannot be None")
# perform the feature extraction
dat_min = np.min(np_data)
dat_max = np.max(np_data)
dat_mean = np.mean(np_data)
dat_rms = np.sqrt(np.sum(np.square(np_data)) / len(np_data))
dat_median = np.median(np_data)
dat_qrl1 = np.percentile(np_data, 25)
dat_qrl3 = np.percentile(np_data, 75)
dat_lower_q = np.quantile(np_data, 0.25, interpolation="lower")
dat_higher_q = np.quantile(np_data, 0.75, interpolation="higher")
dat_iqrl = dat_higher_q - dat_lower_q
dat_std = np.std(np_data)
s =
|
pd.Series(np_data)
|
pandas.Series
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 5 00:04:41 2020
@author: shashanknigam
web parser for amazon:
Things to be extracted: 1. Title of the product span id = "productTitle"
2. Number of rating : span id = acrCustomerReviewText
3. Average rating given:span class a-icon-alt
4. Description: div id = featurebullets_feature_div.text
5. Product description: heading description format h3:a-spacing-mini :- neighboring text p class="a-spacing-base"
6. Other features if any h4 class="a-spacing-mini" p : afterwards.
-- later consideration 6.5: Comparison id=HLCXComparisonTable
item heading: tr class="comparison_table_image_row"
img.src :Name
class="a-row a-spacing-top-small"
7. Product information div id = "productDetails_detailBullets_sections1"
1. Product dimensions th label td value
2. Item weight
3. Shipping weight
4. Manufacturer
5. ASIN
6. Model Number
7. Customer reviews
8. Best sellers rank
9. Warantee if any
8. Question answers: div =class="a-section a-spacing-none askBtfTopQuestionsContainer" ; span class = "a-text-bold" next sibling id (class="a-declarative")the child question next span class= askLongText class="a-color-tertiary a-nowrap" for r the next teritory wrap
9. Customer reviews: all if possible : - class="cr-lighthouse-term " (terms)
1. data-hook="review-star-rating" user rating
2. data-hook="review-title"
3. class="a-row a-spacing-small review-data" detailed review
4. data-hook="see-all-reviews-link-foot"
5. class="a-last"
10. Price: span id = priceblock_ourprice
Hanumanji
a-section celwidget
cr-dp-lighthut
["a-fixed-left-grid","a-spacing-base"]
['a-fixed-left-grid-col', 'a-col-right']
reviews-medley-footer
id="cr-dp-desktop-lighthut"
["a-fixed-right-grid-col","cm_cr_grid_center_right"]
"""
"""
Getting each details out:
"""
from selenium import webdriver
import time
from bs4 import BeautifulSoup as soup
import bs4
import sys
import traceback
import numpy as np
import pandas as pd
import gc
product_dict={"ASIN":[],"Name":[]}
productDetails = {"ASIN":[],"Name":[],"Average Rating":[],"TotalRating":[],"Price":[],"Features":[]}
Description = {"ASIN":[],"ShortDescription":[],"LongDescription":[]}
productReview = {"ASIN":[],"Date":[],"Rating":[],"Title":[],"Detail":[]}
productQA = {"ASIN":[],"Question":[],"Answer":[]}
productInformation={"ASIN":[]} #Rest of the fields are optional
productRating={"ASIN":[],"5":[],"4":[],"3":[],"2":[],"1":[]}
ASIN=""
failed = []
#QA= {"Question":[],"Answers":[],"ASIN":[]}
#customerReviews = {"ASIN":[],"UserRating":[],"Title":[],"detailedReview":[]}
pages=0
driver = 0
ASIN_LIST = []
def initASIN_LIST():
global ASIN_LIST
df = pd.read_excel('/Users/shashanknigam/downloads/nlp_project/shopBot/webscraping/AmazonDataSet/ProductDictionary.xlsx')
ASIN_LIST = list(df['ASIN'])
def readWebpage(url,driver_not_in_use=-1):
try:
global pages
global driver
driver = np.random.randint(0,2)
while driver==driver_not_in_use:
driver = np.random.randint(0,2)
if driver ==0:
browser = webdriver.Safari()
elif driver==1:
browser = webdriver.Chrome('/Users/shashanknigam/Downloads/Beautiful Soup/chromedriver')
#elif driver==2:
# browser=webdriver.Firefox('/Users/shashanknigam/Downloads/Beautiful Soup/')
browser.get(url)
contents = browser.page_source
#time.sleep(1)
browser.close()
del browser
return contents
except:
try:
driver = np.random.randint(0,2)
if driver ==0:
browser = webdriver.Safari()
elif driver==1:
browser = webdriver.Chrome('/Users/shashanknigam/Downloads/Beautiful Soup/chromedriver')
#elif driver==2:
# browser=webdriver.Firefox('/Users/shashanknigam/Downloads/Beautiful Soup/')
browser.get(url)
contents = browser.page_source
browser.close()
del browser
return contents
except:
print(sys.exc_info())
print(traceback.format_exc())
return None
#time.sleep(10)
def getSoup(url):
global driver
w = readWebpage(url)
if w is not None:
s = soup(w,'html.parser')
while "Robot Check" in s.text:
w = readWebpage(url,driver)
s = soup(w,'html.parser')
else:
s=None
return s
def get(s,tag,attr=None):
if attr is None:
return s.find_all(tag)
else:
#print("searching for attribute:"+attr)
tags = s.find_all(tag)
return [t for t in tags if attr in t.attrs.keys()]
def getNextSibling(tag):
while True:
if tag.next_sibling == '' or tag.next_sibling is None:
return None
elif tag.next_sibling in ['\n','\xa0'] or tag.next_sibling.name=='br':
tag = tag.next_sibling
else:
return tag.next_sibling
def getNextSiblingText(tag):
while True:
#print(tag)
if tag.next_sibling == '' or tag.next_sibling is None:
return ''
elif tag.next_sibling in ['\n','\xa0'] or tag.next_sibling.name=='br' or tag.next_sibling==' ':
tag = tag.next_sibling
else:
if isinstance(tag.next_sibling,bs4.element.Tag):
return tag.next_sibling.text
else:
return str(tag.next_sibling)
def parseQA(url,QA,ASIN):
s=getSoup(url)
if s is not None:
s_div = get(s,'div','class')
qa_div = [q for q in s_div if q['class']==['celwidget']]
if len(qa_div)>1:
qa_div = qa_div[1]
elif len(qa_div)==1:
qa_div = qa_div[0]
else:
qa_div=None
if qa_div is not None:
qa=get(qa_div,'div','class')
qa_inner = [q for q in qa if q['class']==['a-fixed-left-grid-col', 'a-col-right']]
#print("qa_inner",len(qa_inner))
for i in qa_inner:
qa_inner_temp=get(i,'div','class')
qa_inner_inner=[q for q in qa_inner_temp if q['class']==['a-fixed-left-grid-col', 'a-col-right']]
#print(len(qa_inner_inner))
if len(qa_inner_inner)>1:
QA['ASIN'].append(ASIN)
QA['Question'].append(qa_inner_inner[0].text.strip())
QA['Answer'].append(qa_inner_inner[1].span.text.strip())
#QA[qa_inner_inner[0].text.strip()]=qa_inner_inner[1].span.text.strip()
elif len(qa_inner_inner)==1:
#print(qa_inner_inner)
QA['ASIN'].append(ASIN)
QA['Question'].append(qa_inner_inner[0].text.strip())
QA['Answer'].append('')
#QA[qa_inner_inner[0].text.strip()]=''
li = get(s,'li','class')
li_last = [l for l in li if l['class']==['a-last']]
next_url = ""
if len(li_last)!=0:
if 'https://www.amazon.com/' not in li_last[0].a['href']:
next_url='https://www.amazon.com/'+li_last[0].a['href']
else:
next_url= li_last[0].a['href']
else:
next_url=""
s.decompose()
else:
next_url=""
return QA,next_url
def parseReview(url,review,ASIN):
#cm_cr-review_list
s=getSoup(url)
if s is not None:
s_div = get(s,'div','id')
div_reviews = [d for d in s_div if d['id']=="cm_cr-review_list"]
if len(div_reviews)>0:
div_reviews=div_reviews[0]
div_review = get(div_reviews,"div","data-hook")
div_r = [r for r in div_review if r['data-hook']=='review']
for i in div_r:
try:
rating_i = get(i,'i','data-hook')
rating = [r for r in rating_i if r['data-hook']=="review-star-rating"]
rating = rating[0].text.strip()
span_d = get(i,'span','data-hook')
date = [d for d in span_d if d['data-hook']=="review-date"]
date = date[0].text.strip()
review_t = get(i,'a','data-hook')
review_title=[t for t in review_t if t['data-hook']=="review-title"]
review_title = review_title[0].text.strip()
review_b=[b for b in span_d if b['data-hook']=="review-body"]
review_b = review_b[0].text.strip()
review["ASIN"].append(ASIN)
review["Rating"].append(rating)
review["Date"].append(date)
review["Title"].append(review_title)
review["Body"].append(review_b)
except:
print(sys.exc_info())
print(traceback.format_exc())
pass
li = get(s,'li','class')
next_url = [l for l in li if l['class']==["a-last"]]
if len(next_url)>0:
url ='https://www.amazon.com'+next_url[0].a['href']
else:
print("Error")
url=None
else:
url=None
s.decompose()
else:
url=None
#span
# data-hook = "review-date"
# i data-hook "review-star-rating"
# span data-hook "review-title"
#a-section review aok-relative
return url,review
def appendExcel(filename,df1):
df = pd.read_excel(filename,index_col=0)
df = df.append(df1)
df.to_excel(filename)
df=None
def parseAmazon(url):
#global pages
#global product_dict,productDetails,Description,productQA,productInformation,ASIN,productReview,failed
global pages,failed,ASIN_LIST
if pages==0:
initASIN_LIST()
product_dict={"ASIN":[],"Name":[]}
productDetails = {"ASIN":[],"Average Rating":[],"TotalRating":[],"Price":[],"Features":[]}
Description = {"ASIN":[],"ShortDescription":[],"LongDescription":[]}
productReview = {"ASIN":[],"Date":[],"Rating":[],"Title":[],"Body":[]}
productQA = {"ASIN":[],"Question":[],"Answer":[]}
productInformation={"ASIN":[]} #Rest of the fields are optional
productRating={"ASIN":[],"5":[],"4":[],"3":[],"2":[],"1":[]}
ASIN=""
s=getSoup(url)
if s is not None:
s_span = get(s,'span','id')
try:
title = [t for t in s_span if t['id']=="productTitle"]
title = title[0].text.strip()
numberOfRating = [r for r in s_span if r['id']=="acrCustomerReviewText"]
if len(numberOfRating)>0:
numberOfRating = numberOfRating[0].text.strip()
else:
numberOfRating="Unk"
averageRating = [i for i in s_span if i['id']=="acrPopover"]
if len(averageRating)>0:
averageRating = averageRating[0].text.strip()
else:
averageRating="Unk"
productPrice = [p for p in s_span if (p['id']=="priceblock_ourprice" or p['id']=="priceblock_saleprice")]
if len(productPrice)>0:
productPrice = productPrice[0].text
else:
productPrice ="Unk"
s_div = get(s,'div','id')
features = [f for f in s_div if f['id']=="feature-bullets"]
if len(features)>0:
features = features[0].text.strip().replace('\n','').replace('\t','')
else:
features=""
try:
product_Information =[pi for pi in s_div if pi['id']=='prodDetails']
pi_th = get(product_Information[0],'th')
pi_td = get(product_Information[0],'td')
pi_th_text = [t.text.strip() for t in pi_th if t.text.strip()!='']
pi_td_text = [t.text.strip().replace('\n','').replace('\t','') for t in pi_td if t.text.strip()!='']
#print(pi_th_text,pi_td_text)
label_col = []
if pages!=0:
columns = pd.read_excel('/Users/shashanknigam/downloads/nlp_project/shopBot/webscraping/AmazonDataSet/ProductInformation.xlsx').columns
else:
columns= None
#print(columns)
for i in range(len(pi_th_text)):
if pi_th_text[i]!="Customer Reviews":
if pi_th_text[i]=="ASIN":
ASIN = pi_td_text[i]
label_col.append(pi_th_text[i])
if columns is None:
if pi_th_text[i] not in productInformation.keys() :
productInformation[pi_th_text[i]]=[]
productInformation[pi_th_text[i]].append(pi_td_text[i])
else:
productInformation[pi_th_text[i]].append(pi_td_text[i])
else:
if pi_th_text[i] not in productInformation.keys() and pi_th_text[i] in columns:
productInformation[pi_th_text[i]]=[]
productInformation[pi_th_text[i]].append(pi_td_text[i])
elif pi_th_text[i] in columns:
productInformation[pi_th_text[i]].append(pi_td_text[i])
#for i in productInformation.keys():
# if i not in label_col:
# productInformation[i].append("")
if len(pi_th_text)==0:
heading=""
body=""
for i in range(0,len(pi_td_text)-1,2):
#print(i,len(pi_td_text))
heading = pi_td_text[i]
body = pi_td_text[i+1]
#print(i,heading,body)
if heading=="ASIN":
ASIN = body
#print(ASIN)
if heading!="Customer Reviews":
if columns is None:
if heading not in productInformation.keys():
productInformation[heading]=[]
productInformation[heading].append(body)
else:
productInformation[heading].append(body)
else:
if heading not in productInformation.keys() and heading in columns:
productInformation[heading]=[]
productInformation[heading].append(body)
elif heading in columns:
productInformation[heading].append(body)
except:
ASIN="Not available"
#print(sys.exc_info())
#print(traceback.format_exc())
if ASIN not in ASIN_LIST:
productDescription = [p for p in s_div if p['id']=="aplus"]
if len(productDescription)!=0:
h3_title = get(productDescription[0],'h3')
h4_title = get(productDescription[0],'h4')
p_description = get(productDescription[0],'p')
h3_title_text = [text.text.strip() for text in h3_title if text.text!="" and text.text.strip()!='']
p_description_text = [text.text.strip() for text in p_description if text.text!="" and text.text is not None and text.text.strip()!='']
h4_title_text =[text.text.strip() for text in h4_title if text.text!="" and text.text.strip()!='']
j=0
for i in range(len(h3_title_text)):
if h3_title_text[i] not in ["OTHER FEATURES","FEATURES"]:
Description['ASIN'].append(ASIN)
Description['ShortDescription'].append(h3_title_text[i])
Description['LongDescription'].append(p_description_text[j])
#product_description[h3_title_text[i]]=p_description_text[j]
j+=1
for i in range(len(h4_title_text)):
Description['ASIN'].append(ASIN)
Description['ShortDescription'].append(h4_title_text[i])
if j<len(p_description_text)-1:
Description['LongDescription'].append(p_description_text[j])
else:
Description['LongDescription'].append("")
#product_description[h4_title_text[i]]=p_description_text[j]
j+=1
else:
productDescription = [p for p in s_div if p['id']=="productDescription"]
#print(productDescription)
if len(productDescription)>0:
productDescription_b = get(productDescription[0],'b')
for i in productDescription_b:
#print(i.text.strip(),getNextSiblingText(i).strip())
if getNextSiblingText(i).strip()!='':
Description['ASIN'].append(ASIN)
Description['ShortDescription'].append(i.text.strip())
Description['LongDescription'].append(getNextSiblingText(i).strip())
# product_description[i.text.strip()] = getNextSiblingText(i).strip()
#print(Description)
qa_desc = [q for q in s_div if q['id']=='ask_lazy_load_div']
qa_url = qa_desc[0].a['href']
#QA = {}
while qa_url!='':
productQA,qa_url=parseQA(qa_url,productQA,ASIN)
review_summary = [d for d in s_div if d['id']=='reviewsMedley'][0]
rev_span = get(review_summary,'span','class')
#global productRating
rev_span = [r for r in rev_span if r['class']==["a-size-base"]]
#print(rev_span)
productRating['ASIN'].append(ASIN)
for i in [0,2,4,6,8]:
if "1" in rev_span[i].text.strip():
productRating["1"].append(rev_span[i+1].text.strip())
elif "2" in rev_span[i].text.strip():
productRating["2"].append(rev_span[i+1].text.strip())
elif "3" in rev_span[i].text.strip():
productRating["3"].append(rev_span[i+1].text.strip())
elif "4" in rev_span[i].text.strip():
productRating["4"].append(rev_span[i+1].text.strip())
else:
productRating["5"].append(rev_span[i+1].text.strip())
# rating[rev_span[i].text.strip()] = rev_span[i+1].text.strip()
rev_div = get(review_summary,'div','id')
rev_div_footer = [r for r in rev_div if r['id']=="reviews-medley-footer" or "footer" in r['id']]
#print(len(rev_div_footer),rev_div_footer)
if len(rev_div_footer)>0:
try:
if 'https://www.amazon.com' in rev_div_footer[0].a['href']:
rating_url = rev_div_footer[0].a['href']
else:
rating_url = 'https://www.amazon.com'+rev_div_footer[0].a['href']
except:
rating_url = None
while rating_url is not None:
rating_url,productReview=parseReview(rating_url,productReview,ASIN)
product_dict['ASIN'].append(ASIN)
product_dict['Name'].append(title)
productDetails['ASIN'].append(ASIN)
productDetails['Average Rating'].append(averageRating)
productDetails['TotalRating'].append(numberOfRating)
productDetails['Price'].append(productPrice)
productDetails['Features'].append(features)
#(productReview)
#print(productRating)
print("URL processed",pages+1)
if pages==0:
pd.DataFrame(product_dict).to_excel('/Users/shashanknigam/downloads/nlp_project/shopBot/webscraping/AmazonDataSet/ProductDictionary.xlsx')
pd.DataFrame(productDetails).to_excel('/Users/shashanknigam/downloads/nlp_project/shopBot/webscraping/AmazonDataSet/ProductDetails.xlsx')
pd.DataFrame(Description).to_excel('/Users/shashanknigam/downloads/nlp_project/shopBot/webscraping/AmazonDataSet/Description.xlsx')
pd.DataFrame(productQA).to_excel('/Users/shashanknigam/downloads/nlp_project/shopBot/webscraping/AmazonDataSet/QA_'+ASIN+'.xlsx')
pd.DataFrame(productInformation).to_excel('/Users/shashanknigam/downloads/nlp_project/shopBot/webscraping/AmazonDataSet/ProductInformation.xlsx')
pd.DataFrame(productRating).to_excel('/Users/shashanknigam/downloads/nlp_project/shopBot/webscraping/AmazonDataSet/productRating.xlsx')
pd.DataFrame(productReview).to_excel('/Users/shashanknigam/downloads/nlp_project/shopBot/webscraping/AmazonDataSet/prodcutReview_'+ASIN+'.xlsx')
else:
appendExcel('/Users/shashanknigam/downloads/nlp_project/shopBot/webscraping/AmazonDataSet/ProductDictionary.xlsx',pd.DataFrame(product_dict))
appendExcel('/Users/shashanknigam/downloads/nlp_project/shopBot/webscraping/AmazonDataSet/ProductDetails.xlsx',pd.DataFrame(productDetails))
appendExcel('/Users/shashanknigam/downloads/nlp_project/shopBot/webscraping/AmazonDataSet/Description.xlsx',pd.DataFrame(Description))
appendExcel('/Users/shashanknigam/downloads/nlp_project/shopBot/webscraping/AmazonDataSet/ProductInformation.xlsx',pd.DataFrame(productInformation))
appendExcel('/Users/shashanknigam/downloads/nlp_project/shopBot/webscraping/AmazonDataSet/productRating.xlsx',
|
pd.DataFrame(productRating)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 4 16:22:57 2020
@author: Natalie
"""
import os
import sys
import click
import pickle
import pandas as pd
import numpy as np
import geopandas as gpd
import imageio
from shapely.geometry import Point
import json
from bokeh.io import output_file
from bokeh.plotting import figure, show
from bokeh.models import (BasicTicker, CDSView, ColorBar, ColumnDataSource,
CustomJS, CustomJSFilter, FactorRange,
GeoJSONDataSource, HoverTool, Legend,
LinearColorMapper, PrintfTickFormatter, Slider, Whisker)
from bokeh.layouts import row, column, gridplot, grid, widgetbox
from bokeh.models.widgets import Tabs, Panel
from bokeh.palettes import brewer
from bokeh.transform import transform, factor_cmap
import click # command-line interface
from yaml import load, dump, SafeLoader # pyyaml library for reading the parameters.yml file
from microsim.column_names import ColumnNames
# Functions for preprocessing
# ---------------------------
def calc_nr_days(data_file):
# figure out nr days by reading in e.g. retail dangers pickle file of run 0
pickle_in = open(data_file,"rb")
dangers = pickle.load(pickle_in)
pickle_in.close()
filter_col = [col for col in dangers if col.startswith(ColumnNames.LOCATION_DANGER)]
# don't use the column simply called 'Danger'
filter_col = filter_col[1:len(filter_col)]
nr_days = len(filter_col)
return nr_days
def create_venue_dangers_dict(locations_dict,r_range,data_dir,start_day,end_day,start_run,nr_runs):
'''
Reads in venue pickle files (venues from locations_dict) and populates dangers_dict_3d (raw data: venue, day, run), dangers_dict (mean across runs) and dangers_dict_std (standard deviation across runs)
Possible output includes:
dangers_dict # mean (value to be plotted)
dangers_dict_std # standard deviation (could plot as error bars)
dangers_dict_3d # full 3D data (for debugging)
'''
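    # Minimal usage sketch (hypothetical arguments): expects one pickle per run
    # under data_dir/<run>/<venue>.pickle, e.g.
    #   dangers_dict, dangers_dict_std, dangers_dict_3d = create_venue_dangers_dict(
    #       {"Retail": "Retail"}, range(0, 5), "output", 0, 99, 0, 5)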
dangers_dict = {}
dangers_dict_std = {}
dangers_dict_3d = {}
for key, value in locations_dict.items():
#for r in range(nr_runs):
for r in r_range:
data_file = os.path.join(data_dir, f"{r}",f"{locations_dict[key]}.pickle")
pickle_in = open(data_file,"rb")
dangers = pickle.load(pickle_in)
pickle_in.close()
filter_col = [col for col in dangers if col.startswith('Danger')]
# don't use the column simply called 'Danger'
filter_col = filter_col[1:len(filter_col)]
#nr_days = len(filter_col)
# # set row index to ID
# dangers.set_index('ID', inplace = True)
dangers_colnames = filter_col[start_day:end_day+1]
dangers_rownames = dangers.index
dangers_values = dangers[filter_col[start_day:end_day+1]]
if r == start_run:
dangers_3d = np.zeros((dangers.shape[0],dangers_values.shape[1],nr_runs))
dangers_3d[:,:,r-start_run] = dangers_values
dangers_dict_3d[key] = dangers_3d
dangers_dict[key] = pd.DataFrame(data=dangers_3d.mean(axis=2), index=dangers_rownames, columns=dangers_colnames)
dangers_dict_std[key] = pd.DataFrame(data=dangers_3d.std(axis=2), index=dangers_rownames, columns=dangers_colnames)
return dangers_dict, dangers_dict_std, dangers_dict_3d
def create_difference_dict(dict_sc0,dict_sc1,lookup_dict):
dict_out = {}
for key, value in lookup_dict.items():
dict_out[key] = dict_sc1[key].subtract(dict_sc0[key])
return dict_out
def create_msoa_dangers_dict(dangers_dict,keys,msoa_codes):
'''
Converts dangers_dict to MSOA level data for the appropriate venue types. Produces average danger score (sum dangers in MSOA / total nr venues in MSOA)
Output: dangers_msoa_dict
'''
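    # keys[k] selects a venue type in dangers_dict and msoa_codes[k] supplies the
    # matching MSOA code for each venue; scores are then averaged per MSOA.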
dangers_msoa_dict = {}
for k in range(0,len(keys)):
dangers = dangers_dict[keys[k]]
msoa_code = msoa_codes[k]
dangers['MSOA'] = msoa_code
# count nr for this condition per area
msoa_sum = dangers.groupby(['MSOA']).agg('sum')
msoa_count = dangers.groupby(['MSOA']).agg('count')
msoa_avg = msoa_sum.div(msoa_count, axis='index')
dangers_msoa_dict[keys[k]] = msoa_avg
return dangers_msoa_dict
def create_counts_dict(conditions_dict,r_range,data_dir,start_day,end_day,start_run,nr_runs,age_cat):
'''
Counts per condition (3D, mean and standard deviation)
Produces 5 types of counts:
msoacounts: nr per msoa and day
agecounts: nr per age category and day
totalcounts: nr per day (across all areas)
cumcounts: nr per MSOA and day
    uniquecounts: nr with 'final' disease status across time period e.g. someone who is presymptomatic, symptomatic and recovered is only counted once as recovered
Output:
msoas # list of msoas
totalcounts_dict, cumcounts_dict, agecounts_dict, msoacounts_dict, cumcounts_dict_3d, totalcounts_dict_std, cumcounts_dict_std, agecounts_dict_std, msoacounts_dict_std, totalcounts_dict_3d, agecounts_dict_3d, msoacounts_dict_3d, uniquecounts_dict_3d, uniquecounts_dict_std, uniquecounts_dict
'''
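    # The msoa/age *_3d arrays are shaped (area or age bracket, day, run); totalcounts
    # is (day, run) and uniquecounts is (run,). Means and stds reduce over the run axis.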
# start with empty dictionaries
msoas = []
msoacounts_dict_3d = {}
totalcounts_dict_3d = {}
cumcounts_dict_3d = {}
agecounts_dict_3d = {}
uniquecounts_dict_3d = {}
msoacounts_dict = {}
agecounts_dict = {}
totalcounts_dict = {}
cumcounts_dict = {}
uniquecounts_dict = {}
msoacounts_dict_std = {}
agecounts_dict_std = {}
totalcounts_dict_std = {}
cumcounts_dict_std = {}
uniquecounts_dict_std = {}
nr_days = end_day - start_day + 1
dict_days = [] # empty list for column names 'Day0' etc
for d in range(start_day, end_day+1):
dict_days.append(f'Day{d}')
age_cat_str = []
for a in range(age_cat.shape[0]):
age_cat_str.append(f"{age_cat[a,0]}-{age_cat[a,1]}")
# first, create 3d dictionaries
for r in r_range:
# read in pickle file individuals (disease status)
data_file = os.path.join(data_dir, f"{r}", "Individuals.pickle")
pickle_in = open(data_file,"rb")
individuals_tmp = pickle.load(pickle_in)
pickle_in.close()
# if first ever run, keep copy and initialise 3D frame for aggregating
if r == start_run:
individuals = individuals_tmp.copy()
msoas.extend(sorted(individuals.area.unique())) # populate list of msoas (previously empty outside this function)
area_individuals = individuals['area'] # keep area per person to use later
# next bit of code is to restrict to user specified day range
# first, find all columns starting with disease_status
filter_col = [col for col in individuals if col.startswith('disease_status')]
# don't use the column simply called 'disease_status'
filter_col = filter_col[1:len(filter_col)]
counts_colnames = filter_col[start_day:end_day+1]
# User defined age brackets
individuals.insert(7, 'Age0', np.zeros((len(individuals),1)))
for a in range(age_cat.shape[0]):
individuals['Age0'] = np.where((individuals['age'] >= age_cat[a,0]) & (individuals['age'] <= age_cat[a,1]), a+1, individuals['Age0'])
age_cat_col = individuals['Age0'].values
# temporary workaround if no continuous age
#age_cat_col = individuals['Age1'].values
# add age brackets column to individuals_tmp
individuals_tmp.insert(7, 'Age0', age_cat_col)
uniquecounts_df = pd.DataFrame()
# select right columns
subset = individuals_tmp[counts_colnames]
for key, value in conditions_dict.items():
#print(key)
if r == start_run:
msoacounts_dict_3d[key] = np.zeros((len(msoas),nr_days,nr_runs))
cumcounts_dict_3d[key] = np.zeros((len(msoas),nr_days,nr_runs))
agecounts_dict_3d[key] = np.zeros((age_cat.shape[0],nr_days,nr_runs))
totalcounts_dict_3d[key] = np.zeros((nr_days,nr_runs))
uniquecounts_dict_3d[key] = np.zeros(nr_runs)
# find all rows with condition (dict value)
indices = subset[subset.eq(value).any(1)].index
# create new df of zeros and replace with 1 at indices
cumcounts_end = pd.DataFrame(np.zeros((subset.shape[0], 1)))
cumcounts_end.loc[indices] = 1
uniquecounts_df[key] = cumcounts_end.values[:,0]
# loop aroud days
msoacounts_run = np.zeros((len(msoas),nr_days))
cumcounts_run = np.zeros((len(msoas),nr_days))
agecounts_run = np.zeros((age_cat.shape[0],nr_days))
for day in range(0, nr_days):
#print(day)
# count nr for this condition per area
msoa_count_temp = individuals_tmp[subset.iloc[:,day] == conditions_dict[key]].groupby(['area']).agg({subset.columns[day]: ['count']})
if msoa_count_temp.shape[0] == len(msoas):
msoa_count_temp = msoa_count_temp.values
msoacounts_run[:,day] = msoa_count_temp[:, 0]
elif msoa_count_temp.empty == False:
#print('check MSOAs')
# in case some entries don't exist
# start with empty dataframe
tmp_df = pd.DataFrame(np.zeros(len(msoas)), columns = ['tmp'], index=msoas)
# drop multiindex to prevent warning msg
msoa_count_temp.columns = msoa_count_temp.columns.droplevel(0)
# merge with obtained counts - NaN will appear
tmp_df = pd.merge(tmp_df, msoa_count_temp, how='left', left_index=True,right_index=True)
# replace NaN by 0
tmp_df = tmp_df.fillna(0)
msoacounts_run[:,day] = tmp_df.iloc[:,1].values
# cumulative counts
# select right columns
tmp_cum = subset.iloc[:,0:day+1]
indices = tmp_cum[tmp_cum.eq(value).any(1)].index
# create new df of zeros and replace with 1 at indices
tmp_df = pd.DataFrame(np.zeros((tmp_cum.shape[0], 1)))
tmp_df.loc[indices] = 1
# merge with MSOA df
tmp_df = tmp_df.merge(area_individuals, left_index=True, right_index=True)
cumcounts_tmp = tmp_df.groupby(['area']).sum()
if cumcounts_tmp.shape[0] == len(msoas):
cumcounts_tmp = cumcounts_tmp.values
cumcounts_run[:,day] = cumcounts_tmp[:, 0]
elif cumcounts_tmp.empty == False:
#print('check MSOAs')
# in case some entries don't exist
# start with empty dataframe
tmp_df = pd.DataFrame(np.zeros(len(msoas)), columns = ['tmp'], index=msoas)
# drop multiindex to prevent warning msg
cumcounts_tmp.columns = cumcounts_tmp.columns.droplevel(0)
# merge with obtained counts - NaN will appear
tmp_df = pd.merge(tmp_df, cumcounts_tmp, how='left', left_index=True,right_index=True)
# replace NaN by 0
tmp_df = tmp_df.fillna(0)
cumcounts_run[:,day] = tmp_df.iloc[:,1].values
# count nr for this condition per age bracket
age_count_temp = individuals_tmp[subset.iloc[:,day] == conditions_dict[key]].groupby(['Age0']).agg({subset.columns[day]: ['count']})
if age_count_temp.shape[0] == 6:
age_count_temp = age_count_temp.values
agecounts_run[:,day] = age_count_temp[:, 0]
elif age_count_temp.empty == False:
# in case some entries don't exist
# start with empty dataframe
tmp_df = pd.DataFrame(np.zeros(age_cat.shape[0]), columns = ['tmp'], index=list(range(1,age_cat.shape[0]+1)))
# drop multilevel index to prevent warning msg
age_count_temp.columns = age_count_temp.columns.droplevel(0)
# merge with obtained counts - NaN will appear
tmp_df = pd.merge(tmp_df, age_count_temp, how='left', left_index=True,right_index=True)
# replace NaN by 0
tmp_df = tmp_df.fillna(0)
agecounts_run[:,day] = tmp_df.iloc[:,1].values
#age_count_temp.loc['2'].count
# get current values from dict
msoacounts = msoacounts_dict_3d[key]
cumcounts = cumcounts_dict_3d[key]
agecounts = agecounts_dict_3d[key]
totalcounts = totalcounts_dict_3d[key]
# add current run's values
msoacounts[:,:,r-start_run] = msoacounts_run
cumcounts[:,:,r-start_run] = cumcounts_run
agecounts[:,:,r-start_run] = agecounts_run
totalcounts[:,r-start_run] = msoacounts_run.sum(axis=0)
# write out to dict
msoacounts_dict_3d[key] = msoacounts
cumcounts_dict_3d[key] = cumcounts
agecounts_dict_3d[key] = agecounts
totalcounts_dict_3d[key] = totalcounts
uniquecounts_df[key] = uniquecounts_df[key]*(value+1)
uniquecounts_df['maxval'] = uniquecounts_df.max(axis = 1)
for key, value in conditions_dict.items():
# get current values from dict
uniquecounts = uniquecounts_dict_3d[key]
# add current run's values
uniquecounts[r-start_run] = uniquecounts_df[uniquecounts_df.maxval == (value+1)].shape[0]
# write out to dict
uniquecounts_dict_3d[key] = uniquecounts
# next, create mean and std
for key, value in conditions_dict.items():
# get current values from dict
msoacounts = msoacounts_dict_3d[key]
cumcounts = cumcounts_dict_3d[key]
agecounts = agecounts_dict_3d[key]
totalcounts = totalcounts_dict_3d[key]
uniquecounts = uniquecounts_dict_3d[key]
# aggregate
msoacounts_std = msoacounts.std(axis=2)
msoacounts = msoacounts.mean(axis=2)
cumcounts_std = cumcounts.std(axis=2)
cumcounts = cumcounts.mean(axis=2)
agecounts_std = agecounts.std(axis=2)
agecounts = agecounts.mean(axis=2)
totalcounts_std = totalcounts.std(axis=1)
totalcounts = totalcounts.mean(axis=1)
uniquecounts_std = uniquecounts.std()
uniquecounts = uniquecounts.mean()
# write out to dict
msoacounts_dict[key] = pd.DataFrame(data=msoacounts, index=msoas, columns=dict_days)
msoacounts_dict_std[key] = pd.DataFrame(data=msoacounts_std, index=msoas, columns=dict_days)
cumcounts_dict[key] = pd.DataFrame(data=cumcounts, index=msoas, columns=dict_days)
cumcounts_dict_std[key] = pd.DataFrame(data=cumcounts_std, index=msoas, columns=dict_days)
agecounts_dict[key] = pd.DataFrame(data=agecounts, index=age_cat_str, columns=dict_days)
agecounts_dict_std[key] = pd.DataFrame(data=agecounts_std, index=age_cat_str, columns=dict_days)
totalcounts_dict[key] = pd.Series(data=totalcounts, index=dict_days)
totalcounts_dict_std[key] = pd.Series(data=totalcounts_std, index=dict_days)
uniquecounts_dict[key] = pd.Series(data=uniquecounts, index=["total"])
uniquecounts_dict_std[key] = pd.Series(data=uniquecounts_std, index=["total"])
return msoas, totalcounts_dict, cumcounts_dict, agecounts_dict, msoacounts_dict, cumcounts_dict_3d, totalcounts_dict_std, cumcounts_dict_std, agecounts_dict_std, msoacounts_dict_std, totalcounts_dict_3d, agecounts_dict_3d, msoacounts_dict_3d, uniquecounts_dict_3d, uniquecounts_dict_std, uniquecounts_dict
# ********
# PROGRAM ENTRY POINT
# Uses 'click' library so that it can be run from the command line
# ********
@click.command()
@click.option('-p', '--parameters_file', default="./model_parameters/default_dashboard.yml", type=click.Path(exists=True),
help="Parameters file to use to configure the dashboard. Default: ./model_parameters/default_dashboard.yml")
def create_dashboard(parameters_file):
# FUNCTIONS FOR PLOTTING
# ----------------------
# plot 1a: heatmap condition
def plot_heatmap_condition(condition2plot):
""" Create heatmap plot: x axis = time, y axis = MSOAs, colour = nr people with condition = condition2plot. condition2plot is key to conditions_dict."""
# Prep data
var2plot = msoacounts_dict[condition2plot]
var2plot = var2plot.rename_axis(None, axis=1).rename_axis('MSOA', axis=0)
var2plot.columns.name = 'Day'
        # reshape to a 1D array of counts with a Day and MSOA for each row.
df_var2plot = pd.DataFrame(var2plot.stack(), columns=['condition']).reset_index()
source = ColumnDataSource(df_var2plot)
# add better colour
mapper_1 = LinearColorMapper(palette=colours_ch_cond[condition2plot], low=0, high=var2plot.max().max())
# create fig
s1 = figure(title="Heatmap",
x_range=list(var2plot.columns), y_range=list(var2plot.index), x_axis_location="above")
s1.rect(x="Day", y="MSOA", width=1, height=1, source=source,
line_color=None, fill_color=transform('condition', mapper_1))
color_bar_1 = ColorBar(color_mapper=mapper_1, location=(0, 0), orientation = 'horizontal', ticker=BasicTicker(desired_num_ticks=len(colours_ch_cond[condition2plot])))
s1.add_layout(color_bar_1, 'below')
s1.axis.axis_line_color = None
s1.axis.major_tick_line_color = None
s1.axis.major_label_text_font_size = "7px"
s1.axis.major_label_standoff = 0
s1.xaxis.major_label_orientation = 1.0
# Create hover tool
s1.add_tools(HoverTool(
tooltips=[
( f'Nr {condition2plot}', '@condition'),
( 'Day', '@Day' ),
( 'MSOA', '@MSOA'),
],
))
s1.toolbar.autohide = False
plotref_dict[f"hm{condition2plot}"] = s1
# plot 1b: heatmap venue
def plot_heatmap_danger(venue2plot):
""" Create heatmap plot: x axis = time, y axis = MSOAs, colour =danger score. """
# Prep data
var2plot = dangers_msoa_dict[venue2plot]
var2plot.columns.name = 'Day'
        # reshape to a 1D array of danger scores with a Day and MSOA for each row.
df_var2plot = pd.DataFrame(var2plot.stack(), columns=['venue']).reset_index()
source = ColumnDataSource(df_var2plot)
# add better colour
mapper_1 = LinearColorMapper(palette=colours_ch_danger, low=0, high=var2plot.max().max())
# Create fig
s1 = figure(title="Heatmap",
x_range=list(var2plot.columns), y_range=list(var2plot.index), x_axis_location="above")
s1.rect(x="Day", y="MSOA", width=1, height=1, source=source,
line_color=None, fill_color=transform('venue', mapper_1))
color_bar_1 = ColorBar(color_mapper=mapper_1, location=(0, 0), orientation = 'horizontal', ticker=BasicTicker(desired_num_ticks=len(colours_ch_danger)))
s1.add_layout(color_bar_1, 'below')
s1.axis.axis_line_color = None
s1.axis.major_tick_line_color = None
s1.axis.major_label_text_font_size = "7px"
s1.axis.major_label_standoff = 0
s1.xaxis.major_label_orientation = 1.0
# Create hover tool
s1.add_tools(HoverTool(
tooltips=[
( 'danger score', '@venue'),
( 'Day', '@Day' ),
( 'MSOA', '@MSOA'),
],
))
s1.toolbar.autohide = False
plotref_dict[f"hm{venue2plot}"] = s1
# plot 2: disease conditions across time
def plot_cond_time(flag):
# build ColumnDataSource
if flag == "daily":
title_fig = "Daily counts"
name_plotref = "cond_time_daily"
data_s2 = dict(totalcounts_dict)
data_s2["days"] = days
for key, value in totalcounts_dict.items():
data_s2[f"{key}_std_upper"] = totalcounts_dict[key] + totalcounts_dict_std[key]
data_s2[f"{key}_std_lower"] = totalcounts_dict[key] - totalcounts_dict_std[key]
elif flag == "cumulative":
title_fig = "Cumulative counts"
name_plotref = "cond_time_cumulative"
data_s2 = {"days": days}
for key, value in totalcounts_dict.items():
data_s2[f"{key}"] = cumcounts_dict[key].sum(axis=0)
data_s2[f"{key}_std_upper"] = cumcounts_dict[key].sum(axis=0) + cumcounts_dict_std[key].sum(axis=0)
data_s2[f"{key}_std_lower"] = cumcounts_dict[key].sum(axis=0) - cumcounts_dict_std[key].sum(axis=0)
source_2 = ColumnDataSource(data=data_s2)
# Create fig
s2 = figure(background_fill_color="#fafafa",title=title_fig, x_axis_label='Time', y_axis_label='Nr of people',toolbar_location='above')
legend_it = []
for key, value in totalcounts_dict.items():
c1 = s2.line(x = 'days', y = key, source = source_2, line_width=2, line_color=colour_dict[key],muted_color="grey", muted_alpha=0.2)
c2 = s2.square(x = 'days', y = key, source = source_2, fill_color=colour_dict[key], line_color=colour_dict[key], size=5, muted_color="grey", muted_alpha=0.2)
# c3 = s2.rect('days', f"{key}_std_upper", 0.2, 0.01, source = source_2, line_color="black",muted_color="grey", muted_alpha=0.2)
# c4 = s2.rect('days', f"{key}_std_lower", 0.2, 0.01, source = source_2, line_color="black",muted_color="grey", muted_alpha=0.2)
c5 = s2.segment('days', f"{key}_std_lower", 'days', f"{key}_std_upper", source = source_2, line_color="black",muted_color="grey", muted_alpha=0.2)
legend_it.append((f"nr {key}", [c1,c2,c5]))
legend = Legend(items=legend_it)
legend.click_policy="hide"
# Misc
tooltips = tooltips_cond_basic.copy()
tooltips.append(tuple(( 'Day', '@days' )))
s2.add_tools(HoverTool(
tooltips=tooltips,
))
s2.add_layout(legend, 'right')
s2.toolbar.autohide = False
plotref_dict[name_plotref] = s2
# plot 3: Conditions across MSOAs
def plot_cond_msoas():
# build ColumnDataSource
data_s3 = {}
data_s3["msoa_nr"] = msoas_nr
data_s3["msoa_name"] = msoas
for key, value in cumcounts_dict.items():
data_s3[key] = cumcounts_dict[key].iloc[:,nr_days-1]
data_s3[f"{key}_std_upper"] = cumcounts_dict[key].iloc[:,nr_days-1] + cumcounts_dict_std[key].iloc[:,nr_days-1]
data_s3[f"{key}_std_lower"] = cumcounts_dict[key].iloc[:,nr_days-1] - cumcounts_dict_std[key].iloc[:,nr_days-1]
# old
# data_s3[key] = cumcounts_dict[key]
# data_s3[f"{key}_std_upper"] = cumcounts_dict[key] + cumcounts_dict_std[key]
# data_s3[f"{key}_std_lower"] = cumcounts_dict[key] - cumcounts_dict_std[key]
source_3 = ColumnDataSource(data=data_s3)
# Create fig
s3 = figure(background_fill_color="#fafafa",title="MSOA", x_axis_label='Nr people', y_axis_label='MSOA',toolbar_location='above')
legend_it = []
for key, value in msoacounts_dict.items():
c1 = s3.circle(x = key, y = 'msoa_nr', source = source_3, fill_color=colour_dict[key], line_color=colour_dict[key], size=5,muted_color="grey", muted_alpha=0.2)
c2 = s3.segment(f"{key}_std_lower", 'msoa_nr', f"{key}_std_upper", 'msoa_nr', source = source_3, line_color="black",muted_color="grey", muted_alpha=0.2)
legend_it.append((key, [c1,c2]))
legend = Legend(items=legend_it)
legend.click_policy="hide"
# Misc
s3.yaxis.ticker = data_s3["msoa_nr"]
MSOA_dict = dict(zip(data_s3["msoa_nr"], data_s3["msoa_name"]))
s3.yaxis.major_label_overrides = MSOA_dict
tooltips = tooltips_cond_basic.copy()
tooltips.append(tuple(( 'MSOA', '@msoa_name' )))
s3.add_tools(HoverTool(
tooltips=tooltips,
))
s3.add_layout(legend, 'right')
s3.toolbar.autohide = False
plotref_dict["cond_msoas"] = s3
# plot 4a: choropleth
def plot_choropleth_condition_slider(condition2plot):
# Prepare data
max_val = 0
merged_data =
|
pd.DataFrame()
|
pandas.DataFrame
|
# -*- coding:utf-8 -*-
# !/usr/bin/env python
"""
Date: 2022/5/12 16:53
Desc: Baidu Gushitong - economic data
https://gushitong.baidu.com/calendar
"""
import pandas as pd
import requests
def news_economic_baidu(date: str = "20220502") -> pd.DataFrame:
"""
    Baidu Gushitong - economic data
    https://gushitong.baidu.com/calendar
    :param date: query date
    :type date: str
    :return: economic data
:rtype: pandas.DataFrame
"""
start_date = "-".join([date[:4], date[4:6], date[6:]])
end_date = "-".join([date[:4], date[4:6], date[6:]])
url = "https://finance.pae.baidu.com/api/financecalendar"
params = {
"start_date": start_date,
"end_date": end_date,
"market": "",
"cate": "economic_data",
}
r = requests.get(url, params=params)
data_json = r.json()
big_df = pd.DataFrame()
for item in data_json["Result"]:
if not item["list"] == []:
temp_df = pd.DataFrame(item["list"])
temp_df.columns = [
"日期",
"时间",
"-",
"事件",
"重要性",
"前值",
"预期",
"公布",
"-",
"-",
"地区",
"-",
]
temp_df = temp_df[
[
"日期",
"时间",
"地区",
"事件",
"公布",
"预期",
"前值",
"重要性",
]
]
temp_df["公布"] = pd.to_numeric(temp_df["公布"], errors="coerce")
temp_df["预期"] = pd.to_numeric(temp_df["预期"], errors="coerce")
temp_df["前值"] = pd.t
|
o_numeric(temp_df["前值"], errors="coerce")
|
pandas.to_numeric
|
import os
import unittest
from unittest.mock import mock_open, patch, call, MagicMock
import pandas as pd
import xarray as xr
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler
from pywatts.core.computation_mode import ComputationMode
from pywatts.core.pipeline import Pipeline
from pywatts.core.start_step import StartStep
from pywatts.core.step import Step
from pywatts.core.run_setting import RunSetting
from pywatts.modules import MissingValueDetector, SKLearnWrapper
from pywatts.summaries import RMSE
pipeline_json = {'id': 1,
'name': 'Pipeline',
'modules': [{'class': 'SKLearnWrapper',
'is_fitted': False,
'module': 'pywatts.modules.wrappers.sklearn_wrapper',
'name': 'StandardScaler',
'params': {'copy': True, 'with_mean': True, 'with_std': True},
'sklearn_module': os.path.join('test_pipeline', 'StandardScaler.pickle')},
{'class': 'SKLearnWrapper',
'is_fitted': False,
'module': 'pywatts.modules.wrappers.sklearn_wrapper',
'name': 'LinearRegression',
'params': {'copy_X': True,
'fit_intercept': True,
'n_jobs': None,
'positive': False,
'normalize': 'deprecated'},
'sklearn_module': os.path.join('test_pipeline', 'LinearRegression.pickle')}],
'steps': [{'class': 'StartStep',
'default_run_setting': {'computation_mode': 4},
'id': 1,
'index': 'input',
'input_ids': {},
'last': False,
'module': 'pywatts.core.start_step',
'name': 'input',
'target_ids': {}},
{'batch_size': None,
'callbacks': [],
'class': 'Step',
'default_run_setting': {'computation_mode': 4},
'condition': None,
'id': 2,
'input_ids': {1: 'input'},
'last': False,
'module': 'pywatts.core.step',
'module_id': 0,
'name': 'StandardScaler',
'target_ids': {},
'train_if': None},
{'batch_size': None,
'callbacks': [],
'class': 'Step',
'default_run_setting': {'computation_mode': 4},
'condition': None,
'id': 3,
'input_ids': {2: 'x'},
'last': True,
'module': 'pywatts.core.step',
'module_id': 1,
'name': 'LinearRegression',
'target_ids': {},
'train_if': None}],
'version': 1}
class TestPipeline(unittest.TestCase):
@patch("pywatts.core.pipeline.FileManager")
def setUp(self, fm_mock) -> None:
self.fm_mock = fm_mock()
self.pipeline = Pipeline()
def tearDown(self) -> None:
self.pipeline = None
def test_add_input_as_positional(self):
        # Should fail with a better error message
SKLearnWrapper(LinearRegression())(x=self.pipeline["input"])
def test_add_only_module(self):
SKLearnWrapper(LinearRegression())(x=self.pipeline["input"])
# nodes 1 plus startstep
self.assertEqual(len(self.pipeline.id_to_step), 2)
def test_add_module_which_is_not_in_a_list(self):
wrapper = SKLearnWrapper(LinearRegression())(input=self.pipeline["input"])
SKLearnWrapper(LinearRegression())(x=wrapper)
# nodes 1 plus startstep
self.assertEqual(len(self.pipeline.id_to_step), 3)
def test_add_pipeline_without_index(self):
# This should raise an exception since pipeline might get multiple columns in the input dataframe
with self.assertRaises(Exception) as context:
SKLearnWrapper(StandardScaler())(x=self.pipeline) # This should fail
self.assertEqual(
"Adding a pipeline as input might be ambigious. Specifiy the desired column of your dataset by using pipeline[<column_name>]",
str(context.exception))
def test_add_module_with_inputs(self):
scaler1 = SKLearnWrapper(StandardScaler())(x=self.pipeline["x"])
scaler2 = SKLearnWrapper(StandardScaler())(x=self.pipeline["test1"])
SKLearnWrapper(LinearRegression())(input_1=scaler1, input_2=scaler2)
# Three modules plus start step and one collect step
self.assertEqual(5, len(self.pipeline.id_to_step))
def test_add_module_with_one_input_without_a_list(self):
scaler = SKLearnWrapper(StandardScaler())(input=self.pipeline["test"])
SKLearnWrapper(LinearRegression())(input=scaler)
# Three modules plus start step and one collect step
self.assertEqual(3, len(self.pipeline.id_to_step))
@patch('pywatts.core.pipeline.FileManager')
@patch('pywatts.core.pipeline.json')
@patch("builtins.open", new_callable=mock_open)
def test_to_folder(self, mock_file, json_mock, fm_mock):
scaler = SKLearnWrapper(StandardScaler())(input=self.pipeline["input"])
SKLearnWrapper(LinearRegression())(x=scaler)
fm_mock_object = MagicMock()
fm_mock.return_value = fm_mock_object
fm_mock_object.get_path.side_effect = [
os.path.join('test_pipeline', 'StandardScaler.pickle'),
os.path.join('test_pipeline', 'LinearRegression.pickle'),
os.path.join('test_pipeline', 'pipeline.json'),
]
self.pipeline.to_folder("test_pipeline")
calls_open = [call(os.path.join('test_pipeline', 'StandardScaler.pickle'), 'wb'),
call(os.path.join('test_pipeline', 'LinearRegression.pickle'), 'wb'),
call(os.path.join('test_pipeline', 'pipeline.json'), 'w')]
mock_file.assert_has_calls(calls_open, any_order=True)
args, kwargs = json_mock.dump.call_args
assert kwargs["obj"]["id"] == pipeline_json["id"]
assert kwargs["obj"]["name"] == pipeline_json["name"]
assert kwargs["obj"]["modules"] == pipeline_json["modules"]
assert kwargs["obj"]["steps"] == pipeline_json["steps"]
@patch('pywatts.core.pipeline.FileManager')
@patch('pywatts.modules.sklearn_wrapper.pickle')
@patch('pywatts.core.pipeline.json')
@patch("builtins.open", new_callable=mock_open)
@patch('pywatts.core.pipeline.os.path.isdir')
def test_from_folder(self, isdir_mock, mock_file, json_mock, pickle_mock, fm_mock):
scaler = StandardScaler()
linear_regression = LinearRegression()
isdir_mock.return_value = True
json_mock.load.return_value = pipeline_json
pickle_mock.load.side_effect = [scaler, linear_regression]
pipeline = Pipeline.from_folder("test_pipeline")
calls_open = [call(os.path.join("test_pipeline", "StandardScaler.pickle"), "rb"),
call(os.path.join("test_pipeline", "LinearRegression.pickle"), "rb"),
call(os.path.join("test_pipeline", "pipeline.json"), "r")]
mock_file.assert_has_calls(calls_open, any_order=True)
json_mock.load.assert_called_once()
assert pickle_mock.load.call_count == 2
isdir_mock.assert_called_once()
self.assertEqual(3, len(pipeline.id_to_step))
def test_module_naming_conflict(self):
        # This test should check that modules with the same name do not lead to an error
# What should this test?
# self.fail()
pass
def test_add_with_target(self):
SKLearnWrapper(LinearRegression())(input=self.pipeline["input"], target=self.pipeline["target"])
self.assertEqual(3, len(self.pipeline.id_to_step))
def test_multiple_same_module(self):
reg_module = SKLearnWrapper(module=LinearRegression())
reg_one = reg_module(x=self.pipeline["test"], target=self.pipeline["target"])
reg_two = reg_module(x=self.pipeline["test2"], target=self.pipeline["target"])
detector = MissingValueDetector()
detector(dataset=reg_one)
detector(dataset=reg_two)
# Three start steps (test, test2, target), two regressors two detectors
self.assertEqual(7, len(self.pipeline.id_to_step))
modules = []
for element in self.pipeline.id_to_step.values():
if isinstance(element, Step) and not element.module in modules:
modules.append(element.module)
# One sklearn wrappers, one missing value detector
self.assertEqual(2, len(modules))
self.pipeline.train(
pd.DataFrame({"test": [1, 2, 2, 3, 4], "test2": [2, 2, 2, 2, 2], "target": [2, 2, 4, 4, -5]},
index=pd.DatetimeIndex(pd.date_range('2000-01-01', freq='24H', periods=5))))
@patch('pywatts.core.pipeline.Pipeline._create_summary')
@patch('pywatts.core.pipeline.FileManager')
def test_add_pipeline_to_pipeline_and_train(self, fm_mock, create_summary_mock):
sub_pipeline = Pipeline()
detector = MissingValueDetector()
detector(dataset=sub_pipeline["regression"])
regressor = SKLearnWrapper(LinearRegression(), name="regression")(x=self.pipeline["test"],
target=self.pipeline["target"])
sub_pipeline(regression=regressor)
summary_formatter_mock = MagicMock()
self.pipeline.train(pd.DataFrame({"test": [24, 24], "target": [12, 24]}, index=pd.to_datetime(
['2015-06-03 00:00:00', '2015-06-03 01:00:00'])), summary_formatter=summary_formatter_mock)
for step in self.pipeline.id_to_step.values():
assert step.current_run_setting.computation_mode == ComputationMode.FitTransform
create_summary_mock.assert_has_calls([call(summary_formatter_mock), call(summary_formatter_mock)])
@patch('pywatts.core.pipeline.FileManager')
def test_add_pipeline_to_pipeline_and_test(self, fm_mock):
# Add some steps to the pipeline
# Assert that the computation is set to fit_transform if the ComputationMode was default
step = MagicMock()
step.computation_mode = ComputationMode.Default
step.finished = False
time = pd.date_range('2000-01-01', freq='24H', periods=7)
ds = xr.Dataset({'foo': ('time', [2, 3, 4, 5, 6, 7, 8]), 'time': time})
subpipeline = Pipeline()
subpipeline.add(module=step)
# BUG: In step_factory.py -> create_step the file_manager of the pipeline is accessed
# and the pipeline is None...
# subpipeline(self.pipeline)
# self.pipeline.test(ds)
# step.set_computation_mode.assert_called_once_with(ComputationMode.Transform)
# step.reset.assert_called_once()
@patch("pywatts.core.pipeline.FileManager")
@patch('pywatts.core.pipeline.json')
@patch("builtins.open", new_callable=mock_open)
def test_add_pipeline_to_pipeline_and_save(self, open_mock, json_mock, fm_mock):
sub_pipeline = Pipeline()
detector = MissingValueDetector()
detector(dataset=sub_pipeline["regressor"])
regressor = SKLearnWrapper(LinearRegression())(x=self.pipeline["test"])
sub_pipeline(regression=regressor)
self.pipeline.to_folder(path="path")
self.assertEqual(json_mock.dump.call_count, 2)
def create_summary_in_subpipelines(self):
assert False
@patch('pywatts.core.pipeline.FileManager')
def test__collect_batch_results_naming_conflict(self, fm_mock):
step_one = MagicMock()
step_one.name = "step"
step_two = MagicMock()
step_two.name = "step"
result_step_one = MagicMock()
result_step_two = MagicMock()
merged_result = {
"step": result_step_one,
"step_1": result_step_two
}
step_one.get_result.return_value = {"step": result_step_one}
step_two.get_result.return_value = {"step_1": result_step_two}
result = self.pipeline._collect_results([step_one, step_two])
        # Assert that steps are correctly called.
step_one.get_result.assert_called_once_with(None, None, return_all=True)
step_two.get_result.assert_called_once_with(None, None, return_all=True)
# Assert return value is correct
self.assertEqual(merged_result, result)
@patch("pywatts.core.pipeline.FileManager")
def test_get_params(self, fm_mock):
result = Pipeline(batch=pd.Timedelta("1h")).get_params()
self.assertEqual(result, {
"batch": pd.Timedelta("1h")
})
def test_set_params(self):
self.pipeline.set_params(batch=pd.Timedelta("2h"))
self.assertEqual(self.pipeline.get_params(),
{
"batch": pd.Timedelta("2h")
})
def test__collect_batch_results(self):
step_one = MagicMock()
step_one.name = "step_one"
step_two = MagicMock()
step_two.name = "step_two"
result_step_one = MagicMock()
result_step_two = MagicMock()
merged_result = {
"step_one": result_step_one,
"step_two": result_step_two
}
step_one.get_result.return_value = {"step_one": result_step_one}
step_two.get_result.return_value = {"step_two": result_step_two}
result = self.pipeline._collect_results([step_one, step_two])
        # Assert that steps are correctly called.
step_one.get_result.assert_called_once_with(None, None, return_all=True)
step_two.get_result.assert_called_once_with(None, None, return_all=True)
# Assert return value is correct
self.assertEqual(merged_result, result)
@patch("pywatts.core.pipeline.FileManager")
@patch("pywatts.core.pipeline.xr.concat")
def test_batched_pipeline(self, concat_mock, fm_mock):
# Add some steps to the pipeline
time = pd.date_range('2000-01-01', freq='1H', periods=7)
da = xr.DataArray([2, 3, 4, 3, 3, 1, 2], dims=["time"], coords={'time': time})
# Assert that the computation is set to fit_transform if the ComputationMode was default
first_step = MagicMock()
first_step.run_setting = RunSetting(ComputationMode.Default)
first_step.finished = False
first_step.further_elements.side_effect = [True, True, True, True, False]
first_step.get_result.return_value = {"one": da}
self.pipeline.set_params(pd.Timedelta("24h"))
self.pipeline.add(module=first_step)
data = pd.DataFrame({"test": [1, 2, 2, 3], "test2": [2, 2, 2, 2]},
index=pd.DatetimeIndex(pd.date_range('2000-01-01', freq='24H', periods=4)))
self.pipeline.test(data)
first_step.set_run_setting.assert_called_once()
self.assertEqual(first_step.set_run_setting.call_args[0][0].computation_mode, ComputationMode.Transform)
calls = [
call(pd.Timestamp('2000-01-01 00:00:00', freq='24H'), pd.Timestamp('2000-01-02 00:00:00', freq='24H'),
return_all=True),
call(pd.Timestamp('2000-01-02 00:00:00', freq='24H'), pd.Timestamp('2000-01-03 00:00:00', freq='24H'),
return_all=True),
call(pd.Timestamp('2000-01-03 00:00:00', freq='24H'), pd.Timestamp('2000-01-04 00:00:00', freq='24H'),
return_all=True),
call(pd.Timestamp('2000-01-04 00:00:00', freq='24H'), pd.Timestamp('2000-01-05 00:00:00', freq='24H'),
return_all=True),
]
first_step.get_result.assert_has_calls(calls, any_order=True)
self.assertEqual(concat_mock.call_count, 3)
@patch("pywatts.core.pipeline.FileManager")
@patch("pywatts.core.pipeline.xr.concat")
def test_batch_2H_transform(self, concat_mock, fm_mock):
time = pd.date_range('2000-01-01', freq='1H', periods=7)
da = xr.DataArray([2, 3, 4, 3, 3, 1, 2], dims=["time"], coords={'time': time})
pipeline = Pipeline(batch=pd.Timedelta("2h"))
step_one = MagicMock()
step_one.get_result.return_value = {"step": da}
step_one.name = "step"
result_mock = MagicMock()
concat_mock.return_value = result_mock
pipeline.start_steps["foo"] = StartStep("foo"), None
pipeline.start_steps["foo"][0].last = False
step_one.further_elements.side_effect = [True, True, True, True, False]
pipeline.add(module=step_one, input_ids=[1])
result = pipeline.transform(foo=da)
self.assertEqual(concat_mock.call_count, 3)
self.assertEqual(step_one.get_result.call_count, 4)
self.assertEqual(step_one.further_elements.call_count, 5)
self.assertEqual({"step": result_mock}, result)
@patch('pywatts.core.pipeline.FileManager')
@patch("pywatts.core.pipeline._get_time_indexes", return_value=["time"])
def test_transform_pipeline(self, get_time_indexes_mock, fm_mock):
input_mock = MagicMock()
input_mock.indexes = {"time": ["20.12.2020"]}
step_two = MagicMock()
result_mock = MagicMock()
step_two.name = "mock"
step_two.get_result.return_value = {"mock": result_mock}
self.pipeline.add(module=step_two, input_ids=[1])
result = self.pipeline.transform(x=input_mock)
step_two.get_result.assert_called_once_with("20.12.2020", None, return_all=True)
get_time_indexes_mock.assert_called_once_with({"x": input_mock})
self.assertEqual({"mock": result_mock}, result)
@patch("pywatts.core.pipeline.FileManager")
@patch("pywatts.core.pipeline.Pipeline.from_folder")
def test_load(self, from_folder_mock, fm_mock):
created_pipeline = MagicMock()
from_folder_mock.return_value = created_pipeline
pipeline = Pipeline.load({'name': 'Pipeline',
'class': 'Pipeline',
'module': 'pywatts.core.pipeline',
'pipeline_path': 'save_path'})
from_folder_mock.assert_called_once_with("save_path")
self.assertEqual(created_pipeline, pipeline)
@patch("pywatts.core.pipeline.FileManager")
@patch("pywatts.core.pipeline.Pipeline.to_folder")
@patch("pywatts.core.pipeline.os")
def test_save(self, os_mock, to_folder_mock, fm_mock):
os_mock.path.join.return_value = "save_path"
os_mock.path.isdir.return_value = False
sub_pipeline = Pipeline(batch=pd.Timedelta("1h"))
detector = MissingValueDetector()
detector(dataset=sub_pipeline["test"])
fm_mock = MagicMock()
fm_mock.basic_path = "path_to_save"
result = sub_pipeline.save(fm_mock)
to_folder_mock.assert_called_once_with("save_path")
os_mock.path.join.assert_called_once_with("path_to_save", "Pipeline")
self.assertEqual({'name': 'Pipeline',
'class': 'Pipeline',
'module': 'pywatts.core.pipeline',
'params': {'batch': '0 days 01:00:00'},
'pipeline_path': 'save_path'}, result)
@patch("pywatts.core.pipeline.FileManager")
@patch("pywatts.core.pipeline.xr.concat")
def test_batch_1_transform(self, concat_mock, fm_mock):
time = pd.date_range('2000-01-01', freq='1H', periods=7)
da = xr.DataArray([2, 3, 4, 3, 3, 1, 2], dims=["time"], coords={'time': time})
pipeline = Pipeline(batch=pd.Timedelta("1h"))
step_one = MagicMock()
step_one.get_result.return_value = {"step": da}
step_one.name = "step"
result_mock = MagicMock()
concat_mock.return_value = result_mock
pipeline.start_steps["foo"] = StartStep("foo"), None
pipeline.start_steps["foo"][0].last = False
step_one.further_elements.side_effect = [True, True, True, True, True, True, True, False]
pipeline.add(module=step_one, input_ids=[1])
result = pipeline.transform(foo=da)
self.assertEqual(concat_mock.call_count, 6)
self.assertEqual(step_one.get_result.call_count, 7)
self.assertEqual(step_one.further_elements.call_count, 8)
self.assertEqual({"step": result_mock}, result)
@patch('pywatts.core.pipeline.FileManager')
def test_test(self, fm_mock):
# Add some steps to the pipeline
# Assert that the computation is set to fit_transform if the ComputationMode was default
first_step = MagicMock()
first_step.computation_mode = ComputationMode.Default
first_step.finished = False
time = pd.date_range('2000-01-01', freq='1H', periods=7)
da = xr.DataArray([2, 3, 4, 3, 3, 1, 2], dims=["time"], coords={'time': time})
first_step.get_result.return_value = {"first": da}
second_step = MagicMock()
second_step.computation_mode = ComputationMode.Train
second_step.finished = False
second_step.get_result.return_value = {"Second": da}
self.pipeline.add(module=first_step)
self.pipeline.add(module=second_step)
self.pipeline.test(pd.DataFrame({"test": [1, 2, 2, 3, 4], "test2": [2, 2, 2, 2, 2]},
index=pd.DatetimeIndex(pd.date_range('2000-01-01', freq='24H', periods=5))))
first_step.get_result.assert_called_once_with(pd.Timestamp('2000-01-01 00:00:00', freq='24H'), None,
return_all=True)
second_step.get_result.assert_called_once_with(pd.Timestamp('2000-01-01 00:00:00', freq='24H'), None,
return_all=True)
first_step.set_run_setting.assert_called_once()
self.assertEqual(first_step.set_run_setting.call_args[0][0].computation_mode, ComputationMode.Transform)
second_step.set_run_setting.assert_called_once()
self.assertEqual(second_step.set_run_setting.call_args[0][0].computation_mode, ComputationMode.Transform)
first_step.reset.assert_called_once()
second_step.reset.assert_called_once()
@patch('pywatts.core.pipeline.FileManager')
def test_train(self, fmmock):
# Add some steps to the pipeline
time = pd.date_range('2000-01-01', freq='1H', periods=7)
da = xr.DataArray([2, 3, 4, 3, 3, 1, 2], dims=["time"], coords={'time': time})
# Assert that the computation is set to fit_transform if the ComputationMode was default
first_step = MagicMock()
first_step.computation_mode = ComputationMode.Default
first_step.finished = False
first_step.get_result.return_value = {"first": da}
second_step = MagicMock()
second_step.computation_mode = ComputationMode.Train
second_step.finished = False
second_step.get_result.return_value = {"second": da}
self.pipeline.add(module=first_step)
self.pipeline.add(module=second_step)
data = pd.DataFrame({"test": [1, 2, 2, 3, 4], "test2": [2, 2, 2, 2, 2]},
index=pd.DatetimeIndex(pd.date_range('2000-01-01', freq='24H', periods=5)))
result, summary = self.pipeline.train(data, summary=True)
first_step.set_run_setting.assert_called_once()
self.assertEqual(first_step.set_run_setting.call_args[0][0].computation_mode, ComputationMode.FitTransform)
second_step.set_run_setting.assert_called_once()
self.assertEqual(second_step.set_run_setting.call_args[0][0].computation_mode, ComputationMode.FitTransform)
first_step.get_result.assert_called_once_with(pd.Timestamp('2000-01-01 00:00:00', freq='24H'), None,
return_all=True)
second_step.get_result.assert_called_once_with(pd.Timestamp('2000-01-01 00:00:00', freq='24H'), None,
return_all=True)
first_step.reset.assert_called_once()
second_step.reset.assert_called_once()
xr.testing.assert_equal(result["second"], da)
@patch("builtins.open", new_callable=mock_open)
def test_horizon_greater_one_regression_inclusive_summary_file(self, open_mock):
lin_reg = LinearRegression()
self.fm_mock.get_path.return_value = "summary_path"
multi_regressor = SKLearnWrapper(lin_reg)(foo=self.pipeline["foo"], target=self.pipeline["target"],
target2=self.pipeline["target2"])
RMSE()(y=self.pipeline["target"], prediction=multi_regressor["target"])
time =
|
pd.date_range('2000-01-01', freq='24H', periods=5)
|
pandas.date_range
|
import os
import pandas as pd
import matplotlib.pyplot as plt
import datapackage as dp
import plotly.io as pio
import plotly.offline as offline
from plots import (
hourly_plot,
stacked_plot,
price_line_plot,
price_scatter_plot,
merit_order_plot,
filling_level_plot,
)
results = [r for r in os.listdir("results") if "plots" not in r]
country = "DE"
# shadow prices
sorted = {}
unsorted = {}
for r in results:
path = os.path.join("results", r, "output", "shadow_prices.csv")
sprices =
|
pd.read_csv(path, index_col=[0], parse_dates=True)
|
pandas.read_csv
|
import numpy
import pandas as pd
import math as m
#Moving Average
def MA(df, n):
MA = pd.Series( df['Close'].rolling(window = n,center=False).mean(), name = 'MA_' + str(n), index=df.index )
# df = df.join(MA)
return MA
#Exponential Moving Average
def EMA(df, n):
EMA = pd.Series(pd.ewma(df['Close'], span = n, min_periods = n - 1), name = 'EMA_' + str(n))
df = df.join(EMA)
return df
#Momentum
def MOM(df, n):
M = pd.Series(df['Close'].diff(n), name = 'Momentum_' + str(n))
df = df.join(M)
return df
#Rate of Change
def ROC(df, n):
M = df['Close'].diff(n - 1)
N = df['Close'].shift(n - 1)
ROC = pd.Series(M / N, name = 'ROC_' + str(n))
df = df.join(ROC)
return df
#Average True Range
def ATR_2(df, n):
def TR(args):
print(args)
return 0
i = 0
TR_l = [0]
# while i < df.index[-1]:
df.rolling(2,).apply(TR)
for i in range(0, df.shape[0]-1):
# TR = max(df.get_value(i + 1, 'High'), df.get_value(i, 'Close')) - min(df.get_value(i + 1, 'Low'), df.get_value(i, 'Close'))
TR = max( df.ix[i + 1, 'High'], df.ix[i, 'Close']) - min(df.ix[i + 1, 'Low'], df.ix[i, 'Close'])
TR_l.append(TR)
i = i + 1
TR_s = pd.Series(TR_l, index=df.index)
ATR = pd.Series(TR_s.rolling(window= n, center=False).mean(), name = 'ATR_' + str(n))
return ATR
#Average True Range
def ATR(df, n):
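    # True range per bar = max(high - low, |high - prev close|, |low - prev close|);
    # the ATR is its n-period simple moving average, matching the rolling mean below.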
def TR(args):
print(args)
return 0
atr3 = pd.DataFrame( {'a' : abs( df['High'] - df['Low'] ), 'b' : abs( df['High'] - df['Close'].shift() ), 'c' : abs(df['Low']-df['Close'].shift() ) } )
return atr3.max(axis=1).rolling(window=n).mean()
#Bollinger Bands
def BBANDS(df, n):
MA = pd.Series(pd.rolling_mean(df['Close'], n))
MSD = pd.Series(pd.rolling_std(df['Close'], n))
b1 = 4 * MSD / MA
B1 = pd.Series(b1, name = 'BollingerB_' + str(n))
df = df.join(B1)
b2 = (df['Close'] - MA + 2 * MSD) / (4 * MSD)
B2 = pd.Series(b2, name = 'Bollinger%b_' + str(n))
df = df.join(B2)
return df
#Pivot Points, Supports and Resistances
def PPSR(df):
PP = pd.Series((df['High'] + df['Low'] + df['Close']) / 3)
R1 = pd.Series(2 * PP - df['Low'])
S1 = pd.Series(2 * PP - df['High'])
R2 = pd.Series(PP + df['High'] - df['Low'])
S2 = pd.Series(PP - df['High'] + df['Low'])
R3 = pd.Series(df['High'] + 2 * (PP - df['Low']))
S3 = pd.Series(df['Low'] - 2 * (df['High'] - PP))
psr = {'PP':PP, 'R1':R1, 'S1':S1, 'R2':R2, 'S2':S2, 'R3':R3, 'S3':S3}
PSR = pd.DataFrame(psr)
df = df.join(PSR)
return df
#Stochastic oscillator %K
def STOK(df):
SOk = pd.Series((df['Close'] - df['Low']) / (df['High'] - df['Low']), name = 'SO%k')
df = df.join(SOk)
return df
#Stochastic oscillator %D
def STO(df, n):
SOk = pd.Series((df['Close'] - df['Low']) / (df['High'] - df['Low']), name = 'SO%k')
SOd = pd.Series(pd.ewma(SOk, span = n, min_periods = n - 1), name = 'SO%d_' + str(n))
df = df.join(SOd)
return df
#Trix
def TRIX(df, n):
EX1 = pd.ewma(df['Close'], span = n, min_periods = n - 1)
EX2 = pd.ewma(EX1, span = n, min_periods = n - 1)
EX3 = pd.ewma(EX2, span = n, min_periods = n - 1)
i = 0
ROC_l = [0]
while i + 1 <= df.index[-1]:
ROC = (EX3[i + 1] - EX3[i]) / EX3[i]
ROC_l.append(ROC)
i = i + 1
Trix = pd.Series(ROC_l, name = 'Trix_' + str(n))
df = df.join(Trix)
return df
#Average Directional Movement Index
def ADX(df, n, n_ADX):
i = 0
UpI = []
DoI = []
while i + 1 <= df.index[-1]:
UpMove = df.get_value(i + 1, 'High') - df.get_value(i, 'High')
DoMove = df.get_value(i, 'Low') - df.get_value(i + 1, 'Low')
if UpMove > DoMove and UpMove > 0:
UpD = UpMove
else: UpD = 0
UpI.append(UpD)
if DoMove > UpMove and DoMove > 0:
DoD = DoMove
else: DoD = 0
DoI.append(DoD)
i = i + 1
i = 0
TR_l = [0]
while i < df.index[-1]:
TR = max(df.get_value(i + 1, 'High'), df.get_value(i, 'Close')) - min(df.get_value(i + 1, 'Low'), df.get_value(i, 'Close'))
TR_l.append(TR)
i = i + 1
TR_s = pd.Series(TR_l)
ATR = pd.Series(pd.ewma(TR_s, span = n, min_periods = n))
UpI = pd.Series(UpI)
DoI = pd.Series(DoI)
PosDI = pd.Series(pd.ewma(UpI, span = n, min_periods = n - 1) / ATR)
NegDI = pd.Series(pd.ewma(DoI, span = n, min_periods = n - 1) / ATR)
ADX = pd.Series(pd.ewma(abs(PosDI - NegDI) / (PosDI + NegDI), span = n_ADX, min_periods = n_ADX - 1), name = 'ADX_' + str(n) + '_' + str(n_ADX))
df = df.join(ADX)
return df
#MACD, MACD Signal and MACD difference
def MACD(df, n_fast, n_slow):
EMAfast = pd.Series(pd.ewma(df['Close'], span = n_fast, min_periods = n_slow - 1))
EMAslow = pd.Series(pd.ewma(df['Close'], span = n_slow, min_periods = n_slow - 1))
MACD = pd.Series(EMAfast - EMAslow, name = 'MACD_' + str(n_fast) + '_' + str(n_slow))
MACDsign = pd.Series(pd.ewma(MACD, span = 9, min_periods = 8), name = 'MACDsign_' + str(n_fast) + '_' + str(n_slow))
MACDdiff = pd.Series(MACD - MACDsign, name = 'MACDdiff_' + str(n_fast) + '_' + str(n_slow))
df = df.join(MACD)
df = df.join(MACDsign)
df = df.join(MACDdiff)
return df
#Mass Index
def MassI(df):
Range = df['High'] - df['Low']
EX1 = pd.ewma(Range, span = 9, min_periods = 8)
EX2 = pd.ewma(EX1, span = 9, min_periods = 8)
Mass = EX1 / EX2
MassI = pd.Series(pd.rolling_sum(Mass, 25), name = 'Mass Index')
df = df.join(MassI)
return df
#Vortex Indicator: http://www.vortexindicator.com/VFX_VORTEX.PDF
def Vortex(df, n):
i = 0
TR = [0]
while i < df.index[-1]:
Range = max(df.get_value(i + 1, 'High'), df.get_value(i, 'Close')) - min(df.get_value(i + 1, 'Low'), df.get_value(i, 'Close'))
TR.append(Range)
i = i + 1
i = 0
VM = [0]
while i < df.index[-1]:
Range = abs(df.get_value(i + 1, 'High') - df.get_value(i, 'Low')) - abs(df.get_value(i + 1, 'Low') - df.get_value(i, 'High'))
VM.append(Range)
i = i + 1
VI = pd.Series(pd.rolling_sum(pd.Series(VM), n) / pd.rolling_sum(pd.Series(TR), n), name = 'Vortex_' + str(n))
df = df.join(VI)
return df
#KST Oscillator
def KST(df, r1, r2, r3, r4, n1, n2, n3, n4):
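    # KST oscillator: rate of change over r1..r4 bars, each smoothed with a rolling
    # sum over n1..n4 bars and combined into a weighted sum.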
M = df['Close'].diff(r1 - 1)
N = df['Close'].shift(r1 - 1)
ROC1 = M / N
M = df['Close'].diff(r2 - 1)
N = df['Close'].shift(r2 - 1)
ROC2 = M / N
M = df['Close'].diff(r3 - 1)
N = df['Close'].shift(r3 - 1)
ROC3 = M / N
M = df['Close'].diff(r4 - 1)
N = df['Close'].shift(r4 - 1)
ROC4 = M / N
KST = pd.Series(
|
pd.rolling_sum(ROC1, n1)
|
pandas.rolling_sum
|
doc = """
This jupyter notebook is authored by ygg_anderson for the Token Engineering Commons. See appropriate licensing. 🐧 🐧 🐧
"""
import param
import panel as pn
import pandas as pd
import hvplot.pandas
import holoviews as hv
import numpy as np
from scipy.stats.mstats import gmean
import os
pn.extension()
APP_PATH = './'
sheets = [
"Total Impact Hours so far",
"IH Predictions",
"#8 Jan 1",
"#7 Dec 18",
"#6 Dec 4",
"#5 Nov 20",
"#4 Nov 6",
"#3 Oct 23",
"#2 Oct 9",
"#1 Sept 24",
"#0 Sept 7 (historic)",
] + [f"#{i} IH Results" for i in range(9)]
sheets = {i:sheet for i, sheet in enumerate(sheets)}
def read_excel(sheet_name="Total Impact Hours so far", header=1, index_col=0, usecols=None) -> pd.DataFrame:
data = pd.read_excel(
os.path.join(APP_PATH, "data", "TEC Praise Quantification.xlsx"),
sheet_name=sheet_name,
engine='openpyxl',
header=header,
index_col=index_col,
usecols=usecols
).reset_index().dropna(how='any')
return data
def read_impact_hour_data():
impact_hour_data_1 = read_excel()
impact_hour_data_2 = read_excel(sheet_name="IH Predictions", header=0, index_col=0, usecols='A:I').drop(index=19)
return (impact_hour_data_1, impact_hour_data_2)
def read_cstk_data():
# Load CSTK data
cstk_data = pd.read_csv('CSTK_DATA.csv', header=None).reset_index().head(100)
cstk_data.columns = ['CSTK Token Holders', 'CSTK Tokens']
cstk_data['CSTK Tokens Capped'] = cstk_data['CSTK Tokens'].apply(lambda x: min(x, cstk_data['CSTK Tokens'].sum()/10))
return cstk_data
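# A minimal usage sketch for the loaders above; the spreadsheet and CSV paths are the
# ones hard-coded in the functions, and their presence on disk is an assumption.
def load_all_inputs():
    impact_hours, ih_predictions = read_impact_hour_data()
    cstk = read_cstk_data()
    return impact_hours, ih_predictions, cstk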
class ImpactHoursData(param.Parameterized):
historic = pd.read_csv('data/IHPredictions.csv').query('Model=="Historic"')
optimistic =
|
pd.read_csv('data/IHPredictions.csv')
|
pandas.read_csv
|
# %%
from functools import reduce
import numpy as np
import pandas as pd
from pandas.tseries.offsets import DateOffset
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
# %%
def build_gvkeys(prc, fund):
gvkeys_fund = fund.gvkey.unique()
gvkeys_prc = prc[prc.close > 5].gvkey.unique()
gvkeys = np.intersect1d(gvkeys_fund, gvkeys_prc)
return gvkeys
def fill_year(df):
first_date = df["date"].iloc[0]
last_date = df["date"].iloc[-1]
date_index = pd.date_range(
pd.to_datetime(first_date),
|
pd.to_datetime(last_date)
|
pandas.to_datetime
|
import os
import threading
import time
from datetime import datetime
import pandas as pd
import logging
import yaml
from flask import Flask
from paho.mqtt import client as mqtt_client
logging.basicConfig(level=logging.DEBUG, filename="logfile", filemode="a+",
format="%(asctime)-15s %(levelname)-8s %(message)s")
app = Flask(__name__)
@app.route('/isAlive')
def index():
return "true"
class Service(threading.Thread):
def __init__(self, host, port):
threading.Thread.__init__(self)
self.host = host
self.port = port
self.client_id = f'python-mqtt-{5}'
self.payload = []
def connect_mqtt(self):
def on_connect(client, userdata, flags, rc):
if rc == 0:
logging.info("Connected to MQTT Broker!")
else:
logging.info("Failed to connect, return code %d\n", rc)
client = mqtt_client.Client(self.client_id)
client.on_connect = on_connect
client.connect(self.host, self.port)
return client
def subscribe(self, client: mqtt_client):
# get all info
def on_message(client, userdata, msg):
print(f"Received `{msg.payload.decode()}` from `{msg.topic}` topic")
logging.info(f"Received `{msg.payload.decode()}` from `{msg.topic}` topic")
try:
if msg.topic == '/channel/TEMPERATURE-max-limit':
with open("CONTROL-MAX-temperature.csv", 'a+') as f:
f.write("" + datetime.now().timestamp().__int__().__str__() + "," + msg.payload.decode() + "\n")
if msg.topic == '/channel/TEMPERATURE-min-limit':
with open("CONTROL-MIN-temperature.csv", 'a+') as f:
f.write("" + datetime.now().timestamp().__int__().__str__() + "," + msg.payload.decode() + "\n")
if msg.topic == '/channel/TEMP-sensor':
with open("./TEMP-sensor.csv", 'a+') as f:
f.write("" + datetime.now().timestamp().__int__().__str__() + "," + msg.payload.decode() + "\n")
if msg.topic == '/channel/BLANKET-prediction':
with open("./BLANKET-prediction.csv", 'a+') as f:
f.write("" + datetime.now().timestamp().__int__().__str__() + "," + msg.payload.decode() + "\n")
if msg.topic == '/channel/BLANKET-sensor':
with open("./BLANKET-sensor.csv", 'a+') as f:
f.write("" + datetime.now().timestamp().__int__().__str__() + "," + msg.payload.decode() + "\n")
if msg.topic == '/channel/HR-sensor':
with open("./HR-sensor.csv", 'a+') as f:
f.write("" + datetime.now().timestamp().__int__().__str__() + "," + msg.payload.decode() + "\n")
except Exception as excM:
logging.info(
f"Exeption: {excM} -- {time.asctime(time.localtime(time.time()))}")
pidP = os.getpid()
os.kill(pidP, 2)
client.subscribe('/channel/TEMPERATURE-max-limit')
client.on_message = on_message
client.subscribe('/channel/TEMPERATURE-min-limit')
client.on_message = on_message
client.subscribe('/channel/TEMP-sensor')
client.on_message = on_message
client.subscribe('/channel/BLANKET-prediction')
client.on_message = on_message
client.subscribe('/channel/BLANKET-sensor')
client.on_message = on_message
client.subscribe('/channel/HR-sensor')
client.on_message = on_message
def run(self):
client = self.connect_mqtt()
self.subscribe(client)
client.loop_forever()
class Planning(threading.Thread):
def __init__(self, service, MINTEMPERATUREBLANKET, MAXTEMPERATUREBLANKET,INTERVALLEVEL1,INTERVALLEVEL2, HRMIN, HRMAX):
threading.Thread.__init__(self)
self.service = service
self.MINTEMPERATUREBLANKET = MINTEMPERATUREBLANKET
self.MAXTEMPERATUREBLANKET = MAXTEMPERATUREBLANKET
self.HRMIN = HRMIN
self.HRMAX = HRMAX
self.INTERVALLEVEL1 = INTERVALLEVEL1
self.INTERVALLEVEL2 = INTERVALLEVEL2
def connect_mqtt(self):
def on_connect(client, userdata, flags, rc):
if rc == 0:
logging.info("Connected to MQTT Broker!")
else:
logging.info("Failed to connect, return code %d\n", rc)
try:
client = mqtt_client.Client(f'python-mqtt-{6}')
client.on_connect = on_connect
client.connect(self.service.host, self.service.port)
return client
except Exception as excM:
logging.info(f"Execption: {excM} -- {time.asctime(time.localtime(time.time()))}")
pidP = os.getpid()
os.kill(pidP, 2)
# convert to integer and check temperature
def checkRange(self, blanket, actual, minimum, maximum, hr, prevision):
actual = int(actual)
minimum = int(minimum)
maximum = int(maximum)
hr = int(hr)
blanket = int(blanket)
prevision = int(prevision)
# normal range
if minimum < actual < maximum:
return prevision
# heartbeats + low hr
elif actual <= minimum:
if prevision > (blanket + self.INTERVALLEVEL1):
return prevision
if hr < self.HRMIN:
return blanket+self.INTERVALLEVEL1
else:
return blanket + self.INTERVALLEVEL1
elif actual >= maximum:
if prevision < (blanket - self.INTERVALLEVEL2):
return prevision
if hr > self.HRMIN:
return blanket-self.INTERVALLEVEL2
else:
return blanket - self.INTERVALLEVEL1
elif actual < maximum and hr > self.HRMAX:
return blanket
return prevision
def respectBlanketSettings(self,blanket):
if blanket < self.MINTEMPERATUREBLANKET:
return self.MINTEMPERATUREBLANKET
if blanket >self.MAXTEMPERATUREBLANKET:
return self.MAXTEMPERATUREBLANKET
else:
return blanket
def publish(self, client):
while True:
try:
time.sleep(1)
dataPREVISIONING = pd.read_csv("./BLANKET-prediction.csv")
dataPREVISIONING = pd.DataFrame(dataPREVISIONING, columns=['Date', 'PREDICT'])
dataBLANKETSENSOR = pd.read_csv("./BLANKET-sensor.csv")
dataBLANKETSENSOR = pd.DataFrame(dataBLANKETSENSOR, columns=['Date', 'TEMP'])
dataCONTROLMAX = pd.read_csv("CONTROL-MAX-temperature.csv")
dataCONTROLMAX = pd.DataFrame(dataCONTROLMAX, columns=['Date', 'TEMP'])
dataCONTROLMIN = pd.read_csv("CONTROL-MIN-temperature.csv")
dataCONTROLMIN = pd.DataFrame(dataCONTROLMIN, columns=['Date', 'TEMP'])
dataHRSENSOR =
|
pd.read_csv("./HR-sensor.csv")
|
pandas.read_csv
|
# coding: utf8
from collections import deque
from collections import Counter
# noinspection PyPackageRequirements
import pytest
from pandas import DataFrame
# noinspection PyProtectedMember
from dfqueue.core.dfqueue import QueuesHandler, QueueHandlerItem, QueueBehaviour
def test_singleton():
handler_a = QueuesHandler()
handler_b = QueuesHandler()
assert id(handler_a) != id(handler_b)
assert id(handler_a._QueuesHandler__instance) == id(handler_b._QueuesHandler__instance)
assert handler_a.default_queue_name == handler_b.default_queue_name
def test_valid_get_item():
handler = QueuesHandler()
default_queue_name = handler.default_queue_name
queue_data = handler[default_queue_name]
assert isinstance(queue_data, dict)
assert len(queue_data) == len(QueueHandlerItem)
assert all([item in queue_data for item in QueueHandlerItem])
assert isinstance(queue_data[QueueHandlerItem.QUEUE], deque)
assert queue_data[QueueHandlerItem.DATAFRAME] is None
assert isinstance(queue_data[QueueHandlerItem.MAX_SIZE], int)
def test_invalid_get_item():
handler = QueuesHandler()
invalid_queue_name = "UNKNOWN"
with pytest.raises(AssertionError):
handler[invalid_queue_name]
@pytest.mark.parametrize("queue_iterable,dataframe,max_size,counter,behaviour", [
(deque(), DataFrame(), 1, Counter(), QueueBehaviour.LAST_ITEM),
(deque((1, {"A": "a", "B": "b"})), DataFrame(), 1, {1: Counter({"A": 1, "B": 1})},
QueueBehaviour.ALL_ITEMS),
(deque(), DataFrame(), 1234567890, {}, QueueBehaviour.LAST_ITEM),
([], DataFrame(), 1, {}, QueueBehaviour.ALL_ITEMS),
([(1, {"A": "a", "B": "b"})],
|
DataFrame()
|
pandas.DataFrame
|
"""
"""
import pandas as pd
import numpy as np
from sips.macros import sports as sps
from sips.h import helpers as h
def to_hot_map(strings, output="np"):
"""
given a list of strings it will return a dict
string : one hotted np array
"""
str_set = set(strings)
length = len(str_set)
hots = {}
for i, s in enumerate(str_set):
hot_arr = np.zeros(length)
hot_arr[i] = 1
if hots.get(s) is None:
if output == "list":
hot_arr = list(hot_arr)
hots[s] = hot_arr
return hots
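# A small self-check sketch for to_hot_map (the list of strings is illustrative):
# two unique strings map to two length-2 one-hot vectors, but which index is hot
# depends on Python's set iteration order, so only the shape is guaranteed.
def to_hot_map_example():
    hots = to_hot_map(["nba", "nfl", "nba"])
    assert sorted(hots) == ["nba", "nfl"]
    assert all(len(v) == 2 and sum(v) == 1 for v in hots.values())
    return hots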
def to_hot_maps(columns, strings_list, output="np"):
return {
col: to_hot_map(strings, output=output)
for col, strings in zip(columns, strings_list)
}
def hot_teams_dict(sports=["nfl", "nba", "nhl"]):
"""
"""
team_list = []
sorted_sports = sorted(sports)
for s in sorted_sports:
if s == "nfl":
team_list += sps.nfl.teams
elif s == "nba":
team_list += sps.nba.teams
elif s == "nhl":
team_list += sps.nhl.teams
elif s == "mlb":
team_list += sps.mlb.teams
teams_dict = to_hot_map(team_list, output="list")
return teams_dict
def hot_statuses_dict():
statuses = [
"GAME_END",
"HALF_TIME",
"INTERRUPTED",
"IN_PROGRESS",
"None",
"PRE_GAME",
]
statuses_dict = to_hot_map(statuses, output="list")
return statuses_dict
def hot_sports_dict():
statuses = ["BASK", "FOOT", "HCKY", "BASE"]
statuses_dict = to_hot_map(statuses, output="list")
return statuses_dict
def hot_bool_dict(row):
"""
row type pd.series (row of dataframe)
"""
hot_mkt = np.array([1, 0]) if row.live else np.array([0, 1])
return hot_mkt
def all_hot_maps(sports=["nba"], output="dict"):
"""
"""
hot_maps = [
hot_sports_dict(),
hot_teams_dict(sports=sports),
hot_teams_dict(sports=sports),
hot_statuses_dict(),
]
if output == "dict":
keys = ["sport", "a_team", "h_team", "status", "live"]
hot_maps = {keys[i]: hot_maps[i] for i in range(len(keys) - 1)}
return hot_maps
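# Illustrative sketch of what all_hot_maps returns for a single sport: note that the
# "live" key is dropped because the dict comprehension stops at len(keys) - 1.
def all_hot_maps_example():
    maps = all_hot_maps(sports=["nba"])
    assert set(maps) == {"sport", "a_team", "h_team", "status"}
    return maps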
def hot(df, hot_maps, drop_cold=True, ret_hots_only=False, verbose=False):
"""
df: pd.DataFrame
hot_maps: list(dict)
hot_map: dict
key: str column in df
value: one_hot vector for unique row value
---
returns dataframe
"""
if verbose:
print(f"hot_df cols: {df.columns}")
ret = []
for i, (col_name, hot_map) in enumerate(hot_maps.items()):
ret.append(hot_col(df[col_name], hot_map))
if ret_hots_only:
return ret
ret =
|
pd.concat([df] + ret, axis=1)
|
pandas.concat
|
import pandas as pd
import re
from ..decorators import float_property_decorator, int_property_decorator
from .constants import (SCHEDULE_SCHEME,
SCHEDULE_URL)
from datetime import datetime
from pyquery import PyQuery as pq
from sportsreference import utils
from sportsreference.constants import (WIN,
LOSS,
HOME,
AWAY,
NEUTRAL,
REGULAR_SEASON,
CONFERENCE_TOURNAMENT)
from sportsreference.nba.boxscore import Boxscore
class Game:
"""
A representation of a matchup between two teams.
Stores all relevant high-level match information for a game in a team's
schedule including date, time, opponent, and result.
Parameters
----------
game_data : string
The row containing the specified game information.
"""
def __init__(self, game_data, playoffs=False):
self._game = None
self._date = None
self._time = None
self._datetime = None
self._boxscore = None
self._location = None
self._opponent_abbr = None
self._opponent_name = None
self._result = None
self._points_scored = None
self._points_allowed = None
self._wins = None
self._losses = None
self._streak = None
self._playoffs = playoffs
self._parse_game_data(game_data)
def _parse_boxscore(self, game_data):
"""
Parses the boxscore URI for the game.
The boxscore is embedded within the HTML tag and needs a special
parsing scheme in order to be extracted.
Parameters
----------
game_data : PyQuery object
A PyQuery object containing the information specific to a game.
"""
boxscore = game_data('td[data-stat="box_score_text"]:first')
boxscore = re.sub(r'.*/boxscores/', '', str(boxscore))
boxscore = re.sub(r'\.html.*', '', boxscore)
setattr(self, '_boxscore', boxscore)
def _parse_opponent_abbr(self, game_data):
"""
Parses the opponent's abbreviation for the game.
The opponent's 3-letter abbreviation is embedded within the HTML tag
and needs a special parsing scheme in order to be extracted.
Parameters
----------
game_data : PyQuery object
A PyQuery object containing the information specific to a game.
"""
opponent = game_data('td[data-stat="opp_name"]:first')
opponent = re.sub(r'.*/teams/', '', str(opponent))
opponent = re.sub(r'\/.*.html.*', '', opponent)
setattr(self, '_opponent_abbr', opponent)
def _parse_game_data(self, game_data):
"""
Parses a value for every attribute.
The function looks through every attribute with the exception of those
listed below and retrieves the value according to the parsing scheme
and index of the attribute from the passed HTML data. Once the value
is retrieved, the attribute's value is updated with the returned
result.
Note that this method is called directly once Game is invoked and does
not need to be called manually.
Parameters
----------
game_data : string
A string containing all of the rows of stats for a given game.
"""
for field in self.__dict__:
# Remove the leading '_' from the name
short_name = str(field)[1:]
if short_name == 'datetime' or short_name == 'playoffs':
continue
elif short_name == 'boxscore':
self._parse_boxscore(game_data)
continue
elif short_name == 'opponent_abbr':
self._parse_opponent_abbr(game_data)
continue
value = utils._parse_field(SCHEDULE_SCHEME, game_data, short_name)
setattr(self, field, value)
@property
def dataframe(self):
"""
Returns a pandas DataFrame containing all other class properties and
values. The index for the DataFrame is the boxscore string.
"""
if self._points_allowed is None and self._points_scored is None:
return None
fields_to_include = {
'boxscore_index': self.boxscore_index,
'date': self.date,
'datetime': self.datetime,
'game': self.game,
'location': self.location,
'losses': self.losses,
'opponent_abbr': self.opponent_abbr,
'opponent_name': self.opponent_name,
'playoffs': self.playoffs,
'points_allowed': self.points_allowed,
'points_scored': self.points_scored,
'result': self.result,
'streak': self.streak,
'time': self.time,
'wins': self.wins
}
return pd.DataFrame([fields_to_include], index=[self._boxscore])
@property
def dataframe_extended(self):
"""
Returns a pandas DataFrame representing the Boxscore class for the
game. This property provides much richer context for the selected game,
but takes longer to process compared to the lighter 'dataframe'
property. The index for the DataFrame is the boxscore string.
"""
return self.boxscore.dataframe
@int_property_decorator
def game(self):
"""
Returns an ``int`` to indicate which game in the season was requested.
The first game of the season returns 1.
"""
return self._game
@property
def date(self):
"""
Returns a ``string`` of the date the game took place at, such as 'Wed,
Oct 18, 2017'.
"""
return self._date
@property
def time(self):
"""
Returns a ``string`` of the time the game started in Eastern Time, such
as '8:01p'.
"""
return self._time
@property
def datetime(self):
"""
Returns a datetime object to indicate the month, day, and year the game
took place.
"""
return datetime.strptime(self._date, '%a, %b %d, %Y')
@property
def boxscore(self):
"""
Returns an instance of the Boxscore class containing more detailed
stats on the game.
"""
return Boxscore(self._boxscore)
@property
def boxscore_index(self):
"""
Returns a ``string`` of the URI for a boxscore which can be used to
access or index a game.
"""
return self._boxscore
@property
def location(self):
"""
Returns a ``string`` constant to indicate whether the game was played
in the team's home arena or on the road.
"""
if self._location.lower() == '@':
return AWAY
return HOME
@property
def opponent_abbr(self):
"""
Returns a ``string`` of the opponent's 3-letter abbreviation, such as
'CHI' for the Chicago Bulls.
"""
return self._opponent_abbr
@property
def opponent_name(self):
"""
Returns a ``string`` of the opponent's name, such as '<NAME>'.
"""
return self._opponent_name
@property
def result(self):
"""
Returns a ``string`` constant to indicate whether the team won or lost
the game.
"""
if self._result.lower() == 'l':
return LOSS
return WIN
@int_property_decorator
def points_scored(self):
"""
Returns an ``int`` of the number of points the team scored during the
game.
"""
return self._points_scored
@int_property_decorator
def points_allowed(self):
"""
Returns an ``int`` of the number of points the team allowed during the
game.
"""
return self._points_allowed
@int_property_decorator
def wins(self):
"""
Returns an ``int`` of the number of wins the team has in the season
after the completion of the listed game.
"""
return self._wins
@int_property_decorator
def losses(self):
"""
Returns an ``int`` of the number of losses the team has in the season
after the completion of the listed game.
"""
return self._losses
@property
def streak(self):
"""
Returns a ``string`` of the team's current streak after the conclusion
of the listed game, such as 'W 3' for a 3-game winning streak.
"""
return self._streak
@property
def playoffs(self):
"""
Returns a ``boolean`` variable which evaluates to True when the game was
played in the playoffs and returns False if the game took place in the
regular season.
"""
return self._playoffs
class Schedule:
"""
An object of the given team's schedule.
Generates a team's schedule for the season including wins, losses, and
scores if applicable.
Parameters
----------
abbreviation : string
A team's short name, such as 'PHO' for the Phoenix Suns.
year : string (optional)
The requested year to pull stats from.
"""
def __init__(self, abbreviation, year=None):
self._games = []
self._pull_schedule(abbreviation, year)
def __getitem__(self, index):
"""
Return a specified game.
Returns a specified game as requested by the index number in the array.
The input index is 0-based and must be within the range of the schedule
array.
Parameters
----------
index : int
The 0-based index of the game to return.
Returns
-------
Game instance
If the requested game can be found, its Game instance is returned.
"""
return self._games[index]
def __call__(self, date):
"""
Return a specified game.
Returns a specific game as requested by the passed datetime. The input
datetime must have the same year, month, and day, but can have any time
be used to match the game.
Parameters
----------
date : datetime
A datetime object of the month, day, and year to identify a
particular game that was played.
Returns
-------
Game instance
If the requested game can be found, its Game instance is returned.
Raises
------
ValueError
If the requested date cannot be matched with a game in the
schedule.
"""
for game in self._games:
if game.datetime.year == date.year and \
game.datetime.month == date.month and \
game.datetime.day == date.day:
return game
raise ValueError('No games found for requested date')
def __repr__(self):
"""Returns a ``list`` of all games scheduled for the given team."""
return self._games
def __iter__(self):
"""
Returns an iterator of all of the games scheduled for the given team.
"""
return iter(self.__repr__())
def __len__(self):
"""Returns the number of scheduled games for the given team."""
return len(self.__repr__())
def _add_games_to_schedule(self, schedule, playoff=False):
"""
Add game information to list of games.
Create a Game instance for the given game in the schedule and add it to
the list of games the team has or will play during the season.
Parameters
----------
schedule : PyQuery object
A PyQuery object pertaining to a team's schedule table.
playoff : boolean
Evaluates to True if the game took place in the playoffs.
"""
for item in schedule:
if 'class="thead"' in str(item) or \
'class="over_header thead"' in str(item):
continue # pragma: no cover
game = Game(item, playoff)
self._games.append(game)
def _pull_schedule(self, abbreviation, year):
"""
Download and create objects for the team's schedule.
Given a team abbreviation and season, first download the team's
schedule page and convert to a PyQuery object, then create a Game
instance for every game in the team's schedule and append it to the
'_games' property.
Parameters
----------
abbreviation : string
A team's short name, such as 'DET' for the Detroit Pistons.
year : string
The requested year to pull stats from.
"""
if not year:
year = utils._find_year_for_season('nba')
# If stats for the requested season do not exist yet (as is the
# case right before a new season begins), attempt to pull the
# previous year's stats. If it exists, use the previous year
# instead.
if not utils._url_exists(SCHEDULE_URL % (abbreviation.lower(),
year)) and \
utils._url_exists(SCHEDULE_URL % (abbreviation.lower(),
str(int(year) - 1))):
year = str(int(year) - 1)
doc = pq(SCHEDULE_URL % (abbreviation, year))
schedule = utils._get_stats_table(doc, 'table#games')
if not schedule:
utils._no_data_found()
return
self._add_games_to_schedule(schedule)
if 'id="games_playoffs"' in str(doc):
playoffs = utils._get_stats_table(doc, 'table#games_playoffs')
self._add_games_to_schedule(playoffs, True)
@property
def dataframe(self):
"""
Returns a pandas DataFrame where each row is a representation of the
Game class. Rows are indexed by the boxscore string.
"""
frames = []
for game in self.__iter__():
df = game.dataframe
if df is not None:
frames.append(df)
if frames == []:
return None
return
|
pd.concat(frames)
|
pandas.concat
|
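# A minimal usage sketch for the Schedule class defined above. Live network access
# to basketball-reference.com is assumed, and 'PHO' / the 2018 season are only
# illustrative values:
def phoenix_schedule_sketch():
    phoenix = Schedule('PHO', year='2018')
    for game in phoenix:
        print(game.date, game.opponent_abbr, game.result)
    return phoenix.dataframe  # one row per completed game, indexed by boxscore string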
"""
Module to generate counterfactual explanations from a KD-Tree
This code is similar to 'Interpretable Counterfactual Explanations Guided by Prototypes': https://arxiv.org/pdf/1907.02584.pdf
"""
import copy
import timeit
import numpy as np
import pandas as pd
from dice_ml import diverse_counterfactuals as exp
from dice_ml.constants import ModelTypes
from dice_ml.explainer_interfaces.explainer_base import ExplainerBase
class DiceKD(ExplainerBase):
def __init__(self, data_interface, model_interface):
"""Init method
:param data_interface: an interface class to access data related params.
:param model_interface: an interface class to access trained ML model.
"""
self.total_random_inits = 0
super().__init__(data_interface) # initiating data related parameters
# As DiCE KD uses one-hot-encoding
self.data_interface.create_ohe_params()
# initializing model variables
self.model = model_interface
self.model.load_model() # loading pickled trained model if applicable
self.model.transformer.feed_data_params(data_interface)
self.model.transformer.initialize_transform_func()
# loading trained model
self.model.load_model()
# number of output nodes of ML model
if self.model.model_type == ModelTypes.Classifier:
self.num_output_nodes = self.model.get_num_output_nodes2(
self.data_interface.data_df[0:1][self.data_interface.feature_names])
self.predicted_outcome_name = self.data_interface.outcome_name + '_pred'
def _generate_counterfactuals(self, query_instance, total_CFs, desired_range=None, desired_class="opposite",
features_to_vary="all",
permitted_range=None, sparsity_weight=1,
feature_weights="inverse_mad", stopping_threshold=0.5, posthoc_sparsity_param=0.1,
posthoc_sparsity_algorithm="linear", verbose=False):
"""Generates diverse counterfactual explanations
:param query_instance: A dictionary of feature names and values. Test point of interest.
:param total_CFs: Total number of counterfactuals required.
:param desired_range: For regression problems. Contains the outcome range to generate counterfactuals in.
:param desired_class: Desired counterfactual class - can take 0 or 1. Default value is "opposite" to the
outcome class of query_instance for binary classification.
:param features_to_vary: Either a string "all" or a list of feature names to vary.
:param permitted_range: Dictionary with continuous feature names as keys and permitted min-max range in
list as values. Defaults to the range inferred from training data.
If None, uses the parameters initialized in data_interface.
:param sparsity_weight: Parameter to determine how much importance to give to sparsity
:param feature_weights: Either "inverse_mad" or a dictionary with feature names as keys and corresponding
weights as values. Default option is "inverse_mad" where the weight for a continuous
feature is the inverse of the Median Absolute Deviation (MAD) of the feature's
values in the training set; the weight for a categorical feature is equal to 1 by default.
:param stopping_threshold: Minimum threshold for counterfactuals target class probability.
:param posthoc_sparsity_param: Parameter for the post-hoc operation on continuous features to enhance sparsity.
:param posthoc_sparsity_algorithm: Perform either linear or binary search. Takes "linear" or "binary".
Prefer binary search when a feature range is large (for instance, income
varying from 10k to 1000k) and only if the features share a monotonic
relationship with predicted outcome in the model.
:param verbose: Parameter to determine whether to print 'Diverse Counterfactuals found!'
:return: A CounterfactualExamples object to store and visualize the resulting counterfactual explanations
(see diverse_counterfactuals.py).
"""
data_df_copy = self.data_interface.data_df.copy()
features_to_vary = self.setup(features_to_vary, permitted_range, query_instance, feature_weights)
# Prepares user defined query_instance for DiCE.
query_instance_orig = query_instance.copy()
query_instance = self.data_interface.prepare_query_instance(query_instance=query_instance)
# find the predicted value of query_instance
test_pred = self.predict_fn(query_instance)[0]
query_instance[self.data_interface.outcome_name] = test_pred
desired_class = self.misc_init(stopping_threshold, desired_class, desired_range, test_pred)
if desired_range is not None:
if desired_range[0] > desired_range[1]:
raise ValueError("Invalid Range!")
if desired_class == "opposite" and self.model.model_type == ModelTypes.Classifier:
if self.num_output_nodes == 2:
desired_class = 1.0 - test_pred
elif self.num_output_nodes > 2:
raise ValueError("Desired class can't be opposite if the number of classes is more than 2.")
if isinstance(desired_class, int) and desired_class > self.num_output_nodes - 1:
raise ValueError("Desired class should be within 0 and num_classes-1.")
# Partitioned dataset and KD Tree for each class (binary) of the dataset
self.dataset_with_predictions, self.KD_tree, self.predictions = \
self.build_KD_tree(data_df_copy, desired_range, desired_class, self.predicted_outcome_name)
query_instance, cfs_preds = self.find_counterfactuals(data_df_copy,
query_instance, query_instance_orig,
desired_range,
desired_class,
total_CFs, features_to_vary,
permitted_range,
sparsity_weight,
stopping_threshold,
posthoc_sparsity_param,
posthoc_sparsity_algorithm, verbose)
self.cfs_preds = cfs_preds
return exp.CounterfactualExamples(data_interface=self.data_interface,
final_cfs_df=self.final_cfs_df,
test_instance_df=query_instance,
final_cfs_df_sparse=self.final_cfs_df_sparse,
posthoc_sparsity_param=posthoc_sparsity_param,
desired_range=desired_range,
desired_class=desired_class,
model_type=self.model.model_type)
def predict_fn(self, input_instance):
"""returns predictions"""
return self.model.get_output(input_instance, model_score=False)
def do_sparsity_check(self, cfs, query_instance, sparsity_weight):
cfs = cfs.assign(sparsity=np.nan, distancesparsity=np.nan)
for index, row in cfs.iterrows():
cnt = 0
for column in self.data_interface.continuous_feature_names:
if not np.isclose(row[column], query_instance[column].values[0]):
cnt += 1
for column in self.data_interface.categorical_feature_names:
if row[column] != query_instance[column].values[0]:
cnt += 1
cfs.at[index, "sparsity"] = cnt
cfs["distance"] = (cfs["distance"] - cfs["distance"].min()) / (cfs["distance"].max() - cfs["distance"].min())
cfs["sparsity"] = (cfs["sparsity"] - cfs["sparsity"].min()) / (cfs["sparsity"].max() - cfs["sparsity"].min())
cfs["distancesparsity"] = cfs["distance"] + sparsity_weight * cfs["sparsity"]
cfs = cfs.sort_values(by="distancesparsity")
cfs = cfs.drop(["distance", "sparsity", "distancesparsity"], axis=1)
return cfs
def vary_valid(self, KD_query_instance, total_CFs, features_to_vary, permitted_range, query_instance,
sparsity_weight):
"""This function ensures that we only vary features_to_vary when generating counterfactuals"""
# TODO: this should be a user-specified parameter
num_queries = min(len(self.dataset_with_predictions), total_CFs * 10)
cfs = []
if self.KD_tree is not None and num_queries > 0:
KD_tree_output = self.KD_tree.query(KD_query_instance, num_queries)
distances = KD_tree_output[0][0]
indices = KD_tree_output[1][0]
cfs = self.dataset_with_predictions.iloc[indices].copy()
cfs['distance'] = distances
cfs = self.do_sparsity_check(cfs, query_instance, sparsity_weight)
cfs = cfs.drop(self.data_interface.outcome_name, axis=1)
self.final_cfs = pd.DataFrame()
final_indices = []
cfs_preds = []
total_cfs_found = 0
# Iterating through the closest points from the KD tree and checking if any of these are valid
if self.KD_tree is not None and total_CFs > 0:
for i in range(len(cfs)):
if total_cfs_found == total_CFs:
break
valid_cf_found = True
for feature in self.data_interface.feature_names:
if feature not in features_to_vary and cfs[feature].iat[i] != query_instance[feature].values[0]:
valid_cf_found = False
break
if feature in self.data_interface.continuous_feature_names:
if not self.feature_range[feature][0] <= cfs[feature].iat[i] <= self.feature_range[feature][1]:
valid_cf_found = False
break
else:
if not cfs[feature].iat[i] in self.feature_range[feature]:
valid_cf_found = False
break
if valid_cf_found:
if not self.duplicates(cfs, final_indices.copy(), i):
total_cfs_found += 1
final_indices.append(i)
if total_cfs_found > 0:
self.final_cfs = cfs.iloc[final_indices]
self.final_cfs = self.final_cfs.drop([self.predicted_outcome_name], axis=1)
# Finding the predicted outcome for each cf
for i in range(total_cfs_found):
cfs_preds.append(
self.dataset_with_predictions.iloc[final_indices[i]][self.predicted_outcome_name])
return self.final_cfs[:total_CFs], cfs_preds
def duplicates(self, cfs, final_indices, i):
final_indices.append(i)
temp_cfs = cfs.iloc[final_indices]
return temp_cfs.duplicated().iloc[-1]
def find_counterfactuals(self, data_df_copy, query_instance, query_instance_orig, desired_range, desired_class,
total_CFs, features_to_vary, permitted_range,
sparsity_weight, stopping_threshold, posthoc_sparsity_param, posthoc_sparsity_algorithm,
verbose):
"""Finds counterfactuals by querying a K-D tree for the nearest data points in the desired class from the dataset."""
start_time = timeit.default_timer()
# Making the one-hot-encoded version of query instance match the one-hot encoded version of the dataset
query_instance_df_dummies = pd.get_dummies(query_instance_orig)
for col in
|
pd.get_dummies(data_df_copy[self.data_interface.feature_names])
|
pandas.get_dummies
|
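# A hedged usage sketch for the KD-tree explainer above via dice_ml's public entry
# points. The class and argument names follow the dice_ml documentation as best
# recalled and should be treated as assumptions; the dataset and feature names
# ('income', 'age', 'hours_per_week') are invented placeholders.
def dice_kd_sketch(training_df, trained_sklearn_model):
    import dice_ml
    d = dice_ml.Data(dataframe=training_df,
                     continuous_features=['age', 'hours_per_week'],
                     outcome_name='income')
    m = dice_ml.Model(model=trained_sklearn_model, backend='sklearn')
    explainer = dice_ml.Dice(d, m, method='kdtree')  # routes to the DiceKD explainer
    query = training_df.drop(columns=['income']).head(1)
    return explainer.generate_counterfactuals(query, total_CFs=4, desired_class="opposite")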
"""Backtester"""
from copy import deepcopy
import unittest
import pandas as pd
import pytest
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.preprocessing import StandardScaler
from soam.constants import (
ANOMALY_PLOT,
DS_COL,
FIG_SIZE,
MONTHLY_TIME_GRANULARITY,
PLOT_CONFIG,
Y_COL,
)
from soam.models.prophet import SkProphet
from soam.plotting.forecast_plotter import ForecastPlotterTask
from soam.workflow import (
Backtester,
BaseDataFrameTransformer,
Forecaster,
Transformer,
compute_metrics,
)
from soam.workflow.backtester import METRICS_KEYWORD, PLOT_KEYWORD, RANGES_KEYWORD
from tests.helpers import sample_data_df # pylint: disable=unused-import
def test_compute_metrics():
"""Function to compute performance metrics."""
metrics = {
"mae": mean_absolute_error,
"mse": mean_squared_error,
}
y_true = [3, -0.5, 2, 7]
y_pred = [2.5, 0.0, 2, 8]
expected_output = {'mae': 0.5, 'mse': 0.375}
output = compute_metrics(y_true, y_pred, metrics)
unittest.TestCase().assertDictEqual(expected_output, output)
class SimpleProcessor(BaseDataFrameTransformer):
"""Create a Simple Processor object."""
def __init__(self, **fit_params): # pylint:disable=super-init-not-called
self.preproc = StandardScaler(**fit_params)
def fit(self, df_X):
self.preproc.fit(df_X[Y_COL].values.reshape(-1, 1))
return self
def transform(self, df_X, inplace=True):
if not inplace:
df_X = df_X.copy()
df_X[Y_COL] = self.preproc.transform(df_X[Y_COL].values.reshape(-1, 1)) + 10
return df_X
def assert_backtest_fold_result_common_checks(rv, ranges=None, plots=None):
"""Backtest fold result common checks assertion."""
assert tuple(rv) == (RANGES_KEYWORD, METRICS_KEYWORD, PLOT_KEYWORD)
assert rv[RANGES_KEYWORD] == ranges
assert rv[PLOT_KEYWORD].name == plots
def assert_backtest_fold_result(rv, ranges=None, metrics=None, plots=None):
"""Backtest fold result assertion."""
assert_backtest_fold_result_common_checks(rv, ranges=ranges, plots=plots)
for metric_name, values in metrics.items():
assert metric_name in rv[METRICS_KEYWORD]
if isinstance(values, dict):
for measure_name, value in values.items():
assert value == pytest.approx(rv[METRICS_KEYWORD][measure_name], 0.01)
else:
assert values == pytest.approx(rv[METRICS_KEYWORD][metric_name], 0.01)
def assert_backtest_all_folds_result(rvs, expected_values):
"""Backtest all fold result assertion."""
assert len(rvs) == len(expected_values)
for rv, evs in zip(rvs, expected_values):
assert_backtest_fold_result(rv, **evs)
def assert_backtest_fold_result_aggregated(rv, ranges=None, metrics=None, plots=None):
"""Backtest fold result aggregated assertion."""
assert_backtest_fold_result_common_checks(rv, ranges=ranges, plots=plots)
output_metrics = pd.DataFrame(rv[METRICS_KEYWORD])
expected_metrics = pd.DataFrame(metrics)
pd.testing.assert_frame_equal(output_metrics, expected_metrics, rtol=1e-1)
def assert_backtest_all_folds_result_aggregated(rvs, expected_values):
"""Backtest all fold result aggregated assertion."""
assert len(rvs) == len(expected_values)
for rv, evs in zip(rvs, expected_values):
assert_backtest_fold_result_aggregated(rv, **evs)
def test_integration_backtester_single_fold(
tmp_path, sample_data_df
): # pylint: disable=redefined-outer-name
"""Backtest single fold integration test."""
test_window = 10
train_data = sample_data_df
forecaster = Forecaster(model=SkProphet(), output_length=test_window)
preprocessor = Transformer(SimpleProcessor())
plot_config = deepcopy(PLOT_CONFIG)
plot_config[ANOMALY_PLOT][MONTHLY_TIME_GRANULARITY][FIG_SIZE] = (8, 3)
forecast_plotter = ForecastPlotterTask(
path=tmp_path,
metric_name='test',
time_granularity=MONTHLY_TIME_GRANULARITY,
plot_config=plot_config,
)
metrics = {
"mae": mean_absolute_error,
"mse": mean_squared_error,
}
backtester = Backtester(
forecaster=forecaster,
preprocessor=preprocessor,
forecast_plotter=forecast_plotter,
test_window=test_window,
train_window=30,
metrics=metrics,
)
rvs = backtester.run(train_data)
expected_values = [
{
RANGES_KEYWORD: (
pd.Timestamp('2013-02-01 00:00:00'),
pd.Timestamp('2015-07-01 00:00:00'),
pd.Timestamp('2016-05-01 00:00:00'),
),
METRICS_KEYWORD: {'mae': 0.19286372252777645, 'mse': 0.07077117049346579},
'plots': '0_forecast_2013020100_2015080100_.png',
},
]
assert_backtest_all_folds_result(rvs, expected_values)
def test_integration_backtester_multi_fold(
tmp_path, sample_data_df # pylint: disable=redefined-outer-name
):
"""Backtest multi fold integration test."""
test_window = 30
train_data = pd.concat([sample_data_df] * 3)
train_data[DS_COL] = pd.date_range(
train_data[DS_COL].min(), periods=len(train_data), freq='MS'
)
model = SkProphet()
forecaster = Forecaster(model=model, output_length=test_window)
preprocessor = Transformer(SimpleProcessor())
plot_config = deepcopy(PLOT_CONFIG)
plot_config[ANOMALY_PLOT][MONTHLY_TIME_GRANULARITY][FIG_SIZE] = (8, 3)
forecast_plotter = ForecastPlotterTask(
path=tmp_path,
metric_name='test',
time_granularity=MONTHLY_TIME_GRANULARITY,
plot_config=plot_config,
)
metrics = {
"mae": mean_absolute_error,
"mse": mean_squared_error,
}
backtester = Backtester(
forecaster=forecaster,
preprocessor=preprocessor,
forecast_plotter=forecast_plotter,
test_window=test_window,
train_window=30,
metrics=metrics,
)
rvs = backtester.run(train_data)
expected_values = [
{
RANGES_KEYWORD: (
pd.Timestamp('2013-02-01 00:00:00'),
pd.Timestamp('2015-07-01 00:00:00'),
pd.Timestamp('2018-01-01 00:00:00'),
),
METRICS_KEYWORD: {'mae': 1.140921182444867, 'mse': 2.4605768804352675},
'plots': '0_forecast_2013020100_2015080100_.png',
},
{
RANGES_KEYWORD: (
pd.Timestamp('2015-08-01 00:00:00'),
pd.Timestamp('2018-01-01 00:00:00'),
pd.Timestamp('2020-07-01 00:00:00'),
),
METRICS_KEYWORD: {'mae': 1.600049020613293, 'mse': 4.383723067139095},
'plots': '0_forecast_2015080100_2018020100_.png',
},
{
RANGES_KEYWORD: (
pd.Timestamp('2018-02-01 00:00:00'),
pd.Timestamp('2020-07-01 00:00:00'),
pd.Timestamp('2023-01-01 00:00:00'),
),
METRICS_KEYWORD: {'mae': 3.1358162976127217, 'mse': 12.666965373730687},
'plots': '0_forecast_2018020100_2020080100_.png',
},
]
assert_backtest_all_folds_result(rvs, expected_values)
# TODO: It may be a good visual aggregation to include all metrics in one plot. This
# TODO: is not possible with the current implementation.
def test_integration_backtester_multi_fold_default_aggregation(
tmp_path, sample_data_df # pylint: disable=redefined-outer-name
):
"""Backtest multi fold default aggregation integration test."""
test_window = 30
train_data = pd.concat([sample_data_df] * 3)
train_data[DS_COL] = pd.date_range(
train_data[DS_COL].min(), periods=len(train_data), freq='MS'
)
model = SkProphet()
forecaster = Forecaster(model=model, output_length=test_window)
preprocessor = Transformer(SimpleProcessor())
plot_config = deepcopy(PLOT_CONFIG)
plot_config[ANOMALY_PLOT][MONTHLY_TIME_GRANULARITY][FIG_SIZE] = (8, 3)
forecast_plotter = ForecastPlotterTask(
path=tmp_path,
metric_name='test',
time_granularity=MONTHLY_TIME_GRANULARITY,
plot_config=plot_config,
)
metrics = {
"mae": mean_absolute_error,
"mse": mean_squared_error,
}
backtester = Backtester(
forecaster=forecaster,
preprocessor=preprocessor,
forecast_plotter=forecast_plotter,
test_window=test_window,
train_window=30,
metrics=metrics,
aggregation="default",
)
rvs = backtester.run(train_data)
expected_values = [
{
RANGES_KEYWORD: (
pd.Timestamp('2013-02-01 00:00:00'),
pd.Timestamp('2023-01-01 00:00:00'),
),
METRICS_KEYWORD: {
'mae': {
'avg': 2.0269522786354313,
'max': 3.135813436023453,
'min': 1.344995687583762,
},
'mse': {
'avg': 6.761216280050696,
'max': 12.666927167728852,
'min': 3.233004063171241,
},
},
'plots': '0_forecast_2018020100_2020080100_.png',
}
]
assert_backtest_all_folds_result_aggregated(rvs, expected_values)
def test_integration_backtester_multi_fold_custom_aggregations(
tmp_path, sample_data_df # pylint: disable=redefined-outer-name
):
"""Backtest multi fold custom aggregation integration test."""
test_window = 30
train_data = pd.concat([sample_data_df] * 3)
train_data[DS_COL] = pd.date_range(
train_data[DS_COL].min(), periods=len(train_data), freq='MS'
)
model = SkProphet()
forecaster = Forecaster(model=model, output_length=test_window)
preprocessor = Transformer(SimpleProcessor())
plot_config = deepcopy(PLOT_CONFIG)
plot_config[ANOMALY_PLOT][MONTHLY_TIME_GRANULARITY][FIG_SIZE] = (8, 3)
forecast_plotter = ForecastPlotterTask(
path=tmp_path,
metric_name='test',
time_granularity=MONTHLY_TIME_GRANULARITY,
plot_config=plot_config,
)
metrics = {
"mae": mean_absolute_error,
"mse": mean_squared_error,
}
aggregation = {
METRICS_KEYWORD: {
"weighted_begining": lambda metrics_list: (
sum(
[
3 * val if idx == 0 else val
for idx, val in enumerate(metrics_list)
]
)
/ (len(metrics_list) + 2)
),
"weighted_ending": lambda metrics_list: (
sum(
[
3 * val if idx == len(metrics_list) - 1 else val
for idx, val in enumerate(metrics_list)
]
)
/ (len(metrics_list) + 2)
),
},
PLOT_KEYWORD: 1,
}
backtester = Backtester(
forecaster=forecaster,
preprocessor=preprocessor,
forecast_plotter=forecast_plotter,
test_window=test_window,
train_window=30,
metrics=metrics,
aggregation=aggregation,
)
rvs = backtester.run(train_data)
expected_values = [
{
RANGES_KEYWORD: (
pd.Timestamp('2013-02-01 00:00:00'),
pd.Timestamp('2023-01-01 00:00:00'),
),
METRICS_KEYWORD: {
'mae': {
'weighted_begining': 1.631725773112123,
'weighted_ending': 2.4296838191792647,
},
'mse': {
'weighted_begining': 4.886483816435117,
'weighted_ending': 8.969039213753284,
},
},
'plots': '0_forecast_2015080100_2018020100_.png',
}
]
assert_backtest_all_folds_result_aggregated(rvs, expected_values)
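# Small worked example of the custom aggregators above: for a three-fold metric list
# [1.0, 2.0, 3.0], "weighted_begining" triples the first fold, (3*1 + 2 + 3) / (3 + 2) = 1.6,
# while "weighted_ending" triples the last fold, (1 + 2 + 3*3) / (3 + 2) = 2.4.
def weighted_aggregation_worked_example():
    metrics_list = [1.0, 2.0, 3.0]
    begining = (3 * metrics_list[0] + sum(metrics_list[1:])) / (len(metrics_list) + 2)
    ending = (sum(metrics_list[:-1]) + 3 * metrics_list[-1]) / (len(metrics_list) + 2)
    assert abs(begining - 1.6) < 1e-9 and abs(ending - 2.4) < 1e-9
    return begining, ending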
def test_integration_backtester_multi_fold_custom_metric_aggregation_default_plot(
tmp_path, sample_data_df # pylint: disable=redefined-outer-name
):
"""Backtest multi fold custom metric aggregation default plot integration test."""
test_window = 30
train_data = pd.concat([sample_data_df] * 3)
train_data[DS_COL] = pd.date_range(
train_data[DS_COL].min(), periods=len(train_data), freq='MS'
)
model = SkProphet()
forecaster = Forecaster(model=model, output_length=test_window)
preprocessor = Transformer(SimpleProcessor())
plot_config = deepcopy(PLOT_CONFIG)
plot_config[ANOMALY_PLOT][MONTHLY_TIME_GRANULARITY][FIG_SIZE] = (8, 3)
forecast_plotter = ForecastPlotterTask(
path=tmp_path,
metric_name='test',
time_granularity=MONTHLY_TIME_GRANULARITY,
plot_config=plot_config,
)
metrics = {
"mae": mean_absolute_error,
"mse": mean_squared_error,
}
aggregation = {
METRICS_KEYWORD: {
"weighted_begining": lambda metrics_list: (
sum(
[
3 * val if idx == 0 else val
for idx, val in enumerate(metrics_list)
]
)
/ (len(metrics_list) + 2)
),
"weighted_ending": lambda metrics_list: (
sum(
[
3 * val if idx == len(metrics_list) - 1 else val
for idx, val in enumerate(metrics_list)
]
)
/ (len(metrics_list) + 2)
),
}
}
backtester = Backtester(
forecaster=forecaster,
preprocessor=preprocessor,
forecast_plotter=forecast_plotter,
test_window=test_window,
train_window=30,
metrics=metrics,
aggregation=aggregation,
)
rvs = backtester.run(train_data)
expected_values = [
{
RANGES_KEYWORD: (
|
pd.Timestamp('2013-02-01 00:00:00')
|
pandas.Timestamp
|
from __future__ import division
import re
import os
import sys
import math
import random
import numpy as np
import pandas as pd
from PyDAIR.seq.IgSeq import *
from PyDAIR.io.PyDAIRIO import *
class PyDAIRDiversity:
"""PyDAIRDiversity class.
A class used for saving the results of diversity analysis.
"""
def __init__(self):
"""PyDAIRDiversity class initialize method.
Set up `None` objects by default.
"""
self.rarefaction = {'vdj': None, 'cdr3': None}
self.samplingresampling = {'vdj': None, 'cdr3': None}
class PyDAIRStatsRecord:
"""PyDAIRStatsRecord class.
One PYDAIR file should have one **PyDAIRStatsRecord** class object. If there
are more than one PYDAIR file, use a number of **PyDAIRStatsRecord** class
objects to save these data, and save all **PyDAIRStatsRecord** class objects
into the **PyDAIRStatsRecords** class.
This class requires the lists of V gene names, D gene names, J gene names,
CDR3 nucleotide and protein sequences, and the stop codon tags.
"""
def __init__(self, name = None, v = None, d = None, j = None, orf = None,
cdr3_nucl_seq = None, cdr3_prot_seq = None,
v_del = None, j_del = None, vj_ins = None,
discard_ambiguous_D = False, productive_only = False):
"""PyDAIRStatsRecord class initialize method.
Args:
name (str): Sample name.
v (list): Assigned V gene names.
d (list): Assigned D gene names.
j (list): Assigned J gene names.
orf (list): A list of ORF.
cdr3_nucl_seq (list): A list of CDR3 nucleotide sequences.
cdr3_prot_seq (list): A list of CDR3 amino acid sequences.
v_del (list): Deleted nucleotides of 3'-end V gene.
j_del (list): Deleted nucleotides of 5'-end J gene.
vj_ins (list): Inserted nucleotides.
discard_ambiguous_D (bool): Default is `False`. If `True`, the sequences
with ambiguous D segment will be discarded before
summarization.
productive_only (bool): Default is `False`. If `True`, the sequences containing
one or more stop codons will be discarded before
summarization.
"""
# set default data
self.name = name
self.vdj = pd.DataFrame({'v': v, 'd': d, 'j': j}, columns = ['v', 'd', 'j']).fillna(value = np.nan)
self.cdr3 = pd.DataFrame({'nucl_seq': cdr3_nucl_seq, 'prot_seq': cdr3_prot_seq,
'nucl_len': pd.Series(cdr3_nucl_seq).str.len(),
'prot_len': pd.Series(cdr3_prot_seq).str.len()},
columns = ['nucl_seq', 'prot_seq', 'nucl_len', 'prot_len']).fillna(value = np.nan)
self.indels = pd.DataFrame({'v_del': v_del, 'v_del_len': pd.Series(v_del).str.len(),
'j_del': j_del, 'j_del_len': pd.Series(j_del).str.len(),
'vj_ins': vj_ins, 'vj_ins_len': pd.Series(vj_ins).str.len()},
columns = ['v_del', 'v_del_len', 'j_del', 'j_del_len',
'vj_ins', 'vj_ins_len']).fillna(value = np.nan)
# set filters
filter_ambigoD = None
if discard_ambiguous_D:
filter_ambigoD = self.vdj.d.notnull()
else:
filter_ambigoD = pd.Series([True] * self.vdj.shape[0])
filter_stopcodon = None
if productive_only:
filter_stopcodon = pd.Series(orf).notnull()
else:
filter_stopcodon = pd.Series([True] * self.vdj.shape[0])
filters = pd.Series(filter_ambigoD & filter_stopcodon)
# filter data
self.vdj = self.vdj[filters]
self.cdr3 = self.cdr3[filters]
self.indels = self.indels[filters]
# diversity study
self.div = PyDAIRDiversity()
def __len__(self):
return self.vdj.shape[0]
def len(self):
"""Retrive the number of sequences analyzed.
"""
return self.__len__()
def get_summary(self, data_type, prob = False, func = 'mean'):
"""Returns summary statistics.
Args:
data_type (str): A string to specify data type. ``v``,
``d``, ``j``, ``vdj``, ``cdr3_nucl_len``,
``cdr3_prot_len``, ``v_del_len``, ``j_del_len``,
and ``vj_ins_len`` are supported.
prob (bool): If ``True``, calculate the probability.
func (str): A string to specify data calculations when data_type is vdj_rarefaction.
"""
s = None
if data_type in ['v', 'd', 'j', 'cdr3_prot_len', 'cdr3_nucl_len', 'v_del_len', 'j_del_len', 'vj_ins_len']:
s = self.__get_freq(data_type, prob)
elif data_type == 'vdj':
s = self.__get_freq_vdj(data_type, prob)
elif data_type == 'vdj_rarefaction':
s = self.__get_est_vdj_rarefaction(data_type, func)
else:
raise ValueError(data_type + ' is not supported by get_summary method.')
return s
def __get_freq(self, data_type, prob = False):
freq = None
if data_type == 'v_del_len':
freq = self.indels.v_del_len.value_counts(dropna = False)
freq.index = ['ambiguous' if (type(_i) == np.float and np.isnan(_i)) else _i for _i in freq.index]
freq = freq.sort_index(ascending = True)
elif data_type == 'j_del_len':
freq = self.indels.j_del_len.value_counts(dropna = False)
freq.index = ['ambiguous' if (type(_i) == np.float and np.isnan(_i)) else _i for _i in freq.index]
freq = freq.sort_index(ascending = True)
elif data_type == 'vj_ins_len':
freq = self.indels.vj_ins_len.value_counts(dropna = False)
freq.index = ['ambiguous' if (type(_i) == np.float and np.isnan(_i)) else _i for _i in freq.index]
freq = freq.sort_index(ascending = True)
elif data_type == 'cdr3_prot_len':
# set dropna as True to remove the 0-length CDR3
freq = self.cdr3.prot_len.value_counts(dropna = True)
freq.index = ['ambiguous' if (type(_i) == np.float and np.isnan(_i)) else _i for _i in freq.index]
freq = freq.sort_index(ascending = True)
elif data_type == 'cdr3_nucl_len':
# set dropna as True to remove the 0-length CDR3
freq = self.cdr3.nucl_len.value_counts(dropna = True)
freq.index = ['ambiguous' if (type(_i) == np.float and np.isnan(_i)) else _i for _i in freq.index]
freq = freq.sort_index(ascending = True)
elif data_type == 'v':
freq = self.vdj.v.value_counts(dropna = False)
elif data_type == 'd':
freq = self.vdj.d.value_counts(dropna = False)
freq.index = ['ambiguous' if (type(_i) == np.float and np.isnan(_i)) else _i for _i in freq.index]
elif data_type == 'j':
freq = self.vdj.j.value_counts(dropna = False)
if prob:
freq = freq / freq.sum()
return freq
def __get_freq_vdj(self, data_type, prob):
__sep = '__________'
vdj_combinations = self.vdj.v.replace(np.nan, 'NA') + __sep + \
self.vdj.d.replace(np.nan, 'NA') + __sep + \
self.vdj.j.replace(np.nan, 'NA')
freq = vdj_combinations.value_counts(dropna = False)
if prob:
freq = freq / freq.sum()
vdj_v = []
vdj_d = []
vdj_j = []
for vdj_combination in freq.index:
vv, dd, jj = vdj_combination.split(__sep)
vdj_v.append(vv)
vdj_d.append(dd)
vdj_j.append(jj)
freq = pd.DataFrame({'v': vdj_v, 'd': vdj_d, 'j': vdj_j, 'frequency': freq.values},
columns = ['v', 'd', 'j', 'frequency']).replace('NA', np.nan)
return freq
def __get_est_vdj_rarefaction(self, data_type, func = 'mean'):
rdata = None
if self.div.rarefaction['vdj'] is not None:
if func == 'raw':
rdata = self.div.rarefaction['vdj']
elif func == 'mean':
rdata = self.div.rarefaction['vdj'].mean(axis = 1)
return rdata
def samplingresampling_study(self, data = None, n = 1000):
"""Sampling-resampling study.
Args:
data (str): ``vdj`` or ``cdr3``.
Perform the sampling-resampling study for the specified data type.
"""
if data is None or data == 'all':
data = ['vdj']
if not isinstance(data, list):
data = [data]
for data_i in data:
if data_i == 'vdj':
dat = self.vdj
__sep = '__________'
dat_for_study = dat.v.replace(np.nan, 'NaN') + __sep + \
dat.d.replace(np.nan, 'NaN') + __sep + \
dat.j.replace(np.nan, 'NaN')
dat_for_study = pd.Series(dat_for_study)
if data_i == 'cdr3':
dat_for_study = pd.Series(self.cdr3.prot_seq)
self.div.samplingresampling[data_i] = self.__samplingresampling_study(dat_for_study, n)
def __samplingresampling_study(self, dat, n):
population = dat
population_size = len(dat)
resampling_size_ratio = range(101)
# get resampling sizes
resampling_sizes = []
for r in resampling_size_ratio:
resampling_sizes.append(int(round(0.01 * r * population_size)))
x = [0] # total sampling sizes
y = [] # the number of new sampled CDR3 sequence
for i in range(n):
y_try_n = []
population_n = list(population)
sampled_n = set([])
for s in range(len(resampling_sizes) - 1):
samplingsize = resampling_sizes[s + 1] - resampling_sizes[s]
if len(population_n) < samplingsize * 1.5:
samplingsize = len(population_n)
if len(population_n) <= samplingsize:
sampled_idx = range(0, len(population_n))
else:
sampled_idx = random.sample(range(0, len(population_n)), samplingsize)
s_left = []
s_smpl = []
for smplidx_in_population_n in range(len(population_n)):
if smplidx_in_population_n in sampled_idx:
s_smpl.append(population_n[smplidx_in_population_n])
else:
s_left.append(population_n[smplidx_in_population_n])
sampled_items = s_smpl
population_n = s_left
sampled_items = set(sampled_items)
y_try_n.append(len(sampled_items.difference(sampled_n)))
sampled_n = sampled_n.union(sampled_items)
if i == 0:
x.append(x[s] + samplingsize)
y.append(y_try_n)
x.pop(0)
y = pd.DataFrame(y, columns = resampling_size_ratio[1:]).T
y.columns = ['try_' + str(n_try + 1) for n_try in range(n)]
return y
def rarefaction_study(self, data = None, n = 1000):
"""Perform rarefaction study for VDJ combination or CDR3 sequenece.
Args:
data (str): One of ``vdj`` or ``cdr3`` can be specified for diversity study.
n (int): The number of performing of capture-recapture procedures.
"""
if data is None or data == 'all':
data = ['vdj']
if not isinstance(data, list):
data = [data]
for data_i in data:
if data_i == 'vdj':
dat = self.vdj
__sep = '__________'
dat_for_study = dat.v.replace(np.nan, 'NaN') + __sep + \
dat.d.replace(np.nan, 'NaN') + __sep + \
dat.j.replace(np.nan, 'NaN')
dat_for_study = pd.Series(dat_for_study)
if data_i == 'cdr3':
dat_for_study = pd.Series(self.cdr3.prot_seq)
self.div.rarefaction[data_i] = self.__rarefaction_study(dat_for_study, n)
def __rarefaction_study(self, dat, n):
population_size = len(dat)
sampling_sizes = self.__get_sampling_sizes(population_size)
sampled_unique_items = [None] * n
# rarefaction study
for i in range(n):
sampled_unique_items_i = []
for s in sampling_sizes:
sampled_id = random.sample(range(0, population_size), s)
sampled_unique_items_i.append(dat[sampled_id].unique().shape[0])
sampled_unique_items[i] = sampled_unique_items_i
# set results into DataFrame class object
sampled_unique_items = pd.DataFrame(sampled_unique_items, columns = sampling_sizes).T
sampled_unique_items.columns = ['try_' + str(n_try + 1) for n_try in range(n)]
return sampled_unique_items
def __get_sampling_sizes(self, sample_size):
sampling_sizes = []
n_digits = int(math.log10(sample_size) + 1)
# 1st sampling sizes
for i in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]:
sampling_sizes.append(i * 10 ** (n_digits - 1 - 2))
# 2nd sampling sizes
for j in [2, 4, 6, 8, 10]:
sampling_sizes.append(j * 10 ** (n_digits - 1 - 1))
# 3rd sampling sizes
k = 2
while max(sampling_sizes) < sample_size:
if k * 10 ** (n_digits - 1) < sample_size:
sampling_sizes.append(k * 10 ** (n_digits - 1))
else:
sampling_sizes.append(sample_size)
k += 1
return sampling_sizes
class PyDAIRStatsRecords:
'''PyDAIRStatsRecords for storing the PyDAIR data.
This class provides the list of PyDAIRStatsRecord and some function of list.
'''
def __init__(self):
"""PyDAIRStatsRecords class initialize method.
"""
self.__records = []
self.__index = 0
def __len__(self):
return len(self.__records)
def len(self):
"""Retrive the number of PyDAIRStatsRecord class object.
Retrive the number of PyDAIRStatsRecord objects, thus the number of samples.
"""
return self.__len__()
def __iter__(self):
self.__index = 0
return self
def __next__(self):
try:
stats_record = self.__records[self.__index]
except IndexError:
raise StopIteration
self.__index += 1
return stats_record
def next(self):
"""Return the next IgSeq object from the iterator of PyDAIRStatsRecords class objects.
"""
return self.__next__()
def append(self, stats_record):
"""Append PyDAIRStatsRecord into PyDAIRStatsRecords class object.
"""
self.__records.append(stats_record)
def get_record(self, i):
"""Retrive A PyDAIRStatsRecord.
Args:
i (int): The index of PyDAIRStatsRecord should be returned.
Returns:
PyDAIRStatsRecord: Return a PyDAIRStatsRecord object.
"""
try:
stats_record = self.__records[i]
except IndexError:
raise IndexError
return stats_record
def set_record(self, i, stats_record):
"""Update PyDAIRStatsRecord into PyDAIRStatsRecords class object.
Args:
i (int): The index of PyDAIRStatsRecord should be updated.
stats_record: A PyDAIRStatsRecord that will used for updating.
"""
self.__records[i] = stats_record
class PyDAIRStats:
'''
Statistics analysis of repertoire sequences.
The class for storing the PyDAIR data file. Before use initialize this class,
one should run create PyDAIR format files by other PyDAIR functions. This class
stored many data of analyzed data.
'''
def __init__(self, pydair_file, pydair_id = None,
discard_ambiguous_D = False, productive_only = False):
"""PyDAIRStats class initialize method.
Args:
pydair_file (list): A list the contains multiple PYDAIR file path.
pydair_id (list): A list of sample names.
discard_ambiguous_D (bool): If true, discard ambiguous D before analysis.
productive_only (bool): If true, analyze only productive sequences (those without stop codons).
"""
if pydair_id is None:
pydair_id = []
for i in range(len(pydair_file)):
pydair_id.append('individual ' + str(i + 1))
self.__pydair_file = pydair_file
self.__pydair_id = pydair_id
self.__discard_ambiguous_D = discard_ambiguous_D
self.__productive_only = productive_only
self.samples = None
# parse PyDAIR files
self.__parse_pydair_files()
def __parse_pydair_files(self):
self.samples = PyDAIRStatsRecords()
for i in range(len(self.__pydair_file)):
v = []
d = []
j = []
orf = []
cdr3_prot_seq = []
cdr3_nucl_seq = []
v_del = []
j_del = []
vj_ins = []
pydair_fh = PyDAIRIO(self.__pydair_file[i], 'r')
for igseq in pydair_fh.parse():
v.append(igseq.v.sbjct.name)
d.append(igseq.d.sbjct.name)
j.append(igseq.j.sbjct.name)
orf.append(igseq.query.orf)
cdr3_data = igseq.get_cdr3_data()
if cdr3_data.nucl_seq is not None and '*' in cdr3_data.prot_seq:
cdr3_data.nucl_seq = None
cdr3_data.prot_seq = None
cdr3_prot_seq.append(cdr3_data.prot_seq)
cdr3_nucl_seq.append(cdr3_data.nucl_seq)
v_del.append(igseq.indels.v_deletion)
j_del.append(igseq.indels.j_deletion)
vj_ins.append(igseq.indels.vj_insertion)
sample_record = PyDAIRStatsRecord(self.__pydair_id[i], v, d, j, orf,
cdr3_nucl_seq, cdr3_prot_seq,
v_del, j_del, vj_ins,
self.__discard_ambiguous_D, self.__productive_only)
self.samples.append(sample_record)
pydair_fh.close()
def rarefaction_study(self, data = None, n = 1000):
"""Rarefaction study for estimating the number of VDJ combination.
Args:
data (str): 'vdj' or 'cdr3'.
Rarefaction study for estimating the number of VDJ combination for each sample.
"""
for bsample in self.samples:
bsample.rarefaction_study(data, n)
def get_summary(self, data_type, prob = False):
'''Get the frequency of usage as a DataFrame class object.
Args:
data_type (str): A string to specify data type. ``v``,
``d``, ``j``, ``vdj``, ``cdr3_nucl_len``,
``cdr3_prot_len``, ``v_del_len``, ``j_del_len``,
and ``vj_ins_len`` are supported.
freq (bool): If `True`, return the frequencies of counts.
prob (bool): If prob is `True`, return the probability, and omit `freq`.
Returns:
A Pandas DataFrame class object.
Get the frequence of usages of the ``data_type`` as DataFrame class object.
'''
s = None
if data_type in ['v', 'd', 'j', 'cdr3_prot_len', 'cdr3_nucl_len', 'v_del_len', 'j_del_len', 'vj_ins_len']:
s = self.__get_freq(data_type, prob)
elif data_type == 'vdj':
s = self.__get_freq_vdj(data_type, prob)
elif data_type == 'vdj_rarefaction':
s = self.__get_est_vdj_rarefaction(data_type)
else:
raise ValueError(data_type + ' is not supported by get_summary method.')
return s
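    # Usage sketch (illustrative only; the file names and sample ids below are
    # hypothetical, not taken from PyDAIR itself):
    #
    #     stats = PyDAIRStats(['sampleA.pydair', 'sampleB.pydair'],
    #                         pydair_id=['sample A', 'sample B'])
    #     v_usage  = stats.get_summary('v', prob=True)   # V gene usage probabilities
    #     vdj_freq = stats.get_summary('vdj')            # VDJ combination frequencies
    #     stats.rarefaction_study('vdj', n=1000)
    #     vdj_rare = stats.get_summary('vdj_rarefaction')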
def __get_freq(self, data_type, prob = False):
sample_freqs = []
sample_names = []
for sample in self.samples:
sample_freqs.append(sample.get_summary(data_type, prob = prob))
sample_names.append(sample.name)
freq_dataframe = pd.concat(sample_freqs, axis = 1)
freq_dataframe.columns = sample_names
if prob:
freq_dataframe = freq_dataframe / freq_dataframe.sum(axis = 1)
freq_dataframe.columns = sample_names
if data_type in ['v', 'd', 'j']:
            freq_dataframe = freq_dataframe.loc[freq_dataframe.mean(axis = 1).sort_values(ascending = False).index]
else:
freq_dataframe = freq_dataframe.set_index([[int(dx) for dx in freq_dataframe.index.values]])
return freq_dataframe
def __get_freq_vdj(self, data_type, prob = False):
__sep = '__________'
sample_freqs = []
sample_names = []
for sample in self.samples:
freq = sample.get_summary(data_type, prob = prob)
freqval = pd.Series(freq.frequency)
freqval.index = freq.v.replace(np.nan, 'NA') + __sep + \
freq.d.replace(np.nan, 'NA') + __sep + \
freq.j.replace(np.nan, 'NA')
sample_freqs.append(freqval)
sample_names.append(sample.name)
freq_dataframe = pd.concat(sample_freqs, axis = 1)
freq_dataframe.columns = sample_names
vdj_v = []
vdj_d = []
vdj_j = []
for vdj_combination in freq_dataframe.index:
vv, dd, jj = vdj_combination.split(__sep)
vdj_v.append(vv)
vdj_d.append(dd)
vdj_j.append(jj)
freq_vdjcmbn = pd.concat([pd.Series(vdj_v),
|
pd.Series(vdj_d)
|
pandas.Series
|
from datetime import datetime
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.base import _registry as ea_registry
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
PeriodIndex,
Series,
Timestamp,
cut,
date_range,
notna,
period_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.tseries.offsets import BDay
class TestDataFrameSetItem:
@pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"])
def test_setitem_dtype(self, dtype, float_frame):
arr = np.random.randn(len(float_frame))
float_frame[dtype] = np.array(arr, dtype=dtype)
assert float_frame[dtype].dtype.name == dtype
def test_setitem_list_not_dataframe(self, float_frame):
data = np.random.randn(len(float_frame), 2)
float_frame[["A", "B"]] = data
tm.assert_almost_equal(float_frame[["A", "B"]].values, data)
def test_setitem_error_msmgs(self):
# GH 7432
df = DataFrame(
{"bar": [1, 2, 3], "baz": ["d", "e", "f"]},
index=Index(["a", "b", "c"], name="foo"),
)
ser = Series(
["g", "h", "i", "j"],
index=Index(["a", "b", "c", "a"], name="foo"),
name="fiz",
)
msg = "cannot reindex from a duplicate axis"
with pytest.raises(ValueError, match=msg):
df["newcol"] = ser
# GH 4107, more descriptive error message
df = DataFrame(np.random.randint(0, 2, (4, 4)), columns=["a", "b", "c", "d"])
msg = "incompatible index of inserted column with frame index"
with pytest.raises(TypeError, match=msg):
df["gr"] = df.groupby(["b", "c"]).count()
def test_setitem_benchmark(self):
# from the vb_suite/frame_methods/frame_insert_columns
N = 10
K = 5
df = DataFrame(index=range(N))
new_col = np.random.randn(N)
for i in range(K):
df[i] = new_col
expected = DataFrame(np.repeat(new_col, K).reshape(N, K), index=range(N))
tm.assert_frame_equal(df, expected)
def test_setitem_different_dtype(self):
df = DataFrame(
np.random.randn(5, 3), index=np.arange(5), columns=["c", "b", "a"]
)
df.insert(0, "foo", df["a"])
df.insert(2, "bar", df["c"])
# diff dtype
# new item
df["x"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 5 + [np.dtype("float32")],
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
# replacing current (in different block)
df["a"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2,
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
df["y"] = df["a"].astype("int32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2 + [np.dtype("int32")],
index=["foo", "c", "bar", "b", "a", "x", "y"],
)
tm.assert_series_equal(result, expected)
def test_setitem_empty_columns(self):
# GH 13522
df = DataFrame(index=["A", "B", "C"])
df["X"] = df.index
df["X"] = ["x", "y", "z"]
exp = DataFrame(data={"X": ["x", "y", "z"]}, index=["A", "B", "C"])
tm.assert_frame_equal(df, exp)
def test_setitem_dt64_index_empty_columns(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
df = DataFrame(index=np.arange(len(rng)))
df["A"] = rng
assert df["A"].dtype == np.dtype("M8[ns]")
def test_setitem_timestamp_empty_columns(self):
# GH#19843
df = DataFrame(index=range(3))
df["now"] = Timestamp("20130101", tz="UTC")
expected = DataFrame(
[[Timestamp("20130101", tz="UTC")]] * 3, index=[0, 1, 2], columns=["now"]
)
tm.assert_frame_equal(df, expected)
def test_setitem_wrong_length_categorical_dtype_raises(self):
# GH#29523
cat = Categorical.from_codes([0, 1, 1, 0, 1, 2], ["a", "b", "c"])
df = DataFrame(range(10), columns=["bar"])
msg = (
rf"Length of values \({len(cat)}\) "
rf"does not match length of index \({len(df)}\)"
)
with pytest.raises(ValueError, match=msg):
df["foo"] = cat
def test_setitem_with_sparse_value(self):
# GH#8131
df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_array = SparseArray([0, 0, 1])
df["new_column"] = sp_array
expected = Series(sp_array, name="new_column")
tm.assert_series_equal(df["new_column"], expected)
def test_setitem_with_unaligned_sparse_value(self):
df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_series = Series(SparseArray([0, 0, 1]), index=[2, 1, 0])
df["new_column"] = sp_series
expected = Series(SparseArray([1, 0, 0]), name="new_column")
tm.assert_series_equal(df["new_column"], expected)
def test_setitem_dict_preserves_dtypes(self):
# https://github.com/pandas-dev/pandas/issues/34573
expected = DataFrame(
{
"a": Series([0, 1, 2], dtype="int64"),
"b": Series([1, 2, 3], dtype=float),
"c": Series([1, 2, 3], dtype=float),
}
)
df = DataFrame(
{
"a": Series([], dtype="int64"),
"b": Series([], dtype=float),
"c": Series([], dtype=float),
}
)
for idx, b in enumerate([1, 2, 3]):
df.loc[df.shape[0]] = {"a": int(idx), "b": float(b), "c": float(b)}
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"obj,dtype",
[
(Period("2020-01"), PeriodDtype("M")),
(Interval(left=0, right=5), IntervalDtype("int64", "right")),
(
Timestamp("2011-01-01", tz="US/Eastern"),
DatetimeTZDtype(tz="US/Eastern"),
),
],
)
def test_setitem_extension_types(self, obj, dtype):
# GH: 34832
expected = DataFrame({"idx": [1, 2, 3], "obj": Series([obj] * 3, dtype=dtype)})
df = DataFrame({"idx": [1, 2, 3]})
df["obj"] = obj
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"ea_name",
[
dtype.name
for dtype in ea_registry.dtypes
# property would require instantiation
if not isinstance(dtype.name, property)
]
# mypy doesn't allow adding lists of different types
# https://github.com/python/mypy/issues/5492
+ ["datetime64[ns, UTC]", "period[D]"], # type: ignore[list-item]
)
def test_setitem_with_ea_name(self, ea_name):
# GH 38386
result = DataFrame([0])
result[ea_name] = [1]
expected = DataFrame({0: [0], ea_name: [1]})
tm.assert_frame_equal(result, expected)
def test_setitem_dt64_ndarray_with_NaT_and_diff_time_units(self):
# GH#7492
data_ns = np.array([1, "nat"], dtype="datetime64[ns]")
result = Series(data_ns).to_frame()
result["new"] = data_ns
expected = DataFrame({0: [1, None], "new": [1, None]}, dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
# OutOfBoundsDatetime error shouldn't occur
data_s = np.array([1, "nat"], dtype="datetime64[s]")
result["new"] = data_s
expected = DataFrame({0: [1, None], "new": [1e9, None]}, dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["h", "m", "s", "ms", "D", "M", "Y"])
def test_frame_setitem_datetime64_col_other_units(self, unit):
# Check that non-nano dt64 values get cast to dt64 on setitem
# into a not-yet-existing column
n = 100
dtype = np.dtype(f"M8[{unit}]")
vals = np.arange(n, dtype=np.int64).view(dtype)
ex_vals = vals.astype("datetime64[ns]")
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df[unit] = vals
assert df[unit].dtype == np.dtype("M8[ns]")
assert (df[unit].values == ex_vals).all()
@pytest.mark.parametrize("unit", ["h", "m", "s", "ms", "D", "M", "Y"])
def test_frame_setitem_existing_datetime64_col_other_units(self, unit):
# Check that non-nano dt64 values get cast to dt64 on setitem
# into an already-existing dt64 column
n = 100
dtype = np.dtype(f"M8[{unit}]")
vals = np.arange(n, dtype=np.int64).view(dtype)
ex_vals = vals.astype("datetime64[ns]")
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df["dates"] = np.arange(n, dtype=np.int64).view("M8[ns]")
# We overwrite existing dt64 column with new, non-nano dt64 vals
df["dates"] = vals
assert (df["dates"].values == ex_vals).all()
def test_setitem_dt64tz(self, timezone_frame):
df = timezone_frame
idx = df["B"].rename("foo")
# setitem
df["C"] = idx
tm.assert_series_equal(df["C"], Series(idx, name="C"))
df["D"] = "foo"
df["D"] = idx
tm.assert_series_equal(df["D"], Series(idx, name="D"))
del df["D"]
# assert that A & C are not sharing the same base (e.g. they
# are copies)
v1 = df._mgr.arrays[1]
v2 = df._mgr.arrays[2]
tm.assert_extension_array_equal(v1, v2)
v1base = v1._data.base
v2base = v2._data.base
assert v1base is None or (id(v1base) != id(v2base))
# with nan
df2 = df.copy()
df2.iloc[1, 1] = NaT
df2.iloc[1, 2] = NaT
result = df2["B"]
tm.assert_series_equal(notna(result), Series([True, False, True], name="B"))
tm.assert_series_equal(df2.dtypes, df.dtypes)
def test_setitem_periodindex(self):
rng = period_range("1/1/2000", periods=5, name="index")
df = DataFrame(np.random.randn(5, 3), index=rng)
df["Index"] = rng
rs = Index(df["Index"])
tm.assert_index_equal(rs, rng, check_names=False)
assert rs.name == "Index"
assert rng.name == "index"
rs = df.reset_index().set_index("index")
assert isinstance(rs.index, PeriodIndex)
tm.assert_index_equal(rs.index, rng)
def test_setitem_complete_column_with_array(self):
# GH#37954
df = DataFrame({"a": ["one", "two", "three"], "b": [1, 2, 3]})
arr = np.array([[1, 1], [3, 1], [5, 1]])
df[["c", "d"]] = arr
expected = DataFrame(
{
"a": ["one", "two", "three"],
"b": [1, 2, 3],
"c": [1, 3, 5],
"d": [1, 1, 1],
}
)
expected["c"] = expected["c"].astype(arr.dtype)
expected["d"] = expected["d"].astype(arr.dtype)
assert expected["c"].dtype == arr.dtype
assert expected["d"].dtype == arr.dtype
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("dtype", ["f8", "i8", "u8"])
def test_setitem_bool_with_numeric_index(self, dtype):
# GH#36319
cols = Index([1, 2, 3], dtype=dtype)
df = DataFrame(np.random.randn(3, 3), columns=cols)
df[False] = ["a", "b", "c"]
expected_cols = Index([1, 2, 3, False], dtype=object)
if dtype == "f8":
expected_cols = Index([1.0, 2.0, 3.0, False], dtype=object)
tm.assert_index_equal(df.columns, expected_cols)
@pytest.mark.parametrize("indexer", ["B", ["B"]])
def test_setitem_frame_length_0_str_key(self, indexer):
# GH#38831
df = DataFrame(columns=["A", "B"])
other = DataFrame({"B": [1, 2]})
df[indexer] = other
expected = DataFrame({"A": [np.nan] * 2, "B": [1, 2]})
expected["A"] = expected["A"].astype("object")
tm.assert_frame_equal(df, expected)
def test_setitem_frame_duplicate_columns(self, using_array_manager):
# GH#15695
cols = ["A", "B", "C"] * 2
df = DataFrame(index=range(3), columns=cols)
df.loc[0, "A"] = (0, 3)
df.loc[:, "B"] = (1, 4)
df["C"] = (2, 5)
expected = DataFrame(
[
[0, 1, 2, 3, 4, 5],
[np.nan, 1, 2, np.nan, 4, 5],
[np.nan, 1, 2, np.nan, 4, 5],
],
dtype="object",
)
if using_array_manager:
# setitem replaces column so changes dtype
expected.columns = cols
expected["C"] = expected["C"].astype("int64")
# TODO(ArrayManager) .loc still overwrites
expected["B"] = expected["B"].astype("int64")
else:
# set these with unique columns to be extra-unambiguous
expected[2] = expected[2].astype(np.int64)
expected[5] = expected[5].astype(np.int64)
expected.columns = cols
tm.assert_frame_equal(df, expected)
def test_setitem_frame_duplicate_columns_size_mismatch(self):
# GH#39510
cols = ["A", "B", "C"] * 2
df = DataFrame(index=range(3), columns=cols)
with pytest.raises(ValueError, match="Columns must be same length as key"):
df[["A"]] = (0, 3, 5)
df2 = df.iloc[:, :3] # unique columns
with pytest.raises(ValueError, match="Columns must be same length as key"):
df2[["A"]] = (0, 3, 5)
@pytest.mark.parametrize("cols", [["a", "b", "c"], ["a", "a", "a"]])
def test_setitem_df_wrong_column_number(self, cols):
# GH#38604
df = DataFrame([[1, 2, 3]], columns=cols)
rhs = DataFrame([[10, 11]], columns=["d", "e"])
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
df["a"] = rhs
def test_setitem_listlike_indexer_duplicate_columns(self):
# GH#38604
df = DataFrame([[1, 2, 3]], columns=["a", "b", "b"])
rhs = DataFrame([[10, 11, 12]], columns=["a", "b", "b"])
df[["a", "b"]] = rhs
expected = DataFrame([[10, 11, 12]], columns=["a", "b", "b"])
tm.assert_frame_equal(df, expected)
df[["c", "b"]] = rhs
expected = DataFrame([[10, 11, 12, 10]], columns=["a", "b", "b", "c"])
tm.assert_frame_equal(df, expected)
def test_setitem_listlike_indexer_duplicate_columns_not_equal_length(self):
# GH#39403
df = DataFrame([[1, 2, 3]], columns=["a", "b", "b"])
rhs = DataFrame([[10, 11]], columns=["a", "b"])
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
df[["a", "b"]] = rhs
def test_setitem_intervals(self):
df = DataFrame({"A": range(10)})
ser = cut(df["A"], 5)
assert isinstance(ser.cat.categories, IntervalIndex)
# B & D end up as Categoricals
# the remainder are converted to in-line objects
# containing an IntervalIndex.values
df["B"] = ser
df["C"] = np.array(ser)
df["D"] = ser.values
df["E"] = np.array(ser.values)
df["F"] = ser.astype(object)
assert is_categorical_dtype(df["B"].dtype)
assert is_interval_dtype(df["B"].cat.categories)
assert is_categorical_dtype(df["D"].dtype)
assert is_interval_dtype(df["D"].cat.categories)
# These go through the Series constructor and so get inferred back
# to IntervalDtype
assert is_interval_dtype(df["C"])
assert is_interval_dtype(df["E"])
# But the Series constructor doesn't do inference on Series objects,
# so setting df["F"] doesn't get cast back to IntervalDtype
assert is_object_dtype(df["F"])
# they compare equal as Index
# when converted to numpy objects
c = lambda x: Index(np.array(x))
tm.assert_index_equal(c(df.B), c(df.B))
tm.assert_index_equal(c(df.B), c(df.C), check_names=False)
tm.assert_index_equal(c(df.B), c(df.D), check_names=False)
tm.assert_index_equal(c(df.C), c(df.D), check_names=False)
# B & D are the same Series
tm.assert_series_equal(df["B"], df["B"])
tm.assert_series_equal(df["B"], df["D"], check_names=False)
# C & E are the same Series
tm.assert_series_equal(df["C"], df["C"])
tm.assert_series_equal(df["C"], df["E"], check_names=False)
def test_setitem_categorical(self):
# GH#35369
df = DataFrame({"h": Series(list("mn")).astype("category")})
df.h = df.h.cat.reorder_categories(["n", "m"])
expected = DataFrame(
{"h": Categorical(["m", "n"]).reorder_categories(["n", "m"])}
)
tm.assert_frame_equal(df, expected)
def test_setitem_with_empty_listlike(self):
# GH#17101
index = Index([], name="idx")
result = DataFrame(columns=["A"], index=index)
result["A"] = []
expected = DataFrame(columns=["A"], index=index)
tm.assert_index_equal(result.index, expected.index)
@pytest.mark.parametrize(
"cols, values, expected",
[
(["C", "D", "D", "a"], [1, 2, 3, 4], 4), # with duplicates
(["D", "C", "D", "a"], [1, 2, 3, 4], 4), # mixed order
(["C", "B", "B", "a"], [1, 2, 3, 4], 4), # other duplicate cols
(["C", "B", "a"], [1, 2, 3], 3), # no duplicates
(["B", "C", "a"], [3, 2, 1], 1), # alphabetical order
(["C", "a", "B"], [3, 2, 1], 2), # in the middle
],
)
def test_setitem_same_column(self, cols, values, expected):
# GH#23239
df = DataFrame([values], columns=cols)
df["a"] = df["a"]
result = df["a"].values[0]
assert result == expected
def test_setitem_multi_index(self):
# GH#7655, test that assigning to a sub-frame of a frame
# with multi-index columns aligns both rows and columns
it = ["jim", "joe", "jolie"], ["first", "last"], ["left", "center", "right"]
cols = MultiIndex.from_product(it)
index = date_range("20141006", periods=20)
vals = np.random.randint(1, 1000, (len(index), len(cols)))
df = DataFrame(vals, columns=cols, index=index)
i, j = df.index.values.copy(), it[-1][:]
np.random.shuffle(i)
df["jim"] = df["jolie"].loc[i, ::-1]
tm.assert_frame_equal(df["jim"], df["jolie"])
np.random.shuffle(j)
df[("joe", "first")] = df[("jolie", "last")].loc[i, j]
tm.assert_frame_equal(df[("joe", "first")], df[("jolie", "last")])
np.random.shuffle(j)
df[("joe", "last")] = df[("jolie", "first")].loc[i, j]
tm.assert_frame_equal(df[("joe", "last")], df[("jolie", "first")])
@pytest.mark.parametrize(
"columns,box,expected",
[
(
["A", "B", "C", "D"],
7,
DataFrame(
[[7, 7, 7, 7], [7, 7, 7, 7], [7, 7, 7, 7]],
columns=["A", "B", "C", "D"],
),
),
(
["C", "D"],
[7, 8],
DataFrame(
[[1, 2, 7, 8], [3, 4, 7, 8], [5, 6, 7, 8]],
columns=["A", "B", "C", "D"],
),
),
(
["A", "B", "C"],
np.array([7, 8, 9], dtype=np.int64),
DataFrame([[7, 8, 9], [7, 8, 9], [7, 8, 9]], columns=["A", "B", "C"]),
),
(
["B", "C", "D"],
[[7, 8, 9], [10, 11, 12], [13, 14, 15]],
DataFrame(
[[1, 7, 8, 9], [3, 10, 11, 12], [5, 13, 14, 15]],
columns=["A", "B", "C", "D"],
),
),
(
["C", "A", "D"],
np.array([[7, 8, 9], [10, 11, 12], [13, 14, 15]], dtype=np.int64),
DataFrame(
[[8, 2, 7, 9], [11, 4, 10, 12], [14, 6, 13, 15]],
columns=["A", "B", "C", "D"],
),
),
(
["A", "C"],
DataFrame([[7, 8], [9, 10], [11, 12]], columns=["A", "C"]),
DataFrame(
[[7, 2, 8], [9, 4, 10], [11, 6, 12]], columns=["A", "B", "C"]
),
),
],
)
def test_setitem_list_missing_columns(self, columns, box, expected):
# GH#29334
df = DataFrame([[1, 2], [3, 4], [5, 6]], columns=["A", "B"])
df[columns] = box
tm.assert_frame_equal(df, expected)
def test_setitem_list_of_tuples(self, float_frame):
tuples = list(zip(float_frame["A"], float_frame["B"]))
float_frame["tuples"] = tuples
result = float_frame["tuples"]
expected = Series(tuples, index=float_frame.index, name="tuples")
tm.assert_series_equal(result, expected)
def test_setitem_iloc_generator(self):
# GH#39614
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
indexer = (x for x in [1, 2])
df.iloc[indexer] = 1
expected = DataFrame({"a": [1, 1, 1], "b": [4, 1, 1]})
tm.assert_frame_equal(df, expected)
def test_setitem_iloc_two_dimensional_generator(self):
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
indexer = (x for x in [1, 2])
df.iloc[indexer, 1] = 1
expected = DataFrame({"a": [1, 2, 3], "b": [4, 1, 1]})
tm.assert_frame_equal(df, expected)
class TestSetitemTZAwareValues:
@pytest.fixture
def idx(self):
naive = DatetimeIndex(["2013-1-1 13:00", "2013-1-2 14:00"], name="B")
idx = naive.tz_localize("US/Pacific")
return idx
@pytest.fixture
def expected(self, idx):
expected = Series(np.array(idx.tolist(), dtype="object"), name="B")
assert expected.dtype == idx.dtype
return expected
def test_setitem_dt64series(self, idx, expected):
# convert to utc
df = DataFrame(np.random.randn(2, 1), columns=["A"])
df["B"] = idx
with tm.assert_produces_warning(FutureWarning) as m:
df["B"] = idx.to_series(keep_tz=False, index=[0, 1])
msg = "do 'idx.tz_convert(None)' before calling"
assert msg in str(m[0].message)
result = df["B"]
comp = Series(idx.tz_convert("UTC").tz_localize(None), name="B")
tm.assert_series_equal(result, comp)
def test_setitem_datetimeindex(self, idx, expected):
# setting a DataFrame column with a tzaware DTI retains the dtype
df = DataFrame(np.random.randn(2, 1), columns=["A"])
# assign to frame
df["B"] = idx
result = df["B"]
tm.assert_series_equal(result, expected)
def test_setitem_object_array_of_tzaware_datetimes(self, idx, expected):
# setting a DataFrame column with a tzaware DTI retains the dtype
df = DataFrame(np.random.randn(2, 1), columns=["A"])
# object array of datetimes with a tz
df["B"] = idx.to_pydatetime()
result = df["B"]
tm.assert_series_equal(result, expected)
class TestDataFrameSetItemWithExpansion:
# TODO(ArrayManager) update parent (_maybe_update_cacher)
@td.skip_array_manager_not_yet_implemented
def test_setitem_listlike_views(self):
# GH#38148
df = DataFrame({"a": [1, 2, 3], "b": [4, 4, 6]})
# get one column as a view of df
ser = df["a"]
# add columns with list-like indexer
df[["c", "d"]] = np.array([[0.1, 0.2], [0.3, 0.4], [0.4, 0.5]])
# edit in place the first column to check view semantics
df.iloc[0, 0] = 100
expected = Series([100, 2, 3], name="a")
tm.assert_series_equal(ser, expected)
def test_setitem_string_column_numpy_dtype_raising(self):
# GH#39010
df = DataFrame([[1, 2], [3, 4]])
df["0 - Name"] = [5, 6]
expected = DataFrame([[1, 2, 5], [3, 4, 6]], columns=[0, 1, "0 - Name"])
tm.assert_frame_equal(df, expected)
def test_setitem_empty_df_duplicate_columns(self):
# GH#38521
df = DataFrame(columns=["a", "b", "b"], dtype="float64")
df.loc[:, "a"] = list(range(2))
expected = DataFrame(
[[0, np.nan, np.nan], [1, np.nan, np.nan]], columns=["a", "b", "b"]
)
tm.assert_frame_equal(df, expected)
def test_setitem_with_expansion_categorical_dtype(self):
# assignment
df = DataFrame(
{"value": np.array(np.random.randint(0, 10000, 100), dtype="int32")}
)
labels = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)])
df = df.sort_values(by=["value"], ascending=True)
ser = cut(df.value, range(0, 10500, 500), right=False, labels=labels)
cat = ser.values
# setting with a Categorical
df["D"] = cat
str(df)
result = df.dtypes
expected = Series(
[np.dtype("int32"), CategoricalDtype(categories=labels, ordered=False)],
index=["value", "D"],
)
|
tm.assert_series_equal(result, expected)
|
pandas._testing.assert_series_equal
|
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA, IncrementalPCA
from sklearn.manifold import TSNE
from sklearn.decomposition import TruncatedSVD
from data_describe.compat import _is_dataframe, _compat, _requires
from data_describe.backends._backends import _get_compute_backend
def dim_reduc(
data,
n_components: int,
dim_method: str,
apply_tsvd: bool = True,
compute_backend=None,
):
"""Reduces the number of dimensions of the input data.
Args:
data: The dataframe
n_components: Desired dimensionality for the data set prior to modeling
dim_method: {'pca', 'ipca', 'tsne', 'tsvd'}
- pca: Principal Component Analysis
- ipca: Incremental Principal Component Analysis. Highly suggested for very large datasets
- tsne: T-distributed Stochastic Neighbor Embedding
- tsvd: Truncated Singular Value Decomposition
apply_tsvd: If True, TSVD will be run before t-SNE. This is highly recommended when running t-SNE
Returns:
The dimensionally-reduced dataframe and reduction object
"""
if not _is_dataframe(data):
raise ValueError("Data must be a Pandas (or Modin) DataFrame")
if dim_method == "pca":
reduc_df, reductor = run_pca(data, n_components, compute_backend)
elif dim_method == "ipca":
reduc_df, reductor = run_ipca(data, n_components, compute_backend)
elif dim_method == "tsne":
reduc_df, reductor = run_tsne(data, n_components, apply_tsvd, compute_backend)
elif dim_method == "tsvd":
reduc_df, reductor = run_tsvd(data, n_components, compute_backend)
else:
raise NotImplementedError("{} is not supported".format(dim_method))
return reduc_df, reductor
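# Usage sketch (illustrative only, not part of the original module): the random
# DataFrame and the expected output shape below are assumptions for demonstration,
# and the call relies on the package's compute backend being available.
def _example_dim_reduc_usage():
    """Reduce a synthetic 10-column frame to 2 components with the 'pca' method."""
    df = pd.DataFrame(np.random.rand(100, 10), columns=[f"col_{i}" for i in range(10)])
    reduc_df, reductor = dim_reduc(df, n_components=2, dim_method="pca")
    return reduc_df.shape  # expected to be (100, 2)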
def run_pca(data, n_components, compute_backend=None):
"""Reduces the number of dimensions of the input data using PCA.
Args:
data: The dataframe
n_components: Desired dimensionality for the data set prior to modeling
Returns:
reduc_df: The dimensionally-reduced dataframe
pca: The applied PCA object
"""
fname = ["component_{}".format(i) for i in range(1, n_components + 1)]
return _get_compute_backend(compute_backend, data).compute_run_pca(
data, n_components, column_names=fname
)
def run_ipca(data, n_components, compute_backend=None):
"""Reduces the number of dimensions of the input data using Incremental PCA.
Args:
data: The dataframe
n_components: Desired dimensionality for the data set prior to modeling
Returns:
reduc_df: The dimensionally-reduced dataframe
ipca: The applied IncrementalPCA object
"""
fname = ["component_{}".format(i) for i in range(1, n_components + 1)]
return _get_compute_backend(compute_backend, data).compute_run_ipca(
data, n_components, column_names=fname
)
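# Sketch of why IncrementalPCA is suggested for very large datasets (illustrative
# only, not part of the original module): partial_fit lets the model be updated
# batch by batch, so the full matrix never needs to fit in memory at once.
def _example_incremental_pca_batches(chunks, n_components=2):
    """Fit an IncrementalPCA over an iterable of array-like batches and return it."""
    ipca = IncrementalPCA(n_components=n_components)
    for chunk in chunks:
        ipca.partial_fit(chunk)  # incrementally update the components with each batch
    return ipca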
def run_tsne(data, n_components, apply_tsvd=True, compute_backend=None):
"""Reduces the number of dimensions of the input data using t-SNE.
Args:
data: The dataframe
n_components: Desired dimensionality for the output dataset
apply_tsvd: If True, TSVD will be run before t-SNE. This is highly recommended when running t-SNE
Returns:
reduc_df: The dimensionally-reduced dataframe
tsne: The applied t-SNE object
"""
return _get_compute_backend(compute_backend, data).compute_run_tsne(
data, n_components, apply_tsvd
)
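# Sketch of the TSVD-before-t-SNE preprocessing that apply_tsvd refers to
# (illustrative only, not part of the original module): reducing to ~50 dimensions
# first keeps t-SNE tractable on wide inputs.
def _example_tsvd_then_tsne(data, n_components=2, svd_components=50):
    """Run TruncatedSVD as a preprocessing step, then t-SNE on its output."""
    svd = TruncatedSVD(n_components=min(svd_components, data.shape[1] - 1))
    reduced = svd.fit_transform(data)
    tsne = TSNE(n_components=n_components)
    return pd.DataFrame(tsne.fit_transform(reduced)), tsne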
def run_tsvd(data, n_components, compute_backend=None):
"""Reduces the number of dimensions of the input data using TSVD.
Args:
data: The dataframe
n_components: Desired dimensionality for the output dataset
Returns:
reduc_df: The dimensionally-reduced dataframe
        tsvd: The applied TSVD object
"""
fname = ["component_{}".format(i) for i in range(1, n_components + 1)]
return _get_compute_backend(compute_backend, data).compute_run_tsvd(
data, n_components, column_names=fname
)
def _pandas_compute_run_pca(data, n_components, column_names):
"""Performs PCA on the provided dataset.
Args:
data: The dataframe
n_components: Desired dimensionality for the output dataset
column_names: Names for the columns in the output dataset
Returns:
The dimensionally-reduced Pandas dataframe
pca: The applied PCA object
"""
pca = PCA(n_components)
reduc = pca.fit_transform(data)
return pd.DataFrame(reduc, columns=column_names), pca
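# Usage sketch for the pandas PCA backend (illustrative only; the random input and
# component names below are made-up values):
def _example_pandas_compute_run_pca():
    data = pd.DataFrame(np.random.rand(50, 4), columns=list("abcd"))
    reduc_df, pca = _pandas_compute_run_pca(data, 2, ["component_1", "component_2"])
    # explained_variance_ratio_ reports the share of variance captured per component
    return reduc_df.head(), pca.explained_variance_ratio_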
def _pandas_compute_run_ipca(data, n_components, column_names):
"""Performs Incremental PCA on the provided dataset.
Args:
data: The dataframe
n_components: Desired dimensionality for the output dataset
column_names: Names for the columns in the output dataset
Returns:
The dimensionally-reduced Pandas dataframe
ipca: The applied IncrementalPCA object
"""
ipca = IncrementalPCA(n_components)
reduc = ipca.fit_transform(data)
return
|
pd.DataFrame(reduc, columns=column_names)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
import pandas
import numpy
import sys
import unittest
from datetime import datetime
from pandas.testing import assert_frame_equal, assert_series_equal
import os
import copy
sys.path.append("..")
import warnings
import nPYc
from nPYc.enumerations import SampleType
from nPYc.enumerations import AssayRole
from nPYc.enumerations import VariableType
from generateTestDataset import generateTestDataset
import tempfile
from isatools import isatab
class test_msdataset_synthetic(unittest.TestCase):
"""
Test MSDataset object functions with synthetic data
"""
def setUp(self):
self.msData = nPYc.MSDataset('', fileType='empty')
self.msData.sampleMetadata = pandas.DataFrame(
{'Sample File Name': ['Unittest_file_001', 'Unittest_file_002', 'Unittest_file_003'],
'Sample Base Name': ['Unittest_file_001', 'Unittest_file_002', 'Unittest_file_003'],
'AssayRole': [AssayRole.Assay, AssayRole.PrecisionReference, AssayRole.PrecisionReference],
'SampleType': [SampleType.StudySample, SampleType.StudyPool, SampleType.ExternalReference],
'Sample Name': ['Sample1', 'Sample2', 'Sample3'], 'Acqu Date': ['26-May-17', '26-May-17', '26-May-17'],
'Acqu Time': ['16:42:57', '16:58:49', '17:14:41'], 'Vial': ['1:A,1', '1:A,2', '1:A,3'],
'Instrument': ['XEVO-TOF#UnitTest', 'XEVO-TOF#UnitTest', 'XEVO-TOF#UnitTest'],
'Acquired Time': [datetime(2017, 5, 26, 16, 42, 57), datetime(2017, 5, 26, 16, 58, 49),
datetime(2017, 5, 26, 17, 14, 41)], 'Run Order': [0, 1, 2], 'Batch': [1, 1, 2],
'Correction Batch': [numpy.nan, 1, 2], 'Matrix': ['U', 'U', 'U'],
'Subject ID': ['subject1', 'subject1', 'subject2'], 'Sample ID': ['sample1', 'sample2', 'sample3'],
'Dilution': [numpy.nan, '60.0', '100.0'],'Exclusion Details': ['','','']})
self.msData.featureMetadata = pandas.DataFrame(
{'Feature Name': ['Feature1', 'Feature2', 'Feature3'], 'Retention Time': [6.2449, 2.7565, 5.0564],
'm/z': [249.124281, 381.433191, 471.132083]})
self.msData.featureMetadata['Exclusion Details'] = None
self.msData.featureMetadata['User Excluded'] = False
self.msData.featureMetadata[['rsdFilter', 'varianceRatioFilter', 'correlationToDilutionFilter', 'blankFilter',
'artifactualFilter']] = pandas.DataFrame([[True, True, True, True, True]],
index=self.msData.featureMetadata.index)
self.msData.featureMetadata[['rsdSP', 'rsdSS/rsdSP', 'correlationToDilution', 'blankValue']] \
= pandas.DataFrame([[numpy.nan, numpy.nan, numpy.nan, numpy.nan]], index=self.msData.featureMetadata.index)
self.msData._intensityData = numpy.array([[10.2, 20.95, 30.37], [10.1, 20.03, 30.74], [3.065, 15.83, 30.16]])
# Attributes
self.msData.Attributes['FeatureExtractionSoftware'] = 'UnitTestSoftware'
# excluded data
self.msData.sampleMetadataExcluded = []
self.msData.intensityDataExcluded = []
self.msData.featureMetadataExcluded = []
self.msData.excludedFlag = []
self.msData.sampleMetadataExcluded.append(self.msData.sampleMetadata[[True, False, False]])
self.msData.intensityDataExcluded.append(self.msData._intensityData[0, :])
self.msData.featureMetadataExcluded.append(self.msData.featureMetadata)
self.msData.excludedFlag.append('Samples')
self.msData.featureMetadataExcluded.append(self.msData.featureMetadata[[True, False, False]])
self.msData.intensityDataExcluded.append(self.msData._intensityData[:, 0])
self.msData.sampleMetadataExcluded.append(self.msData.sampleMetadata)
self.msData.excludedFlag.append('Features')
# finish
self.msData.VariableType = VariableType.Discrete
self.msData.initialiseMasks()
def test_rsd_raises(self):
msData = nPYc.MSDataset('', fileType='empty')
with self.subTest(msg='No reference samples'):
msData.sampleMetadata = pandas.DataFrame(None)
with self.assertRaises(ValueError):
msData.rsdSP
with self.subTest(msg='Only one reference sample'):
msData.sampleMetadata = pandas.DataFrame([[nPYc.enumerations.AssayRole.PrecisionReference, nPYc.enumerations.SampleType.StudyPool]], columns=['AssayRole', 'SampleType'])
with self.assertRaises(ValueError):
msData.rsdSP
def test_getsamplemetadatafromfilename(self):
"""
Test we are parsing NPC MS filenames correctly (PCSOP.081).
"""
# Create an empty object with simple filenames
msData = nPYc.MSDataset('', fileType='empty')
msData.sampleMetadata['Sample File Name'] = ['Test1_HPOS_ToF01_P1W02',
'Test2_RPOS_ToF02_U2W03',
'Test3_RNEG_ToF03_S3W04',
'Test4_LPOS_ToF04_P4W05_LTR',
'Test5_LNEG_ToF05_U5W06_SR',
'Test6_HPOS_ToF06_S4W05_MR',
'Test1_HPOS_ToF01_P1W02_x',
'Test2_RPOS_ToF02_U2W03_b',
'Test3_RNEG_ToF03_S3W04_2',
'Test4_RPOS_ToF04_B1S1_SR_q',
'Test5_LPOS_ToF05_B2E2_SR',
'Test6_LNEG_ToF06_B3SRD01_9',
'Test1_HPOS_ToF06_Blank01',
'Test1_HPOS_ToF06_IC02',
'Test1_HPOS_ToF06_EIC21']
msData._getSampleMetadataFromFilename(msData.Attributes['filenameSpec'])
##
# Check basename
##
basename = pandas.Series(['Test1_HPOS_ToF01_P1W02',
'Test2_RPOS_ToF02_U2W03',
'Test3_RNEG_ToF03_S3W04',
'Test4_LPOS_ToF04_P4W05_LTR',
'Test5_LNEG_ToF05_U5W06_SR',
'Test6_HPOS_ToF06_S4W05_MR',
'Test1_HPOS_ToF01_P1W02',
'Test2_RPOS_ToF02_U2W03',
'Test3_RNEG_ToF03_S3W04',
'Test4_RPOS_ToF04_B1S1_SR',
'Test5_LPOS_ToF05_B2E2_SR',
'Test6_LNEG_ToF06_B3SRD01',
'Test1_HPOS_ToF06_Blank01',
'Test1_HPOS_ToF06_IC02',
'Test1_HPOS_ToF06_EIC21'],
name='Sample Base Name',
dtype='str')
assert_series_equal(msData.sampleMetadata['Sample Base Name'], basename)
##
# Check Study
##
study = pandas.Series(['Test1',
'Test2',
'Test3',
'Test4',
'Test5',
'Test6',
'Test1',
'Test2',
'Test3',
'Test4',
'Test5',
'Test6',
'Test1',
'Test1',
'Test1'],
name='Study',
dtype='str')
assert_series_equal(msData.sampleMetadata['Study'], study)
##
#
##
chromatography = pandas.Series(['H',
'R',
'R',
'L',
'L',
'H',
'H',
'R',
'R',
'R',
'L',
'L',
'H',
'H',
'H'],
name='Chromatography',
dtype='str')
assert_series_equal(msData.sampleMetadata['Chromatography'], chromatography)
##
#
##
ionisation = pandas.Series(['POS',
'POS',
'NEG',
'POS',
'NEG',
'POS',
'POS',
'POS',
'NEG',
'POS',
'POS',
'NEG',
'POS',
'POS',
'POS'],
name='Ionisation',
dtype='str')
assert_series_equal(msData.sampleMetadata['Ionisation'], ionisation)
##
#
##
instrument = pandas.Series(['ToF01',
'ToF02',
'ToF03',
'ToF04',
'ToF05',
'ToF06',
'ToF01',
'ToF02',
'ToF03',
'ToF04',
'ToF05',
'ToF06',
'ToF06',
'ToF06',
'ToF06'],
name='Instrument',
dtype='str')
assert_series_equal(msData.sampleMetadata['Instrument'], instrument)
##
#
##
reRun = pandas.Series(['',
'',
'',
'',
'',
'',
'',
'b',
'',
'q',
'',
'',
'',
'',
''],
name='Re-Run',
dtype='str')
assert_series_equal(msData.sampleMetadata['Re-Run'], reRun)
##
#
##
suplemental = pandas.Series(['',
'',
'',
'',
'',
'',
'',
'',
'2',
'',
'',
'9',
'',
'',
''],
name='Suplemental Injections',
dtype='str')
assert_series_equal(msData.sampleMetadata['Suplemental Injections'], suplemental)
##
#
##
skipped = pandas.Series([False,
False,
False,
False,
False,
False,
True,
False,
False,
False,
False,
False,
False,
False,
False],
name='Skipped',
dtype='bool')
assert_series_equal(msData.sampleMetadata['Skipped'], skipped)
##
#
##
matrix = pandas.Series(['P',
'U',
'S',
'P',
'U',
'S',
'P',
'U',
'S',
'',
'',
'',
'',
'',
''],
name='Matrix',
dtype='str')
assert_series_equal(msData.sampleMetadata['Matrix'], matrix)
##
#
##
well = pandas.Series([2,
3,
4,
5,
6,
5,
2,
3,
4,
1,
2,
1,
-1,
-1,
-1],
name='Well',
dtype='int')
assert_series_equal(msData.sampleMetadata['Well'], well, check_dtype=False)
self.assertEqual(msData.sampleMetadata['Well'].dtype.kind, well.dtype.kind)
##
#
##
plate = pandas.Series([1,
2,
3,
4,
5,
4,
1,
2,
3,
1,
2,
3,
1,
2,
21],
name='Plate',
dtype='int')
assert_series_equal(msData.sampleMetadata['Plate'], plate, check_dtype=False)
self.assertEqual(msData.sampleMetadata['Plate'].dtype.kind, well.dtype.kind)
##
#
##
batch = pandas.Series([numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
1.0,
2.0,
3.0,
numpy.nan,
numpy.nan,
numpy.nan],
name='Batch',
dtype='float')
assert_series_equal(msData.sampleMetadata['Batch'], batch)
##
#
##
dilution = pandas.Series([numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
1.0,
numpy.nan,
numpy.nan,
numpy.nan],
name='Dilution',
dtype='float')
assert_series_equal(msData.sampleMetadata['Dilution'], dilution)
##
#
##
assayRole = pandas.Series([AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.Assay,
AssayRole.Assay],
name='AssayRole',
dtype=object)
assert_series_equal(msData.sampleMetadata['AssayRole'], assayRole)
##
#
##
sampleType = pandas.Series([SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.ExternalReference,
SampleType.StudyPool,
SampleType.MethodReference,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.ProceduralBlank,
SampleType.StudyPool,
SampleType.StudyPool],
name='SampleType',
dtype=object)
assert_series_equal(msData.sampleMetadata['SampleType'], sampleType)
def test_updateMasks_features(self):
msData = nPYc.MSDataset('', fileType='empty')
msData.Attributes['artifactualFilter'] = True
##
# Variables:
# Good Corr, Good RSD
# Poor Corr, Good RSD
# Good Corr, Poor RSD
# Poor Corr, Poor RSD
# Good Corr, Good RSD, below blank
##
msData.intensityData = numpy.array([[100, 23, 99, 51, 100],
[90, 54, 91, 88, 91],
[50, 34, 48, 77, 49],
[10, 66, 11, 56, 11],
[1, 12, 2, 81, 2],
[50, 51, 2, 12, 49],
[51, 47, 1, 100, 50],
[47, 50, 70, 21, 48],
[51, 49, 77, 91, 50],
[48, 49, 12, 2, 49],
[50, 48, 81, 2, 51],
[54, 53, 121, 52, 53],
[57, 49, 15, 51, 56],
[140, 41, 97, 47, 137],
[52, 60, 42, 60, 48],
[12, 48, 8, 56, 12],
[1, 2, 1, 1.21, 51],
[2, 1, 1.3, 1.3, 63]],
dtype=float)
msData.sampleMetadata = pandas.DataFrame(data=[[100, 1, 1, 1, AssayRole.LinearityReference, SampleType.StudyPool],
[90, 1, 1, 2, AssayRole.LinearityReference, SampleType.StudyPool],
[50, 1, 1, 3, AssayRole.LinearityReference, SampleType.StudyPool],
[10, 1, 1, 4, AssayRole.LinearityReference, SampleType.StudyPool],
[1, 1, 1, 5, AssayRole.LinearityReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.Assay, SampleType.StudySample],
[numpy.nan, 1, 1, 1, AssayRole.Assay, SampleType.StudySample],
[numpy.nan, 1, 1, 1, AssayRole.Assay, SampleType.StudySample],
[numpy.nan, 1, 1, 1, AssayRole.Assay, SampleType.StudySample],
[numpy.nan, 1, 1, 1, AssayRole.Assay, SampleType.StudySample],
[0, 1, 1, 1, AssayRole.Assay, SampleType.ProceduralBlank],
[0, 1, 1, 1, AssayRole.Assay, SampleType.ProceduralBlank]],
columns=['Dilution', 'Batch', 'Correction Batch', 'Well', 'AssayRole', 'SampleType'])
msData.featureMetadata = pandas.DataFrame(data=[['Feature_1', 0.5, 100., 0.3],
['Feature_2', 0.55, 100.04, 0.3],
['Feature_3', 0.75, 200., 0.1],
['Feature_4', 0.9, 300., 0.1],
['Feature_5', 0.95, 300.08, 0.1]],
columns=['Feature Name','Retention Time','m/z','Peak Width'])
msData.featureMetadata['Exclusion Details'] = None
msData.featureMetadata['User Excluded'] = False
msData.featureMetadata[['rsdFilter', 'varianceRatioFilter', 'correlationToDilutionFilter', 'blankFilter',
'artifactualFilter']] = pandas.DataFrame([[True, True, True, True, True]],
index=msData.featureMetadata.index)
msData.featureMetadata[['rsdSP', 'rsdSS/rsdSP', 'correlationToDilution', 'blankValue']] \
= pandas.DataFrame([[numpy.nan, numpy.nan, numpy.nan, numpy.nan]], index=msData.featureMetadata.index)
msData.initialiseMasks()
with self.subTest(msg='Default Parameters'):
expectedFeatureMask = numpy.array([True, False, False, False, False], dtype=bool)
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':True})
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='Lax RSD threshold'):
expectedFeatureMask = numpy.array([True, False, True, False, False], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':True}, **dict(rsdThreshold=90, varianceRatio=0.1, corrThreshold=0.7))
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='Lax correlation threshold'):
expectedFeatureMask = numpy.array([True, True, False, False, False], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter': True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':True}, **dict(rsdThreshold=30, varianceRatio=1.1, corrThreshold=0))
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='High variance ratio'):
expectedFeatureMask = numpy.array([False, False, False, False, False], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':True}, **dict(rsdThreshold=30, varianceRatio=100, corrThreshold=0.7))
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='Lax blank filter'):
expectedFeatureMask = numpy.array([True, False, False, False, True], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':True}, **dict(blankThreshold=0.5))
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='No blank filter'):
expectedFeatureMask = numpy.array([True, False, False, False, True], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':False})
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='Default withArtifactualFiltering'):
expectedTempArtifactualLinkageMatrix = pandas.DataFrame(data=[[0,1],[3,4]],columns=['node1','node2'])
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': True,'blankFilter':True})
assert_frame_equal(expectedTempArtifactualLinkageMatrix, msData._tempArtifactualLinkageMatrix)
with self.subTest(msg='Altered withArtifactualFiltering parameters'):
expectedArtifactualLinkageMatrix = pandas.DataFrame(data=[[0,1]],columns=['node1','node2'])
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter': True, 'correlationToDilutionFilter': True, 'varianceRatioFilter': True,
'artifactualFilter': True,'blankFilter':True}, **dict(deltaMzArtifactual=300,
overlapThresholdArtifactual=0.1,
corrThresholdArtifactual=0.2))
self.assertEqual(msData.Attributes['filterParameters']['deltaMzArtifactual'], 300)
self.assertEqual(msData.Attributes['filterParameters']['overlapThresholdArtifactual'], 0.1)
self.assertEqual(msData.Attributes['filterParameters']['corrThresholdArtifactual'], 0.2)
assert_frame_equal(expectedArtifactualLinkageMatrix, msData._artifactualLinkageMatrix)
with self.subTest(msg='withArtifactualFiltering=None, Attribute[artifactualFilter]=False'):
msData2 = copy.deepcopy(msData)
msData2.Attributes['artifactualFilter'] = False
expectedFeatureMask = numpy.array([True, False, False, False, False], dtype=bool)
msData2.initialiseMasks()
msData2.updateMasks(featureFilters={'rsdFilter': True, 'correlationToDilutionFilter': True, 'varianceRatioFilter': True,
'artifactualFilter': False, 'blankFilter': True})
numpy.testing.assert_array_equal(expectedFeatureMask, msData2.featureMask)
with self.subTest(msg='withArtifactualFiltering=None, Attribute[artifactualFilter]=True'):
msData2 = copy.deepcopy(msData)
msData2.Attributes['artifactualFilter'] = True
expectedTempArtifactualLinkageMatrix = pandas.DataFrame(data=[[0, 1], [3, 4]], columns=['node1', 'node2'])
msData2.initialiseMasks()
msData2.updateMasks(featureFilters={'rsdFilter': True, 'correlationToDilutionFilter': True, 'varianceRatioFilter': True,
'artifactualFilter': True,'blankFilter':True})
assert_frame_equal(expectedTempArtifactualLinkageMatrix, msData2._tempArtifactualLinkageMatrix)
def test_updateMasks_samples(self):
from nPYc.enumerations import VariableType, DatasetLevel, AssayRole, SampleType
msData = nPYc.MSDataset('', fileType='empty')
msData.intensityData = numpy.zeros([18, 5],dtype=float)
msData.sampleMetadata['AssayRole'] = pandas.Series([AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference],
name='AssayRole',
dtype=object)
msData.sampleMetadata['SampleType'] = pandas.Series([SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.ExternalReference,
SampleType.MethodReference],
name='SampleType',
dtype=object)
with self.subTest(msg='Default Parameters'):
expectedSampleMask = numpy.array([True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(withArtifactualFiltering=False, filterFeatures=False)
numpy.testing.assert_array_equal(expectedSampleMask, msData.sampleMask)
with self.subTest(msg='Export SP and ER'):
expectedSampleMask = numpy.array([False, False, False, False, False, True, True, True, True, True, True, False, False, False, False, False, True, False], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(withArtifactualFiltering=False, filterFeatures=False,
sampleTypes=[SampleType.StudyPool, SampleType.ExternalReference],
assayRoles=[AssayRole.PrecisionReference])
numpy.testing.assert_array_equal(expectedSampleMask, msData.sampleMask)
with self.subTest(msg='Export Dilution Samples only'):
expectedSampleMask = numpy.array([True, True, True, True, True, False, False, False, False, False, False, False, False, False, False, False, False, False], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(withArtifactualFiltering=False, filterFeatures=False,
sampleTypes=[SampleType.StudyPool],
assayRoles=[AssayRole.LinearityReference])
numpy.testing.assert_array_equal(expectedSampleMask, msData.sampleMask)
def test_updateMasks_raises(self):
msData = nPYc.MSDataset('', fileType='empty')
with self.subTest(msg='Correlation'):
self.assertRaises(ValueError, msData.updateMasks, **dict(corrThreshold=-1.01))
self.assertRaises(ValueError, msData.updateMasks, **dict(corrThreshold=1.01))
self.assertRaises(TypeError, msData.updateMasks, **dict(corrThreshold='0.7'))
with self.subTest(msg='RSD'):
self.assertRaises(ValueError, msData.updateMasks, **dict(rsdThreshold=-1.01))
self.assertRaises(TypeError, msData.updateMasks, **dict(rsdThreshold='30'))
with self.subTest(msg='Blanks'):
self.assertRaises(TypeError, msData.updateMasks, **dict(blankThreshold='A string'))
with self.subTest(msg='RSD'):
self.assertRaises(ValueError, msData.updateMasks, **dict(rsdThreshold=-1.01))
self.assertRaises(TypeError, msData.updateMasks, **dict(rsdThreshold='30'))
with self.subTest(msg='Variance Ratio'):
self.assertRaises(TypeError, msData.updateMasks, **dict(varianceRatio='1.1'))
with self.subTest(msg='ArtifactualParameters'):
self.assertRaises(TypeError, msData.updateMasks, featureFilters={'artifactualFilter':'A string', 'rsdFilter':False, 'blankFilter': False,
'correlationToDilutionFilter':False, 'varianceRatioFilter':False}, **dict(blankThreshold=False))
self.assertRaises(ValueError, msData.updateMasks, featureFilters={'artifactualFilter':True}, **dict(corrThresholdArtifactual=1.01, blankThreshold=False))
self.assertRaises(ValueError, msData.updateMasks, featureFilters={'artifactualFilter':True}, **dict(corrThresholdArtifactual=-0.01, blankThreshold=False))
self.assertRaises(TypeError, msData.updateMasks, featureFilters={'artifactualFilter':True}, **dict(corrThresholdArtifactual='0.7', blankThreshold=False))
self.assertRaises(TypeError, msData.updateMasks, featureFilters={'artifactualFilter':True}, **dict(deltaMzArtifactual='100', blankThreshold=False))
self.assertRaises(TypeError, msData.updateMasks, featureFilters={'artifactualFilter':True}, **dict(overlapThresholdArtifactual='0.5', blankThreshold=False))
def test_applyMasks(self):
fit = numpy.random.randn(self.msData.noSamples, self.msData.noFeatures)
self.msData.fit = copy.deepcopy(fit)
deletedFeatures = numpy.random.randint(0, self.msData.noFeatures, size=2)
self.msData.featureMask[deletedFeatures] = False
fit = numpy.delete(fit, deletedFeatures, 1)
self.msData.applyMasks()
numpy.testing.assert_array_almost_equal(self.msData.fit, fit)
def test_correlationToDilution(self):
from nPYc.utilities._internal import _vcorrcoef
noSamp = numpy.random.randint(30, high=500, size=None)
noFeat = numpy.random.randint(200, high=400, size=None)
dataset = generateTestDataset(noSamp, noFeat, dtype='MSDataset', sop='GenericMS')
dataset.sampleMetadata['SampleType'] = nPYc.enumerations.SampleType.StudyPool
dataset.sampleMetadata['AssayRole'] = nPYc.enumerations.AssayRole.LinearityReference
dataset.sampleMetadata['Well'] = 1
dataset.sampleMetadata['Dilution'] = numpy.linspace(1, noSamp, num=noSamp)
correlations = dataset.correlationToDilution
with self.subTest(msg='Checking default path'):
numpy.testing.assert_array_almost_equal(correlations, _vcorrcoef(dataset.intensityData, dataset.sampleMetadata['Dilution'].values))
with self.subTest(msg='Checking corr exclusions'):
dataset.corrExclusions = None
numpy.testing.assert_array_almost_equal(correlations, _vcorrcoef(dataset.intensityData, dataset.sampleMetadata['Dilution'].values))
def test_correlateToDilution_raises(self):
noSamp = numpy.random.randint(30, high=500, size=None)
noFeat = numpy.random.randint(200, high=400, size=None)
dataset = generateTestDataset(noSamp, noFeat, dtype='MSDataset')
with self.subTest(msg='Unknown correlation type'):
self.assertRaises(ValueError, dataset._MSDataset__correlateToDilution, method='unknown')
with self.subTest(msg='No LR samples'):
dataset.sampleMetadata['AssayRole'] = AssayRole.Assay
self.assertRaises(ValueError, dataset._MSDataset__correlateToDilution)
with self.subTest(msg='No Dilution field'):
dataset.sampleMetadata.drop(['Dilution'], axis=1, inplace=True)
self.assertRaises(KeyError, dataset._MSDataset__correlateToDilution)
def test_validateObject(self):
with self.subTest(msg='validateObject successful on correct dataset'):
goodDataset = copy.deepcopy(self.msData)
self.assertEqual(goodDataset.validateObject(verbose=False, raiseError=False, raiseWarning=True), {'Dataset': True, 'BasicMSDataset':True ,'QC':True, 'sampleMetadata':True})
with self.subTest(msg='BasicMSDataset fails on empty MSDataset'):
badDataset = nPYc.MSDataset('', fileType='empty')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset':False ,'QC':False, 'sampleMetadata':False})
with self.subTest(msg='check raise no warnings with raiseWarning=False'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['rtWindow']
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# warning
result = badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False)
# check it generally worked
self.assertEqual(result, {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
# check each warning
self.assertEqual(len(w), 0)
with self.subTest(msg='check fail and raise warnings on bad Dataset'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, 'featureMetadata')
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# warning
result = badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=True)
# check it generally worked
self.assertEqual(result, {'Dataset': False, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
# check each warning
self.assertEqual(len(w), 5)
assert issubclass(w[0].category, UserWarning)
assert "Failure, no attribute 'self.featureMetadata'" in str(w[0].message)
assert issubclass(w[1].category, UserWarning)
assert "Does not conform to Dataset:" in str(w[1].message)
assert issubclass(w[2].category, UserWarning)
assert "Does not conform to basic MSDataset" in str(w[2].message)
assert issubclass(w[3].category, UserWarning)
assert "Does not have QC parameters" in str(w[3].message)
assert issubclass(w[4].category, UserWarning)
assert "Does not have sample metadata information" in str(w[4].message)
with self.subTest(msg='check raise warnings BasicMSDataset'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['rtWindow']
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# warning
result = badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=True)
# check it generally worked
self.assertEqual(result, {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
# check each warning
self.assertEqual(len(w), 4)
assert issubclass(w[0].category, UserWarning)
assert "Failure, no attribute 'self.Attributes['rtWindow']" in str(w[0].message)
assert issubclass(w[1].category, UserWarning)
assert "Does not conform to basic MSDataset:" in str(w[1].message)
assert issubclass(w[2].category, UserWarning)
assert "Does not have QC parameters" in str(w[2].message)
assert issubclass(w[3].category, UserWarning)
assert "Does not have sample metadata information" in str(w[3].message)
with self.subTest(msg='check raise warnings QC parameters'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Batch'] = 'not an int or float'
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# warning
result = badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=True)
# check it generally worked
self.assertEqual(result, {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
# check each warning
self.assertEqual(len(w), 3)
assert issubclass(w[0].category, UserWarning)
assert "Failure, 'self.sampleMetadata['Batch']' is <class 'str'>" in str(w[0].message)
assert issubclass(w[1].category, UserWarning)
assert "Does not have QC parameters:" in str(w[1].message)
assert issubclass(w[2].category, UserWarning)
assert "Does not have sample metadata information:" in str(w[2].message)
with self.subTest(msg='check raise warnings sampleMetadata'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata.drop(['Subject ID'], axis=1, inplace=True)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# warning
result = badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=True)
# check it generally worked
self.assertEqual(result, {'Dataset': True, 'BasicMSDataset': True, 'QC': True, 'sampleMetadata': False})
# check each warning
self.assertEqual(len(w), 2)
assert issubclass(w[0].category, UserWarning)
assert "Failure, 'self.sampleMetadata' lacks a 'Subject ID' column" in str(w[0].message)
assert issubclass(w[1].category, UserWarning)
assert "Does not have sample metadata information:" in str(w[1].message)
with self.subTest(msg='self.Attributes[\'rtWindow\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['rtWindow']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'rtWindow\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['rtWindow'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'msPrecision\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['msPrecision']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'msPrecision\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['msPrecision'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'varianceRatio\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['varianceRatio']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'varianceRatio\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['varianceRatio'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'blankThreshold\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['blankThreshold']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'blankThreshold\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['blankThreshold'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'corrMethod\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['corrMethod']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'corrMethod\'] is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['corrMethod'] = 5.0
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'corrThreshold\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['corrThreshold']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'corrThreshold\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['corrThreshold'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'rsdThreshold\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['rsdThreshold']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'rsdThreshold\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['rsdThreshold'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'deltaMzArtifactual\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['deltaMzArtifactual']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'deltaMzArtifactual\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['deltaMzArtifactual'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'overlapThresholdArtifactual\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['overlapThresholdArtifactual']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'overlapThresholdArtifactual\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['overlapThresholdArtifactual'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'corrThresholdArtifactual\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['corrThresholdArtifactual']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'corrThresholdArtifactual\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['corrThresholdArtifactual'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'FeatureExtractionSoftware\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['FeatureExtractionSoftware']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'FeatureExtractionSoftware\'] is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['FeatureExtractionSoftware'] = 5.0
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'Raw Data Path\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['Raw Data Path']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'Raw Data Path\'] is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['Raw Data Path'] = 5.0
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'Feature Names\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['Feature Names']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'Feature Names\'] is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['Feature Names'] = 5.0
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.VariableType is not an enum VariableType'):
badDataset = copy.deepcopy(self.msData)
badDataset.VariableType = 'not an enum'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.corrExclusions does not exist'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, 'corrExclusions')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self._correlationToDilution does not exist'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, '_correlationToDilution')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self._correlationToDilution is not a numpy.ndarray'):
badDataset = copy.deepcopy(self.msData)
badDataset._correlationToDilution = 'not a numpy.ndarray'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self._artifactualLinkageMatrix does not exist'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, '_artifactualLinkageMatrix')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self._artifactualLinkageMatrix is not a pandas.DataFrame'):
badDataset = copy.deepcopy(self.msData)
badDataset._artifactualLinkageMatrix = 'not a pandas.DataFrame'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self._tempArtifactualLinkageMatrix does not exist'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, '_tempArtifactualLinkageMatrix')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self._tempArtifactualLinkageMatrix is not a pandas.DataFrame'):
badDataset = copy.deepcopy(self.msData)
badDataset._tempArtifactualLinkageMatrix = 'not a pandas.DataFrame'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.fileName does not exist'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, 'fileName')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.fileName is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.fileName = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.filePath does not exist'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, 'filePath')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.filePath is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.filePath = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata does not have the same number of samples as self._intensityData'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata.drop([0], axis=0, inplace=True)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Sample File Name\'] is not str'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Sample File Name'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'AssayRole\'] is not an enum \'AssayRole\''):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['AssayRole'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'SampleType\'] is not an enum \'SampleType\''):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['SampleType'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Dilution\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Dilution'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Batch\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Batch'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Correction Batch\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Correction Batch'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Run Order\'] is not an int'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Run Order'] = 'not an int'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Acquired Time\'] is not a datetime'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Acquired Time'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Sample Base Name\'] is not str'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Sample Base Name'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata does not have a Matrix column'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata.drop(['Matrix'], axis=1, inplace=True)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': True, 'sampleMetadata': False})
self.assertRaises(LookupError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Matrix\'] is not str'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Matrix'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': True, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata does not have a Subject ID column'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata.drop(['Subject ID'], axis=1, inplace=True)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': True, 'sampleMetadata': False})
self.assertRaises(LookupError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Subject ID\'] is not str'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Subject ID'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': True, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Sample ID\'] is not str'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Sample ID'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': True, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata does not have the same number of samples as self._intensityData'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata.drop([0], axis=0, inplace=True)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata[\'Feature Name\'] is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata['Feature Name'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata[\'Feature Name\'] is not unique'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata['Feature Name'] = ['Feature1','Feature1','Feature1']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata does not have a m/z column'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata.drop(['m/z'], axis=1, inplace=True)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(LookupError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata[\'m/z\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata['m/z'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata does not have a Retention Time column'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata.drop(['Retention Time'], axis=1, inplace=True)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(LookupError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata[\'Retention Time\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata['Retention Time'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMask has not been initialised'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMask = numpy.array(False, dtype=bool)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMask does not have the same number of samples as self._intensityData'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMask = numpy.squeeze(numpy.ones([5, 1], dtype=bool), axis=1)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMask has not been initialised'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMask = numpy.array(False, dtype=bool)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMask does not have the same number of samples as self._intensityData'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMask = numpy.squeeze(numpy.ones([5, 1], dtype=bool), axis=1)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
class test_msdataset_batch_inference(unittest.TestCase):
"""
Check batches are generated and amended correctly
"""
def setUp(self):
self.msData = nPYc.MSDataset('', fileType='empty')
self.msData.sampleMetadata['Sample File Name'] = ['Test_RPOS_ToF04_B1S1_SR',
'Test_RPOS_ToF04_B1S2_SR',
'Test_RPOS_ToF04_B1S3_SR',
'Test_RPOS_ToF04_B1S4_SR',
'Test_RPOS_ToF04_B1S5_SR',
'Test_RPOS_ToF04_P1W01',
'Test_RPOS_ToF04_P1W02_SR',
'Test_RPOS_ToF04_P1W03',
'Test_RPOS_ToF04_B1E1_SR',
'Test_RPOS_ToF04_B1E2_SR',
'Test_RPOS_ToF04_B1E3_SR',
'Test_RPOS_ToF04_B1E4_SR',
'Test_RPOS_ToF04_B1E5_SR',
'Test_RPOS_ToF04_B2S1_SR',
'Test_RPOS_ToF04_B2S2_SR',
'Test_RPOS_ToF04_B2S3_SR',
'Test_RPOS_ToF04_B2S4_SR',
'Test_RPOS_ToF04_B2S5_SR',
'Test_RPOS_ToF04_P2W01',
'Test_RPOS_ToF04_P2W02_SR',
'Test_RPOS_ToF04_P3W03',
'Test_RPOS_ToF04_B2S1_SR_2',
'Test_RPOS_ToF04_B2S2_SR_2',
'Test_RPOS_ToF04_B2S3_SR_2',
'Test_RPOS_ToF04_B2S4_SR_2',
'Test_RPOS_ToF04_B2S5_SR_2',
'Test_RPOS_ToF04_P3W03_b',
'Test_RPOS_ToF04_B2E1_SR',
'Test_RPOS_ToF04_B2E2_SR',
'Test_RPOS_ToF04_B2E3_SR',
'Test_RPOS_ToF04_B2E4_SR',
'Test_RPOS_ToF04_B2E5_SR',
'Test_RPOS_ToF04_B2SRD1']
self.msData.addSampleInfo(descriptionFormat='Filenames')
self.msData.sampleMetadata['Run Order'] = self.msData.sampleMetadata.index + 1
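# Note: the expected 'Correction Batch' values in the tests below are consistent with
# _fillBatches parsing the batch markers embedded in the file names above
# (e.g. 'B1S1_SR' for a batch-1 start reference, 'B1E1_SR' for a batch-1 end
# reference, and the '_2' suffix on re-acquired references opening a new
# correction batch).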
def test_fillbatches_correctionbatch(self):
self.msData._fillBatches()
correctionBatch = pandas.Series([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, numpy.nan],
name='Correction Batch',
dtype='float')
assert_series_equal(self.msData.sampleMetadata['Correction Batch'], correctionBatch)
def test_fillbatches_warns(self):
self.msData.sampleMetadata.drop('Run Order', axis=1, inplace=True)
self.assertWarnsRegex(UserWarning, r'Unable to infer batches without run order, skipping\.', self.msData._fillBatches)
def test_amendbatches(self):
"""
"""
self.msData._fillBatches()
self.msData.amendBatches(20)
correctionBatch = pandas.Series([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, 4.0,
4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, numpy.nan],
name='Correction Batch',
dtype='float')
assert_series_equal(self.msData.sampleMetadata['Correction Batch'], correctionBatch)
def test_msdataset_addsampleinfo_batches(self):
self.msData.addSampleInfo(descriptionFormat='Batches')
correctionBatch = pandas.Series([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, numpy.nan],
name='Correction Batch',
dtype='float')
assert_series_equal(self.msData.sampleMetadata['Correction Batch'], correctionBatch)
class test_msdataset_import_undefined(unittest.TestCase):
"""
Test that we raise an error when passing a fileType we don't understand.
"""
def test_raise_notimplemented(self):
self.assertRaises(NotImplementedError, nPYc.MSDataset, os.path.join('nopath'), fileType='Unknown filetype')
class test_msdataset_import_QI(unittest.TestCase):
"""
Test import from QI csv files
"""
def setUp(self):
self.msData = nPYc.MSDataset(os.path.join('..','..','npc-standard-project','Derived_Data','UnitTest1_PCSOP.069_QI.csv'), fileType='QI')
self.msData.addSampleInfo(descriptionFormat='Filenames')
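# Note: the 'Filenames' description format appears to parse acquisition metadata
# (batch, well and dilution-series position) out of each Sample File Name; the
# expected values in test_samples and test_dilutionlevels below rely on that parsing.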
def test_dimensions(self):
self.assertEqual((self.msData.noSamples, self.msData.noFeatures), (115, 4))
def test_samples(self):
samples = pandas.Series(['UnitTest1_LPOS_ToF02_B1SRD01', 'UnitTest1_LPOS_ToF02_B1SRD02',
'UnitTest1_LPOS_ToF02_B1SRD03', 'UnitTest1_LPOS_ToF02_B1SRD04',
'UnitTest1_LPOS_ToF02_B1SRD05', 'UnitTest1_LPOS_ToF02_B1SRD06',
'UnitTest1_LPOS_ToF02_B1SRD07', 'UnitTest1_LPOS_ToF02_B1SRD08',
'UnitTest1_LPOS_ToF02_B1SRD09', 'UnitTest1_LPOS_ToF02_B1SRD10',
'UnitTest1_LPOS_ToF02_B1SRD11', 'UnitTest1_LPOS_ToF02_B1SRD12',
'UnitTest1_LPOS_ToF02_B1SRD13', 'UnitTest1_LPOS_ToF02_B1SRD14',
'UnitTest1_LPOS_ToF02_B1SRD15', 'UnitTest1_LPOS_ToF02_B1SRD16',
'UnitTest1_LPOS_ToF02_B1SRD17', 'UnitTest1_LPOS_ToF02_B1SRD18',
'UnitTest1_LPOS_ToF02_B1SRD19', 'UnitTest1_LPOS_ToF02_B1SRD20',
'UnitTest1_LPOS_ToF02_B1SRD21', 'UnitTest1_LPOS_ToF02_B1SRD22',
'UnitTest1_LPOS_ToF02_B1SRD23', 'UnitTest1_LPOS_ToF02_B1SRD24',
'UnitTest1_LPOS_ToF02_B1SRD25', 'UnitTest1_LPOS_ToF02_B1SRD26',
'UnitTest1_LPOS_ToF02_B1SRD27', 'UnitTest1_LPOS_ToF02_B1SRD28',
'UnitTest1_LPOS_ToF02_B1SRD29', 'UnitTest1_LPOS_ToF02_B1SRD30',
'UnitTest1_LPOS_ToF02_B1SRD31', 'UnitTest1_LPOS_ToF02_B1SRD32',
'UnitTest1_LPOS_ToF02_B1SRD33', 'UnitTest1_LPOS_ToF02_B1SRD34',
'UnitTest1_LPOS_ToF02_B1SRD35', 'UnitTest1_LPOS_ToF02_B1SRD36',
'UnitTest1_LPOS_ToF02_B1SRD37', 'UnitTest1_LPOS_ToF02_B1SRD38',
'UnitTest1_LPOS_ToF02_B1SRD39', 'UnitTest1_LPOS_ToF02_B1SRD40',
'UnitTest1_LPOS_ToF02_B1SRD41', 'UnitTest1_LPOS_ToF02_B1SRD42',
'UnitTest1_LPOS_ToF02_B1SRD43', 'UnitTest1_LPOS_ToF02_B1SRD44',
'UnitTest1_LPOS_ToF02_B1SRD45', 'UnitTest1_LPOS_ToF02_B1SRD46',
'UnitTest1_LPOS_ToF02_B1SRD47', 'UnitTest1_LPOS_ToF02_B1SRD48',
'UnitTest1_LPOS_ToF02_B1SRD49', 'UnitTest1_LPOS_ToF02_B1SRD50',
'UnitTest1_LPOS_ToF02_B1SRD51', 'UnitTest1_LPOS_ToF02_B1SRD52',
'UnitTest1_LPOS_ToF02_B1SRD53', 'UnitTest1_LPOS_ToF02_B1SRD54',
'UnitTest1_LPOS_ToF02_B1SRD55', 'UnitTest1_LPOS_ToF02_B1SRD56',
'UnitTest1_LPOS_ToF02_B1SRD57', 'UnitTest1_LPOS_ToF02_B1SRD58',
'UnitTest1_LPOS_ToF02_B1SRD59', 'UnitTest1_LPOS_ToF02_B1SRD60',
'UnitTest1_LPOS_ToF02_B1SRD61', 'UnitTest1_LPOS_ToF02_B1SRD62',
'UnitTest1_LPOS_ToF02_B1SRD63', 'UnitTest1_LPOS_ToF02_B1SRD64',
'UnitTest1_LPOS_ToF02_B1SRD65', 'UnitTest1_LPOS_ToF02_B1SRD66',
'UnitTest1_LPOS_ToF02_B1SRD67', 'UnitTest1_LPOS_ToF02_B1SRD68',
'UnitTest1_LPOS_ToF02_B1SRD69', 'UnitTest1_LPOS_ToF02_B1SRD70',
'UnitTest1_LPOS_ToF02_B1SRD71', 'UnitTest1_LPOS_ToF02_B1SRD72',
'UnitTest1_LPOS_ToF02_B1SRD73', 'UnitTest1_LPOS_ToF02_B1SRD74',
'UnitTest1_LPOS_ToF02_B1SRD75', 'UnitTest1_LPOS_ToF02_B1SRD76',
'UnitTest1_LPOS_ToF02_B1SRD77', 'UnitTest1_LPOS_ToF02_B1SRD78',
'UnitTest1_LPOS_ToF02_B1SRD79', 'UnitTest1_LPOS_ToF02_B1SRD80',
'UnitTest1_LPOS_ToF02_B1SRD81', 'UnitTest1_LPOS_ToF02_B1SRD82',
'UnitTest1_LPOS_ToF02_B1SRD83', 'UnitTest1_LPOS_ToF02_B1SRD84',
'UnitTest1_LPOS_ToF02_B1SRD85', 'UnitTest1_LPOS_ToF02_B1SRD86',
'UnitTest1_LPOS_ToF02_B1SRD87', 'UnitTest1_LPOS_ToF02_B1SRD88',
'UnitTest1_LPOS_ToF02_B1SRD89', 'UnitTest1_LPOS_ToF02_B1SRD90',
'UnitTest1_LPOS_ToF02_B1SRD91', 'UnitTest1_LPOS_ToF02_B1SRD92',
'UnitTest1_LPOS_ToF02_Blank01', 'UnitTest1_LPOS_ToF02_Blank02',
'UnitTest1_LPOS_ToF02_B1E1_SR', 'UnitTest1_LPOS_ToF02_B1E2_SR',
'UnitTest1_LPOS_ToF02_B1E3_SR', 'UnitTest1_LPOS_ToF02_B1E4_SR',
'UnitTest1_LPOS_ToF02_B1E5_SR', 'UnitTest1_LPOS_ToF02_B1S1_SR',
'UnitTest1_LPOS_ToF02_B1S2_SR', 'UnitTest1_LPOS_ToF02_B1S3_SR',
'UnitTest1_LPOS_ToF02_B1S4_SR', 'UnitTest1_LPOS_ToF02_B1S5_SR',
'UnitTest1_LPOS_ToF02_S1W01', 'UnitTest1_LPOS_ToF02_S1W02',
'UnitTest1_LPOS_ToF02_S1W03', 'UnitTest1_LPOS_ToF02_S1W04',
'UnitTest1_LPOS_ToF02_S1W05', 'UnitTest1_LPOS_ToF02_S1W06',
'UnitTest1_LPOS_ToF02_S1W07', 'UnitTest1_LPOS_ToF02_S1W08_x',
'UnitTest1_LPOS_ToF02_S1W11_LTR', 'UnitTest1_LPOS_ToF02_S1W12_SR',
'UnitTest1_LPOS_ToF02_ERROR'],
name='Sample File Name',
dtype=str)
assert_series_equal(self.msData.sampleMetadata['Sample File Name'], samples)
def test_featuremetadata_import(self):
with self.subTest(msg='Checking Feature Names'):
features = pandas.Series(['3.17_262.0378m/z',
'3.17_293.1812m/z',
'3.17_145.0686m/z',
'3.17_258.1033m/z'],
name='Feature Name',
dtype='str')
assert_series_equal(self.msData.featureMetadata['Feature Name'], features)
with self.subTest(msg='Checking Peak Widths'):
peakWidth = pandas.Series([0.03931667,
0.01403333,
0.01683333,
0.01683333],
name='Peak Width',
dtype='float')
assert_series_equal(self.msData.featureMetadata['Peak Width'], peakWidth)
with self.subTest(msg='Checking m/z'):
mz = pandas.Series([262.0378339,
293.1811941,
145.0686347,
258.1033447],
name='m/z',
dtype='float')
assert_series_equal(self.msData.featureMetadata['m/z'], mz)
with self.subTest(msg='Checking Retention Time'):
rt = pandas.Series([3.17485,
3.17485,
3.17485,
3.17485],
name='Retention Time',
dtype='float')
assert_series_equal(self.msData.featureMetadata['Retention Time'], rt)
with self.subTest(msg='Checking Isotope Distribution'):
isotope = pandas.Series(['100 - 36.9',
'100 - 11.9',
'100 - 8.69',
'100 - 73.4'],
name='Isotope Distribution',
dtype='str')
assert_series_equal(self.msData.featureMetadata['Isotope Distribution'], isotope)
def test_dilutionlevels(self):
dilution = pandas.Series([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 10., 10., 10., 10., 10., 10., 10., 10., 10., 10., 20., 20., 20., 20., 20.,
40., 40., 40., 60., 60., 60., 80., 80., 80., 80., 80., 100., 100., 100., 100., 100., 100., 100., 100., 100., 100.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 10., 10., 10., 10., 10., 10., 10., 10., 10., 10., 20., 20., 20., 20., 20.,
40., 40., 40., 60., 60., 60., 80., 80., 80., 80., 80., 100., 100., 100., 100., 100., 100., 100., 100., 100., 100.,
numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan,
numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan],
name='Dilution',
dtype='float')
assert_series_equal(self.msData.sampleMetadata['Dilution'], dilution)
def test_feature_correlation(self):
self.msData.addSampleInfo(descriptionFormat='Raw Data', filePath=os.path.join('..','..','npc-standard-project','Raw_Data'))
self.msData.addSampleInfo(descriptionFormat='Batches')
with self.subTest(msg='Testing Pearson correlations'):
correlations = numpy.array([0.99999997, 0.32017508, 1., -0.0693418])
numpy.testing.assert_array_almost_equal(self.msData.correlationToDilution, correlations)
with self.subTest(msg='Testing Spearman correlations'):
correlations = numpy.array([0.9992837, 0.34708745, 1., -0.038844])
self.msData.Attributes['corrMethod'] = 'spearman'
numpy.testing.assert_array_almost_equal(self.msData.correlationToDilution, correlations)
def test_variabletype(self):
self.assertEqual(self.msData.VariableType, nPYc.enumerations.VariableType.Discrete)
class test_msdataset_import_xcms(unittest.TestCase):
"""
Test import from XCMS csv files
"""
def setUp(self):
self.msData = nPYc.MSDataset(os.path.join('..','..','npc-standard-project','Derived_Data','UnitTest1_PCSOP.069_xcms.csv'), fileType='XCMS', noFeatureParams=9)
self.msData_PeakTable = nPYc.MSDataset(os.path.join('..','..','npc-standard-project','Derived_Data','UnitTest1_PCSOP.069_xcms_peakTable.csv'), fileType='XCMS', noFeatureParams=8)
self.msData.addSampleInfo(descriptionFormat='Filenames')
self.msData_PeakTable.addSampleInfo(descriptionFormat='Filenames')
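# noFeatureParams appears to give the number of leading feature-annotation columns
# in each XCMS csv before the per-sample intensity columns (9 for the raw XCMS
# export, 8 for the peakTable export used here).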
def test_dimensions(self):
self.assertEqual((self.msData.noSamples, self.msData.noFeatures), (111, 4))
self.assertEqual((self.msData_PeakTable.noSamples, self.msData_PeakTable.noFeatures), (111, 4))
def test_samples(self):
samples = pandas.Series(['UnitTest1_LPOS_ToF02_B1SRD01', 'UnitTest1_LPOS_ToF02_B1SRD02',
'UnitTest1_LPOS_ToF02_B1SRD03', 'UnitTest1_LPOS_ToF02_B1SRD04',
'UnitTest1_LPOS_ToF02_B1SRD05', 'UnitTest1_LPOS_ToF02_B1SRD06',
'UnitTest1_LPOS_ToF02_B1SRD07', 'UnitTest1_LPOS_ToF02_B1SRD08',
'UnitTest1_LPOS_ToF02_B1SRD09', 'UnitTest1_LPOS_ToF02_B1SRD10',
'UnitTest1_LPOS_ToF02_B1SRD11', 'UnitTest1_LPOS_ToF02_B1SRD12',
'UnitTest1_LPOS_ToF02_B1SRD13', 'UnitTest1_LPOS_ToF02_B1SRD14',
'UnitTest1_LPOS_ToF02_B1SRD15', 'UnitTest1_LPOS_ToF02_B1SRD16',
'UnitTest1_LPOS_ToF02_B1SRD17', 'UnitTest1_LPOS_ToF02_B1SRD18',
'UnitTest1_LPOS_ToF02_B1SRD19', 'UnitTest1_LPOS_ToF02_B1SRD20',
'UnitTest1_LPOS_ToF02_B1SRD21', 'UnitTest1_LPOS_ToF02_B1SRD22',
'UnitTest1_LPOS_ToF02_B1SRD23', 'UnitTest1_LPOS_ToF02_B1SRD24',
'UnitTest1_LPOS_ToF02_B1SRD25', 'UnitTest1_LPOS_ToF02_B1SRD26',
'UnitTest1_LPOS_ToF02_B1SRD27', 'UnitTest1_LPOS_ToF02_B1SRD28',
'UnitTest1_LPOS_ToF02_B1SRD29', 'UnitTest1_LPOS_ToF02_B1SRD30',
'UnitTest1_LPOS_ToF02_B1SRD31', 'UnitTest1_LPOS_ToF02_B1SRD32',
'UnitTest1_LPOS_ToF02_B1SRD33', 'UnitTest1_LPOS_ToF02_B1SRD34',
'UnitTest1_LPOS_ToF02_B1SRD35', 'UnitTest1_LPOS_ToF02_B1SRD36',
'UnitTest1_LPOS_ToF02_B1SRD37', 'UnitTest1_LPOS_ToF02_B1SRD38',
'UnitTest1_LPOS_ToF02_B1SRD39', 'UnitTest1_LPOS_ToF02_B1SRD40',
'UnitTest1_LPOS_ToF02_B1SRD41', 'UnitTest1_LPOS_ToF02_B1SRD42',
'UnitTest1_LPOS_ToF02_B1SRD43', 'UnitTest1_LPOS_ToF02_B1SRD44',
'UnitTest1_LPOS_ToF02_B1SRD45', 'UnitTest1_LPOS_ToF02_B1SRD46',
'UnitTest1_LPOS_ToF02_B1SRD47', 'UnitTest1_LPOS_ToF02_B1SRD48',
'UnitTest1_LPOS_ToF02_B1SRD49', 'UnitTest1_LPOS_ToF02_B1SRD50',
'UnitTest1_LPOS_ToF02_B1SRD51', 'UnitTest1_LPOS_ToF02_B1SRD52',
'UnitTest1_LPOS_ToF02_B1SRD53', 'UnitTest1_LPOS_ToF02_B1SRD54',
'UnitTest1_LPOS_ToF02_B1SRD55', 'UnitTest1_LPOS_ToF02_B1SRD56',
'UnitTest1_LPOS_ToF02_B1SRD57', 'UnitTest1_LPOS_ToF02_B1SRD58',
'UnitTest1_LPOS_ToF02_B1SRD59', 'UnitTest1_LPOS_ToF02_B1SRD60',
'UnitTest1_LPOS_ToF02_B1SRD61', 'UnitTest1_LPOS_ToF02_B1SRD62',
'UnitTest1_LPOS_ToF02_B1SRD63', 'UnitTest1_LPOS_ToF02_B1SRD64',
'UnitTest1_LPOS_ToF02_B1SRD65', 'UnitTest1_LPOS_ToF02_B1SRD66',
'UnitTest1_LPOS_ToF02_B1SRD67', 'UnitTest1_LPOS_ToF02_B1SRD68',
'UnitTest1_LPOS_ToF02_B1SRD69', 'UnitTest1_LPOS_ToF02_B1SRD70',
'UnitTest1_LPOS_ToF02_B1SRD71', 'UnitTest1_LPOS_ToF02_B1SRD72',
'UnitTest1_LPOS_ToF02_B1SRD73', 'UnitTest1_LPOS_ToF02_B1SRD74',
'UnitTest1_LPOS_ToF02_B1SRD75', 'UnitTest1_LPOS_ToF02_B1SRD76',
'UnitTest1_LPOS_ToF02_B1SRD77', 'UnitTest1_LPOS_ToF02_B1SRD78',
'UnitTest1_LPOS_ToF02_B1SRD79', 'UnitTest1_LPOS_ToF02_B1SRD80',
'UnitTest1_LPOS_ToF02_B1SRD81', 'UnitTest1_LPOS_ToF02_B1SRD82',
'UnitTest1_LPOS_ToF02_B1SRD83', 'UnitTest1_LPOS_ToF02_B1SRD84',
'UnitTest1_LPOS_ToF02_B1SRD85', 'UnitTest1_LPOS_ToF02_B1SRD86',
'UnitTest1_LPOS_ToF02_B1SRD87', 'UnitTest1_LPOS_ToF02_B1SRD88',
'UnitTest1_LPOS_ToF02_B1SRD89', 'UnitTest1_LPOS_ToF02_B1SRD90',
'UnitTest1_LPOS_ToF02_B1SRD91', 'UnitTest1_LPOS_ToF02_B1SRD92',
'UnitTest1_LPOS_ToF02_B1E1_SR', 'UnitTest1_LPOS_ToF02_B1E2_SR',
'UnitTest1_LPOS_ToF02_B1E3_SR', 'UnitTest1_LPOS_ToF02_B1E4_SR',
'UnitTest1_LPOS_ToF02_B1E5_SR', 'UnitTest1_LPOS_ToF02_B1S1_SR',
'UnitTest1_LPOS_ToF02_B1S2_SR', 'UnitTest1_LPOS_ToF02_B1S3_SR',
'UnitTest1_LPOS_ToF02_B1S4_SR', 'UnitTest1_LPOS_ToF02_B1S5_SR',
'UnitTest1_LPOS_ToF02_S1W01', 'UnitTest1_LPOS_ToF02_S1W02',
'UnitTest1_LPOS_ToF02_S1W03', 'UnitTest1_LPOS_ToF02_S1W04',
'UnitTest1_LPOS_ToF02_S1W05', 'UnitTest1_LPOS_ToF02_S1W06',
'UnitTest1_LPOS_ToF02_S1W07', 'UnitTest1_LPOS_ToF02_S1W11_LTR',
'UnitTest1_LPOS_ToF02_S1W12_SR'],
name='Sample File Name',
dtype=str)
assert_series_equal(self.msData.sampleMetadata['Sample File Name'], samples)
assert_series_equal(self.msData_PeakTable.sampleMetadata['Sample File Name'], samples)
def test_featuremetadata_import(self):
with self.subTest(msg='Checking Feature Names'):
features = pandas.Series(['3.17_262.0378m/z',
'3.17_293.1812m/z',
'3.17_145.0686m/z',
'3.17_258.1033m/z'],
name='Feature Name',
dtype='str')
assert_series_equal(self.msData.featureMetadata['Feature Name'], features)
assert_series_equal(self.msData_PeakTable.featureMetadata['Feature Name'], features)
with self.subTest(msg='Checking m/z'):
mz = pandas.Series([262.0378339,
293.1811941,
145.0686347,
258.1033447],
name='m/z',
dtype='float')
assert_series_equal(self.msData.featureMetadata['m/z'], mz)
from hypothesis import given, settings, HealthCheck, Phase, Verbosity
import hypothesis.strategies as hs
import dask
import dask.dataframe as dd
import pandas as pd
from spatialpandas import GeoSeries, GeoDataFrame
from spatialpandas.dask import DaskGeoDataFrame
from tests.geometry.strategies import (
st_multipoint_array, st_multiline_array,
st_point_array, st_bounds)
import numpy as np
from spatialpandas.io import (
to_parquet, read_parquet, read_parquet_dask, to_parquet_dask
)
dask.config.set(scheduler="single-threaded")
hyp_settings = settings(
deadline=None, max_examples=100, suppress_health_check=[HealthCheck.too_slow]
)
@given(
gp_point=st_point_array(min_size=1, geoseries=True),
gp_multipoint=st_multipoint_array(min_size=1, geoseries=True),
gp_multiline=st_multiline_array(min_size=1, geoseries=True),
)
@hyp_settings
def test_parquet(gp_point, gp_multipoint, gp_multiline, tmp_path):
# Build dataframe
n = min(len(gp_multipoint), len(gp_multiline))
df = GeoDataFrame({
'point': GeoSeries(gp_point[:n]),
'multipoint': GeoSeries(gp_multipoint[:n]),
'multiline': GeoSeries(gp_multiline[:n]),
'a': list(range(n))
})
df.index.name = 'range_idx'
path = tmp_path / 'df.parq'
to_parquet(df, path)
df_read = read_parquet(str(path), columns=['point', 'multipoint', 'multiline', 'a'])
assert isinstance(df_read, GeoDataFrame)
pd.testing.assert_frame_equal(df, df_read)
assert df_read.index.name == df.index.name
@given(
gp_point=st_point_array(min_size=1, geoseries=True),
gp_multipoint=st_multipoint_array(min_size=1, geoseries=True),
gp_multiline=st_multiline_array(min_size=1, geoseries=True),
)
@hyp_settings
def test_parquet_columns(gp_point, gp_multipoint, gp_multiline, tmp_path):
# Build dataframe
n = min(len(gp_multipoint), len(gp_multiline))
df = GeoDataFrame({
'point': GeoSeries(gp_point[:n]),
'multipoint': GeoSeries(gp_multipoint[:n]),
'multiline': GeoSeries(gp_multiline[:n]),
'a': list(range(n))
})
path = tmp_path / 'df.parq'
to_parquet(df, path)
columns = ['a', 'multiline']
df_read = read_parquet(str(path), columns=columns)
assert isinstance(df_read, GeoDataFrame)
pd.testing.assert_frame_equal(df[columns], df_read)
@given(
gp_multipoint=st_multipoint_array(min_size=1, geoseries=True),
gp_multiline=st_multiline_array(min_size=1, geoseries=True),
)
@hyp_settings
def test_parquet_dask(gp_multipoint, gp_multiline, tmp_path):
# Build dataframe
n = min(len(gp_multipoint), len(gp_multiline))
df = GeoDataFrame({
'points': GeoSeries(gp_multipoint[:n]),
'lines': GeoSeries(gp_multiline[:n]),
'a': list(range(n))
})
ddf = dd.from_pandas(df, npartitions=3)
path = tmp_path / 'ddf.parq'
ddf.to_parquet(str(path))
ddf_read = read_parquet_dask(str(path))
# Check type
assert isinstance(ddf_read, DaskGeoDataFrame)
# Check that partition bounds were loaded
nonempty = np.nonzero(
np.asarray(ddf.map_partitions(len).compute() > 0)
)[0]
assert set(ddf_read._partition_bounds) == {'points', 'lines'}
expected_partition_bounds = (
ddf['points'].partition_bounds.iloc[nonempty].reset_index(drop=True)
)
expected_partition_bounds.index.name = 'partition'
pd.testing.assert_frame_equal(
expected_partition_bounds,
ddf_read._partition_bounds['points'],
)
expected_partition_bounds = (
ddf['lines'].partition_bounds.iloc[nonempty].reset_index(drop=True)
)
expected_partition_bounds.index.name = 'partition'
pd.testing.assert_frame_equal(
expected_partition_bounds,
ddf_read._partition_bounds['lines'],
)
assert ddf_read.geometry.name == 'points'
@given(
gp_multipoint=st_multipoint_array(min_size=10, max_size=40, geoseries=True),
gp_multiline=st_multiline_array(min_size=10, max_size=40, geoseries=True),
)
@settings(deadline=None, max_examples=30)
def test_pack_partitions(gp_multipoint, gp_multiline):
# Build dataframe
n = min(len(gp_multipoint), len(gp_multiline))
df = GeoDataFrame({
'points': GeoSeries(gp_multipoint[:n]),
'lines': GeoSeries(gp_multiline[:n]),
'a': list(range(n))
}).set_geometry('lines')
ddf = dd.from_pandas(df, npartitions=3)
# Pack partitions
ddf_packed = ddf.pack_partitions(npartitions=4)
# Check the number of partitions
assert ddf_packed.npartitions == 4
# Check that rows are now sorted in order of hilbert distance
total_bounds = df.lines.total_bounds
hilbert_distances = ddf_packed.lines.map_partitions(
lambda s: s.hilbert_distance(total_bounds=total_bounds)
).compute().values
# Compute the expected (sorted) hilbert distances
expected_distances = np.sort(
df.lines.hilbert_distance(total_bounds=total_bounds).values
)
np.testing.assert_equal(expected_distances, hilbert_distances)
@given(
gp_multipoint=st_multipoint_array(min_size=60, max_size=100, geoseries=True),
gp_multiline=st_multiline_array(min_size=60, max_size=100, geoseries=True),
use_temp_format=hs.booleans()
)
@settings(
deadline=None,
max_examples=30,
suppress_health_check=[HealthCheck.too_slow],
phases=[
Phase.explicit,
Phase.reuse,
Phase.generate,
Phase.target
],
verbosity=Verbosity.verbose,
)
def test_pack_partitions_to_parquet(
gp_multipoint, gp_multiline, use_temp_format, tmp_path
):
# Build dataframe
n = min(len(gp_multipoint), len(gp_multiline))
df = GeoDataFrame({
'points': GeoSeries(gp_multipoint[:n]),
'lines': GeoSeries(gp_multiline[:n]),
'a': list(range(n))
}).set_geometry('lines')
ddf = dd.from_pandas(df, npartitions=3)
path = tmp_path / 'ddf.parq'
if use_temp_format:
(tmp_path / 'scratch').mkdir(parents=True, exist_ok=True)
tempdir_format = str(tmp_path / 'scratch' / 'part-{uuid}-{partition:03d}')
else:
tempdir_format = None
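# Exponential back-off retry settings forwarded to pack_partitions_to_parquet;
# the keyword names appear to follow the 'retrying' package's API.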
_retry_args = dict(
wait_exponential_multiplier=10,
wait_exponential_max=20000,
stop_max_attempt_number=4
)
ddf_packed = ddf.pack_partitions_to_parquet(
str(path),
npartitions=12,
tempdir_format=tempdir_format,
_retry_args=_retry_args,
)
# Check the number of partitions (< 12 can happen in the case of empty partitions)
assert ddf_packed.npartitions <= 12
# Check that rows are now sorted in order of hilbert distance
total_bounds = df.lines.total_bounds
hilbert_distances = ddf_packed.lines.map_partitions(
lambda s: s.hilbert_distance(total_bounds=total_bounds)
).compute().values
# Compute the expected (sorted) hilbert distances
expected_distances = np.sort(
df.lines.hilbert_distance(total_bounds=total_bounds).values
)
np.testing.assert_equal(expected_distances, hilbert_distances)
assert ddf_packed.geometry.name == 'points'
# Read columns
columns = ['a', 'lines']
ddf_read_cols = read_parquet_dask(path, columns=columns)
pd.testing.assert_frame_equal(
ddf_read_cols.compute(), ddf_packed[columns].compute()
)
@given(
gp_multipoint1=st_multipoint_array(min_size=10, max_size=40, geoseries=True),
gp_multiline1=st_multiline_array(min_size=10, max_size=40, geoseries=True),
gp_multipoint2=st_multipoint_array(min_size=10, max_size=40, geoseries=True),
gp_multiline2=st_multiline_array(min_size=10, max_size=40, geoseries=True),
)
@settings(deadline=None, max_examples=30, suppress_health_check=[HealthCheck.too_slow])
def test_pack_partitions_to_parquet_glob(
gp_multipoint1, gp_multiline1,
gp_multipoint2, gp_multiline2,
tmp_path
):
# Build dataframe1
n = min(len(gp_multipoint1), len(gp_multiline1))
df1 = GeoDataFrame({
'points': GeoSeries(gp_multipoint1[:n]),
'lines': GeoSeries(gp_multiline1[:n]),
'a': list(range(n))
}).set_geometry('lines')
ddf1 = dd.from_pandas(df1, npartitions=3)
path1 = tmp_path / 'ddf1.parq'
ddf_packed1 = ddf1.pack_partitions_to_parquet(str(path1), npartitions=3)
# Build dataframe2
n = min(len(gp_multipoint2), len(gp_multiline2))
df2 = GeoDataFrame({
'points': GeoSeries(gp_multipoint2[:n]),
'lines': GeoSeries(gp_multiline2[:n]),
'a': list(range(n))
}).set_geometry('lines')
ddf2 = dd.from_pandas(df2, npartitions=3)
path2 = tmp_path / 'ddf2.parq'
ddf_packed2 = ddf2.pack_partitions_to_parquet(str(path2), npartitions=4)
# Load both packed datasets with glob
ddf_globbed = read_parquet_dask(tmp_path / "ddf*.parq", geometry="lines")
# Check the number of partitions (< 7 can happen in the case of empty partitions)
assert ddf_globbed.npartitions <= 7
# Check contents
expected_df = pd.concat([ddf_packed1.compute(), ddf_packed2.compute()])
df_globbed = ddf_globbed.compute()
pd.testing.assert_frame_equal(df_globbed, expected_df)
"""SQL io tests
The SQL tests are broken down in different classes:
- `PandasSQLTest`: base class with common methods for all test classes
- Tests for the public API (only tests with sqlite3)
- `_TestSQLApi` base class
- `TestSQLApi`: test the public API with sqlalchemy engine
- `TestSQLiteFallbackApi`: test the public API with a sqlite DBAPI
connection
- Tests for the different SQL flavors (flavor specific type conversions)
- Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with
common methods, `_TestSQLAlchemyConn` tests the API with a SQLAlchemy
Connection object. The different tested flavors (sqlite3, MySQL,
PostgreSQL) derive from the base class
- Tests for the fallback mode (`TestSQLiteFallback`)
"""
import csv
from datetime import date, datetime, time
from io import StringIO
import sqlite3
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
isna,
to_datetime,
to_timedelta,
)
import pandas._testing as tm
import pandas.io.sql as sql
from pandas.io.sql import read_sql_query, read_sql_table
try:
import sqlalchemy
import sqlalchemy.schema
import sqlalchemy.sql.sqltypes as sqltypes
from sqlalchemy.ext import declarative
from sqlalchemy.orm import session as sa_session
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
"create_iris": {
"sqlite": """CREATE TABLE iris (
"SepalLength" REAL,
"SepalWidth" REAL,
"PetalLength" REAL,
"PetalWidth" REAL,
"Name" TEXT
)""",
"mysql": """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
"postgresql": """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)""",
},
"insert_iris": {
"sqlite": """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
"mysql": """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
"postgresql": """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);""",
},
"create_test_types": {
"sqlite": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TEXT,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" REAL,
"IntCol" INTEGER,
"BoolCol" INTEGER,
"IntColWithNull" INTEGER,
"BoolColWithNull" INTEGER
)""",
"mysql": """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`IntDateOnlyCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
"postgresql": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"DateColWithTz" TIMESTAMP WITH TIME ZONE,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)""",
},
"insert_test_types": {
"sqlite": {
"query": """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"mysql": {
"query": """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"postgresql": {
"query": """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"DateColWithTz",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
},
"read_parameters": {
"sqlite": "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
"mysql": 'SELECT * FROM iris WHERE `Name`="%s" AND `SepalLength`=%s',
"postgresql": 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s',
},
"read_named_parameters": {
"sqlite": """
SELECT * FROM iris WHERE Name=:name AND SepalLength=:length
""",
"mysql": """
SELECT * FROM iris WHERE
`Name`="%(name)s" AND `SepalLength`=%(length)s
""",
"postgresql": """
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
""",
},
"create_view": {
"sqlite": """
CREATE VIEW iris_view AS
SELECT * FROM iris
"""
},
}
class MixInBase:
def teardown_method(self, method):
# if setup fails, there may not be a connection to close.
if hasattr(self, "conn"):
for tbl in self._get_all_tables():
self.drop_table(tbl)
self._close_conn()
class MySQLMixIn(MixInBase):
def drop_table(self, table_name):
cur = self.conn.cursor()
cur.execute(f"DROP TABLE IF EXISTS {sql._get_valid_mysql_name(table_name)}")
self.conn.commit()
def _get_all_tables(self):
cur = self.conn.cursor()
cur.execute("SHOW TABLES")
return [table[0] for table in cur.fetchall()]
def _close_conn(self):
from pymysql.err import Error
try:
self.conn.close()
except Error:
pass
class SQLiteMixIn(MixInBase):
def drop_table(self, table_name):
self.conn.execute(
f"DROP TABLE IF EXISTS {sql._get_valid_sqlite_name(table_name)}"
)
self.conn.commit()
def _get_all_tables(self):
c = self.conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
return [table[0] for table in c.fetchall()]
def _close_conn(self):
self.conn.close()
class SQLAlchemyMixIn(MixInBase):
def drop_table(self, table_name):
sql.SQLDatabase(self.conn).drop_table(table_name)
def _get_all_tables(self):
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
table_list = meta.tables.keys()
return table_list
def _close_conn(self):
pass
class PandasSQLTest:
"""
Base class with common private methods for SQLAlchemy and fallback cases.
"""
def _get_exec(self):
if hasattr(self.conn, "execute"):
return self.conn
else:
return self.conn.cursor()
@pytest.fixture(params=[("data", "iris.csv")])
def load_iris_data(self, datapath, request):
import io
iris_csv_file = datapath(*request.param)
if not hasattr(self, "conn"):
self.setup_connect()
self.drop_table("iris")
self._get_exec().execute(SQL_STRINGS["create_iris"][self.flavor])
with io.open(iris_csv_file, mode="r", newline=None) as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS["insert_iris"][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _load_iris_view(self):
self.drop_table("iris_view")
self._get_exec().execute(SQL_STRINGS["create_view"][self.flavor])
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
assert issubclass(pytype, np.floating)
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _load_test1_data(self):
columns = ["index", "A", "B", "C", "D"]
data = [
(
"2000-01-03 00:00:00",
0.980268513777,
3.68573087906,
-0.364216805298,
-1.15973806169,
),
(
"2000-01-04 00:00:00",
1.04791624281,
-0.0412318367011,
-0.16181208307,
0.212549316967,
),
(
"2000-01-05 00:00:00",
0.498580885705,
0.731167677815,
-0.537677223318,
1.34627041952,
),
(
"2000-01-06 00:00:00",
1.12020151869,
1.56762092543,
0.00364077397681,
0.67525259227,
),
]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_test2_data(self):
df = DataFrame(
dict(
A=[4, 1, 3, 6],
B=["asd", "gsq", "ylt", "jkl"],
C=[1.1, 3.1, 6.9, 5.3],
D=[False, True, True, False],
E=["1990-11-22", "1991-10-26", "1993-11-26", "1995-12-12"],
)
)
df["E"] = to_datetime(df["E"])
self.test_frame2 = df
def _load_test3_data(self):
columns = ["index", "A", "B"]
data = [
("2000-01-03 00:00:00", 2 ** 31 - 1, -1.987670),
("2000-01-04 00:00:00", -29, -0.0412318367011),
("2000-01-05 00:00:00", 20000, 0.731167677815),
("2000-01-06 00:00:00", -290867, 1.56762092543),
]
self.test_frame3 = DataFrame(data, columns=columns)
def _load_raw_sql(self):
self.drop_table("types_test_data")
self._get_exec().execute(SQL_STRINGS["create_test_types"][self.flavor])
ins = SQL_STRINGS["insert_test_types"][self.flavor]
data = [
{
"TextCol": "first",
"DateCol": "2000-01-03 00:00:00",
"DateColWithTz": "2000-01-01 00:00:00-08:00",
"IntDateCol": 535852800,
"IntDateOnlyCol": 20101010,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": 1,
"BoolColWithNull": False,
},
{
"TextCol": "first",
"DateCol": "2000-01-04 00:00:00",
"DateColWithTz": "2000-06-01 00:00:00-07:00",
"IntDateCol": 1356998400,
"IntDateOnlyCol": 20101212,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": None,
"BoolColWithNull": None,
},
]
for d in data:
self._get_exec().execute(
ins["query"], [d[field] for field in ins["fields"]]
)
def _count_rows(self, table_name):
result = (
self._get_exec()
.execute(f"SELECT count(*) AS count_1 FROM {table_name}")
.fetchone()
)
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_query("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_parameter(self):
query = SQL_STRINGS["read_parameters"][self.flavor]
params = ["Iris-setosa", 5.1]
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_named_parameter(self):
query = SQL_STRINGS["read_named_parameters"][self.flavor]
params = {"name": "Iris-setosa", "length": 5.1}
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self, method=None):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=method)
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _to_sql_empty(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1.iloc[:0], "test_frame1")
def _to_sql_fail(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
assert self.pandasSQL.has_table("test_frame1")
msg = "Table 'test_frame1' already exists"
with pytest.raises(ValueError, match=msg):
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
self.drop_table("test_frame1")
def _to_sql_replace(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="replace")
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_append(self):
# Nuke table just in case
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="append")
assert self.pandasSQL.has_table("test_frame1")
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_method_callable(self):
check = [] # used to double check function below is really being used
def sample(pd_table, conn, keys, data_iter):
check.append(1)
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(pd_table.table.insert(), data)
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=sample)
assert self.pandasSQL.has_table("test_frame1")
assert check == [1]
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _roundtrip(self):
self.drop_table("test_frame_roundtrip")
self.pandasSQL.to_sql(self.test_frame1, "test_frame_roundtrip")
result = self.pandasSQL.read_query("SELECT * FROM test_frame_roundtrip")
result.set_index("level_0", inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _to_sql_save_index(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")], columns=["A", "B", "C"], index=["A"]
)
self.pandasSQL.to_sql(df, "test_to_sql_saves_index")
ix_cols = self._get_index_columns("test_to_sql_saves_index")
assert ix_cols == [["A"]]
def _transaction_test(self):
with self.pandasSQL.run_transaction() as trans:
trans.execute("CREATE TABLE test_trans (A INT, B TEXT)")
class DummyException(Exception):
pass
# Make sure when transaction is rolled back, no rows get inserted
ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
try:
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
raise DummyException("error")
except DummyException:
# ignore raised exception
pass
res = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res) == 0
# Make sure when transaction is committed, rows do get inserted
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
res2 = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res2) == 1
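# Illustrative aside (not part of the test suite above): a minimal, hedged sketch of the
# parameterised-query pattern that _read_sql_iris_parameter exercises, using sqlite's "?"
# placeholders. The demo table name and values are assumptions chosen for this example.
import sqlite3
import pandas as pd
demo_conn = sqlite3.connect(":memory:")
demo_conn.execute("CREATE TABLE iris_demo (Name TEXT, SepalLength REAL)")
demo_conn.execute("INSERT INTO iris_demo VALUES (?, ?)", ("Iris-setosa", 5.1))
matched = pd.read_sql_query(
    "SELECT * FROM iris_demo WHERE Name=? AND SepalLength=?",
    demo_conn,
    params=("Iris-setosa", 5.1),
)
print(len(matched))  # expected: 1
demo_conn.close()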
# -----------------------------------------------------------------------------
# -- Testing the public API
class _TestSQLApi(PandasSQLTest):
"""
Base class to test the public API.
From this, two classes are derived to run these tests for both the
sqlalchemy mode (`TestSQLApi`) and the fallback mode
(`TestSQLiteFallbackApi`). These tests are run with sqlite3. Specific
tests for the different sql flavours are included in `_TestSQLAlchemy`.
Notes:
flavor can always be passed even in SQLAlchemy mode,
where it should be correctly ignored.
We don't use drop_table because that isn't part of the public API.
"""
flavor = "sqlite"
mode: str
def setup_connect(self):
self.conn = self.connect()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
def load_test_data_and_sql(self):
self._load_iris_view()
self._load_test1_data()
self._load_test2_data()
self._load_test3_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_sql_view(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris_view", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, "test_frame1", self.conn)
assert sql.has_table("test_frame1", self.conn)
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
assert sql.has_table("test_frame2", self.conn)
msg = "Table 'test_frame2' already exists"
with pytest.raises(ValueError, match=msg):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
def test_to_sql_replace(self):
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="replace")
assert sql.has_table("test_frame3", self.conn)
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame3")
assert num_rows == num_entries
def test_to_sql_append(self):
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="append")
assert sql.has_table("test_frame4", self.conn)
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame4")
assert num_rows == num_entries
def test_to_sql_type_mapping(self):
sql.to_sql(self.test_frame3, "test_frame5", self.conn, index=False)
result = sql.read_sql("SELECT * FROM test_frame5", self.conn)
tm.assert_frame_equal(self.test_frame3, result)
def test_to_sql_series(self):
s = Series(np.arange(5, dtype="int64"), name="series")
sql.to_sql(s, "test_series", self.conn, index=False)
s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn)
tm.assert_frame_equal(s.to_frame(), s2)
def test_roundtrip(self):
sql.to_sql(self.test_frame1, "test_frame_roundtrip", con=self.conn)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
# HACK!
result.index = self.test_frame1.index
result.set_index("level_0", inplace=True)
result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_roundtrip_chunksize(self):
sql.to_sql(
self.test_frame1,
"test_frame_roundtrip",
con=self.conn,
index=False,
chunksize=2,
)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute("SELECT * FROM iris", con=self.conn)
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def test_date_parsing(self):
# Test date parsing in read_sql
# No Parsing
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn)
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["DateCol"]
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"IntDateOnlyCol": "%Y%m%d"},
)
assert issubclass(df.IntDateOnlyCol.dtype.type, np.datetime64)
assert df.IntDateOnlyCol.tolist() == [
pd.Timestamp("2010-10-10"),
pd.Timestamp("2010-12-12"),
]
def test_date_and_index(self):
# Test case where same column appears in parse_date and index_col
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
index_col="DateCol",
parse_dates=["DateCol", "IntDateCol"],
)
assert issubclass(df.index.dtype.type, np.datetime64)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_timedelta(self):
# see #6921
df = to_timedelta(Series(["00:00:01", "00:00:03"], name="foo")).to_frame()
with tm.assert_produces_warning(UserWarning):
df.to_sql("test_timedelta", self.conn)
result = sql.read_sql_query("SELECT * FROM test_timedelta", self.conn)
tm.assert_series_equal(result["foo"], df["foo"].astype("int64"))
def test_complex_raises(self):
df = DataFrame({"a": [1 + 1j, 2j]})
msg = "Complex datatypes not supported"
with pytest.raises(ValueError, match=msg):
df.to_sql("test_complex", self.conn)
@pytest.mark.parametrize(
"index_name,index_label,expected",
[
# no index name, defaults to 'index'
(None, None, "index"),
# specifying index_label
(None, "other_label", "other_label"),
# using the index name
("index_name", None, "index_name"),
# has index name, but specifying index_label
("index_name", "other_label", "other_label"),
# index name is integer
(0, None, "0"),
# index name is None but index label is integer
(None, 0, "0"),
],
)
def test_to_sql_index_label(self, index_name, index_label, expected):
temp_frame = DataFrame({"col1": range(4)})
temp_frame.index.name = index_name
query = "SELECT * FROM test_index_label"
sql.to_sql(temp_frame, "test_index_label", self.conn, index_label=index_label)
frame = sql.read_sql_query(query, self.conn)
assert frame.columns[0] == expected
def test_to_sql_index_label_multiindex(self):
temp_frame = DataFrame(
{"col1": range(4)},
index=MultiIndex.from_product([("A0", "A1"), ("B0", "B1")]),
)
# no index name, defaults to 'level_0' and 'level_1'
sql.to_sql(temp_frame, "test_index_label", self.conn)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[0] == "level_0"
assert frame.columns[1] == "level_1"
# specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["A", "B"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# using the index name
temp_frame.index.names = ["A", "B"]
sql.to_sql(temp_frame, "test_index_label", self.conn, if_exists="replace")
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# has index name, but specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["C", "D"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["C", "D"]
msg = "Length of 'index_label' should match number of levels, which is 2"
with pytest.raises(ValueError, match=msg):
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label="C",
)
def test_multiindex_roundtrip(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")],
columns=["A", "B", "C"],
index=["A", "B"],
)
df.to_sql("test_multiindex_roundtrip", self.conn)
result = sql.read_sql_query(
"SELECT * FROM test_multiindex_roundtrip", self.conn, index_col=["A", "B"]
)
tm.assert_frame_equal(df, result, check_index_type=True)
def test_integer_col_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])
sql.to_sql(df, "test_frame_integer_col_names", self.conn, if_exists="replace")
def test_get_schema(self):
create_sql = sql.get_schema(self.test_frame1, "test", con=self.conn)
assert "CREATE" in create_sql
def test_get_schema_dtypes(self):
float_frame = DataFrame({"a": [1.1, 1.2], "b": [2.1, 2.2]})
dtype = sqlalchemy.Integer if self.mode == "sqlalchemy" else "INTEGER"
create_sql = sql.get_schema(
float_frame, "test", con=self.conn, dtype={"b": dtype}
)
assert "CREATE" in create_sql
assert "INTEGER" in create_sql
def test_get_schema_keys(self):
frame = DataFrame({"Col1": [1.1, 1.2], "Col2": [2.1, 2.2]})
create_sql = sql.get_schema(frame, "test", con=self.conn, keys="Col1")
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("Col1")'
assert constraint_sentence in create_sql
# multiple columns as key (GH10385)
create_sql = sql.get_schema(
self.test_frame1, "test", con=self.conn, keys=["A", "B"]
)
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("A", "B")'
assert constraint_sentence in create_sql
def test_chunksize_read(self):
df = DataFrame(np.random.randn(22, 5), columns=list("abcde"))
df.to_sql("test_chunksize", self.conn, index=False)
# reading the query in one time
res1 = sql.read_sql_query("select * from test_chunksize", self.conn)
# reading the query in chunks with read_sql_query
res2 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_query(
"select * from test_chunksize", self.conn, chunksize=5
):
res2 = concat([res2, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res2)
# reading the table in chunks with read_sql_table
if self.mode == "sqlalchemy":
res3 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_table("test_chunksize", self.conn, chunksize=5):
res3 = concat([res3, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res3)
def test_categorical(self):
# GH8624
# test that categorical gets written correctly as dense column
df = DataFrame(
{
"person_id": [1, 2, 3],
"person_name": ["<NAME>", "<NAME>", "<NAME>"],
}
)
df2 = df.copy()
df2["person_name"] = df2["person_name"].astype("category")
df2.to_sql("test_categorical", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_categorical", self.conn)
tm.assert_frame_equal(res, df)
def test_unicode_column_name(self):
# GH 11431
df = DataFrame([[1, 2], [3, 4]], columns=["\xe9", "b"])
df.to_sql("test_unicode", self.conn, index=False)
def test_escaped_table_name(self):
# GH 13206
df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]})
df.to_sql("d1187b08-4943-4c8d-a7f6", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM `d1187b08-4943-4c8d-a7f6`", self.conn)
tm.assert_frame_equal(res, df)
@pytest.mark.single
@pytest.mark.skipif(not SQLALCHEMY_INSTALLED, reason="SQLAlchemy not installed")
class TestSQLApi(SQLAlchemyMixIn, _TestSQLApi):
"""
Test the public API as it would be used directly
Tests for `read_sql_table` are included here, as this is specific for the
sqlalchemy mode.
"""
flavor = "sqlite"
mode = "sqlalchemy"
def connect(self):
return sqlalchemy.create_engine("sqlite:///:memory:")
def test_read_table_columns(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
cols = ["A", "B"]
result = sql.read_sql_table("test_frame", self.conn, columns=cols)
assert result.columns.tolist() == cols
def test_read_table_index_col(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
result = sql.read_sql_table("test_frame", self.conn, index_col="index")
assert result.index.names == ["index"]
result = sql.read_sql_table("test_frame", self.conn, index_col=["A", "B"])
assert result.index.names == ["A", "B"]
result = sql.read_sql_table(
"test_frame", self.conn, index_col=["A", "B"], columns=["C", "D"]
)
assert result.index.names == ["A", "B"]
assert result.columns.tolist() == ["C", "D"]
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
iris_frame1 = sql.read_sql_table("iris", self.conn)
iris_frame2 = sql.read_sql("iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
def test_not_reflect_all_tables(self):
# create invalid table
qry = """CREATE TABLE invalid (x INTEGER, y UNKNOWN);"""
self.conn.execute(qry)
qry = """CREATE TABLE other_table (x INTEGER, y INTEGER);"""
self.conn.execute(qry)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
sql.read_sql_table("other_table", self.conn)
sql.read_sql_query("SELECT * FROM other_table", self.conn)
# Verify some things
assert len(w) == 0
def test_warning_case_insensitive_table_name(self):
# see gh-7815
#
# We can't test that this warning is triggered, as the database
# configuration would have to be altered. But here we test that
# the warning is certainly NOT triggered in a normal case.
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# This should not trigger a Warning
self.test_frame1.to_sql("CaseSensitive", self.conn)
# Verify some things
assert len(w) == 0
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes("test_index_saved")
ixs = [i["column_names"] for i in ixs]
return ixs
def test_sqlalchemy_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame(
{"time": to_datetime(["201412120154", "201412110254"], utc=True)}
)
db = sql.SQLDatabase(self.conn)
table = sql.SQLTable("test_type", db, frame=df)
# GH 9086: TIMESTAMP is the suggested type for datetimes with timezones
assert isinstance(table.table.c["time"].type, sqltypes.TIMESTAMP)
def test_database_uri_string(self):
# Test read_sql and .to_sql method with a database URI (GH10654)
test_frame1 = self.test_frame1
# db_uri = 'sqlite:///:memory:' # raises
# sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) near
# "iris": syntax error [SQL: 'iris']
with tm.ensure_clean() as name:
db_uri = "sqlite:///" + name
table = "iris"
test_frame1.to_sql(table, db_uri, if_exists="replace", index=False)
test_frame2 = sql.read_sql(table, db_uri)
test_frame3 = sql.read_sql_table(table, db_uri)
query = "SELECT * FROM iris"
test_frame4 = sql.read_sql_query(query, db_uri)
tm.assert_frame_equal(test_frame1, test_frame2)
tm.assert_frame_equal(test_frame1, test_frame3)
tm.assert_frame_equal(test_frame1, test_frame4)
# using driver that will not be installed on Travis to trigger error
# in sqlalchemy.create_engine -> test passing of this error to user
try:
# the rest of this test depends on pg8000's being absent
import pg8000 # noqa
pytest.skip("pg8000 is installed")
except ImportError:
pass
db_uri = "postgresql+pg8000://user:pass@host/dbname"
with pytest.raises(ImportError, match="pg8000"):
sql.read_sql("select * from table", db_uri)
def _make_iris_table_metadata(self):
sa = sqlalchemy
metadata = sa.MetaData()
iris = sa.Table(
"iris",
metadata,
sa.Column("SepalLength", sa.REAL),
sa.Column("SepalWidth", sa.REAL),
sa.Column("PetalLength", sa.REAL),
sa.Column("PetalWidth", sa.REAL),
sa.Column("Name", sa.TEXT),
)
return iris
def test_query_by_text_obj(self):
# WIP : GH10846
name_text = sqlalchemy.text("select * from iris where name=:name")
iris_df = sql.read_sql(name_text, self.conn, params={"name": "Iris-versicolor"})
all_names = set(iris_df["Name"])
assert all_names == {"Iris-versicolor"}
def test_query_by_select_obj(self):
# WIP : GH10846
iris = self._make_iris_table_metadata()
name_select = sqlalchemy.select([iris]).where(
iris.c.Name == sqlalchemy.bindparam("name")
)
iris_df = sql.read_sql(name_select, self.conn, params={"name": "Iris-setosa"})
all_names = set(iris_df["Name"])
assert all_names == {"Iris-setosa"}
class _EngineToConnMixin:
"""
A mixin that causes setup_connect to create a conn rather than an engine.
"""
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
super().load_test_data_and_sql()
engine = self.conn
conn = engine.connect()
self.__tx = conn.begin()
self.pandasSQL = sql.SQLDatabase(conn)
self.__engine = engine
self.conn = conn
yield
self.__tx.rollback()
self.conn.close()
self.conn = self.__engine
self.pandasSQL = sql.SQLDatabase(self.__engine)
# XXX:
# super().teardown_method(method)
@pytest.mark.single
class TestSQLApiConn(_EngineToConnMixin, TestSQLApi):
pass
@pytest.mark.single
class TestSQLiteFallbackApi(SQLiteMixIn, _TestSQLApi):
"""
Test the public sqlite connection fallback API
"""
flavor = "sqlite"
mode = "fallback"
def connect(self, database=":memory:"):
return sqlite3.connect(database)
def test_sql_open_close(self):
# Test if the IO in the database still work if the connection closed
# between the writing and reading (as in many real situations).
with tm.ensure_clean() as name:
conn = self.connect(name)
sql.to_sql(self.test_frame3, "test_frame3_legacy", conn, index=False)
conn.close()
conn = self.connect(name)
result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;", conn)
conn.close()
tm.assert_frame_equal(self.test_frame3, result)
@pytest.mark.skipif(SQLALCHEMY_INSTALLED, reason="SQLAlchemy is installed")
def test_con_string_import_error(self):
conn = "mysql://root@localhost/pandas_nosetest"
msg = "Using URI string without sqlalchemy installed"
with pytest.raises(ImportError, match=msg):
sql.read_sql("SELECT * FROM iris", conn)
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
msg = "Execution failed on sql 'iris': near \"iris\": syntax error"
with pytest.raises(sql.DatabaseError, match=msg):
sql.read_sql("iris", self.conn)
def test_safe_names_warning(self):
# GH 6798
df = DataFrame([[1, 2], [3, 4]], columns=["a", "b "]) # has a space
# warns on create table with spaces in names
with tm.assert_produces_warning():
sql.to_sql(df, "test_frame3_legacy", self.conn, index=False)
def test_get_schema2(self):
# without providing a connection object (available for backwards comp)
create_sql = sql.get_schema(self.test_frame1, "test")
assert "CREATE" in create_sql
def _get_sqlite_column_type(self, schema, column):
for col in schema.split("\n"):
if col.split()[0].strip('""') == column:
return col.split()[1]
raise ValueError(f"Column {column} not found")
def test_sqlite_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame(
{"time": to_datetime(["201412120154", "201412110254"], utc=True)}
)
db = sql.SQLiteDatabase(self.conn)
table = sql.SQLiteTable("test_type", db, frame=df)
schema = table.sql_schema()
assert self._get_sqlite_column_type(schema, "time") == "TIMESTAMP"
# -----------------------------------------------------------------------------
# -- Database flavor specific tests
class _TestSQLAlchemy(SQLAlchemyMixIn, PandasSQLTest):
"""
Base class for testing the sqlalchemy backend.
Subclasses for specific database types are created below. Tests that
deviate for each flavor are overwritten there.
"""
flavor: str
@pytest.fixture(autouse=True, scope="class")
def setup_class(cls):
cls.setup_import()
cls.setup_driver()
conn = cls.connect()
conn.connect()
def load_test_data_and_sql(self):
self._load_raw_sql()
self._load_test1_data()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
@classmethod
def setup_import(cls):
# Skip this test if SQLAlchemy not available
if not SQLALCHEMY_INSTALLED:
pytest.skip("SQLAlchemy not installed")
@classmethod
def setup_driver(cls):
raise NotImplementedError()
@classmethod
def connect(cls):
raise NotImplementedError()
def setup_connect(self):
try:
self.conn = self.connect()
self.pandasSQL = sql.SQLDatabase(self.conn)
# to test if connection can be made:
self.conn.connect()
except sqlalchemy.exc.OperationalError:
pytest.skip(f"Can't connect to {self.flavor} server")
def test_read_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_to_sql_method_multi(self):
self._to_sql(method="multi")
def test_to_sql_method_callable(self):
self._to_sql_method_callable()
def test_create_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, "temp_frame")
assert temp_conn.has_table("temp_frame")
def test_drop_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, "temp_frame")
assert temp_conn.has_table("temp_frame")
pandasSQL.drop_table("temp_frame")
assert not temp_conn.has_table("temp_frame")
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_read_table(self):
iris_frame = sql.read_sql_table("iris", con=self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_table_columns(self):
iris_frame = sql.read_sql_table(
"iris", con=self.conn, columns=["SepalLength", "SepalLength"]
)
tm.equalContents(iris_frame.columns.values, ["SepalLength", "SepalLength"])
def test_read_table_absent_raises(self):
msg = "Table this_doesnt_exist not found"
with pytest.raises(ValueError, match=msg):
sql.read_sql_table("this_doesnt_exist", con=self.conn)
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
assert issubclass(df.BoolCol.dtype.type, np.bool_)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Bool column with NA values becomes object
assert issubclass(df.BoolColWithNull.dtype.type, np.object)
def test_bigint(self):
# int64 should be converted to BigInteger, GH7433
df = DataFrame(data={"i64": [2 ** 62]})
df.to_sql("test_bigint", self.conn, index=False)
result =
|
sql.read_sql_table("test_bigint", self.conn)
|
pandas.io.sql.read_sql_table
|
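# Illustrative aside (not part of the dataset rows above/below): a minimal, hedged sketch of
# the to_sql / read_sql_query round trip that the test suite above keeps exercising. The
# in-memory SQLite connection and the table name "demo" are assumptions for this example.
import sqlite3
import pandas as pd
conn_demo = sqlite3.connect(":memory:")
frame_demo = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
frame_demo.to_sql("demo", conn_demo, index=False)  # write the frame to a fresh table
roundtrip = pd.read_sql_query("SELECT * FROM demo", conn_demo)  # read it back
assert roundtrip.equals(frame_demo)
conn_demo.close()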
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
# In[3]:
anova_data = pd.read_csv("https://raw.githubusercontent.com/ajstewartlang/02_intro_to_python_programming/main/data/ANOVA_data1.csv")
# In[4]:
anova_data.head()
# In[5]:
anova_data.describe()
# In[6]:
anova_data.info()
# In[7]:
anova_data.hist()
# In[8]:
anova_data['RT'].hist()
# In[9]:
import matplotlib.pyplot as plt
# In[13]:
plt.style.use('ggplot') #this sets the style
plt.plot(anova_data['Condition'], anova_data['RT'], 'bo') #condition is on x axis, RT is on y axis, bo is blue markers to indicate the points
plt.xlabel('Condition')
plt.ylabel('RT (ms.)')
plt.title('Reaction Time by Condition')
plt.margins(.5, .5)
plt.show()
# In[15]:
grouped_data = anova_data.groupby(['Condition'])
# In[17]:
grouped_data.count()
# In[19]:
grouped_data['RT'].mean()
# In[20]:
grouped_data['RT'].std() #the dot applies the next method to the object on its left, so calls can be chained (read it like 'and then')
# In[22]:
my_means = grouped_data['RT'].mean()
# In[23]:
my_means
# In[27]:
my_means.plot(kind='bar')
plt.ylabel('RT (ms.)')
plt.title('Reaction Time by Condition')
plt.show() #call plt.show() so the figure still displays when the notebook is exported and run as a Python script
# In[25]:
my_std = grouped_data['RT'].std()
# In[26]:
my_std
# In[29]:
my_std[1]
# In[30]:
error = [my_std[0], my_std[1]]
# In[31]:
error
# In[38]:
my_means.plot.bar(yerr=error, align='center', alpha=0.5, ecolor='black', capsize = 10) #ecolor sets the colour of the error bars, capsize the width of their caps; align='center' is the default
plt.ylabel('RT (ms.)')
plt.xlabel('Word Frequency')
plt.xticks([0, 1], ['High\nFrequency', 'Low\nFrequency'], rotation = 45) #xticks used to change what is on the x axis and \n used to go to next line
plt.title('Mean RT and SDs by Condition')
plt.show()
# In[39]:
from scipy import stats
# In[40]:
#ONE-WAY ANOVA (subset the data frame to compare reaction times for the high and low frequency conditions)
anova_data['Condition']=='high'
# In[42]:
#it will keep the data when condition is equal to high
anova_data[anova_data['Condition']=='high']
# In[44]:
anova_data[anova_data['Condition']=='high']['RT']
# In[45]:
high_group = anova_data[anova_data['Condition']=='high']['RT']
# In[46]:
low_group = anova_data[anova_data['Condition']=='low']['RT']
# In[47]:
high_group
# In[48]:
low_group
# In[50]:
stats.f_oneway(high_group, low_group) #the p-value is in scientific notation: move the decimal point nine places to the left
# In[51]:
stats.ttest_ind(high_group, low_group)
# In[52]:
import statsmodels.api as sm
from statsmodels.formula.api import ols
# In[53]:
model = ols('RT ~ Condition', data= anova_data).fit()
# In[55]:
anova_table = sm.stats.anova_lm(model, typ=3) #Type 3 sums of squares ('typ' is the statsmodels keyword); it's the easiest way to interpret main effects in the context of interactions
anova_table
# In[56]:
#FACTORIAL ANOVA
factorial_anova_data = pd.read_csv('https://raw.githubusercontent.com/ajstewartlang/02_intro_to_python_programming/main/data/ANOVA_data3.csv')
# In[57]:
factorial_anova_data
# In[59]:
grouped_data = factorial_anova_data.groupby(['Prime', 'Target'])
# In[62]:
group_means = grouped_data['RT'].mean()
# In[63]:
#error bars as standard deviations either side of the mean for each of the 4 experimental groups
group_errors = grouped_data['RT'].std()
# In[64]:
group_means
# In[72]:
group_means.plot(kind='bar', yerr=group_errors, alpha=0.5, capsize=10)
plt.xlabel('Prime x Target')
plt.xticks([0, 1, 2, 3], ['Negative\nNegative', 'Negative\nPositive', 'Positive\nNegative', 'Positive\nPositive'])
plt.show()
# In[73]:
from statsmodels.graphics.factorplots import interaction_plot
# In[74]:
#pandas DataFrame with the mean RT for each combination of the two factors
group_mean = grouped_data.mean()
# In[75]:
group_mean
# In[76]:
#convert the grouped means into a pandas DataFrame
|
pd.DataFrame(group_mean)
|
pandas.DataFrame
|
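# Illustrative aside (not part of the dataset rows above/below): a hedged sketch of the
# interaction plot the factorial-ANOVA script above is building towards with statsmodels'
# interaction_plot. The Prime/Target/RT values below are made-up assumptions.
import pandas as pd
from statsmodels.graphics.factorplots import interaction_plot
demo = pd.DataFrame({
    "Prime": ["Negative", "Negative", "Positive", "Positive"],
    "Target": ["Negative", "Positive", "Negative", "Positive"],
    "RT": [510.0, 480.0, 495.0, 450.0],
})
fig = interaction_plot(
    x=demo["Prime"].to_numpy(),
    trace=demo["Target"].to_numpy(),
    response=demo["RT"].to_numpy(),
)
fig.savefig("prime_by_target_interaction.png")  # one line per Target level across Prime levels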
import csv
import datetime
import math
import os
import typing as T
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats as stats
import seaborn as sns
from const import OUTPUT_PATH_PLOTS_DETAIL, STAT_SIGNIFICANCE_CUTOFF
# line below suppresses annoying SettingWithCopyWarning
pd.options.mode.chained_assignment = None
sig_results = {}
all_results = {}
def get_acs_vars_for_analysis() -> T.List:
"""Function to identify/grab which ACS variables
will be included in the correlation analysis.
Parameters
----------
none
Returns
-------
vars : list
A list of human-readable variable names. The
ACS raw names are converted to human-readable
names in the load_census_data function.
"""
vars = [
'total-renter-occupied-households',
'pct-renter-occupied',
'pct-owner-occupied',
'pct-owner-occupied-mortgage',
'pct-owner-occupied-without-mortgage',
'median-gross-rent',
'median-year-structure-built',
'median-household-income',
'median-property-value',
'median-house-age',
'median-monthly-housing-cost',
'pct-white',
'pct-af-am',
'pct-hispanic',
'pct-am-in',
'pct-asian',
'pct-nh-pi',
'pct-multiple-race',
'pct-other-race',
'pct-non-white',
'pct-below-poverty-level',
'pct-without-health-insurance',
'pct-pop-in-labor-force',
'pop-total',
'pct-households-married-with-own-children',
'pct-male-single-parent-household',
'pct-female-single-parent-household',
'pct-male-older-adult-living-alone',
'pct-female-older-adult-living-alone',
'pct-households-with-children',
'pct-households-with-elderly',
'pct-enrolled-in-school',
'education-attained',
'pct-veterans',
'pct-foreign-born',
'pct-not-us-citizen',
'pct-disability',
'gini-index',
'level-of-education-less-than-9th',
'unemployment-rate',
'pct-women-in-labor-force',
'mean-commute-time',
'pct-service-occupations',
'pct-public-transport-to-work',
'pct-with-snap-benefits',
'per-capita-income',
'pct-vacant-properties',
'pct-no-vehicles-available',
'pct-incomplete-plumbing',
'pct-non-english-spoken-in-home',
'pct-broadband-internet',
'pct-own-computer',
'pct-english-fluency-not-great',
'pct-one-or-less-occupants-per-room',
'pct-mobile-homes',
'median-income-male-worker',
'median-income-female-worker',
'median-income-diff-male-vs-female',
'median-population-age',
'total-owner-occupied-households-mortgage',
]
return vars
def calc_acs_correlations(df: pd.DataFrame, x_var: str, y_var: str):
"""Function to calculate correlations.
Parameters
----------
df : pandas df
Final pandas df containing housing loss data
and ACS variables to search within for correlations.
x_var : string
name of first variable to include in correlation search
(likely an ACS variable name).
y_var: string
name of second variable to include in correlation
search (likely a housing loss metric).
Returns
-------
corr: 1x2 numerical array
From either stats.pearsonr or stats.spearmanr.
First element is the correlation, second is the
p-value. Note that we are assuming we have near-census
level data collection so p-value doesn't make much
sense here.
"""
x = df[x_var]
y = df[y_var]
corr = 0.0
try:
corr = stats.pearsonr(x, y)
# corr = stats.spearmanr(x,y)
except:
print('Couldn\'t calculate correlation between ' + x_var + ' and ' + y_var)
corr = [999.0, 999.0]
return corr
def plot_acs_correlations(
df: pd.DataFrame, x_var: str, y_var: str, plot_write_path: str
) -> None:
"""For each variable pairs (x_var and y_var),
calculate, visualize, and save correlation results.
Parameters
----------
df : pandas df
Final pandas df containing housing loss data
and ACS variables to search within for correlations.
x_var : string
name of first variable to include in correlation search
(likely an ACS variable name).
y_var: string
name of second variable to include in correlation
search (likely a housing loss metric).
Returns
-------
none
(function outputs: saving individual scatter plot
images and storing important results in dictionary sig_results)
"""
# ignore self-correlation
if x_var == y_var:
return
# drop NaN values (and, below, negative values) before calculating correlation.
df.dropna(subset=[x_var, y_var], inplace=True)
# make sure all relevant values between 0 and 1e7.
# this is because some error codes seem to be -1e8, some +1e8
# note this excludes all negative values. as far as I've seen,
# no ACS/housing loss variables should have negative values.
df = df[df[x_var] >= 0.0]
df = df[df[y_var] >= 0.0]
df = df[df[x_var] <= 1.0e7]
df = df[df[y_var] <= 1.0e7]
corr_results = calc_acs_correlations(df, x_var, y_var)
r_value = round(corr_results[0], 3)
p_value = round(corr_results[1], 3)
title_string = "ACS Correlations\n {} vs. {}: \n r = {}".format(
y_var, x_var, r_value
)
###only relevant for non-production code study
all_results[x_var] = r_value
###
if math.fabs(r_value) >= STAT_SIGNIFICANCE_CUTOFF:
file_string = "strong_corr_{}_vs_{}.png".format(y_var, x_var)
sig_results[x_var] = r_value
else:
file_string = "weak_corr_{}_vs_{}.png".format(y_var, x_var)
f, ax = plt.subplots()
corr_plt = sns.regplot(x=x_var, y=y_var, data=df).set_title(title_string)
figure = corr_plt.get_figure()
try:
figure.savefig(
str(plot_write_path / OUTPUT_PATH_PLOTS_DETAIL / file_string), dpi=200
)
except FileNotFoundError:
print(
'Error: The absolute file path is too long for Python to save this file. '
'Please shorten the file path to your data directory'
)
plt.close()
def correlation_analysis(
census_df: pd.DataFrame,
processed_data_df: pd.DataFrame,
target_var: str,
plot_write_path: str,
) -> None:
### Defining the list of variables to run correlations on
acs_vars_for_correlations = get_acs_vars_for_analysis()
### Here's the housing loss file. For now it grabs a previous version; we'll have to replace it with the calculated file.
to_keep = ['geoid', target_var]
processed_data_df = processed_data_df[to_keep]
processed_data_df = processed_data_df[processed_data_df['geoid'].notna()]
processed_data_df.geoid = processed_data_df.geoid.astype(str)
census_df.GEOID = census_df.GEOID.astype(str)
mrg = processed_data_df.merge(census_df, left_on='geoid', right_on='GEOID')
hl_type = ''
if target_var == 'total_filings':
hl_type = 'evictions'
if target_var == 'total_foreclosures':
hl_type = 'all foreclosures'
if target_var == 'housing-loss-index':
hl_type = 'all types of housing loss'
print(
'\nCalculating correlations and visualizing the strongest relationships for '
+ str(hl_type)
+ '...'
)
for i in acs_vars_for_correlations:
plot_acs_correlations(mrg, i, target_var, plot_write_path)
acs_vars_for_correlations.append('GEOID')
sig_results_series =
|
pd.Series(sig_results)
|
pandas.Series
|
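# Illustrative aside (not part of the dataset rows above/below): a hedged sketch of the
# per-variable Pearson correlation step performed by calc_acs_correlations. The column
# names and values below are assumptions chosen for this example.
import pandas as pd
import scipy.stats as stats
acs_demo = pd.DataFrame({
    "pct-renter-occupied": [0.10, 0.35, 0.50, 0.72, 0.90],
    "total_filings": [2, 5, 9, 14, 21],
})
acs_demo = acs_demo.dropna(subset=["pct-renter-occupied", "total_filings"])  # mirror the NaN filtering above
r_value, p_value = stats.pearsonr(acs_demo["pct-renter-occupied"], acs_demo["total_filings"])
print(round(r_value, 3), round(p_value, 3))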
# -------------------------------------------------------------------------------------
# Libraries
import logging
import numpy as np
import geopandas as gpd
import pandas as pd
import rasterio
logging.getLogger('rasterio').setLevel(logging.WARNING)
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to read shapefile section(s)
def read_data_shapefile_section(file_name, columns_name_expected=None, columns_name_type=None, columns_name_tag=None):
if columns_name_expected is None:
columns_name_expected = ['HMC_X', 'HMC_Y', 'BASIN', 'SEC_NAME', 'SEC_RS', 'AREA', 'Q_THR1', 'Q_THR2']
if columns_name_type is None:
columns_name_type = [np.int, np.int, str, str, str, np.float, np.float, np.float]
if columns_name_tag is None:
columns_name_tag = columns_name_expected
file_dframe_raw = gpd.read_file(file_name)
file_rows = file_dframe_raw.shape[0]
file_obj = {}
for column_name, column_type, column_tag in zip(columns_name_expected, columns_name_type, columns_name_tag):
if column_name in file_dframe_raw.columns:
column_data_tmp = file_dframe_raw[column_name].values.tolist()
if column_type == np.int:
column_data = [np.int(item) for item in column_data_tmp]
elif column_type == str:
column_data = [str(item) for item in column_data_tmp]
elif column_type == np.float:
column_data = [np.float(item) for item in column_data_tmp]
else:
logging.error(' ===> Datatype for undefined columns in the section shapefile is not allowed')
raise NotImplementedError('Datatype not implemented yet')
else:
logging.warning(' ===> Column ' + column_name +
' not available in shapefile. Initialized with undefined values according with datatype')
if column_type == np.int:
column_data = [-9999] * file_rows
elif column_type == str:
column_data = [''] * file_rows
elif column_type == np.float:
column_data = [-9999.0] * file_rows
else:
logging.error(' ===> Datatype for undefined columns in the section shapefile is not allowed')
raise NotImplementedError('Datatype not implemented yet')
file_obj[column_tag] = column_data
section_df =
|
pd.DataFrame(file_obj, columns=columns_name_tag)
|
pandas.DataFrame
|
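# Illustrative aside (not part of the dataset rows above/below): a hedged sketch of how the
# shapefile reader above turns its per-column lists into the section DataFrame. The column
# names and the -9999 fill value mirror the function's defaults; the data are assumptions.
import pandas as pd
file_obj_demo = {
    "HMC_X": [12, 34],
    "HMC_Y": [56, 78],
    "SEC_NAME": ["section_a", "section_b"],
    "Q_THR1": [120.5, -9999.0],  # -9999.0 marks an undefined threshold, as in the defaults above
}
section_demo = pd.DataFrame(file_obj_demo, columns=["HMC_X", "HMC_Y", "SEC_NAME", "Q_THR1"])
print(section_demo.dtypes)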
#!/usr/bin/env python3
import sqlite3
import luigi
import pandas as pd
import time
import json
timestamp = time.strftime("%Y%m%d")
class ChinookData(luigi.Task):
"""
This class extends the luigi Task for
extracting ChinookData
Attributes
----------
local_target : str
input file target name
output_target : str
output file target name
task_complete : boolean
status of luigi task
"""
local_target = timestamp+"_chinook_{}.csv"
output_target = "./processed/"+local_target
task_complete = False
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.columns = ['albums', 'artists', 'customers', 'employees',
'genres', 'invoice_items', 'invoices', 'media_types', 'playlist_track', 'playlists', 'sqlite_sequence',
'sqlite_stat1', 'tracks']
def requires(self):
"""
This method will be executed before
the run method
Returns
----------
list : list
Empty list
"""
return []
def complete(self):
"""
This method will be executed after
the run method
Returns
----------
task_complete : boolean
status of luigi task
"""
return self.task_complete
def run(self):
"""
This method load data from the local SQLite database
and write CSV file represent the database
"""
con = sqlite3.connect("./sources/chinook.db")
for column in self.columns:
results = pd.read_sql_query('SELECT * from {}'.format(column), con)
results.to_csv(self.output_target.format(column),
encoding='utf-8', index=False, header=True, quoting=2)
self.task_complete = True
class DatabaseData(luigi.Task):
"""
This class extends the luigi Task for
extracting DatabaseData
Attributes
----------
local_target : str
input file target name
output_target : str
output file target name
task_complete : boolean
status of luigi task
"""
local_target = timestamp+"_database_{}.csv"
output_target = "./processed/"+local_target
task_complete = False
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.columns = ['artists', 'content', 'genres',
'labels', 'reviews', 'years']
def requires(self):
"""
This method will be executed before
the run method
Returns
----------
list : list
Empty list
"""
return []
def complete(self):
"""
This method will be executed after
the run method
Returns
----------
task_complete : boolean
status of luigi task
"""
return self.task_complete
def run(self):
"""
This method load data from the local SQLite database
and write CSV file represent the database
"""
con = sqlite3.connect("./sources/database.sqlite")
for column in self.columns:
results = pd.read_sql_query('SELECT * from {}'.format(column), con)
results.to_csv(self.output_target.format(column),
encoding='utf-8', index=False, header=True, quoting=2)
self.task_complete = True
class DisasterData(luigi.Task):
"""
This class extends the luigi Task for
extracting DisasterData
Attributes
----------
local_target : str
input file target name
output_target : str
output file target name
task_complete : boolean
status of luigi task
"""
local_target = timestamp+"_disaster_data.csv"
output_target = "./processed/"+local_target
task_complete = False
def requires(self):
"""
This method will be executed before
the run method
Returns
----------
list : list
Empty list
"""
return []
def complete(self):
"""
This method will be executed after
the run method
Returns
----------
task_complete : boolean
status of luigi task
"""
return self.task_complete
def run(self):
"""
This method load data from the local CSV files
and write another CSV file represent the previous one
"""
df = pd.read_csv('./sources/disaster_data.csv')
df.to_csv(self.output_target,
encoding='utf-8', index=False, header=True, quoting=2)
self.task_complete = True
class ReviewData(luigi.Task):
"""
This class extends the luigi Task for
extracting ReviewData
Attributes
----------
local_target : str
input file target name
output_target : str
output file target name
task_complete : boolean
status of luigi task
"""
local_target = timestamp+"_reviews_data.csv"
output_target = "./processed/"+local_target
task_complete = False
def requires(self):
"""
This method will be executed before
the run method
Returns
----------
list : list
Empty list
"""
return []
def complete(self):
"""
This method will be executed after
the run method
Returns
----------
task_complete : boolean
status of luigi task
"""
return self.task_complete
def run(self):
"""
This method load data from different local CSV files
and write another CSV file represent the previous one
"""
review1 = pd.read_csv('./sources/reviews_q1.csv')
review2 = pd.read_csv('./sources/reviews_q2.csv')
review3 = pd.read_csv('./sources/reviews_q2.csv')
review4 = pd.read_excel('./sources/reviews_q1.xlsx')
frames = [review1, review2, review3, review4]
df =
|
pd.concat(frames)
|
pandas.concat
|
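# Illustrative aside (not part of the dataset rows above/below): a hedged sketch of the
# concatenate-then-export step that the ReviewData task performs. The frames and the
# output file name are assumptions for this example.
import pandas as pd
q1_demo = pd.DataFrame({"review_id": [1, 2], "score": [4, 5]})
q2_demo = pd.DataFrame({"review_id": [3], "score": [3]})
combined_demo = pd.concat([q1_demo, q2_demo], ignore_index=True)  # stack the quarterly frames
combined_demo.to_csv("reviews_combined_demo.csv", encoding="utf-8", index=False, header=True, quoting=2)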
import os
import sys
import pandas as pd
from utils.node_object_creator import *
from first_neural_network.embeddings import Embedding
from parameters import folder, pattern, vector_size
class Initialize_vector_representation():
def __init__(self, folder, pattern, vector_size):
self.folder = folder
self.pattern = pattern
self.vector_size = vector_size
def initial_vector_representation(self):
# Training the first neural network
vectors_dict = self.first_neural_network()
#save_files(ls_nodes)
self.save_vectors(vectors_dict)
def save_vectors(self, vectors_dict):
df =
|
pd.DataFrame.from_dict(vectors_dict)
|
pandas.DataFrame.from_dict
|
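# Illustrative aside (not part of the dataset rows above/below): a hedged sketch of turning a
# vectors dict into a DataFrame the way save_vectors above does. The dict layout (one list of
# vector components per node id) is an assumption for this example.
import pandas as pd
vectors_demo = {"node_1": [0.1, 0.2, 0.3], "node_2": [0.4, 0.5, 0.6]}
vectors_df = pd.DataFrame.from_dict(vectors_demo)  # one column per node, one row per component
vectors_df.to_csv("initial_vector_representation_demo.csv", index=False)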
# -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = 'Only length-1 decimal markers supported'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), decimal='')
def test_bad_stream_exception(self):
# Issue 13652:
# This test validates that both python engine
# and C engine will raise UnicodeDecodeError instead of
# c engine raising ParserError and swallowing exception
# that caused read to fail.
handle = open(self.csv_shiftjs, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
# stream must be binary UTF8
stream = codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter)
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
with tm.assert_raises_regex(UnicodeDecodeError, msg):
self.read_csv(stream)
stream.close()
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# see gh-8217
# Series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
assert not result._is_view
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read(3)
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read()
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# skipfooter
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#',
skipfooter=1)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa
pytest.raises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
assert len(df) == 3
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
tm.assert_index_equal(df.columns,
Index(['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4']))
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
expected = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D']))
assert df.index.name == 'index'
assert isinstance(
df.index[0], (datetime, np.datetime64, Timestamp))
assert df.values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns,
pd.Index(['A', 'B', 'C', 'D', 'E']))
assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp))
assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None)
assert isinstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
pytest.raises(ValueError, self.read_csv, StringIO(data))
def test_read_duplicate_index_explicit(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
result = self.read_table(StringIO(data), sep=',', index_col=0)
expected = self.read_table(StringIO(data), sep=',', ).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# make sure an error isn't thrown
self.read_csv(StringIO(data))
self.read_table(StringIO(data), sep=',')
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
assert data['A'].dtype == np.bool_
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(
|
StringIO(data)
|
pandas.compat.StringIO
|
import os
import numpy as np
import pandas as pd
from collections import OrderedDict
def read_ephys_info_from_excel_2017(excel_file, skiprows_animal=1, skiprows_cell=5):
# read Ex and In solutions from the first two lines
excelname = os.path.basename(excel_file)
excelname = os.path.splitext(excelname)[0]
animal_info =
|
pd.read_excel(excel_file, header=0, skiprows=skiprows_animal)
|
pandas.read_excel
|
#!/usr/bin/python3
from .Camoco import Camoco
from .RefGen import RefGen
from .Locus import Locus
from .Term import Term
from pandas import DataFrame
from scipy.stats import hypergeom
from itertools import chain
from functools import lru_cache
from collections import OrderedDict
import sys
import copy
import numpy
import camoco as co
import pandas as pd
class Ontology(Camoco):
'''
An Ontology is just a collection of terms. Each term is just a
collection of genes. Sometimes terms are related or nested
within each other, sometimes not. Simple enough.
Parameters
----------
name : unique identifier
Returns
-------
An Ontology Object
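Example
-------
Illustrative sketch only; the term ids, gene ids and `refgen` below are
hypothetical placeholders:

    terms = [Term('T1', desc='toy term',
                  loci=[refgen[g] for g in ('gene_1', 'gene_2')])]
    ont = Ontology.from_terms(terms, 'toy_ont', 'a toy ontology', refgen)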
'''
def __init__(self, name, type='Ontology'):
super().__init__(name, type=type)
if self.refgen:
self.refgen = RefGen(self.refgen)
def __len__(self):
'''
Return the number of non-empty terms
'''
return self.num_terms(min_term_size=1)
def __iter__(self):
return self.iter_terms()
def num_terms(self,min_term_size=0,max_term_size=10e10):
'''
Returns the number of terms in the Ontology
within the min_term_size and max_term_size
Parameters
----------
min_term_size (default:0)
The minimum number of loci associated with the term
max_term_size (default: 10e10)
The maximum number of loci associated with the term
Returns
-------
the number of terms that fit the criteria
'''
return self.db.cursor().execute(
'''SELECT COUNT(*) FROM (
SELECT DISTINCT(term) FROM term_loci
GROUP BY term
HAVING COUNT(term) >= ?
AND COUNT(term) <= ?
);''',
(min_term_size, max_term_size)
).fetchone()[0]
@lru_cache(maxsize=131072)
def __getitem__(self, id):
''' retrieve a term by id '''
try:
(id, desc) = self.db.cursor().execute(
'SELECT * from terms WHERE id = ?', (id, )
).fetchone()
term_loci = [
self.refgen[gene_id] for gene_id, in self.db.cursor().execute(
''' SELECT id FROM term_loci WHERE term = ?''', (id, )
).fetchall()]
term_attrs = {k:v for k,v in self.db.cursor().execute(
''' SELECT key,val FROM term_attrs WHERE term = ?''',(id,)
)
}
return Term(id, desc=desc, loci=term_loci,**term_attrs)
except TypeError as e: # Not in database
raise e
def terms_containing(self,locus_list,max_term_size=10e10,min_term_size=0):
'''
Returns the set of terms which contain the
specified loci.
Parameters
----------
locus_list : iterable of type Locus
The list of loci for which to retrieve
corresponding terms.
max_term_size : int (default: 10e10)
The maximum term size for which to test enrichment. Useful
for filtering out large terms that would otherwise be
uninformative (e.g. top level GO terms)
min_term_size : int (default: 0)
The minimum term size for which to test enrichment. Useful
for filtering out very small terms that would be uninformative
(e.g. single gene terms)
Returns
-------
list of terms which contain provided loci
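Example
-------
Illustrative sketch; `ont` and the loci are placeholders:

    hits = ont.terms_containing([locus_a, locus_b], min_term_size=2)
    # hits is a list of Term objects containing locus_a and/or locus_b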
'''
# Filter to unique set
locus_list = set(locus_list)
# query the database
terms = self.db.cursor().execute('''SELECT DISTINCT term
FROM term_loci WHERE id IN ('{}')
'''.format(
"','".join([x.id for x in locus_list])
)).fetchall()
# Fetch the terms with the proper size
terms = list(
filter(
lambda t: (len(t) >= min_term_size) and (len(t) <= max_term_size),
[self[name] for name, in terms]
)
)
return terms
def num_distinct_loci(self):
return self.db.cursor().execute(
'SELECT COUNT(DISTINCT(id)) FROM term_loci;'
).fetchone()[0]
def distinct_loci_ids(self):
return [x[0] for x in self.db.cursor().execute(
'SELECT DISTINCT(id) FROM term_loci'
)]
def iter_terms(self,min_term_size=0,max_term_size=10e10):
'''
Return a generator that iterates over each term in the ontology.
'''
terms = self.db.cursor().execute('''
SELECT term from term_loci
GROUP BY term
HAVING COUNT(term) >= ?
AND COUNT(term) <= ?
''',(min_term_size,max_term_size))
for id, in terms:
yield self[id]
def terms(self,min_term_size=0,max_term_size=10e10):
return list(self.iter_terms(min_term_size=min_term_size,max_term_size=max_term_size))
def summary(self):
return "Ontology:{} - desc: {} - contains {} terms for {}".format(
self.name, self.description, len(self), self.refgen)
def rand(self, n=1, min_term_size=1, max_term_size=100000):
'''
Return a random Term from the Ontology
Parameters
----------
n : int (default: 1)
The number of random terms to return
min_term_size : int (default: 1)
The smallest acceptable term size
i.e. the number of genes annotated to the term
max_term_size : int (default: 100000)
The largest acceptable term size
'''
cur = self.db.cursor()
ids = cur.execute('''
SELECT term FROM term_loci
GROUP BY term
HAVING COUNT(term) >= ?
AND COUNT(term) <= ?
ORDER BY RANDOM()
LIMIT ?;
''',(min_term_size,max_term_size,n)).fetchall()
if len(ids) == 0:
raise ValueError(
'No Terms exist with these criteria '
'{} < len(term) < {}:'.format(min_term_size,max_term_size)
)
terms = [self[id[0]] for id in ids]
if len(terms) == 1:
return terms[0]
else:
return terms
def add_term(self, term, cursor=None, overwrite=False):
'''
This will add a single term to the ontology
Parameters
----------
term : Term object
The term object you wish to add.
cursor : apsw cursor object
An initialized cursor object, for batch operation. This will
allow for adding many terms in one transaction as long as the
passed in cursor has executed the "BEGIN TRANSACTION" command.
overwrite : bool
Indication to delete any existing entry before writing
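Example
-------
Batch-insertion sketch (the same pattern add_terms uses below):

    cur = self.db.cursor()
    cur.execute('BEGIN TRANSACTION')
    for term in terms:
        self.add_term(term, cursor=cur)
    cur.execute('END TRANSACTION')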
'''
if overwrite:
self.del_term(term.id)
if not cursor:
cur = self.db.cursor()
cur.execute('BEGIN TRANSACTION')
else:
cur = cursor
# Add the term id and description
cur.execute('''
INSERT OR ABORT INTO terms (id, desc)
VALUES (?, ?)''', (term.id, term.desc))
# Add the term loci
if term.loci:
for locus in term.loci:
cur.execute('''
INSERT OR ABORT INTO term_loci (term, id)
VALUES (?, ?)
''', (term.id, locus.id))
# Add the term attrs
if term.attrs:
for key,val in term.attrs.items():
cur.execute('''
INSERT OR ABORT INTO term_attrs (term,key,val)
VALUES (?,?,?)
''',(term.id,key,val))
if not cursor:
cur.execute('END TRANSACTION')
def del_term(self, term, cursor=None):
''' This will delete a single term from the ontology
Parameters
----------
term : Term object or str
The term object or id you wish to remove.
cursor : apsw cursor object
An initialized cursor object, for batch operation.'''
try:
if not cursor:
cur = self.db.cursor()
cur.execute('BEGIN TRANSACTION')
else:
cur = cursor
if not isinstance(term, str):
id = term.id
else:
id = term
cur.execute('''
DELETE FROM term_loci WHERE term = ?;
DELETE FROM terms WHERE id = ?;
''', (id, id))
if not cursor:
cur.execute('END TRANSACTION')
except Exception as e:
cur.execute('ROLLBACK')
raise e
def add_terms(self, terms, overwrite=True):
'''
A convenience function to add terms from an iterable.
Parameters
----------
terms : iterable of camoco.Term objects
'''
if overwrite:
self.del_terms(terms)
cur = self.db.cursor()
cur.execute('BEGIN TRANSACTION')
for term in terms:
self.add_term(term, cursor=cur, overwrite=False)
cur.execute('END TRANSACTION')
def del_terms(self, terms):
'''
A convenience function to delete many Term objects.
Parameters
----------
terms : iterable of camoco.Term objects.
'''
cur = self.db.cursor()
cur.execute('BEGIN TRANSACTION')
for term in terms:
self.del_term(term, cursor=cur)
cur.execute('END TRANSACTION')
def set_strongest(self,attr=None,higher=None):
'''
Convenience function that allows you to set default values for
strongest SNP2Gene mapping tasks.
Parameters
----------
attr: The locus attr used to determine which locus is the
strongest locus.
higher: Flag indicating whether the value in --strongest-attr
is stronger if it is higher. Default behavior is to
treat lower values as stronger (i.e. p-vals)
'''
if not(attr is None):
self._global('strongest_attr',attr)
if not(higher is None):
self._global('strongest_higher',higher)
def get_strongest_attr(self):
'''
Convenience function that allows you to get the default value for
the locus attr used to determine which locus is the strongest locus
strongest SNP2Gene mapping.
'''
return self._global('strongest_attr')
def get_strongest_higher(self):
'''
Convenience function that allows you to get the default value for
the flag indicating whether the value in `strongest-attr` is
stronger if higher, for strongest SNP2Gene mapping tasks.
'''
return self._global('strongest_higher')
@classmethod
def create(cls, name, description, refgen, type='Ontology'):
'''
This method creates a fresh Ontology with nothing in it.
'''
# run the inherited create method from Camoco
self = super().create(name, description, type=type)
# set the refgen for the current instance
self.refgen = refgen
# add the global refgen
self._global('refgen', refgen.name)
# build the tables
self._create_tables()
return self
@classmethod
def from_DataFrame(cls, dataframe, name, description, refgen,
gene_col='gene',term_col='Term'):
'''
Convenience function to create an Ontology from a pandas DataFrame.
Parameters
----------
dataframe : pandas.DataFrame
A pandas dataframe containing the mapping between gene ids
and terms.
name : str
The name of the camoco object to be stored in the database.
description : str
A short message describing the dataset.
refgen : camoco.RefGen
A RefGen object describing the genes in the dataset
Optional Parameters
-------------------
gene_col : str (default: gene)
The string designating the column in the dataframe containing
gene names (ids)
term_col : str (default: Term)
The string designating the column in the dataframe containing
the term name.
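Example
-------
Illustrative sketch of the expected shape (default column names):

    gene      Term
    gene_1    TermA
    gene_2    TermA
    gene_3    TermB

Each distinct value in the Term column becomes one Term containing
the genes listed for it.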
'''
self = cls.create(name,description,refgen)
# create terms from
terms = [
Term(id,loci=refgen[set(df[gene_col])]) \
for id,df in dataframe.groupby(term_col)
]
self.log('Adding {} terms to the database.',len(terms))
self.add_terms(terms, overwrite=True)
# Build the indices
self.log('Building the indices.')
self._build_indices()
self.log('Your gene ontology is built.')
return self
@classmethod
def from_terms(cls, terms, name, description, refgen):
'''
Convenience function to create an Ontology from an iterable
terms object.
Parameters
----------
terms : iterable of camoco.GOTerm objects
Items to add to the ontology. The key being the name
of the term and the items being the loci.
name : str
The name of the camoco object to be stored in the database.
description : str
A short message describing the dataset.
refgen : camoco.RefGen
A RefGen object describing the genes in the dataset
'''
self = cls.create(name,description,refgen)
self.log('Adding {} terms to the database.',len(terms))
self.add_terms(terms, overwrite=True)
# Build the indices
self.log('Building the indices.')
self._build_indices()
self.log('Your gene ontology is built.')
return self
def _create_tables(self):
cur = self.db.cursor()
cur.execute('''
CREATE TABLE IF NOT EXISTS terms (
id TEXT UNIQUE,
desc TEXT
)'''
)
cur.execute('''
CREATE TABLE IF NOT EXISTS term_loci (
term TEXT,
id TEXT
);'''
)
cur.execute('''
CREATE TABLE IF NOT EXISTS term_attrs (
term TEXT,
key TEXT,
val TEXT
);
''')
def _clear_tables(self):
cur = self.db.cursor()
cur.execute('DELETE FROM terms; DELETE FROM term_loci;')
def _build_indices(self):
cursor = self.db.cursor()
cursor.execute('CREATE INDEX IF NOT EXISTS termIND ON terms (id)')
cursor.execute('CREATE INDEX IF NOT EXISTS lociIND ON term_loci (term,id)')
def _drop_indices(self):
cursor = self.db.cursor()
cursor.execute('DROP INDEX IF EXISTS termIND; DROP INDEX IF EXISTS lociIND;')
def enrichment(self, locus_list, pval_cutoff=0.05, max_term_size=300,
min_term_size=2, num_universe=None, return_table=False,
label=None,include_genes=False,bonferroni_correction=True,
min_overlap=1):
'''
Evaluates enrichment of loci within the locus list for terms within
the ontology. NOTE: this only tests terms that have at least one
locus that exists in locus_list.
Parameters
----------
locus_list : list of co.Locus *or* instance of co.Ontology
A list of loci for which to test enrichment. i.e. is there
an over-representation of these loci within the terms in
the Ontology. If an ontology is passed, each term in the ontology
will be iterated over and tested as if they were a locus_list.
pval_cutoff : float (default: 0.05)
Report terms with a pval lower than this value
bonferroni_correction : bool (default: True)
correct for testing multiple terms using Bonferroni correction
max_term_size : int (default: 300)
The maximum term size for which to test enrichment. Useful
for filtering out large terms that would otherwise be
uninformative (e.g. top level GO terms)
min_term_size : int (default: 2)
The minimum term size for which to test enrichment. Useful
for filtering out very small terms that would be uninformative
(e.g. single gene terms)
num_universe : int (default: None)
Use a custom universe size for the hypergeometric calculation,
for instance if you have a reduced number of genes in a reference
co-expression network. If None, the value will be calculated as
the total number of distinct genes that are observed in the
ontology.
include_genes : bool (default: False)
Include comma delimited genes as a field
return_table : bool (default: False)
If True, return results as a data frame
label: str (default: None)
If a label is specified, it will be included in the results
min_overlap : int (default: 1)
The minimum overlap between genes in the term and genes in
the locus list. Increasing this value can minimize spurious
or uninformative terms
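Example
-------
Illustrative sketch; `ont` and `candidate_loci` are placeholders:

    tbl = ont.enrichment(candidate_loci, pval_cutoff=0.05,
                         return_table=True, include_genes=True)
    # tbl is a DataFrame with one row per significantly enriched term
    # (empty if nothing passes the cutoff)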
'''
if isinstance(locus_list,co.Ontology):
ontology = locus_list
self.log('Calculating enrichment for an Ontology: {}',ontology.name)
enrich = []
if label is None:
label = ontology.name
if num_universe is None:
num_universe = len(set(self.distinct_loci_ids()).union(ontology.distinct_loci_ids()))
for term in ontology.terms(min_term_size=min_term_size,max_term_size=max_term_size):
term = copy.copy(term)
e = self.enrichment(
term.loci,
pval_cutoff=pval_cutoff,
max_term_size=max_term_size,
min_term_size=min_term_size,
num_universe=num_universe,
return_table=return_table,
label=label+'_'+term.id,
include_genes=include_genes,
bonferroni_correction=bonferroni_correction,
min_overlap=min_overlap,
)
enrich.append(e)
if return_table:
return pd.concat(enrich)
else:
return enrich
# return a new copy of each
terms = [copy.copy(term) for term in self.terms_containing(
locus_list,
min_term_size=min_term_size,
max_term_size=max_term_size
)]
# Calculate the size of the Universe
if num_universe is None:
num_universe = self.num_distinct_loci()
self.log(
'{}: Loci occur in {} terms, containing {} genes'.format(
label,len(terms), num_universe
)
)
significant_terms = []
for term in terms:
term_genes = set(term.loci)
#if len(term_genes) > max_term_size:
# continue
num_common = len(term_genes.intersection(locus_list))
num_in_term = len(term_genes)
num_sampled = len(locus_list)
# the reason this is num_common - 1 is because we are looking for 1 - cdf
# and we need to be greater than OR EQUAL TO num_common
# Look. Do this in ipython:
'''
In [99]: probs = [hypergeom.pmf(x,100,5,10) for x in range(0,6)]
In [100]: probs
Out[100]:
[0.58375236692612187,
0.33939091100357333,
0.070218809173150043,
0.006383528106649855,
0.00025103762217164457,
3.3471682956218215e-06]
In [103]: 1-sum(probs[0:3])
# Get the probs of drawing 3 or more
Out[103]: 0.006637912897154763
# Remember slicing is exclusive for the end value
In [105]: hypergeom.sf(3,100,5,10)
# That aint right
Out[105]: 0.00025438479046726637
In [106]: hypergeom.sf(3-1,100,5,10)
# See? You want num_common - 1
Out[106]: 0.0066379128971171221
'''
pval = hypergeom.sf(num_common-1,num_universe,num_in_term,num_sampled)
if pval <= pval_cutoff and num_common >= min_overlap:
term.attrs['hyper'] = OrderedDict([
('source' , self.name),
('pval' , pval),
('terms_tested' , len(terms)),
('num_common' , num_common),
('num_universe' , num_universe),
('source_term_size' , num_in_term),
('target_term_size' , len(locus_list)),
('num_terms' , len(self)),
#('num_sampled' , num_sampled)
])
if label != None:
term.attrs['hyper']['label'] = label
if bonferroni_correction == True:
# Right now this isn't a true Bonferroni correction; it's only
# correcting for the number of terms that had term genes in them
if pval > pval_cutoff / len(terms):
term.attrs['hyper']['bonferroni'] = False
else:
term.attrs['hyper']['bonferroni'] = True
term.attrs['pval'] = pval
if include_genes == True:
term.attrs['hyper']['genes'] = ",".join(
[x.id for x in term_genes.intersection(locus_list)]
)
significant_terms.append(term)
self.log('\t\tFound {} was significant for {} terms',label,len(significant_terms))
if return_table == True:
tbl = []
for x in significant_terms:
val = OrderedDict([
('name', x.name),
('id' , x.id)
])
val.update(x.attrs['hyper'])
val.update(x.attrs)
del val['hyper']
tbl.append(val)
tbl =
|
DataFrame.from_records(tbl)
|
pandas.DataFrame.from_records
|
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from src.utils import db, logs
plt.style.use("ggplot")
TO_INT = ["No. isolates", "No. sequences", "Genome Fragment", "Order within Fragment", "Accessory Fragment",
"Accessory Order with Fragment", "Min group size nuc", "Max group size nuc", "Avg group size nuc"]
TO_STR = ["Non-unique Gene name", "Annotation", "QC"]
TO_FLOAT = ["Avg sequences per isolate"]
def power(database):
lf = logs.LoggerFactory()
lf.addConsoleHandler()
logger = lf.create()
db.load_database_config(logger=logger)
sql = "select locus_id, count(locus_id) as counts from pairs group by locus_id;"
counts = db.from_sql(sql, database=database)
counts["log_counts"] = np.log2(counts["counts"])
return np.sum(counts["log_counts"])
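# locus_entropy (below) normalises the per-allele counts of a single locus to
# probabilities and returns sum(p * log2(p)), which is the negative of the
# Shannon entropy (values are <= 0); richness() then averages this across
# loci, optionally weighted by locus occurrence.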
def locus_entropy(x):
prob = x / np.sum(x)
return np.sum(prob * np.log2(prob))
def richness(database, weighted=True):
lf = logs.LoggerFactory()
lf.addConsoleHandler()
logger = lf.create()
db.load_database_config(logger=logger)
sql = "select a.locus_id, a.allele_id, b.count" \
" from pairs as a" \
" left join (select allele_id, count from alleles) as b" \
" on a.allele_id=b.allele_id;"
counts = db.from_sql(sql, database=database)
ent = counts.groupby("locus_id").agg({"count": locus_entropy})
if weighted:
sql = "select locus_id, occurrence from loci;"
loci = db.from_sql(sql, database=database)
weight = pd.merge(ent, loci, left_index=True, right_on="locus_id")
return np.average(weight["count"], weights=weight["occurrence"])
else:
return np.average(ent)
def calculate_loci_coverage(input_dir, output_dir, database):
lf = logs.LoggerFactory()
lf.addConsoleHandler()
logger = lf.create()
db.load_database_config(logger=logger)
logger.info("Start calculating locus coverage...")
subject_number = count_subjects(input_dir)
logger.info("Start plotting locus coverage...")
plot_stats(output_dir, subject_number, database)
def count_subjects(input_dir):
input_file = os.path.join(input_dir, "allele_profiles.tsv")
with open(input_file, "r") as file:
first_line = file.readline()
genomes = first_line.strip().split("\t")
return len(genomes)
def plot_stats(output_dir, subject_number, database):
sql = "select locus_id, num_isolates, is_paralog from locus_meta where is_paralog=FALSE;"
table = db.from_sql(sql, database=database)
table["owned by"] = [int(x / subject_number * 100) for x in table["num_isolates"]]
plot_genome_coverage(table["owned by"], output_dir, perc=0)
plot_genome_coverage(table["owned by"], output_dir)
plot_genome_coverage(table["owned by"], output_dir, perc=0, cumulative=-1)
plot_genome_coverage(table["owned by"], output_dir, cumulative=-1)
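# Note: the 'cumulative' argument is forwarded to plt.hist below, and
# cumulative=-1 requests a reverse-cumulative histogram, i.e. for each x the
# count of loci present in at least x% of the genomes.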
def plot_genome_coverage(data, output_dir, perc=5, cumulative=False):
prefix = "cumulative_genome_coverage" if cumulative else "genome_coverage"
pic_name = "{}_{}_perc.png".format(prefix, perc) if perc != 0 else "{}.png".format(prefix)
output_file = os.path.join(output_dir, pic_name)
title = "Cumulative genome coverage distribution" if cumulative else "Genome coverage distribution"
# plot
fig = plt.figure(figsize=(12, 9))
plt.hist(data[data >= perc], bins=50, cumulative=cumulative, histtype="step", lw=2)
plt.title(title, fontsize=25)
plt.xlabel("Percentage of genomes covered by loci (%)", fontsize=18)
plt.ylabel("Number of loci", fontsize=18)
fig.savefig(output_file)
def calculate_allele_length(output_dir, database, interval=20):
lf = logs.LoggerFactory()
lf.addConsoleHandler()
logger = lf.create()
db.load_database_config(logger=logger)
logger.info("Start calculating allele length heatmap...")
plot_length_heatmap(output_dir, database, interval=interval)
def plot_length_heatmap(output_dir, database, interval):
output_file = os.path.join(output_dir, "allele_length_heatmap.png")
allele_info = get_allele_info(database)
allele_info["intervals"] = list(map(lambda x: (int(x / interval) + 1) * interval, allele_info["length"]))
pairs = db.from_sql("select * from pairs;", database=database)
collect = []
for locus_id, df in pairs.groupby("locus_id"):
df2 = pd.merge(df, allele_info, on="allele_id", how="left")
series = df2.groupby("intervals")["count"].sum()
series.name = locus_id
collect.append(series)
table = pd.concat(collect, axis=1).fillna(0).T
table = table.apply(lambda x: 100 * x / np.sum(x), axis=1)
# sort by scheme order
freq = db.from_sql("select locus_id from loci order by occurrence DESC;", database=database)
table =
|
pd.merge(freq, table, left_on="locus_id", right_index=True)
|
pandas.merge
|
############################
# calculate other measures
##########################
# Load the Pandas libraries with alias 'pd'
import pandas as pd
# for filename matching
import fnmatch
import os
# save into a new df
# object With column names only
df = pd.DataFrame(columns = ['sc', 'id_len', 'speed_mean_avg', 'speed_std_avg', 'throughput', 'lc_count', 'ZOV_density'])
print(df)
# auto-run for all scenarios
# for i in range(0,3):
for i in [15, 16, 17]:
subdir = "%02d" % i
print(subdir)
for file in os.listdir('./output/crystal_v2/sc'+subdir):
if fnmatch.fnmatch(file, '*_crystal.csv'):
print(file)
print('crystal' + subdir)
# try:
# data = pd.read_csv('sc' + subdir + '/' + file)
# except OSError:
# print("no file")
# pass
data =
|
pd.read_csv('./output/crystal_v2/sc' + subdir + '/' + file)
|
pandas.read_csv
|
import json
import os
import keras
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import requests
from keras.layers import Dropout, Dense, LSTM, TimeDistributed
from keras.models import Sequential
from sklearn.preprocessing import normalize, MinMaxScaler
"""
Created by <NAME> on 7/25/18.
Email : <EMAIL> or <EMAIL>
Website: http://ce.sharif.edu/~naghipourfar
Github: https://github.com/naghipourfar
Skype: mn7697np
"""
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = pd.DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j + 1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ..., t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j + 1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j + 1, i)) for j in range(n_vars)]
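# Worked sketch of the shifts above (one variable, n_in=1, n_out=1):
#   data = [[10], [20], [30]]
#   df.shift(1)  -> column var1(t-1) = [NaN, 10, 20]
#   df.shift(-0) -> column var1(t)   = [10, 20, 30]
# Concatenating the columns gives rows (NaN, 10), (10, 20), (20, 30); with the
# default dropnan=True the NaN row would then be dropped.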
# put it all together
agg =
|
pd.concat(cols, axis=1)
|
pandas.concat
|
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
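# np.asarray(dr) yields datetime64 values whose nanosecond offsets from the
# epoch are reinterpreted as timedeltas, so 2016-01-15 becomes
# Timedelta('16815 days') and 2016-01-20 becomes Timedelta('16820 days').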
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (maybe could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def test_ops(self):
td = Timedelta(10, unit='d')
self.assertEqual(-td, Timedelta(-10, unit='d'))
self.assertEqual(+td, Timedelta(10, unit='d'))
self.assertEqual(td - td, Timedelta(0, unit='ns'))
self.assertTrue((td - pd.NaT) is pd.NaT)
self.assertEqual(td + td, Timedelta(20, unit='d'))
self.assertTrue((td + pd.NaT) is pd.NaT)
self.assertEqual(td * 2, Timedelta(20, unit='d'))
self.assertTrue((td * pd.NaT) is pd.NaT)
self.assertEqual(td / 2, Timedelta(5, unit='d'))
self.assertEqual(abs(td), td)
self.assertEqual(abs(-td), td)
self.assertEqual(td / td, 1)
self.assertTrue((td / pd.NaT) is np.nan)
# invert
self.assertEqual(-td, Timedelta('-10d'))
self.assertEqual(td * -1, Timedelta('-10d'))
self.assertEqual(-1 * td, Timedelta('-10d'))
self.assertEqual(abs(-td), Timedelta('10d'))
# invalid
self.assertRaises(TypeError, lambda: Timedelta(11, unit='d') // 2)
# invalid multiply with another timedelta
self.assertRaises(TypeError, lambda: td * td)
# can't operate with integers
self.assertRaises(TypeError, lambda: td + 2)
self.assertRaises(TypeError, lambda: td - 2)
def test_ops_offsets(self):
td = Timedelta(10, unit='d')
self.assertEqual(Timedelta(241, unit='h'), td + pd.offsets.Hour(1))
self.assertEqual(Timedelta(241, unit='h'), pd.offsets.Hour(1) + td)
self.assertEqual(240, td / pd.offsets.Hour(1))
self.assertEqual(1 / 240.0, pd.offsets.Hour(1) / td)
self.assertEqual(Timedelta(239, unit='h'), td - pd.offsets.Hour(1))
self.assertEqual(Timedelta(-239, unit='h'), pd.offsets.Hour(1) - td)
def test_ops_ndarray(self):
td = Timedelta('1 day')
# timedelta, timedelta
other = pd.to_timedelta(['1 day']).values
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
self.assertRaises(TypeError, lambda: td + np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) + td)
expected = pd.to_timedelta(['0 days']).values
self.assert_numpy_array_equal(td - other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(-other + td, expected)
self.assertRaises(TypeError, lambda: td - np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) - td)
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td * np.array([2]), expected)
self.assert_numpy_array_equal(np.array([2]) * td, expected)
self.assertRaises(TypeError, lambda: td * other)
self.assertRaises(TypeError, lambda: other * td)
self.assert_numpy_array_equal(td / other,
np.array([1], dtype=np.float64))
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other / td,
np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(['2000-01-01']).values
expected = pd.to_datetime(['2000-01-02']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(['1999-12-31']).values
self.assert_numpy_array_equal(-td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other - td, expected)
def test_ops_series(self):
# regression test for GH8813
td = Timedelta('1 day')
other = pd.Series([1, 2])
expected = pd.Series(pd.to_timedelta(['1 day', '2 days']))
tm.assert_series_equal(expected, td * other)
tm.assert_series_equal(expected, other * td)
def test_ops_series_object(self):
# GH 13043
s = pd.Series([pd.Timestamp('2015-01-01', tz='US/Eastern'),
pd.Timestamp('2015-01-01', tz='Asia/Tokyo')],
name='xxx')
self.assertEqual(s.dtype, object)
exp = pd.Series([pd.Timestamp('2015-01-02', tz='US/Eastern'),
pd.Timestamp('2015-01-02', tz='Asia/Tokyo')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('1 days'), exp)
tm.assert_series_equal(pd.Timedelta('1 days') + s, exp)
# object series & object series
s2 = pd.Series([pd.Timestamp('2015-01-03', tz='US/Eastern'),
pd.Timestamp('2015-01-05', tz='Asia/Tokyo')],
name='xxx')
self.assertEqual(s2.dtype, object)
exp = pd.Series([pd.Timedelta('2 days'), pd.Timedelta('4 days')],
name='xxx')
tm.assert_series_equal(s2 - s, exp)
tm.assert_series_equal(s - s2, -exp)
s = pd.Series([pd.Timedelta('01:00:00'), pd.Timedelta('02:00:00')],
name='xxx', dtype=object)
self.assertEqual(s.dtype, object)
exp = pd.Series([pd.Timedelta('01:30:00'), pd.Timedelta('02:30:00')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('00:30:00'), exp)
tm.assert_series_equal(pd.Timedelta('00:30:00') + s, exp)
def test_ops_notimplemented(self):
class Other:
pass
other = Other()
td = Timedelta('1 day')
self.assertTrue(td.__add__(other) is NotImplemented)
self.assertTrue(td.__sub__(other) is NotImplemented)
self.assertTrue(td.__truediv__(other) is NotImplemented)
self.assertTrue(td.__mul__(other) is NotImplemented)
self.assertTrue(td.__floordiv__(td) is NotImplemented)
def test_ops_error_str(self):
# GH 13624
tdi = TimedeltaIndex(['1 day', '2 days'])
for l, r in [(tdi, 'a'), ('a', tdi)]:
with tm.assertRaises(TypeError):
l + r
with tm.assertRaises(TypeError):
l > r
with tm.assertRaises(TypeError):
l == r
with tm.assertRaises(TypeError):
l != r
def test_timedelta_ops(self):
# GH4984
# make sure ops return Timedelta
s = Series([Timestamp('20130101') + timedelta(seconds=i * i)
for i in range(10)])
td = s.diff()
result = td.mean()
expected = to_timedelta(timedelta(seconds=9))
self.assertEqual(result, expected)
result = td.to_frame().mean()
self.assertEqual(result[0], expected)
result = td.quantile(.1)
expected = Timedelta(np.timedelta64(2600, 'ms'))
self.assertEqual(result, expected)
result = td.median()
expected = to_timedelta('00:00:09')
self.assertEqual(result, expected)
result = td.to_frame().median()
self.assertEqual(result[0], expected)
# GH 6462
# consistency in returned values for sum
result = td.sum()
expected = to_timedelta('00:01:21')
self.assertEqual(result, expected)
result = td.to_frame().sum()
self.assertEqual(result[0], expected)
# std
result = td.std()
expected = to_timedelta(Series(td.dropna().values).std())
self.assertEqual(result, expected)
result = td.to_frame().std()
self.assertEqual(result[0], expected)
# invalid ops
for op in ['skew', 'kurt', 'sem', 'prod']:
self.assertRaises(TypeError, getattr(td, op))
# GH 10040
# make sure NaT is properly handled by median()
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07')])
self.assertEqual(s.diff().median(), timedelta(days=4))
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07'),
Timestamp('2015-02-15')])
self.assertEqual(s.diff().median(), timedelta(days=6))
def test_timedelta_ops_scalar(self):
# GH 6808
base = pd.to_datetime('20130101 09:01:12.123456')
expected_add = pd.to_datetime('20130101 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta(10, unit='s'), timedelta(seconds=10),
np.timedelta64(10, 's'),
np.timedelta64(10000000000, 'ns'),
pd.offsets.Second(10)]:
result = base + offset
self.assertEqual(result, expected_add)
result = base - offset
self.assertEqual(result, expected_sub)
base = pd.to_datetime('20130102 09:01:12.123456')
expected_add = pd.to_datetime('20130103 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta('1 day, 00:00:10'),
pd.to_timedelta('1 days, 00:00:10'),
timedelta(days=1, seconds=10),
np.timedelta64(1, 'D') + np.timedelta64(10, 's'),
pd.offsets.Day() + pd.offsets.Second(10)]:
result = base + offset
self.assertEqual(result, expected_add)
result = base - offset
self.assertEqual(result, expected_sub)
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = DataFrame(['00:00:02']).apply(pd.to_timedelta)
dfn = DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
NA = np.nan
actual = scalar1 + scalar1
self.assertEqual(actual, scalar2)
actual = scalar2 - scalar1
self.assertEqual(actual, scalar1)
actual = s1 + s1
assert_series_equal(actual, s2)
actual = s2 - s1
assert_series_equal(actual, s1)
actual = s1 + scalar1
assert_series_equal(actual, s2)
actual = scalar1 + s1
assert_series_equal(actual, s2)
actual = s2 - scalar1
assert_series_equal(actual, s1)
actual = -scalar1 + s2
assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
assert_series_equal(actual, sn)
actual = s1 + NA
assert_series_equal(actual, sn)
actual = NA + s1
assert_series_equal(actual, sn)
actual = s1 - NA
assert_series_equal(actual, sn)
actual = -NA + s1
assert_series_equal(actual, sn)
actual = s1 + pd.NaT
assert_series_equal(actual, sn)
actual = s2 - pd.NaT
assert_series_equal(actual, sn)
actual = s1 + df1
assert_frame_equal(actual, df2)
actual = s2 - df1
assert_frame_equal(actual, df1)
actual = df1 + s1
assert_frame_equal(actual, df2)
actual = df2 - s1
assert_frame_equal(actual, df1)
actual = df1 + df1
assert_frame_equal(actual, df2)
actual = df2 - df1
assert_frame_equal(actual, df1)
actual = df1 + scalar1
assert_frame_equal(actual, df2)
actual = df2 - scalar1
assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
assert_frame_equal(actual, dfn)
actual = df1 + NA
assert_frame_equal(actual, dfn)
actual = df1 - NA
assert_frame_equal(actual, dfn)
actual = df1 + pd.NaT # NaT is datetime, not timedelta
assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
assert_frame_equal(actual, dfn)
def test_compare_timedelta_series(self):
# regresssion test for GH5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_compare_timedelta_ndarray(self):
# GH11835
periods = [Timedelta('0 days 01:00:00'), Timedelta('0 days 01:00:00')]
arr = np.array(periods)
result = arr[0] > arr
expected = np.array([False, False])
self.assert_numpy_array_equal(result, expected)
class TestSlicing(tm.TestCase):
def test_tdi_ops_attributes(self):
rng = timedelta_range('2 days', periods=5, freq='2D', name='x')
result = rng + 1
exp = timedelta_range('4 days', periods=5, freq='2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '2D')
result = rng - 2
exp = timedelta_range('-2 days', periods=5, freq='2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '2D')
result = rng * 2
exp = timedelta_range('4 days', periods=5, freq='4D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '4D')
result = rng / 2
exp = timedelta_range('1 days', periods=5, freq='D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, 'D')
result = -rng
exp = timedelta_range('-2 days', periods=5, freq='-2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '-2D')
rng = pd.timedelta_range('-2 days', periods=5, freq='D', name='x')
result = abs(rng)
exp = TimedeltaIndex(['2 days', '1 days', '0 days', '1 days',
'2 days'], name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, None)
def test_add_overflow(self):
# see gh-14068
msg = "too (big|large) to convert"
with tm.assertRaisesRegexp(OverflowError, msg):
to_timedelta(106580, 'D') + Timestamp('2000')
with tm.assertRaisesRegexp(OverflowError, msg):
Timestamp('2000') + to_timedelta(106580, 'D')
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with tm.assertRaisesRegexp(OverflowError, msg):
to_timedelta([106580], 'D') + Timestamp('2000')
with tm.assertRaisesRegexp(OverflowError, msg):
Timestamp('2000') + to_timedelta([106580], 'D')
with tm.assertRaisesRegexp(OverflowError, msg):
to_timedelta([_NaT]) - Timedelta('1 days')
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 25 11:33:55 2020
@author: User
"""
import sys
from pathlib import Path
import functools
import inspect
# import collections
from collections import Counter
import pickle
# import types
# import post_helper
# import plotting
import matplotlib.pyplot as plt
import matplotlib as mpl
from scipy.stats import linregress, zscore
import pandas as pd
import numpy as np
import datetime as dt
mpl.style.use("seaborn")
mpl.rcParams["figure.dpi"] = 100
# from sklearn.cluster import KMeans
# print ('Name prepare input:', __name__ )
if __name__ == "__main__":
# print(f'Package: {__package__}, File: {__file__}')
# FH_path = Path(__file__).parent.parent.parent.joinpath('FileHelper')
# sys.path.append(str(FH_path))
# sys.path.append(str(Path(__file__).parent.parent.joinpath('indexer')))
sys.path.append(str(Path(__file__).parent.parent.parent))
# sys.path.append("..")
# print(sys.path)
# import FileHelper
from FileHelper.PostChar import Characterization_TypeSetting, SampleCodesChar
from FileHelper.PostPlotting import *
from FileHelper.FindSampleID import GetSampleID
from FileHelper.FindFolders import FindExpFolder
# from FileHelper.FileFunctions.FileOperations import PDreadXLorCSV
from collect_load import Load_from_Indexes, CollectLoadPars
# from FileHelper.FindExpFolder import FindExpFolder
from plotting import eisplot
from prep_postchar import postChar
import EIS_export
elif "prepare_input" in __name__:
pass
# import RunEC_classifier
# from FileHelper.FindSampleID import FindSampleID
import logging
_logger = logging.getLogger(__name__)
# from FileHelper.PostChar import SampleSelection, Characterization_TypeSetting
def mkfolder(folder):
folder.mkdir(exist_ok=True, parents=True)
return folder
def filter_cols(_df, n):
if any(["startswith" in i for i in n]):
_lst = [i for i in _df.columns if i.startswith(n[-1])]
else:
_lst = [i for i in _df.columns if n[-1] in i]
return _lst
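# Usage sketch (hypothetical column names): filter_cols(df, ['startswith', 'ORR_'])
# returns the columns beginning with 'ORR_', while filter_cols(df, ['Jkin']) returns
# every column containing 'Jkin'; only the last element of n is used as the pattern.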
OriginColors = Characterization_TypeSetting.OriginColorList()
Pfolder = FindExpFolder().TopDir.joinpath(
Path("Preparation-Thesis/SiO2_projects/SiO2_Me_ECdepth+LC")
)
plotsfolder = mkfolder(Pfolder.joinpath("correlation_plots"))
EC_folder = Pfolder.joinpath("EC_data")
EC_index, SampleCodes = Load_from_Indexes.get_EC_index()
print("finished")
# SampleCodesChar().load
def multiIndex_pivot(df, index=None, columns=None, values=None):
# https://github.com/pandas-dev/pandas/issues/23955
output_df = df.copy(deep=True)
if index is None:
names = list(output_df.index.names)
output_df = output_df.reset_index()
else:
names = index
output_df = output_df.assign(
tuples_index=[tuple(i) for i in output_df[names].values]
)
if isinstance(columns, list):
output_df = output_df.assign(
tuples_columns=[tuple(i) for i in output_df[columns].values]
) # hashable
output_df = output_df.pivot(
index="tuples_index", columns="tuples_columns", values=values
)
output_df.columns = pd.MultiIndex.from_tuples(
output_df.columns, names=columns
) # reduced
else:
output_df = output_df.pivot(
index="tuples_index", columns=columns, values=values
)
output_df.index = pd.MultiIndex.from_tuples(output_df.index, names=names)
return output_df
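# Minimal usage sketch for multiIndex_pivot on a hypothetical toy frame (not project
# data); the function is meant to be called via DataFrame.pipe, as in edit_pars_N2cv.
def _multiIndex_pivot_example():
    toy = pd.DataFrame(
        {
            "SampleID": ["a", "a", "b", "b"],
            "Sweep_Type": ["anodic", "cathodic", "anodic", "cathodic"],
            "E_RHE_mV": [100, 100, 200, 200],
            "Cdl": [1.0, 2.0, 3.0, 4.0],
        }
    )
    # rows become a MultiIndex (SampleID, Sweep_Type); columns come from E_RHE_mV
    return toy.pipe(
        multiIndex_pivot,
        index=["SampleID", "Sweep_Type"],
        columns=["E_RHE_mV"],
        values="Cdl",
    )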
def get_float_cols(df):
return [key for key, val in df.dtypes.to_dict().items() if "float64" in str(val)]
def cm2inch(value):
return value / 2.54
# class PorphSamples():
# def __init__(self):
# self.template = PorphSamples.template()
def decorator(func):
@functools.wraps(func)
def wrapper_decorator(*args, **kwargs):
# Do something before
value = func(*args, **kwargs)
# Do something after
return value
return wrapper_decorator
def read_load_pkl(_pklstem):
_pklpath = EC_PorphSiO2.folder.joinpath(_pklstem).with_suffix(".pkl")
if _pklpath.exists():
try:
print("pkl reloading:", _pklpath)
DF_diff = pd.read_pickle(_pklpath)
DF_diff.columns
return DF_diff
except Exception as e:
print("reading error", e)
return pd.DataFrame()
else:
print("read error not existing", _pklpath)
return pd.DataFrame()
def save_DF_pkl(_pklstem, _DF):
_pklpath = EC_PorphSiO2.folder.joinpath(_pklstem).with_suffix(".pkl")
try:
print("pkl saving to:", _pklpath)
_DF.to_pickle(_pklpath)
except Exception as e:
print("pkl saving error", e, _pklpath)
return _pklpath
def load_dict_pkl(_pklstem):
_pklpath = EC_PorphSiO2.folder.joinpath(_pklstem).with_suffix(".pkl")
if _pklpath.exists():
try:
print("pkl reloading:", _pklpath)
with open(_pklpath, "rb") as file:
_dict = pickle.load(file)
return _dict
except Exception as e:
print("reading error", e)
return {}
else:
print("read error not existing", _pklpath)
return {}
def save_dict_pkl(_pklstem, _dict):
_pklpath = EC_PorphSiO2.folder.joinpath(_pklstem).with_suffix(".pkl")
try:
print("pkl saving to:", _pklpath)
with open(_pklpath, "wb") as file:
pickle.dump(_dict, file)
except Exception as e:
print("pkl saving error", e, _pklpath)
return _pklpath
def PorphSiO2_template():
# 'SerieIDs' : ('Porph_SiO2')*5,
Series_Porph_SiO2 = {
"SampleID": ("JOS1", "JOS2", "JOS3", "JOS4", "JOS5"),
"Metal": ("Fe", "Co", "MnTPP", "FeTPP", "H2"),
"color": (2, 4, 6, 15, 3),
}
Porphyrins = {
"TMPP": {"Formula": "C48H38N4O4", "MW": 734.8382},
"TMPP-Fe(III)Cl": {"Formula": "C48H36ClFeN4O4", "MW": 824.1204},
"TMPP-Co(II)": {"Formula": "C48H36CoN4O4", "MW": 791.7556},
"TTP-Mn(III)Cl": {"Formula": "C44H28ClMnN4", "MW": 703.1098},
"TPP-Fe(III)Cl": {"Formula": "C44H28ClFeN4", "MW": 704.0168},
"TPP": {"Formula": "C44H30N4", "MW": 614.7346},
}
Porph_template = pd.DataFrame(Series_Porph_SiO2)
return Porph_template
def EC_types_grp():
# KL ['ORR_E_AppV_RHE', 'ORR_KL_E_AppV_RHE','Electrode']
_basic_EC_cond = ["postAST_post", "Sweep_Type", "pH", "Loading_cm2"]
_extra_EC_cond = {
"N2CV": [],
"N2": [],
"ORR": ["RPM_DAC_uni"],
"KL": ["Electrode", "ORR_E_AppV_RHE"],
"EIS": ["E_RHE"],
"HER": ["HER_RPM_post"],
"OER": [],
}
_out = {key: _basic_EC_cond + val for key, val in _extra_EC_cond.items()}
return _out
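# Example of the grouping columns produced above, e.g.:
# EC_types_grp()['ORR'] == ['postAST_post', 'Sweep_Type', 'pH', 'Loading_cm2', 'RPM_DAC_uni']
# EC_types_grp()['EIS'] == ['postAST_post', 'Sweep_Type', 'pH', 'Loading_cm2', 'E_RHE']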
def save_EC_index_PorphSiO2(EC_index, EC_folder):
_porph_index = EC_index.loc[EC_index.SampleID.isin(PorphSiO2_template().SampleID)]
_porph_index.to_excel(EC_folder.joinpath("EC_index_PorphSiO2.xlsx"))
# save_EC_index_PorphSiO2(EC_index, EC_folder)
class EC_PorphSiO2:
folder = FindExpFolder("PorphSiO2").compare
Porph_template = PorphSiO2_template()
# globals EC_index
# ['Model(Singh2015_RQRQ)', 'Model(Singh2015_RQRQR)', 'Model(Bandarenka_2011_RQRQR)',
# 'Model(Singh2015_RQRWR)', 'Model(Randles_RQRQ)', 'Model(Singh2015_R3RQ)']
# model_select = EC_PorphSiO2.EIS_models[1]
# self = EC_PorphSiO2()
def __init__(self):
# self.index, self.AST_days = EC_PorphSiO2.select_ECexps(EC_folder)
self.select_EC_ASTexps_from_ECindex()
# self.pars = EC_PorphSiO2.mergedEC()
# self.par_export = EC_OHC.to_excel(self.folder.joinpath('EC_ORR_HPRR.xlsx'))
def select_EC_ASTexps_from_ECindex(self):
EC_idx_PorphSiO2_samples = EC_index.loc[
EC_index.SampleID.isin(self.Porph_template.SampleID.unique())
]
# pd.read_excel(list(EC_folder.rglob('*EC_index*'))[0])
EC_idx_PorphSiO2_samples = EC_idx_PorphSiO2_samples.assign(
**{
"PAR_date_day_dt": [
dt.date.fromisoformat(np.datetime_as_string(np.datetime64(i, "D")))
for i in EC_idx_PorphSiO2_samples.PAR_date.to_numpy()
]
}
)
self.EC_idx_PorphSiO2_samples = EC_idx_PorphSiO2_samples
self.get_AST_days()
# LC_idx_fp = list(EC_folder.rglob('*EC_index*'))[0].parent.joinpath('LC_index.xlsx')
EC_idx_PorphSiO2_AST = EC_idx_PorphSiO2_samples.loc[
EC_idx_PorphSiO2_samples.PAR_date_day_dt.isin(
[i for a in self.AST_days.to_numpy() for i in a]
)
]
# AST_days = EC_PorphSiO2.get_AST_days()
# EC_idx_PorphSiO2_AST.to_excel(list(EC_folder.rglob('*EC_index*'))[0].parent.joinpath('LC_index.xlsx'))
self.EC_idx_PorphSiO2 = EC_idx_PorphSiO2_AST
# if LC_idx_fp.exists():
# else:
# try:
# LC_fls = pd.read_excel(LC_idx_fp,index_col=[0])
# except Exception as e:
# print(f'Excel load fail: {e}\n,file: {LC_idx_fp}')
# LC_fls = pd.DataFrame()
# return LC_fls, AST_days
def get_AST_days(self):
gr_idx = self.EC_idx_PorphSiO2_samples.groupby("PAR_date_day_dt")
AST_days = []
for n, gr in gr_idx:
# n,gr
exps = gr.PAR_exp.unique()
# gr.PAR_date_day.unique()[0]
if any(["AST" in i for i in exps]):
# print(n,exps)
# AST_days.append(n)
if n + dt.timedelta(1) in gr_idx.groups.keys():
_post = gr_idx.get_group(n + dt.timedelta(1))
# print(n + dt.timedelta(1), gr_idx.get_group(n + dt.timedelta(1)))
AST_days.append((n, n + dt.timedelta(1)))
else:
AST_days.append((n, n))
print(n + dt.timedelta(1), "grp missing")
# (AST_days[-1][0], AST_days[0][1])
# AST_days.append((dt.date(2019,5,6), dt.date(2019,1,25)))
# AST_days.append((dt.date(2019,5,6), dt.date(2019,1,26)))
_extra_AST_days = [
(dt.date(2019, 5, 6), dt.date(2019, 1, 25)),
(dt.date(2019, 5, 6), dt.date(2019, 1, 26)),
]
AST_days += _extra_AST_days
AST_days = pd.DataFrame(
AST_days, columns=["PAR_date_day_dt_pre", "PAR_date_day_dt_post"]
)
AST_days = AST_days.assign(
**{
"PAR_date_day_dt_diff": AST_days.PAR_date_day_dt_pre
- AST_days.PAR_date_day_dt_post
}
)
self.AST_days = AST_days
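# Sketch of self.AST_days: one row per pre/post AST day pair, e.g. an AST day paired
# with the following measurement day when it exists, otherwise paired with itself;
# the two _extra_AST_days tuples above are kept as manual matches and
# PAR_date_day_dt_diff holds the (pre - post) timedelta.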
# def select_ECexps(EC_folder):
# LC_idx_fp = list(EC_folder.rglob('*EC_index*'))[0].parent.joinpath('LC_index.xlsx')
# AST_days = EC_PorphSiO2.get_AST_days()
# if LC_idx_fp.exists():
# LC_fls = EC_PorphSiO2.EC_idx_PorphSiO2.loc[EC_PorphSiO2.EC_idx_PorphSiO2.PAR_date_day_dt.isin([i for a in AST_days.to_numpy() for i in a])]
# LC_fls.to_excel(list(EC_folder.rglob('*EC_index*'))[0].parent.joinpath('LC_index.xlsx'))
# else:
# try:
# LC_fls = pd.read_excel(LC_idx_fp,index_col=[0])
# except Exception as e:
# print(f'Excel load fail: {e}\n,file: {LC_idx_fp}')
# LC_fls = pd.DataFrame()
# return LC_fls, AST_days
# def repr_index(self):
# PAR_exp_uniq = {grn : len(grp) for grn,grp in self.index.groupby("PAR_exp")}
# print(f'Len({len(self.index)},\n{PAR_exp_uniq}')
def _testing_():
tt = EC_prepare_EC_merged(reload_AST=True, reload_merged=True, reload_pars=True)
self = tt
N2CV = self.edit_pars_N2cv(reload=False, use_daily=True)
#%% == EC_prepare_EC_merged == testing
class EC_prepare_EC_merged:
EIS_models = EIS_export.EIS_selection.mod_select
# ['Model(EEC_Randles_RWpCPE)', 'Model(EEC_2CPE)', 'Model(EEC_2CPEpW)',
# 'Model(EEC_RQ_RQ_RW)', 'Model(EEC_RQ_RQ_RQ)', 'Model(Randles_RQRQ)']
ORR_reload = dict(reload=True, use_daily=False)
ORR_no_reload = dict(reload=False, use_daily=True)
use_daily = True
# global ParsColl
# ParsColl = ParsColl
mcols = [i for i in Load_from_Indexes.EC_label_cols if i not in ["PAR_file"]] + [
"Sweep_Type"
]
_pkl_EC_merged = "EC_merged_dict"
def __init__(self, reload_AST=False, reload_merged=False, reload_pars=True):
self.reload_AST = reload_AST
self.reload_merged = reload_merged
self.reload_pars = reload_pars
self.set_pars_collection()
self.reload_pars_kws = dict(reload=reload_pars, use_daily=self.use_daily)
self.EC_merged_dict = {}
self.load_EC_PorphSiO2()
self.load_merged_EC()
def set_pars_collection(self):
if "ParsColl" in globals().keys():
self.ParsColl = ParsColl
else:
Pars_Collection = CollectLoadPars(load_type="fast")
# globals()['Pars_Collection'] = Pars_Collection
ParsColl = Pars_Collection.pars_collection
self.ParsColl = ParsColl
def load_EC_PorphSiO2(self):
self.EC_PorphSiO2 = EC_PorphSiO2()
self.AST_days = self.EC_PorphSiO2.AST_days
self.EC_idx_PorphSiO2 = self.EC_PorphSiO2.EC_idx_PorphSiO2
def load_merged_EC(self):
if self.reload_merged:
self.reload_merged_EC()
if not self.EC_merged_dict:
_load_EC_merge = load_dict_pkl(self._pkl_EC_merged)
if _load_EC_merge:
self.EC_merged_dict = _load_EC_merge
def reload_merged_EC(self):
try:
self.load_N2CV()
self.load_ORR()
self.load_KL()
self.load_EIS()
self.load_HER()
self.add_filter_selection_of_EC_merged()
save_dict_pkl(self._pkl_EC_merged, self.EC_merged_dict)
except Exception as e:
_logger.warning(f"EC_prepare_EC_merged, reload_merged_EC failure: {e}")
def get_AST_matches(self, DF, _verbose=False):
# LC_fls, AST_days = EC_PorphSiO2.select_ECexps(EC_folder)
# DF = ORR.drop_duplicates()
# DF = N2CV.drop_duplicates()
# DF = EIS.drop_duplicates()
# DF = HER.drop_duplicates()
# DF = ttpars
if "PAR_date_day_dt" not in DF.columns:
DF = DF.assign(
**{
"PAR_date_day_dt": [
dt.date.fromisoformat(
np.datetime_as_string(np.datetime64(i, "D"))
)
for i in DF.PAR_date.to_numpy()
]
}
)
DF.PAR_date_day_dt = pd.to_datetime(DF.PAR_date_day_dt, unit="D")
# list((set(DF.columns).intersection(set(LC_fls.columns))).intersection(set(mcols) ))
# DF = pd.merge(DF,LC_fls,on=)
_compare_cols = [
i for i in ["SampleID", "pH", "Gas", "Loading_cm2"] if i in DF.columns
]
_swp_rpm = [
"Sweep_Type",
"RPM_DAC_uni" if "RPM_DAC_uni" in DF.columns else "RPM_DAC",
]
_coll = []
# AST_days_run_lst = [i for i in AST_days if len(i) == 2][-1:]
for n, r in self.AST_days.iterrows():
# if len(_dates) == 2:
# _pre,_post = _dates
# elif (len_dates) == 1:
_pre, _post = r.PAR_date_day_dt_pre, r.PAR_date_day_dt_post
_preslice = DF.loc[
(DF.PAR_date_day == _pre.strftime("%Y-%m-%d")) & (DF.postAST == "no")
]
pre = _preslice.groupby(_compare_cols)
_postslice = DF.loc[
(DF.PAR_date_day == _post.strftime("%Y-%m-%d")) & (DF.postAST != "no")
]
post = _postslice.groupby(_compare_cols)
_res = {}
_res = {
"pre_PAR_date_day_dt": _pre,
"post_PAR_date_day_dt": _post,
"AST_days_n": n,
}
# print(_res,[_preslice.postAST.unique()[0], _postslice.postAST.unique()[0]])
union = set(pre.groups.keys()).union(set(post.groups.keys()))
matches = set(pre.groups.keys()).intersection(set(post.groups.keys()))
_difference_pre = set(pre.groups.keys()).difference(set(post.groups.keys()))
_difference_post = set(post.groups.keys()).difference(
set(pre.groups.keys())
)
# _diffr.append((_pre,_post,_difference_pre, _difference_post))
if not _preslice.empty and not _postslice.empty:
for match in union:
_res.update(dict(zip(_compare_cols, match)))
_mgrpcols = ["PAR_file", "dupli_num", "postAST"]
if match in matches:
_mpre = pre.get_group(match).groupby(_mgrpcols)
_mpost = post.get_group(match).groupby(_mgrpcols)
elif match in _difference_pre:
_mpre = pre.get_group(match).groupby(_mgrpcols)
_mpost = pre.get_group(match).groupby(_mgrpcols)
elif match in _difference_post:
_mpre = post.get_group(match).groupby(_mgrpcols)
_mpost = post.get_group(match).groupby(_mgrpcols)
# print(_mpost.groups)
for (_prePF, npr, _preAST), prgrp in _mpre:
_res.update(
{
"pre_dupli_num": npr,
"pre_PAR_file": _prePF,
"pre_postAST": _preAST,
}
)
for (_poPF, npo, _postAST), pogrp in _mpost:
_res.update(
{
"post_dupli_num": npo,
"post_PAR_file": _poPF,
"post_postAST": _postAST,
"dupli_num_combo": f"{npr}, {npo}",
}
)
if _postAST in "postAST_sHA|postAST_LC" and _verbose:
print(_res)
_pr1 = prgrp.groupby(_swp_rpm)
_po1 = pogrp.groupby(_swp_rpm)
_rpmswp_matches = set(_pr1.groups.keys()).intersection(
set(_po1.groups.keys())
)
for _m in _rpmswp_matches:
_res.update(dict(zip(_swp_rpm, _m)))
# print(_res)
_coll.append(_res.copy())
AST_matches = pd.DataFrame(_coll)
return AST_matches
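# Sketch of the returned AST_matches frame (keys taken from _res above): one row per
# matched pre/post experiment pair with columns such as 'pre_PAR_date_day_dt',
# 'post_PAR_date_day_dt', 'AST_days_n', the _compare_cols ('SampleID', 'pH', 'Gas',
# 'Loading_cm2' when present), 'pre_PAR_file'/'post_PAR_file', 'pre_postAST'/
# 'post_postAST', 'dupli_num_combo' and the sweep/RPM columns; it is consumed as
# _AST_in by compare_AST_pars below.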
# prgrp.groupby(['Sweep_Type','RPM_DAC']).groups
# prgrp['ORR_Jkin_min_700']-pogrp['ORR_Jkin_min_700']
def load_N2CV(self):
N2CV = self.edit_pars_N2cv(**self.reload_pars_kws)
# N2_pltqry = EC_merged_dict.get('N2CV')
N2_AST = self.get_AST_matches(N2CV)
N2_AST_diff = self.compare_AST_pars(N2CV, N2_AST, reload=self.reload_AST)
# _DFtype = EC_PorphSiO2.sense_DF_type(N2CV)
# EC_merged_dict.update({'N2CV' : N2_AST_diff})
self.EC_merged_dict.update(
{"N2CV": {"PARS": N2CV, "AST_matches": N2_AST, "AST_diff": N2_AST_diff}}
)
def load_ORR(self, _testing=False):
ORR = self.edit_pars_ORR()
ORR_AST = self.get_AST_matches(ORR)
ORR_AST_diff = self.compare_AST_pars(ORR, ORR_AST, reload=self.reload_AST)
if _testing:
ttpars = ORR.query('RPM_DAC_uni > 1000 & Sweep_Type == "cathodic"')
tt_AST = self.get_AST_matches(ttpars)
tt = ORR_AST.query('RPM_DAC_uni > 1000 & Sweep_Type == "cathodic"')
tt_diff = self.compare_AST_pars(ORR, tt, reload=self.reload_AST, save_pkl=False)
# ttpfs = ORR.loc[ORR.ORR_Jkin_max_700 > 0].PAR_file.unique()
# ttpfs = ORR.query('Sweep_Type == "mean"').loc[ORR.ORR_E_onset > 0.85].PAR_file.unique()
# ORR.loc[(ORR.ORR_E_onset > 0.85) & (ORR.Sweep_Type == 'cathodic')].PAR_file.unique()
# EC_merged_dict.update({'ORR' : ORR_AST_diff})
self.EC_merged_dict.update(
{"ORR": {"PARS": ORR, "AST_matches": ORR_AST, "AST_diff": ORR_AST_diff}}
)
def load_KL(self):
KL = self.edit_pars_KL()
KL = KL.assign(**{"RPM_DAC": 1500})
KL_AST = self.get_AST_matches(KL)
KL_AST_diff = self.compare_AST_pars(KL, KL_AST, reload=self.reload_AST)
# EC_merged_dict.update({'KL' : KL_AST_diff})
self.EC_merged_dict.update(
{"KL": {"PARS": KL, "AST_matches": KL_AST, "AST_diff": KL_AST_diff}}
)
def load_EIS(self):
EIS = self.edit_pars_EIS()
EIS_AST = self.get_AST_matches(EIS)
EIS_AST_diff = self.compare_AST_pars(EIS, EIS_AST, reload=self.reload_AST)
# EC_merged_dict.update({'EIS' : EIS_AST_diff})
self.EC_merged_dict.update(
{"EIS": {"PARS": EIS, "AST_matches": EIS_AST, "AST_diff": EIS_AST_diff}}
)
def load_HER(self):
HER = self.edit_pars_HER()
HER_type_grp = HER.groupby("HER_type")
HER.HER_at_E_slice = HER.HER_at_E_slice.round(3)
HER_AST = self.get_AST_matches(HER)
for Htype, Hgrp in HER_type_grp:
# Htype, Hgrp = 'E_slice', HER.loc[HER.groupby('HER_type').groups['E_slice']]
HER_AST_diff = self.compare_AST_pars(
Hgrp, HER_AST, reload=self.reload_AST, extra=Htype
)
try:
if not HER_AST_diff.empty:
self.EC_merged_dict.update(
{
f"HER_{Htype}": {
"PARS": Hgrp,
"AST_matches": HER_AST,
"AST_diff": HER_AST_diff,
}
}
)
except Exception as e:
print(f"HER {Htype} fail, {e}")
# EC_merged_dict.update({f'HER_{Htype}' : HER_AST_diff})
def finish_EC_merged(self):
# add_filter_selection_of_EC_merged() updates self.EC_merged_dict in place
self.add_filter_selection_of_EC_merged()
save_dict_pkl(self._pkl_EC_merged, self.EC_merged_dict)
# EC_merged_dict = load_dict_pkl(_pkl_EC_merged)
def add_filter_selection_of_EC_merged(self):
_drop_AST_row_pre = [
"2019-01-25;N2_20cls_300_100_10_JOS5_256;no;0",
"2019-01-25;N2_20cls_300_100_10_JOS4_256;no;0",
]
_check_cols = [
"SampleID",
"AST_row",
"PAR_date_day_dt_pre",
"PAR_date_day_dt_post",
"postAST_post",
]
_srt2 = ["postAST_post", "SampleID"]
_ch_match = [
"SampleID",
"pre_PAR_date_day_dt",
"post_PAR_date_day_dt",
"post_postAST",
"pre_postAST",
]
_sortcols = ["SampleID", "post_postAST"][::-1]
pd.set_option("display.max_columns", 6)
pd.set_option("display.width", 100)
for _EC, _DF in self.EC_merged_dict.items():
# _EC, _DF = 'N2CV', EC_merged_dict['N2CV']
# _EC, _DF = 'ORR', EC_merged_dict['ORR']
# print(_EC)
if "AST_row_n" not in _DF["AST_diff"].columns:
_DF["AST_diff"]["AST_row_n"] = [
int(i[-1]) for i in _DF["AST_diff"].AST_row.str.split("_").values
]
AST_diff = _DF["AST_diff"].copy()
AST_diff = AST_diff.loc[~AST_diff.AST_row_pre.isin(_drop_AST_row_pre)]
AST_matches = (
_DF["AST_matches"].copy().sort_values(by=["post_postAST", "SampleID"])
)
_rem1 = AST_matches.loc[
(AST_matches.post_postAST == "postAST_LC")
& (AST_matches.SampleID.isin(["JOS2", "JOS4", "JOS5"]))
& (AST_matches.pre_PAR_date_day_dt == dt.date(2019, 1, 25))
].assign(**{"rem": 1})
_rem2 = AST_matches.loc[
(
(AST_matches.post_postAST == "postAST_LC")
& (AST_matches.pre_postAST == "no")
& (
AST_matches.SampleID.isin(
["JOS1", "JOS2", "JOS3", "JOS4", "JOS5"]
)
)
& (AST_matches.pre_PAR_date_day_dt == dt.date(2019, 5, 6))
& (
AST_matches.post_PAR_date_day_dt.isin(
[dt.date(2019, 1, 25), dt.date(2019, 1, 26)]
)
)
)
].assign(**{"rem": 2})
# _keep_clean.loc[2].to_dict()
# _jos3 = {'SampleID': 'JOS3', 'pre_PAR_date_day_dt': dt.date(2019, 1, 24), 'post_PAR_date_day_dt': dt.date(2019, 1, 25),
# 'post_postAST': 'postAST_LC', 'pre_postAST': 'no'}
# _jos3qry = ' & '.join([f'{k} == {repr(val)}' for k,val in _jos3.items()])
# AST_matches.query(_jos3qry)
_rem3 = AST_matches.loc[
(
(AST_matches.post_postAST == "postAST_LC")
& (AST_matches.pre_postAST == "no")
& (AST_matches.SampleID.isin(["JOS3"]))
& (AST_matches.pre_PAR_date_day_dt == dt.date(2019, 1, 24))
& (AST_matches.post_PAR_date_day_dt == dt.date(2019, 1, 25))
)
].assign(**{"rem": 3})
_rem4 = AST_matches.loc[(AST_matches.pre_postAST != "no")].assign(
**{"rem": 4}
)
_edit = _rem1 # changed later 15.03
_remove = pd.concat([_rem2, _rem4, _rem3])
_keep = AST_matches.iloc[~AST_matches.index.isin(_remove.index.values)]
AST_matches[_ch_match].drop_duplicates()
_rem_clean = _remove[_ch_match + ["rem"]].sort_values(by=_sortcols)
_keep_clean = _keep[_ch_match].sort_values(by=_sortcols)
# _remove[['SampleID','post_postAST']] # check
# _rem = _DF['AST_diff'].loc[_DF['AST_diff']['AST_row_n'].isin(_remove.index.values)]
# _rem[['SampleID','postAST_post','PAR_date_day_pre']] #check
_filtered = AST_diff.loc[~AST_diff["AST_row_n"].isin(_remove.index.values)]
# DF['AST_diff'] = _filtered
self.EC_merged_dict.update({_EC: {**_DF, **{"AST_diff_filter": _filtered}}})
print(
f'EC merged dict updated with dropped rows in "AST_diff_filter" for:\n {self.EC_merged_dict.keys()}'
)
# return EC_merged_dict
# _DF['AST_diff'].loc[_DF['AST_diff'].AST_row_n.isin(_rem]
def EC_merge_postchar(_reloadset=False):
_pkl_EC_postchar = "EC_merged_postchars"
EC_postchars = load_dict_pkl(_pkl_EC_postchar)
if not EC_postchars and _reloadset != True:
EC_merged_dict = EC_PorphSiO2.mergedEC(_reloadset=True)
# EC_merged_dict_bak = EC_merged_dict.copy()
EC_merged_dict = EC_PorphSiO2.add_filter_selection_of_EC_merged(
EC_merged_dict
)
postChars = postChar().merged
_extracols = [i for i in SampleCodes.columns if not "SampleID" in i]
EC_postchars = {}
for _EC, _DF_dict in EC_merged_dict.items():
_DF = _DF_dict["AST_diff_filter"]
_initcols = _DF.columns
_DF = _DF.dropna(axis=1, how="all")
_DF = _DF.drop(columns=_DF.columns.intersection(_extracols))
_DF = pd.merge(_DF, postChars, on="SampleID")
_postcols = _DF.columns
EC_postchars.update({_EC: _DF})
save_dict_pkl(_pkl_EC_postchar, EC_postchars)
return EC_postchars
def _fix_ORR_scans():
EC_postchars = EC_PorphSiO2.EC_merge_postchar(_reloadset=True)
_ORR = EC_postchars["ORR"]
_J245 = _ORR.loc[
_ORR.SampleID.isin(["JOS2,", "JOS4", "JOS5"])
& (_ORR.postAST_post == "postAST_LC")
]
_extracols = [i for i in SampleCodes.columns if not "SampleID" in i]
def compare_AST_pars(self, _DF, _AST_in, reload=False, extra="", save_pkl=True):
# _DF, _AST_in = EIS, EIS_AST
# _DF, _AST_in = N2CV, N2_AST
# _DF, _AST_in = ORR, ORR_AST
# _DF, _AST_in = KL, KL_AST
# _DF, _AST_in = HER, HER_AST
# _DF, _AST_in = Hgrp, HER_AST
# _DF, _AST_in = ttN2CV, ttAST
# reload, extra = _reloadset, Htype
_DF = _DF.drop_duplicates()
_DFtype = self.sense_DF_type(_DF)
_DFtype = "".join([i for i in _DFtype if str.isalpha(i)])
_DFtype_prefix = _DFtype.split("_")[0]
if extra:
_pklpath = EC_PorphSiO2.folder.joinpath(
f"AST_compared_pars_{_DFtype}_{extra}.pkl"
)
else:
_pklpath = EC_PorphSiO2.folder.joinpath(f"AST_compared_pars_{_DFtype}.pkl")
if _pklpath.exists() and not reload:
try:
print("AST compare reading:", _pklpath)
DF_diff = pd.read_pickle(_pklpath)
return DF_diff
except Exception as e:
return print("reading error", e)
else:
_prec = [i for i in _AST_in.columns if not i.startswith("post")]
_precols = [
i.split("pre_")[-1] if i.startswith("pre") else i for i in _prec
]
_post = [i for i in _AST_in.columns if not i.startswith("pre")]
_postcols = [
i.split("post_")[-1] if i.startswith("post") else i for i in _post
]
_dropnacols = set(_post + _prec)
list(set(_prec).intersection(set(_post)))
_AST = _AST_in.dropna(subset=_dropnacols, how="any")
# _AST = _AST_in.loc[(_AST_in.SampleID == "JOS4") ]
# & (_AST_in.post_postAST.str.contains('LC'))]
_DF_diff_out = []
_errors = []
_dms = []
# _AST.loc[_AST.SampleID == "JOS4"].tail(2)
for n, r in _AST.iterrows():
# r[_dropnacols]
_uniq_AST_row_pre = f"{r.pre_PAR_date_day_dt};{Path(r.pre_PAR_file).stem};{r.pre_postAST};{int(r.pre_dupli_num)}"
_uniq_AST_row_post = f"{r.post_PAR_date_day_dt};{Path(r.post_PAR_file).stem};{r.post_postAST};{int(r.post_dupli_num)}"
# EIS.query(' and '.join([f'{k} == {repr(v)}' for k, v in _pred.items()]))
_pred = dict(zip(_precols, r[_prec].to_dict().values()))
_preQ = " & ".join(
[f"{k} == {repr(v)}" for k, v in _pred.items() if k in _DF.columns][
1:
]
)
_Dpre = _DF.query(_preQ).dropna(axis=1, how="all")
_postd = dict(zip(_postcols, r[_post].to_dict().values()))
_postQ = " & ".join(
[
f"{k} == {repr(v)}"
for k, v in _postd.items()
if k in _DF.columns
][1:]
)
_Dpos = _DF.query(_postQ).dropna(axis=1, how="all")
_dms.append((n, _pred, _postd))
# pd.merge(_Dpre,_Dpos)
_0 = [
(i, _Dpre[i].unique()[0])
for i in _Dpre.columns
if _Dpre[i].nunique() <= 1 and not i.startswith(_DFtype_prefix)
]
_1 = [
(i, _Dpos[i].unique()[0])
for i in _Dpos.columns
if _Dpos[i].nunique() <= 1 and not i.startswith(_DFtype_prefix)
]
# _dms.append((n, len(_Dm), _Dm ))
_mcols = [
i[0]
for i in set(_0).intersection(set(_1))
if not i[0].startswith("dupli")
]
_mcols = [
i
for i in _mcols
if i not in ["PAR_exp", "Dest_dir"] and not i.startswith("EXP_")
]
_mcols.sort()
_othercols = _Dpos.columns.difference(_mcols)
t2 = _Dpos[_othercols]
if "EIS" in _DFtype and all(
["E_RHE" in i for i in [_Dpre.columns, _Dpos.columns]]
):
_mcols += ["E_RHE"]
# _Dm = pd.merge(_Dpre,_Dpos,on=_mcols + ['E_RHE'],suffixes=['_pre','_post'])
elif "ORR" in _DFtype:
_KLcols = ["ORR_E_AppV_RHE", "ORR_KL_E_AppV_RHE", "Electrode"]
if all(i in _othercols for i in _KLcols):
_mcols += _KLcols
# _Dm = pd.merge(_Dpre, _Dpos, on = _mcols, suffixes = ['_pre','_post'])
elif "HER" in _DFtype:
_addcols = [
i
for i in [
"HER_type",
"HER_at_J_slice",
"HER_at_E_slice",
"HER_Segnum",
]
if i in set(_Dpre.columns).union(_Dpos.columns)
]
_mcols += _addcols
_Dm = pd.merge(_Dpre, _Dpos, on=_mcols, suffixes=["_pre", "_post"])
_Dm = _Dm.assign(
**{
"AST_row": f"{_DFtype}_{n}",
"AST_row_n": int(n),
"AST_days_n": r.AST_days_n,
"AST_row_pre": _uniq_AST_row_pre,
"AST_row_post": _uniq_AST_row_post,
}
)
# [(i, _Dpos[i].nunique(), _Dpos[i].unique()[0], _Dpre[i].nunique(), _Dpre[i].unique()[0], (_Dpos[i].unique(),_Dpre[i].unique()))
# for i in _mcols if _Dpos[i].nunique() > 1]
if _Dm.empty:
# the undefined name 'run_this' served as a hard stop for debugging; raise explicitly
raise ValueError(f"compare_AST_pars: empty merge for AST row {n} on columns {_mcols}")
# try:
# _Dm = pd.merge_asof(_Dpre.sort_values(_mcols), _Dpos.sort_values(_mcols), on = _mcols, suffixes = ['_pre','_post'])
_parcols = [
(i, i.replace("_pre", "_post"))
for i in _Dm.columns
if i.startswith(_DFtype_prefix)
and i.endswith("_pre")
and i.replace("_pre", "_post") in _Dm.columns
]
for _c0, _c1 in _parcols:
try:
_diffabs = _Dm[_c0] - _Dm[_c1]
_diffperc = 100 * (_Dm[_c1] - _Dm[_c0]) / _Dm[_c0]
_Dm = _Dm.assign(
**{
_c0.split("_pre")[0] + "_diff_abs": _diffabs,
_c0.split("_pre")[0] + "_diff_perc": _diffperc,
}
)
except Exception as e:
# pass
_errors.append((_c0, _c1, e))
_DF_diff_out.append(_Dm)
# print(_c0, e)
DF_diff = pd.concat(_DF_diff_out).drop_duplicates()
if save_pkl == True:
DF_diff.to_pickle(_pklpath)
_logger.info(f"AST compare len({len(DF_diff)}) saved to:{_pklpath}")
return DF_diff
# DF_diff.groupby(['postAST_post','SampleID']).plot(x='E_RHE', y='EIS_Rct_O2_diff_abs',ylim=(-200,200))
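# Sketch of the per-parameter diff columns added above, assuming a parameter present
# as e.g. 'ORR_E_onset_pre' and 'ORR_E_onset_post':
#   ORR_E_onset_diff_abs  = ORR_E_onset_pre - ORR_E_onset_post
#   ORR_E_onset_diff_perc = 100 * (ORR_E_onset_post - ORR_E_onset_pre) / ORR_E_onset_pre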
def sense_DF_type(self, _DF):
# _c = [i[0] for i in Counter([i.split('_')[0] for i in _DF.columns]).most_common(5) if i[0] not in ['BET','tM']][0]
_excl = set(self.EC_idx_PorphSiO2.columns).union(SampleCodes.columns)
_res = [
i
for i in Counter(
["_".join(i.split("_")[0:2]) for i in _DF.columns]
).most_common(20)
if not any([i[0] in b for b in _excl]) and i[0][0].isalnum()
]
_res2 = Counter(["_".join(i.split("_")[0:1]) for i, c in _res])
_type = _res2.most_common(1)[0][0]
_extraC = Counter(
["_".join(i.split("_")[1:2]) for i in _DF.columns if _type in i]
).most_common(1)
if _extraC[0][1] > 4:
_type = f"{_type}_{_extraC[0][0]}"
# if _res2.most_common(2)[1][1] > 3:
# _type = f'{_type}_{_res2.most_common(2)[1][0]}'
return _type
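# Example (hypothetical columns): a frame dominated by 'N2_Cdl_*' columns is sensed
# as 'N2_Cdl' (the second token is appended when it occurs more than 4 times), while
# a mixed 'ORR_*' frame without a dominant second token is sensed as plain 'ORR';
# index and SampleCodes columns are excluded via _excl before counting.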
# EC_all_merged_lst.append(EC_OHN_merged)
# EC_all_merged = pd.concat(EC_all_merged_lst)
# ORR_cath = EC_PorphSiO2.ORR_updated_pars(sweep_type_select='cathodic')
# ORR_an = EC_PorphSiO2.ORR_updated_pars(sweep_type_select='anodic')
# EC_OHN2 = pd.merge(template, pd.merge(ORR_an,pd.merge(HPRR, N2CV),on='SampleID'), on='SampleID')
# EC_OHN2_cath = pd.merge(template, pd.merge(ORR,pd.merge(HPRR, N2CV),on='SampleID'), on='SampleID')
# EC_OHN2.to_excel(FindExpFolder('PorphSiO2').compare.joinpath('EC_ORR_HPRR_N2.xlsx'))
def export_to_xls(EC_OHN_merged):
export_path = FindExpFolder("PorphSiO2").compare.joinpath(f"EC_pars_all.xlsx")
if "Sweep_Type" in EC_OHN_merged.columns:
with pd.ExcelWriter(export_path) as writer:
for swp, swpgr in EC_OHN_merged.groupby("Sweep_Type"):
swpgr.to_excel(writer, sheet_name=swp)
swpgr.to_excel(export_path.with_name(f"EC_pars_{swp}.xlsx"))
else:
export_path = FindExpFolder("PorphSiO2").compare.joinpath(
"EC_pars_no-sweep.xlsx"
)
EC_OHN_merged.to_excel(export_path)
print(f"EC pars saved to:\n{export_path}")
return export_path
def edit_columns(func, template=pd.concat([PorphSiO2_template(), SampleCodes])):
def wrapper(*args, **kwargs):
if kwargs:
pars_out, suffx = func(*args, **kwargs)
else:
pars_out, suffx = func(*args)
_skipcols = set(
EC_prepare_EC_merged.mcols
+ ["RPM_DAC_uni"]
+ list(PorphSiO2_template().columns)
+ list(EC_index.columns)
+ list(SampleCodes.columns)
)
cols = [
i
for i in pars_out.columns
if i not in _skipcols and not i.startswith(f"{suffx}")
]
pars_out = pars_out.rename(columns={i: f"{suffx}_" + i for i in cols})
return pars_out
return wrapper
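# Sketch of the renaming done by edit_columns: a wrapped method returning (df, 'N2')
# gets every column that is not in _skipcols and does not already start with 'N2'
# renamed to 'N2_<col>', so technique-specific parameters stay distinguishable after
# the later merges, while the shared label columns keep their names.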
@edit_columns
def edit_pars_HPRR(sweep_type_select=["anodic", "cathodic"]):
hfs = []
for swp in sweep_type_select:
hprr_files = list(EC_PorphSiO2.folder.rglob(f"*{swp}*HPRR*disk*"))
# print(hprr_files)
for hf in hprr_files:
hprr_raw = pd.read_excel(hf)
hprr_raw["file"] = hf.stem
E_App_col = [i for i in hprr_raw.columns if "E_APP" in i.upper()][0]
E_jmin = hprr_raw.iloc[np.abs(hprr_raw["jmAcm-2"]).idxmin()][E_App_col]
sID = GetSampleID.try_find_sampleID(hf)[0]
fit_lin_fit = linregress(hprr_raw[E_App_col], hprr_raw["HPRR_j0_Fit"])
hfs.append(
{
"SampleID": sID,
"E_onset": E_jmin,
"dj/dE": fit_lin_fit[0],
"Sweep_Type": swp,
}
)
HPRR_pars_origin = pd.DataFrame(hfs)
return HPRR_pars_origin, "HPRR"
def load_pars_HER(self):
HER_pars_all = Load_from_Indexes.HER_pars_OVV(**self.reload_pars_kws)
self.pars_HER = HER_pars_all
@edit_columns
def edit_pars_HER(self, sweep_type_select=["anodic", "cathodic"], unit="F"):
# reload= False, use_daily = True, extra_plotting=False, xls_out = False
# LC_idx = self.index
if (
not Pfolder.joinpath("HER_orig_data.pkl").exists()
or self.reload_pars == True
):
self.load_pars_HER()
HER_pars = self.pars_HER.loc[
(
(self.pars_HER._type == "HER_pars")
& (self.pars_HER.PAR_file.isin(self.EC_idx_PorphSiO2.PAR_file.to_numpy()))
)
]
HER_pars.to_pickle(Pfolder.joinpath("HER_orig_data.pkl"))
else:
HER_pars = pd.read_pickle(Pfolder.joinpath("HER_orig_data.pkl"))
HER_pars = HER_pars.dropna(how="all", axis=1)
return HER_pars, "HER"
def load_pars_ORR(self):
ORR_pars_all = self.ParsColl["ORR_pars"]
# Load_from_Indexes.ORR_pars_OVV(**self.reload_pars_kws)
self.pars_ORR = ORR_pars_all
@edit_columns
def edit_pars_ORR(self):
if not hasattr(self, "pars_ORR"):
self.load_pars_ORR()
ORR_pars = self.pars_ORR.loc[
(
(self.pars_ORR.source_type == "ORR_pars")
& (
self.pars_ORR.PAR_file.isin(
self.EC_idx_PorphSiO2.PAR_file.to_numpy()
)
)
)
]
ORR_pars = ORR_pars.dropna(how="all", axis=1)
# Adding log cols to ORR pars
ORR_pars = ORR_pars.assign(
**{
f'{"_".join(i.split("_")[0:-1])}_log_{i.split("_")[-1]}': np.log(
ORR_pars[i]
)
for i in ORR_pars.columns
if "Jkin" in i
}
)
return ORR_pars, "ORR"
@edit_columns
def edit_pars_KL(self):
if not hasattr(self, "pars_ORR"):
self.load_pars_ORR()
KL_pars = self.pars_ORR.loc[
(
(self.pars_ORR.source_type == "KL_pars")
& (
self.pars_ORR.PAR_file.isin(
self.EC_idx_PorphSiO2.PAR_file.to_numpy()
)
)
)
]
KL_pars = KL_pars.dropna(how="all", axis=1)
return KL_pars, "ORR"
def load_pars_N2CV(self):
# N2_loadpars = N2_LoadPars(reload = True, reload_raw = False )
Cdl_pars_all = self.ParsColl["N2_pars"]
# N2_loadpars.N2_pars
# Load_from_Indexes.N2_pars_OVV(**self.reload_pars_kws)
# (reload= self.reload_pars, use_daily = use_daily, extra_plotting=extra_plotting, xls_out = xls_out)
self.pars_N2CV = Cdl_pars_all
@edit_columns
def edit_pars_N2cv(
self,
sweep_type_select=["anodic", "cathodic"],
unit="F",
reload=False,
use_daily=True,
extra_plotting=False,
xls_out=False,
):
self.load_pars_N2CV()
if not Pfolder.joinpath("N2_orig_data.pkl").exists() or reload == True:
Cdl_pars_all = self.pars_N2CV
Cdl_pars = Cdl_pars_all.loc[
Cdl_pars_all.PAR_file.isin(self.EC_idx_PorphSiO2.PAR_file.to_numpy())
]
# IndexOVV_N2_pars_fn = FindExpFolder('VERSASTAT').PostDir.joinpath('N2Cdl_pars_IndexOVV_v{0}.pkl.compress'.format(FileOperations.version))
Cdl_pars = Cdl_pars.assign(**{"E_RHE_mV": 1000 * Cdl_pars.E_RHE.to_numpy()})
# Cdl_pars.index = pd.MultiIndex.from_frame(Cdl_pars[['PAR_file','Sweep_Type_N2']])
# N2_files, N2fs = list(EC_PorphSiO2.folder.rglob('*CVs*xlsx')), []
N2fs = []
if unit == "mF":
unit_factor = 1
elif unit == "F":
unit_factor = 1e-3
else:
unit_factor = 1
for n2f, ngr in Cdl_pars.groupby("PAR_file"):
idx_cols = [i for i in ngr.columns if ngr[i].nunique() == 1]
_dc = [i for i in ngr.columns if ngr[i].nunique() > 1]
# sID = GetSampleID.try_find_sampleID(n2f)[0]
ngr.index = pd.MultiIndex.from_frame(ngr[idx_cols])
ngr.drop(columns=idx_cols, inplace=True)
ngr = ngr.dropna(axis=1, how="all")
for swp, swgrp in ngr.groupby("Sweep_Type_N2"):
if swp in sweep_type_select:
# anod = n2_raw.get(swp)
swgrp_Ev = swgrp.loc[
(swgrp.E_RHE_mV.isin(np.arange(0.0, 1000.0, 100)))
& (swgrp.Cdl_R > 0.8)
]
_mgr = []
for n, gr in swgrp_Ev.groupby("E_RHE_mV"):
if len(gr) > 1:
_mean = pd.DataFrame(pd.DataFrame(gr.mean(axis=0)).T)
_mean.index = gr.take([0]).index
_mgr.append(_mean)
else:
_mgr.append(gr)
_swgr_Ev_mean = pd.concat(_mgr)
_pvt = _swgr_Ev_mean.pipe(
multiIndex_pivot,
index=None,
columns=["E_RHE_mV"],
values="Cdl",
)
_pvt = _pvt.assign(**{"Sweep_Type": swp})
N2fs.append(_pvt)
else:
pass
N2_orig = pd.concat([i.reset_index() for i in N2fs], ignore_index=True)
N2_orig.columns = list(N2_orig.columns.get_level_values(0))
# N2_orig.index.names = N2fs[0].index.names
N2_orig = N2_orig.rename(
columns={
i: f"Cdl_{unit}cm-2_{int(i)}" for i in np.arange(0.0, 1000.0, 100)
}
)
N2_orig = N2_orig.assign(**{"RPM_DAC": 0})
N2_orig.to_pickle(Pfolder.joinpath("N2_orig_data.pkl"))
else:
N2_orig = pd.read_pickle(Pfolder.joinpath("N2_orig_data.pkl"))
# N2_orig = pd.DataFrame(N2fs) #.set_index('SampleID','Sweep_Type')
return N2_orig, "N2"
def load_pars_EIS(self):
_source = "Load pars"
if "files" in _source:
eis_files, eisfs = (
list(
self.folder.parent.joinpath(f"EIS_Porph_SiO2\{model_select}").rglob(
"JOS*.xlsx"
)
),
[],
)
if eis_files:
for ef in eis_files:
eis_raw = pd.read_excel(ef, index_col=[0])
eisfs.append(eis_raw)
EIS_pars_mod = pd.concat(eisfs, ignore_index=True).reset_index(
drop=True
)
else:
print("EIS pars file list empty!!")
else:
EIS_pars_mod = self.ParsColl["EIS_pars"]
# Load_from_Indexes.EIS_pars_OVV(reload= False, extra_plotting=False, xls_out = False, use_daily = True, use_latest=True)
# EIS_pars_mod = EIS_pars.loc[EIS_pars.Model_EEC.isin(self.EIS_models.values())]
self.pars_EIS = EIS_pars_mod
@edit_columns
def edit_pars_EIS(self, _source="Load pars"):
"""Models used are selected in the EIS_export module
via dict from EC_PorphSiO2.EIS_models"""
self.load_pars_EIS()
EIS_pars_mod = self.pars_EIS
EIS_pars_mod = EIS_pars_mod.loc[
EIS_pars_mod.index.isin(EIS_pars_mod.best_mod_index)
]
_sample_uniq_cols1 = set(
[
a
for n, gr in EIS_pars_mod.groupby("SampleID")
for a in [i for i in gr.columns if gr[i].nunique() == 1]
]
)
_sample_uniq_cols2 = set(
[
a
for n, gr in EIS_pars_mod.groupby("SampleID")
for a in [i for i in gr.columns if gr[i].nunique() == 2]
]
)
_sample_uniq_cols2.difference(_sample_uniq_cols1)
# Create EIS var columns with gas N2 or O2 as suffix names
# EPgrp = EIS_pars_mod.groupby(['Gas','Model_EEC'])
EPgrp_gas = EIS_pars_mod.groupby(["Gas"])
# N2grp = ('N2',self.EIS_models.get('N2'))
# O2grp = ('O2',self.EIS_models.get('O2'))
# EP_N2,EP_O2 = EPgrp.get_group(N2grp).drop(columns='Gas'), EPgrp.get_group(O2grp).drop(columns='Gas')
EC_exp_index = [
i for i in Load_from_Indexes.EC_label_cols if i not in ["PAR_file", "Gas"]
] + ["PAR_date_day"]
_gasgrp = []
for gas in ["O2", "N2"]:
# gasn_grp = (gas,self.EIS_models.get(gas))
grp = EPgrp_gas.get_group(gas)
_varsgrp = [a for i in grp.lmfit_var_names.unique() for a in i.split(", ")]
_varsgrp += ["Rct_kin" for i in _varsgrp if "Rct" in i] + [
"Qad+Cdlp"
for i in _varsgrp
if all([i in _varsgrp for i in ["Qad", "Cdlp"]])
]
_sample_uniq_cols1 = set([i for i in grp.columns if grp[i].nunique() == 1])
# grp.lmfit_var_names.unique()[0].split(', ')
_grp = grp.rename(columns={i: i + f"_{gas}" for i in set(_varsgrp)})
# _grp = _grp.drop(columns='Gas')
# _grp.set_index(EC_exp_index+[ i for i in list(_sample_uniq_cols) if i not in _varsgrp],inplace=True)
# _grp = _grp.drop(columns=EC_exp_index+[ i for i in list(_sample_uniq_cols) if i not in _varsgrp])
# [i for i in Load_from_Indexes.EC_label_cols if i is not 'Gas']
_gasgrp.append(_grp)
# _ggidx = [i.set_index(EC_exp_index) for i in _gasgrp]
# pd.concat(_ggidx,axis=0)
# _dups = [[(count,item) for item, count in collections.Counter(i.index.values).items() if count > 1] for i in _ggidx]
# _DUP_PARFILES = pd.concat(_ggidx).loc[[a[1] for i in _dups for a in i]].sort_values('PAR_date').PAR_file.unique()
# pd.merge(_gasgrp[0],_gasgrp[1], on =EC_exp_index+[ i for i in list(_sample_uniq_cols) if i not in _varsgrp])
# pd.merge(*_ggidx,left_index=True, right_index=True)
EIS_N2O2 = pd.concat(_gasgrp, ignore_index=True)
# EIS_N2O2 = pd.merge(EP_N2,EP_O2, suffixes=['_N2','_O2'],on='SampleID')
Rsis = [
i
for i in EIS_N2O2.columns
if "Rs" in i and not any(c in i for c in ("stderr", "_kin_", "_setting"))
]
Rct_cols = [
i
for i in EIS_N2O2.columns
if "Rct" in i and not any(c in i for c in ("stderr", "_kin_"))
]
# EIS_pars_origin[Rsis] = EIS_pars_origin[Rsis].mask(EIS_pars_origin[Rsis] < 1)
EIS_N2O2[Rsis] = EIS_N2O2[Rsis].mask(EIS_N2O2[Rsis] < 1)
print("EIS Rs mask applied")
EIS_N2O2[Rct_cols] = EIS_N2O2[Rct_cols].mask(EIS_N2O2[Rct_cols] > 1e5)
print("EIS Rct mask applied")
EIS_N2O2 = EIS_N2O2.dropna(axis=1, how="all")
# RedChiSq_limit = ORReis_merge.query('Rs > 1').RedChisqr.mean()+ 1*ORReis_merge.query('Rs > 1').RedChisqr.std()
# ORReis_neat = ORReis_merge.query('RedChisqr < @RedChiSq_limit & Rs > 2 & Rct < 9E05')
EIS_N2O2_an, EIS_N2O2_cat = EIS_N2O2.copy(), EIS_N2O2.copy()
EIS_N2O2_an["Sweep_Type"] = "anodic"
EIS_N2O2_cat["Sweep_Type"] = "cathodic"
EIS_N2O2_new = pd.concat([EIS_N2O2_an, EIS_N2O2_cat], axis=0)
# EIS_pars_orig_mod = EIS_pars_origin.query('Model_EEC == @model_select')
return EIS_N2O2_new, "EIS"
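# Sketch of the gas handling above: lmfit variables (e.g. 'Rct', 'Qad', 'Cdlp'; the
# exact names depend on the fitted model) are suffixed per gas to 'Rct_N2'/'Rct_O2'
# etc. before concatenation, and the result is duplicated with Sweep_Type 'anodic'
# and 'cathodic' so it can be merged with the sweep-resolved techniques.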
def EIS_spectra_origin_prep(model_select=["Model(R0-L0-p(R1-Ws1,CPE1)-C2)"]):
eis_metaf, _specs = (
list(
EC_PorphSiO2.folder.parent.rglob(
"EIS_Porph_SiO2\meta_data*EIS*origin.xlsx"
)
),
[],
)
EISmeta = pd.read_excel(eis_metaf[0], index_col=[0])
EISmeta.columns
for (sID, gas), pgrp in EISmeta.groupby(["SampleID", "Gas"]): # 'PAR_file'
PF, pgrp
EIScombined = pd.read_excel(pgrp.SpectraFile.iloc[0], index_col=[0])
EISspectra_mod = EIScombined.query("Model_EEC == @model_select")
EISspectra_mod = make_uniform_EvRHE(EISspectra_mod)
for Ev, Egrp in EISspectra_mod.groupby("E_RHE"):
Egrp = Egrp.assign(**{"SampleID": sID, "Gas": gas})
_specs.append(Egrp)
# _specs.update({(sID,gas,Ev) : Egrp})
spectra = pd.concat(_specs)
spectra.to_excel(eis_metaf[0].with_name("clean_spectra.xlsx"))
def EIS_spectra_origin(model_select=["Model(R0-L0-p(R1-Ws1,CPE1)-C2)"]):
eis_metaf, _specs = (
list(
EC_PorphSiO2.folder.parent.rglob(
f"EIS_Porph_SiO2\{model_select}\meta_data*EIS*origin.xlsx"
)
),
[],
)
specdir = mkfolder(eis_metaf[0].parent.joinpath("spectra"))
spectra = pd.read_excel(
eis_metaf[0].with_name("clean_spectra.xlsx"), index_col=[0]
)
spectra.columns
for ax_type in [("Zre", "-Zim"), ("Yre", "Yim")]:
cols = [i + a for i in ["DATA_", "FIT_"] for a in ax_type]
for gas, Ggrp in spectra.groupby("Gas"):
for sID, sgrp in Ggrp.groupby("SampleID"):
with pd.ExcelWriter(
specdir.joinpath(f"{ax_type[0][0]}_{gas}_{sID}.xlsx")
) as writer:
for Ev, Egrp in sgrp.groupby("E_RHE"):
# sheet_name = Ev
EmV = f"{1E3*Ev:.0f}"
Egrp[["Frequency(Hz)"] + cols].to_excel(
writer, sheet_name=EmV
)
# === plotting
fig, ax = plt.subplots()
Egrp.plot(
x=cols[0],
y=cols[1],
kind="scatter",
ax=ax,
label=cols[1],
)
Egrp.plot(x=cols[2], y=cols[3], c="r", ax=ax, label=cols[3])
plt.legend()
ax.set_xlabel(ax_type[0])
ax.set_ylabel(ax_type[1])
ax.set_title(f"{gas} {sID} {EmV}")
ax.grid(True)
plt.savefig(
specdir.joinpath(f"{ax_type[0][0]}_{gas}_{sID}_{EmV}"),
bbox_inches="tight",
)
plt.close()
# ===
def save_load_AST_pars(func):
# argnames = func.func_code.co_varnames[:func.func_code.co_argcount]
# fname = func.func_name
def wrapper(*args, **kwargs):
func_args = inspect.signature(func).bind(*args, **kwargs).arguments
func_args_str = ", ".join(
"{} = {!r}".format(*item) for item in func_args.items()
)
print(f"{func.__module__}.{func.__qualname__} ( {func_args_str} )")
# args = list(args)
# print('must-have arguments are:')
# my_var_name = [ (k,v) for k,v in locals().items()]
# for i in my_var_name:
# print(f'{(i)}')
## for i in args:
## print(eval(i))
# print('optional arguments are:')
# for kw in kwargs.keys():
# print( kw+'='+str( kwargs[kw] ))
return args
return wrapper
# # @save_load_AST_pars
# def mergedEC( _reloadset = False):
# _pkl_EC_merged = 'EC_merged_dict'
# # EC_merged_dict = EC_PorphSiO2.mergedEC(_reloadset=True)
# if _reloadset == True:
# # EC_merged_dict_bak = EC_merged_dict.copy()
# # EC_merged_dict = EC_PorphSiO2.take_selection_of_EC_merged(EC_merged_dict)
# mcols = [i for i in Load_from_Indexes.EC_label_cols if i not in ['PAR_file']]+['Sweep_Type']
# _mcols = [i for i in mcols if not i in ['Gas','E_RHE']]
# LC_fls, AST_days = EC_PorphSiO2.select_ECexps(EC_folder)
# EC_merged_dict = {}
# # _reloadset = True
# template = PorphSiO2_template()
# HPRR = EC_PorphSiO2.HPRR()
# N2CV = EC_PorphSiO2().N2cv(reload= False, use_daily = True)
# # N2_pltqry = EC_merged_dict.get('N2CV')
# N2_AST = EC_PorphSiO2.get_AST_matches(N2CV)
# N2_AST_diff = EC_PorphSiO2.compare_AST_pars(N2CV, N2_AST, reload = False)
# # _DFtype = EC_PorphSiO2.sense_DF_type(N2CV)
# # EC_merged_dict.update({'N2CV' : N2_AST_diff})
# EC_merged_dict.update({'N2CV' : {'PARS' : N2CV, 'AST_matches' : N2_AST, 'AST_diff' : N2_AST_diff}})
# # list(N2CV.columns)
# # _renameN2 = {c : c.split('_')[-1] for c in [i for i in N2CV.columns if any([i.split('_')[-1] in mcols])]}
# # N2CV = N2CV.rename(columns = _renameN2)
# ORR = EC_PorphSiO2().ORR_pars()
# ORR_AST = EC_PorphSiO2.get_AST_matches(ORR)
# ORR_AST_diff = EC_PorphSiO2.compare_AST_pars(ORR, ORR_AST, reload = _reloadset)
# ttpars = ORR.query('RPM_DAC_uni > 1000 & Sweep_Type == "cathodic"')
# tt_AST = EC_PorphSiO2.get_AST_matches(ttpars)
# tt = ORR_AST.query('RPM_DAC_uni > 1000 & Sweep_Type == "cathodic"')
# tt_diff = EC_PorphSiO2.compare_AST_pars(ORR, tt, reload = _reloadset, save_pkl = False)
# # ttpfs = ORR.loc[ORR.ORR_Jkin_max_700 > 0].PAR_file.unique()
# # ttpfs = ORR.query('Sweep_Type == "mean"').loc[ORR.ORR_E_onset > 0.85].PAR_file.unique()
# # ORR.loc[(ORR.ORR_E_onset > 0.85) & (ORR.Sweep_Type == 'cathodic')].PAR_file.unique()
# # EC_merged_dict.update({'ORR' : ORR_AST_diff})
# EC_merged_dict.update({'ORR' : {'PARS' : ORR, 'AST_matches' : ORR_AST, 'AST_diff' : ORR_AST_diff}})
# # _renameO2 = {c : c.split('_')[-1] for c in [i for i in ORR.columns if any([i.split('_')[-1] in mcols]) and not '_Ring' in i]}
# # ORR = ORR.rename(columns = _renameO2)
# KL = EC_PorphSiO2().KL_pars()
# KL = KL.assign(**{'RPM_DAC' : 0})
# KL_AST = EC_PorphSiO2.get_AST_matches(KL)
# KL_AST_diff = EC_PorphSiO2.compare_AST_pars(KL, KL_AST, reload = _reloadset)
# # EC_merged_dict.update({'KL' : KL_AST_diff})
# EC_merged_dict.update({'KL' : {'PARS' : KL, 'AST_matches' : KL_AST, 'AST_diff' : KL_AST_diff}})
# # _KLdatacols = ['ORR_KL_data_file_post','ORR_KL_data_x_post', 'ORR_KL_data_y_post', 'ORR_KL_fit_y_post', 'ORR_KL_fit_y_2e_post', 'ORR_KL_fit_y_4e_post']
# # _renameKL = {c : c.split('_')[-1] for c in [i for i in KL.columns if any([i.split('_')[-1] in mcols]) and not '_Ring' in i]}
# # KL = KL.rename(columns = _renameKL)
# EIS = EC_PorphSiO2.EIS_pars()
# EIS_AST = EC_PorphSiO2.get_AST_matches(EIS)
# EIS_AST_diff = EC_PorphSiO2.compare_AST_pars(EIS, EIS_AST, reload = _reloadset)
# # EC_merged_dict.update({'EIS' : EIS_AST_diff})
# EC_merged_dict.update({'EIS' : {'PARS' : EIS, 'AST_matches' : EIS_AST, 'AST_diff' : EIS_AST_diff}})
# # _renameEIS = {c : c.split('_')[-1] for c in [i for i in EIS.columns if any([i.split('_')[-1] in mcols]) and not '_Ring' in i]}
# # EIS = EIS.rename(columns = _renameEIS)
# HER = EC_PorphSiO2().HER_pars(reload= False, use_daily = True)
# HER_type_grp = HER.groupby('HER_type')
# HER.HER_at_E_slice = HER.HER_at_E_slice.round(3)
# HER_AST = EC_PorphSiO2.get_AST_matches(HER)
# for Htype, Hgrp in HER_type_grp:
# # Htype, Hgrp = 'E_slice', HER.loc[HER.groupby('HER_type').groups['E_slice']]
# HER_AST_diff = EC_PorphSiO2.compare_AST_pars(Hgrp, HER_AST, reload = _reloadset,extra= Htype)
# try:
# if not HER_AST_diff.empty:
# EC_merged_dict.update({f'HER_{Htype}' : {'PARS' : Hgrp, 'AST_matches' : HER_AST, 'AST_diff' : HER_AST_diff}})
# except Exception as e:
# print(f'HER {Htype} fail, {e}')
# # EC_merged_dict.update({f'HER_{Htype}' : HER_AST_diff})
# EC_merged_dict = EC_PorphSiO2.add_filter_selection_of_EC_merged(EC_merged_dict)
# save_dict_pkl(_pkl_EC_merged, EC_merged_dict)
# else:
# EC_merged_dict = load_dict_pkl(_pkl_EC_merged)
# return EC_merged_dict
# ECmerged = pd.merge(ORR,pd.merge(N2CV, EIS,on=_mcols),on=_mcols)
# EC_EIS = pd.merge(ECmerged,EIS,on=mcols)
# EC_OHN_merged = pd.merge(template, EC_EIS, on='SampleID')
# EC_PorphSiO2.export_to_xls(EC_OHN_merged)
# return EC_OHN_merged
def corr_plots():
EC_OHC.query('SampleID != "JOS5"').corr()
corrstk = EC_OHC.query('SampleID != "JOS5"').corr().stack()
EC_OHC.plot(x="E_onset", y="HPRR_E_onset", kind="scatter")
EC_OHC.plot(x="FracH2O2_050", y="HPRR_E_onset", kind="scatter")
EC_OHC.plot(x="N2_Cdl_mFcm-2_0.5", y="HPRR_dj/dE", kind="scatter")
EC_OHC.plot(x="N2_Cdl_mFcm-2_0.5", y="E_half", kind="scatter")
EC_OHC.corr(method="pearson")
def _check_eis_plots():
_par = ["Cdlp", "Rorr", "Rct", "Qad", "Aw"][-1]
_checky = ["N_content", "BET_cat_agg"][0]
for modn, mgrp in EIS_pars_all.loc[EIS_pars_all.pH < 3].groupby(
["pH", "postAST", "Model_EEC"]
):
_ps = eisplot(_par)
if len(mgrp[_par].dropna()) > 3:
mgrp.plot(
x="E_RHE",
y=_par,
yerr=f"{_par}_stderr",
kind="scatter",
ylim=_ps.ylim,
logy=_ps.logy,
title=f"{modn}",
c=_checky,
cmap="rainbow",
)
# def EC_PorphSio():
## folder = Path('F:\EKTS_CloudStation\CloudStation\Preparation-Thesis\SiO2_projects\SiO2_Me_EC+Struc\EC_Porph_SiO2_0.1MH2SO4\Compare_parameters')
## folder = Path('G:\CloudStation\Preparation-Thesis\SiO2_projects\SiO2_Me_EC+Struc\EC_Porph_SiO2_0.1MH2SO4\Compare_parameters')
## HPRR = pd.concat([pd.read_excel(i)['file'] for i in hprr_files])
# EC_ORR_HPRR = pd.merge(ORR_pars_origin,HPRR_pars_origin)
# HPRR_pars_origin.join(N2_orig, on='SampleID')
# EC_OHC = pd.merge(ORR_pars_origin,pd.merge(HPRR_pars_origin, N2_orig),on='SampleID')
## orr_raw.query('RPM > 1400')
## orrfs.append(orr_raw.query('RPM > 1400'))
# EC_OHC.to_excel(folder.joinpath('EC_ORR_HPRR.xlsx'))
def _testing_():
tt = EC_prepare_EC_merged()
self = tt
_pp = EC_post_plotting(tt)
self = _pp
N2CV = self._EC_prepare_EC_merged.edit_pars_N2cv(reload=False, use_daily=True)
#%% == EC_post_plotting == testing
class EC_post_plotting:
def __init__(self, _EC_prepare_EC_merged):
self._EC_prepare_EC_merged = _EC_prepare_EC_merged
self.add_attrs()
def add_attrs(self):
if hasattr(self._EC_prepare_EC_merged, "EC_merged_dict"):
self.EC_merged = self._EC_prepare_EC_merged.EC_merged_dict
else:
self.EC_merged = {} # self._EC_prepare_EC_merged
def ORR_get_experiments(self):
ORR_AST = self.EC_merged["ORR"]["AST_matches"]
ORR_AST_mean1500 = ORR_AST.loc[
(ORR_AST.Sweep_Type == "mean") & (ORR_AST.RPM_DAC_uni > 1000)
]
ORR_AST_mean1500.to_excel(EC_folder.joinpath("ORR_AST_exp_overview.xlsx"))
# N2_scan_index = EC_index.loc[(EC_index.SampleID.isin(_smpls)) & (EC_index.PAR_exp.str.contains('N2_act'))]
# N2_scan_index.to_excel(EC_folder.joinpath('N2_scan_exp_overview.xlsx'))
def N2_repr_Cdl(self):
# ECname = 'N2'
# Cdl_pars_all = Load_from_Indexes.N2_pars_OVV()
_DF = self.EC_merged["N2CV"]["PARS"]
# _DF = Cdl_pars_all
ECname = "N2"
_raw_data_folder = mkfolder(EC_folder.joinpath(f"{ECname}_reproducibility"))
_grpcols = ["pH", "Loading_cm2", "SampleID"]
_swpcol = [i for i in _DF.columns if "Sweep_Type" in i]
_grpcols += _swpcol
_sIDgrps = _DF.loc[
_DF.SampleID.isin(PorphSiO2_template().SampleID.values) & (_DF.pH < 2)
]
# .query('postAST == "no"')
_lst = []
for sID, sgrp in _sIDgrps.groupby(_grpcols):
sID,
_sgpr_Cdl_mean = (
sgrp.groupby("E_AppV_RHE").Cdl.mean().rename("Cdl_grp_mean")
)
_Cdl_cols = [i for i in sgrp.columns if i.startswith("N2_Cdl_F")]
fig, (ax1, ax2) = plt.subplots(2, figsize=(10, 10), sharex=True)
_sgpr_Cdl_mean.plot(
c="grey", alpha=0.5, ls="--", lw=5, label="mean Cdl", ax=ax1
)
pfgrps = sgrp.groupby("PAR_file")
for pf, pfgrp in pfgrps:
pfgrp = pd.merge(pfgrp, _sgpr_Cdl_mean, on=EvRHE)
ls = "-" if "no" in pfgrp.postAST.unique() else "--"
pfgrp = pfgrp.assign(
**{"Cdl_mean_diff": pfgrp.Cdl - pfgrp.Cdl_grp_mean}
)
_lst.append(pfgrp)
pfgrp.plot(
x="E_AppV_RHE", y="Cdl_mean_diff", ax=ax2, legend=False, ls=ls
)
_dt = pfgrp.PAR_date_day.unique()[0]
_lbl = f"{_dt}, {Path(pf).stem}"
pfgrp.plot(x="E_AppV_RHE", y="Cdl", ax=ax1, label=_lbl, ls=ls)
_title = ", ".join([f"{k} : {str(val)}" for k, val in (zip(_grpcols, sID))])
_stem = "_".join([str(i) for i in sID]) + f"_{len(pfgrps)}"
ax1.set_ylabel("Cdl")
ax1.set_title(_title)
ax1.legend(
fontsize=15, bbox_to_anchor=(1.02, 1), loc="upper left", fancybox=True
)
ax2.set_ylabel("Cdl - Cdl_mean")
# ax2.legend(False)
plt.savefig(
_raw_data_folder.joinpath(_stem + ".png"), bbox_inches="tight", dpi=200
)
plt.close()
N2_Cdl_pars_mean = pd.concat(_lst)
def select_sID_N2(_sIDgrps):
_grp_select = (1.0, 0.379, "JOS4", "cathodic")
_jos4 = _sIDgrps.groupby(_grpcols).get_group(_grp_select)
_raw_data_folder = mkfolder(
EC_folder.joinpath(
f"{ECname}_reproducibility", "_".join([str(i) for i in _grp_select])
)
)
_j4lc = _jos4.loc[_jos4.postAST == "postAST_LC"]
j4post = pd.concat(
[
pd.read_excel(_j4lc.sourceFilename.unique()[0].parent.joinpath(i))
for i in _j4lc.N2_CV_datafilenames.unique()[0].split(", ")
]
)
_j4no = _jos4.loc[
(_jos4.PAR_date_day == "2019-05-06") & (_jos4.postAST == "no")
]
j4no = pd.concat(
[
pd.read_excel(_j4no.SourceFilename.unique()[0].parent.joinpath(i))
for i in _j4no.N2_CV_datafilenames.unique()[0].split(", ")
]
)
_j4no_pfgrps = _jos4.loc[(_jos4.postAST == "no")].groupby("PAR_file")
for pf, pgr in _j4no_pfgrps:
j4no_grps = pd.concat(
[
pd.read_excel(pgr.SourceFilename.unique()[0].parent.joinpath(i))
for i in pgr.N2_CV_datafilenames.unique()[0].split(", ")
]
).groupby("ScanRate_mVs")
for sr, sgrp in j4post.groupby("ScanRate_mVs"):
fig, ax = plt.subplots()
j4no_grps.get_group(sr).plot(
x="E_AppV_RHE",
y="jmAcm-2",
ax=ax,
label=f"pre,{pgr.PAR_date_day.unique()[0]} / {Path(pgr.PAR_file.unique()[0]).stem}",
)
sgrp.plot(
x="E_AppV_RHE", y="jmAcm-2", ax=ax, label="postAST_LC", title=sr
)
ax.legend(
fontsize=15,
bbox_to_anchor=(1.02, 1),
loc="upper left",
fancybox=True,
)
_stem = f"{sr}_{pgr.PAR_date_day.unique()[0]}_{Path(pgr.PAR_file.unique()[0]).stem}"
plt.savefig(
_raw_data_folder.joinpath(_stem + ".png"),
bbox_inches="tight",
dpi=200,
)
def reproducibility_check_samples(_DF, ECname):
ECname = "EIS"
if ECname == "EIS":
EIS = EC_PorphSiO2.EIS_pars()
_DF = EIS
_grpcols = ["pH", "Loading_cm2", "SampleID"]
_eisqry = '(postAST == "no") &'
_sIDgrps = _DF.loc[
_DF.SampleID.isin(PorphSiO2_template().SampleID.values) & (_DF.pH < 2)
].query('(Sweep_Type == "cathodic")')
_lst = []
for sID, sgrp in _sIDgrps.groupby(_grpcols):
sID
pars, vars = eisplot.read_varnames(sgrp)
for gas in ["O2", "N2"]:
# gas ='N2'
_vars_gas = [i for i in vars if i.endswith(gas)]
sgrp_gas = sgrp.copy()
sgrp_gas.dropna(subset=_vars_gas, axis=0, inplace=True)
sgrp_gas.dropna(axis=1, how="all", inplace=True)
sgrp_gas = sgrp_gas.loc[
sgrp_gas[[i for i in _vars_gas if f"Rct_{gas}" in i][0]] < 2000
]
for var in _vars_gas:
_raw_data_folder = mkfolder(
EC_folder.joinpath(f"{ECname}_reproducibility/{var}")
)
# var = f'EIS_{_var}_{gas}'
_sgpr_var_mean = (
sgrp_gas.groupby("E_RHE")[var].mean().rename(f"{var}_mean")
)
fig, (ax1, ax2) = plt.subplots(2, figsize=(10, 10), sharex=True)
_sgpr_var_mean.plot(
c="grey",
alpha=0.5,
ls="--",
lw=5,
label=f"{var}_mean",
ax=ax1,
)
pfgrps = sgrp_gas.groupby("PAR_file")
for pf, pfgrp in pfgrps:
pfgrp = pd.merge(pfgrp, _sgpr_var_mean, on="E_RHE")
pfgrp = pfgrp.assign(
**{
f"{var}_mean_diff": pfgrp[var]
- pfgrp[f"{var}_mean"]
}
)
_lst.append(pfgrp)
pfgrp.plot(
x="E_RHE", y=f"{var}_mean_diff", ax=ax2, legend=False
)
_dt = pfgrp.PAR_date_day.unique()[0]
_lbl = f"{_dt}, {Path(pf).stem}"
pfgrp.plot(
x="E_RHE", y=var, ax=ax1, label=_lbl
) # TODO add yerr=
_title = (
", ".join(
[f"{k} : {str(val)}" for k, val in (zip(_grpcols, sID))]
)
+ f", par : {var}"
)
_stem = (
var
+ "_"
+ "_".join([str(i) for i in sID])
+ f"_{len(pfgrps)}"
)
# TODO CHECK REPR
ax1.set_ylabel(var)
ax1.set_title(_title)
ax1.legend(
fontsize=15,
bbox_to_anchor=(1.02, 1),
loc="upper left",
fancybox=True,
)
ax2.set_ylabel(f"{var}_mean_diff")
# ax2.legend(False)
plt.savefig(
_raw_data_folder.joinpath(_stem + ".png"),
bbox_inches="tight",
dpi=200,
)
plt.close()
EIS_diff_means = pd.concat(_lst)
def get_raw_data(EC_merged_dict, ECname):
EC_merged_dict = EC_PorphSiO2.mergedEC(_reloadset=False)
# _CVdfls, _mcols, _setAST, pAST, ref_dict = _srcfls_fit, _mcols, _setAST, pAST, _uniq_id
def _read_excel_df(_CVdfls, _mcols, _setAST, pAST, ref_dict={}):
if _CVdfls:
_stCVdata = pd.concat(
[pd.read_excel(i, index_col=[0]) for i in _CVdfls],
sort=False,
ignore_index=True,
).dropna(axis=1, how="all")
_stCVdata = _stCVdata.rename(
columns={
i: f"{i}_{status}" for i in _stCVdata.columns if i not in _mcols
}
)
_good_cols = [
key
for key, val in ref_dict.items()
if key in _stCVdata.columns and _stCVdata[key].unique()[0] == val
]
_bad_cols = [
key
for key, val in ref_dict.items()
if key in _stCVdata.columns and _stCVdata[key].unique()[0] != val
]
_missing_cols = {
key: val
for key, val in ref_dict.items()
if key not in _stCVdata.columns
}
_stCVdata = _stCVdata.assign(
**{**{"postAST": _setAST, "postAST_post": pAST}, **_missing_cols}
)
else:
_stCVdata = pd.DataFrame()
return _stCVdata
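# Usage note for _read_excel_df (descriptive only; the example call below is
# hypothetical): the "_{status}" suffix appended to the data columns comes from
# the *enclosing* loop variable `status` ("pre"/"post"), not from a parameter,
# so the helper must be called from inside those loops. Columns listed in
# _mcols keep their original names so the pre- and post-AST frames stay
# mergeable, e.g.
# _example = _read_excel_df([Path("N2_CVs_JOS1.xlsx")], _mcols, "no",
#                           "postAST_LC", ref_dict={"SampleID": "JOS1"})
# would return the concatenated sheets with a data column "jmAcm-2" renamed to
# "jmAcm-2_pre" when status == "pre".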
if "N2CV" in EC_merged_dict.keys():
ECname = "N2"
_raw_data_folder = mkfolder(EC_folder.joinpath(f"{ECname}_raw_data"))
_loadDF = read_load_pkl(f"{ECname}_raw_data")
if _loadDF.empty:
AST_grp_cols = ["postAST_post", "Sweep_Type", "pH", "Loading_cm2"]
N2_pltqry = EC_merged_dict.get("N2CV").groupby(AST_grp_cols)
_mcols = [
EvRHE,
"ScanRate_mVs",
"Sweep_Type",
"SampleID",
"postAST",
] + ["postAST_post", "postAST_pre"]
_grpcols = ("cathodic", 1.0, 0.379)
_pAST_opts = ["postAST_LC", "postAST_sHA"]
_st = []
for pAST in _pAST_opts:
ASTgrp = N2_pltqry.get_group((pAST, *_grpcols))
_samplegrpcols = ["SampleID", "AST_row"]
for n, gr in ASTgrp.groupby(_samplegrpcols):
# n = list(ASTgrp.groupby(_samplegrpcols).groups)[-2]
# gr = ASTgrp.groupby(_samplegrpcols).get_group(n)
# TODO find missing JOS4 postAST_LC N2_act!
for status in ["pre", "post"]:
_sourcedfls = [
i
for i in gr[f"N2_SourceFilename_{status}"].unique()
if pd.notna(i)
]
_setAST = "no" if "pre" in status else pAST
_CVdfls = [
f.parent.joinpath(e)
for f in _sourcedfls
for e in [
a
for i in gr[
f"N2_CV_datafilenames_{status}"
].unique()
for a in i.split(", ")
]
if f.parent.joinpath(e).is_file()
]
_uniq_id = dict(
zip(
AST_grp_cols + _samplegrpcols,
(_setAST, *_grpcols, *n),
)
)
if _CVdfls:
_stCVdata = _read_excel_df(
_CVdfls, _mcols, _setAST, pAST, ref_dict=_uniq_id
)
# _stCVdata = pd.concat([pd.read_excel(i, index_col=[0]) for i in _CVdfls],sort=False,ignore_index=True)
# _stCVdata = _stCVdata.assign(**{'postAST' :_setAST, 'postAST_post' : pAST })
# _stCVdata = _stCVdata.rename(columns = {i : f'{i}_{status}' for i in _stCVdata.columns if i not in _mcols})
_st.append(_stCVdata)
else:
print("Sources empty!!", n, status, pAST)
N2_CVs = pd.concat([i for i in _st])
save_DF_pkl(f"{ECname}_raw_data", N2_CVs)
else:
N2_CVs = _loadDF
_select_cols = [
c
for c in N2_CVs.columns
if any(
[
i in c
for i in [
EvRHE,
"jmAcm-2",
"postAST",
"Sweep_Type",
"pH",
"Loading_cm2",
]
]
)
]
_ScanRates_check_lim = [
(
n,
gr[["jmAcm-2_pre", "jmAcm-2_post"]].max(),
gr[["jmAcm-2_pre", "jmAcm-2_post"]].min(),
)
for n, gr in N2_CVs.groupby(["ScanRate_mVs"])
]
_ScanRates_ylim = {
10: (-2.5, 2.5),
100: (-11, 7),
150: (-13, 10),
200: (-16, 12),
300: (-22, 16),
}
for n, gr in N2_CVs.groupby(["SampleID", "AST_row", "ScanRate_mVs"]):
n, gr
_name = "_".join([str(i) for i in (*_grpcols, *n)])
fig, ax = plt.subplots(figsize=(6, 4))
for _past, pgr in gr.groupby("postAST"):
_jcol = "pre" if _past == "no" else "post"
pgr.plot(
x=EvRHE,
y=f"jmAcm-2_{_jcol}",
title=f"{_name}",
label=f"{_jcol}, {_past}",
ax=ax,
)
ax.set_ylim(_ScanRates_ylim.get(n[-1]))
plt.savefig(
_raw_data_folder.joinpath(f"{_name}.png"),
dpi=100,
bbox_inches="tight",
)
plt.close()
print(_name)
gr[_select_cols].dropna(axis=1, how="all").to_excel(
_raw_data_folder.joinpath(f"{_name}.xlsx")
)
# TODO Check new plots with Update Pre scans for ORR and for FINAL PLOTS!!
if "ORR" in EC_merged_dict.keys():
ECname = "ORR"
_raw_data_folder = mkfolder(EC_folder.joinpath(f"{ECname}_raw_data_3"))
_loadDF = read_load_pkl(f"{ECname}_raw_data")
_mcols = [EvRHE, "Sweep_Type", "SampleID", "postAST"] + [
"postAST_post",
"postAST_pre",
]
_grpcols = (1500, "mean", 1)
print(f"ORR selection: {_grpcols}")
KL_AST_diff = EC_merged_dict.get("KL").get("AST_diff")
if _loadDF.empty:
ORR_pltqry = (
EC_merged_dict.get("ORR")
.get("AST_diff")
.query("RPM_DAC_uni > 1000")
.groupby(["postAST_post", "RPM_DAC_uni", "Sweep_Type", "pH"])
)
_st = []
for pAST in ["postAST_LC", "postAST_sHA"]:
ASTgrp = ORR_pltqry.get_group((pAST, *_grpcols))
for n, gr in ASTgrp.groupby("SampleID"):
for status in ["pre", "post"]:
gr.ORR_RRDE_swp_data_file_pre
_sourcedfls = [
i
for i in gr[f"ORR_RRDE_swp_data_file_{status}"].unique()
if pd.notna(i)
]
_setAST = "no" if "pre" in status else pAST
# _CVdfls = [f.parent.joinpath(e) for f in _sourcedfls
# for e in [a for i in gr[f'ORR_datafilenames_{status}'].unique() for a in i.split(', ')]
# if f.parent.joinpath(e).is_file() ]
if _sourcedfls:
_stCVdata = _read_excel_df(
_sourcedfls, _mcols, _setAST, pAST
)
# _stCVdata = pd.concat([pd.read_excel(i, index_col=[0]) for i in _sourcedfls],sort=False,ignore_index=True).dropna(axis=1,how='all')
# _stCVdata = _stCVdata.assign(**{'postAST' :_setAST, 'postAST_post' : pAST })
# _stCVdata = _stCVdata.rename(columns = {i : f'{i}_{status}' for i in _stCVdata.columns if i not in _mcols})
_st.append(_stCVdata)
ORR_data = pd.concat([i for i in _st])
save_DF_pkl(f"{ECname}_raw_data", ORR_data)  # same key as the read_load_pkl call above
else:
ORR_data = _loadDF
def ORR_prepost_AST_4plots():
KL_grp = KL_AST_diff.query('Electrode == "KL_I_Disk"').groupby(
["postAST_post", "SampleID", "Sweep_Type"]
)
""" problem in data, there are double PAR_files in the _pre data
01.03: fixed the plottings for pre, need to choose which pre version to take still...
11.03: fixed and chose the ORR scans with N2 BGs, all use N2_jcorr, still check which ure used here...."""
_take_sweep_type = "mean"
# .query('postAST_post == "postAST_LC" ')
for n, gr in ORR_data.groupby(["postAST_post", "SampleID"]):
n, gr
# gr = gr.sort_values(EvRHE,ascending=True)
try:
_nKL = KL_grp.get_group((*n, gr.Sweep_Type.unique()[0]))
# _nKL = _nKL.assign(**{'ORR_KL_2e' : 2, 'ORR_KL_4e' : 4})
# build the export name from the group keys (the full _name with dates is only
# defined further down in the per-PAR_file loop)
_KL_name = "_".join([str(i) for i in (*_grpcols, *n)])
_nKL.to_excel(_raw_data_folder.joinpath(f"KL_{_KL_name}.xlsx"))
except:
print(f"no KL plot for: {n}")
_nKL = pd.DataFrame()
_metal = PorphSiO2_template().query("SampleID == @n[-1]").Metal.iloc[0]
# fig,axes = plt.subplots(2,2,figsize=(22/2.54,18/2.54))
for prePF, pfgr in gr.groupby("PAR_file_disk_pre"):
prePF, pfgr
fig, axes = plt.subplots(2, 2, figsize=(22 / 2.54, 18 / 2.54))
# gr.sort_values(EvRHE,ascending=True).plot(x=EvRHE,y=[f'Jcorr_{_stat}'],ax=axes[1][0],ylim=(-6,0.5),xlim=(0,1))
_prepost_date = []
for _stat in ["pre", "post"]:
if _stat == "pre":
_date = [
np.datetime_as_string(i, unit="D")
for i in pfgr[f"EXP_date_day_dt_{_stat}"].unique()
if not pd.isna(i)
][0]
_lbl = f"{_stat}_{_date}"
_ckws = {"c": "b", "label": _lbl}
pfgr.sort_values(EvRHE, ascending=True).plot(
x=EvRHE,
y="Jcorr_pre",
ax=axes[1][0],
ylim=(-6, 0.5),
xlim=(0, 1),
**_ckws,
)
# pfgr.plot(x=EvRHE,y='Jcorr_pre',ax=axes[1][0],ylim=(-6,0.5),xlim=(0,1),**_ckws)
pfgr.plot(
x=EvRHE,
y="Frac_H2O2_pre",
ylim=(0, 30),
ax=axes[0][0],
xlim=(0, 1),
**_ckws,
)
pfgr.plot(
x=EvRHE,
y=f"Jkin_min_{_stat}",
ylim=(0.01, 30),
logy=True,
xlim=(0.5, 0.9),
ax=axes[1][1],
**_ckws,
)
if not _nKL.empty:
_nKL.query("PAR_file_pre == @prePF").plot(
x=f"ORR_{EvRHE}",
y=f"ORR_nElectrons_{_stat}",
ax=axes[0][1],
ylim=(0, 7),
xlim=(0, 1),
**_ckws,
)
_prepost_date.append(_lbl)
elif _stat == "post":
_date = [
np.datetime_as_string(i, unit="D")
for i in gr[f"EXP_date_day_dt_{_stat}"].unique()
if not pd.isna(i)
][0]
_lbl = f"{_stat}_{_date}"
_ckws = {"c": "r"}
gr.plot(
x=EvRHE,
y=f"Jcorr_{_stat}",
ax=axes[1][0],
ylim=(-6, 0.5),
xlim=(0, 1),
label=f"Jcorr_{_lbl}",
**_ckws,
)
gr.plot(
x=EvRHE,
y=f"Frac_H2O2_{_stat}",
ylim=(0, 30),
ax=axes[0][0],
xlim=(0, 1),
label=f"FracH2O2_{_lbl}",
**_ckws,
)
gr.plot(
x=EvRHE,
y=f"Jkin_min_{_stat}",
ylim=(0.01, 30),
logy=True,
xlim=(0.5, 0.9),
ax=axes[1][1],
label=f"Jkin_min_{_lbl}",
**_ckws,
)
if not _nKL.empty:
_nKL.sort_values(f"ORR_{EvRHE}", ascending=True).plot(
x=f"ORR_{EvRHE}",
y=f"ORR_nElectrons_{_stat}",
ax=axes[0][1],
ylim=(0, 7),
xlim=(0, 1),
**_ckws,
)
_prepost_date.append(_lbl)
axes[1][0].axhline(y=0, color="black", ls="--", alpha=0.2)
axes[0][1].axhline(y=2, color="grey", ls="-.", alpha=0.2)
axes[0][1].axhline(y=4, color="grey", ls="-.", alpha=0.4)
_name = "_".join([str(i) for i in (*_grpcols, *n, *_prepost_date)])
fig.suptitle(f"{_metal}, {_name}")
plt.savefig(
_raw_data_folder.joinpath(f"{_name}.png"),
dpi=100,
bbox_inches="tight",
)
plt.close()
gr.to_excel(_raw_data_folder.joinpath(f"{_name}.xlsx"))
def old_ORR_plot():
gr.plot(
x=EvRHE,
y=["Jcorr_pre", "Jcorr_post"],
ax=axes[1][0],
ylim=(-6, 0.5),
xlim=(0, 1),
)
gr.plot(
x=EvRHE,
y=["Frac_H2O2_pre", "Frac_H2O2_post"],
ylim=(0, 30),
ax=axes[0][0],
xlim=(0, 1),
)
gr.plot(
x=EvRHE,
y=["Jkin_min_pre", "Jkin_min_post"],
ylim=(0.01, 30),
logy=True,
xlim=(0.5, 0.9),
ax=axes[1][1],
)
_name = "_".join([str(i) for i in (*_grpcols, *n)])
# TODO implement KL plotting raw data
try:
_nKL = KL_grp.get_group((*n, gr.Sweep_Type.unique()[0]))
# _nKL = _nKL.assign(**{'ORR_KL_2e' : 2, 'ORR_KL_4e' : 4})
_nKL.to_excel(_raw_data_folder.joinpath(f"KL_{_name}.xlsx"))
_nKL.plot(
x=f"ORR_{EvRHE}",
y=["ORR_nElectrons_pre", "ORR_nElectrons_post"],
ax=axes[0][1],
ylim=(0, 7),
xlim=(0, 1),
)
# _nKL.plot(x=f'ORR_{EvRHE}',y=['ORR_KL_4e'],ax=axes[0][1],**{'ls' : '-.'},alpha=0.2, c='grey', label=None)
# _nKL.plot(x=f'ORR_{EvRHE}',y=['ORR_KL_2e'],ax=axes[0][1],**{'ls' : '-.'},alpha=0.2, c='grey', label=None)
axes[0][1].axhline(y=2, color="grey", ls="-.", alpha=0.2)
axes[0][1].axhline(y=4, color="grey", ls="-.", alpha=0.4)
except:
print(f"no KL plot for: {n}")
[ax.legend(fontsize=11) for ax1 in axes for ax in ax1]
plt.savefig(
_raw_data_folder.joinpath(f"{_name}.png"), dpi=100, bbox_inches="tight"
)
plt.close()
gr.to_excel(_raw_data_folder.joinpath(f"{_name}.xlsx"))
def ORR_plot_all():
KL_grp = KL_AST_diff.query('Electrode == "KL_I_Disk"').groupby(
["postAST_post", "SampleID", "Sweep_Type"]
)
templ = PorphSiO2_template()
fig, axes = plt.subplots(2, 2, figsize=(22 / 2.54, 18 / 2.54))
for n, gr in ORR_data.groupby(["postAST_post", "SampleID"]):
n, gr
# gr = gr.sort_values(EvRHE,ascending=True)
_metal = templ.query("SampleID == @n[-1]").Metal.iloc[0]
_cn = templ.query("SampleID == @n[-1]").color.iloc[0]
_RGB = OriginColors.iloc[_cn].RGB_255
print(_metal, _cn, _RGB)
_nKL = KL_grp.get_group((*n, gr.Sweep_Type.unique()[0]))
_name = "_".join([str(i) for i in (*_grpcols, *n)])
for _AST in ["pre", "post"]:
_ls = "-" if "pre" in _AST else ":"
kws = {"ls": _ls, "c": [*_RGB, 0.6]}
gr.plot(
x=EvRHE,
y=f"Jcorr_{_AST}",
ax=axes[1][0],
ylim=(-6, 0.5),
xlim=(0, 1),
**kws,
)
gr.plot(
x=EvRHE,
y=f"Frac_H2O2_{_AST}",
ylim=(0, 30),
ax=axes[0][0],
xlim=(0, 1),
**kws,
)
gr.plot(
x=EvRHE,
y=f"Jkin_min_{_AST}",
ylim=(0.01, 30),
logy=True,
xlim=(0.5, 0.9),
ax=axes[1][1],
**kws,
)
# TODO implement KL plotting raw data
try:
# _nKL = _nKL.assign(**{'ORR_KL_2e' : 2, 'ORR_KL_4e' : 4})
_nKL.plot(
x=f"ORR_{EvRHE}",
y=f"ORR_nElectrons_{_AST}",
ax=axes[0][1],
ylim=(0, 7),
xlim=(0, 1),
)
# _nKL.plot(x=f'ORR_{EvRHE}',y=['ORR_KL_4e'],ax=axes[0][1],**{'ls' : '-.'},alpha=0.2, c='grey', label=None)
# _nKL.plot(x=f'ORR_{EvRHE}',y=['ORR_KL_2e'],ax=axes[0][1],**{'ls' : '-.'},alpha=0.2, c='grey', label=None)
axes[0][1].axhline(y=2, color="grey", ls="-.", alpha=0.2)
axes[0][1].axhline(y=4, color="grey", ls="-.", alpha=0.4)
except:
print(f"no KL plot for: {n}")
_lgnds = [ax.legend(fontsize=11) for ax1 in axes for ax in ax1]
plt.legend(_lgnds, ["jA"])
fig.suptitle(f'{_metal}, {", ".join(n)}')
axes[1][0].axhline(y=0, color="black", ls="--", alpha=0.2)
plt.savefig(
_raw_data_folder.joinpath(f"{_name}.png"), dpi=100, bbox_inches="tight"
)
plt.close()
# gr.to_excel(_raw_data_folder.joinpath(f'{_name}.xlsx'))
if "EIS" in EC_merged_dict.keys():
ECname = "EIS"
EIS_merged = EC_merged_dict.get("EIS")
model_select = EIS_merged["PARS"].EIS_Model_EEC.unique()[1]
print(f"Chosen model: {model_select}")
# 'Model(EEC_2CPE)'
_raw_data_folder = mkfolder(
EC_folder.joinpath(f"{ECname}_{model_select}_raw_data")
)
EIS_merg_matches_clean = EIS_merged["AST_matches"].drop_duplicates()
EIS_merg_matches_clean = EIS_merg_matches_clean.loc[
(EIS_merg_matches_clean.Sweep_Type == "cathodic")
& (EIS_merg_matches_clean.RPM_DAC > 1000)
& (EIS_merg_matches_clean.pH < 2)
]
# EIS_merg_matches_clean.to_excel(_raw_data_folder.parent.joinpath('EIS_AST_matches_pH1_1500rpm.xlsx'))
_loadDF = read_load_pkl(f"{ECname}_data_raw")
_mcols = [EvRHE, "Sweep_Type", "SampleID", "postAST"] + [
"postAST_post",
"postAST_pre",
]
_grpcols = ("cathodic", 1, 0.6)
AST_grp_cols = ["postAST_post", "Sweep_Type", "pH", "E_RHE", "AST_row"]
if _loadDF.empty:
DF_pltqry = EIS_merged["AST_diff_filter"].groupby(AST_grp_cols)
_grp_keys = [
(key, val)
for key, val in DF_pltqry.groups.items()
if "cathodic" in key and key[0] is not np.nan
]
# _st_raw, _st_fit, _st_source = [],[],[]
_st_comb = []
# for pAST in ['postAST_LC','postAST_sHA']:
for (pAST, Sweep_Type, pH, E_RHE, AST_n), _gr in _grp_keys:
_grpcols = (Sweep_Type, pH, E_RHE, AST_n)
# ASTgrp = DF_pltqry.get_group((pAST,*_grpcols))
ASTgrp = DF_pltqry.get_group((pAST, *_grpcols))
_samplegrpcols = ["SampleID", "Gas"]
for n, gr in ASTgrp.groupby(_samplegrpcols):
_prepost_cols = set(
[
"_".join(i.split("_")[0:-1])
for i in list(gr.columns)
if i.endswith("_pre") or i.endswith("_post")
]
)
for status in ["pre", "post"]:
gr.EIS_File_SpecRaw_post
_setAST = "no" if "pre" in status else pAST
_uniq_id = dict(
zip(
AST_grp_cols + _samplegrpcols,
(_setAST, *_grpcols, *n),
)
)
_srcfls_raw = [
i
for i in gr[f"EIS_File_SpecRaw_{status}"].unique()
if pd.notna(i)
]
_srcfls_fit = [
i
for i in gr[f"EIS_File_SpecFit_{status}"].unique()
if pd.notna(i)
]
_srcfls_source = [
i
for i in gr[f"EIS_sourceFilename_{status}"].unique()
if pd.notna(i)
]
# _srcfls_fit
# _test = list(_srcfls_fit[0].parent.rglob(f'*{_GPDRT_path.stem}*'))
# _GP_DRT_succes_rows = gr[f'EIS_GP_DRT_run_success_{status}'].dropna(axis=0)
if _srcfls_fit:
# not _GP_DRT_succes_rows.empty and all([i for i in _GP_DRT_succes_rows.unique()]):
# _GPDRT_path = Path(gr[f'EIS_GP_DRT_fit_{status}'].unique()[0])
def _GP_DRT_load(
_srcfls_fit, DRTsuffix, _uniq_id, status
):
# Path(_srcfls_fit[0]).parent.rglob(f'{Path(_srcfls_fit[0]).name.split("_spectrumfit_")[0]}')
# _test = list(_srcfls_fit[0].parent.rglob(f'*{_GPDRT_path.stem}*'))
# _test = list(_srcfls_raw[0].parent.rglob(f'*{_GPDRT_path.stem}*'))
try:
_DRT_path = list(
Path(_srcfls_fit[0]).parent.rglob(
f'*{Path(_srcfls_fit[0]).name.split("_spectrumfit_")[0]}*{DRTsuffix}*'
)
)
# _GPDRT_path.parent.joinpath(_GPDRT_path.stem+f'_GP_{DRTsuffix}.pkl')
if _DRT_path:
_DRT_DF = pd.read_pickle(_DRT_path[0])
_DRT_DF = _DRT_DF.rename(
columns={
i: f"{i}_{status}"
for i in _DRT_DF.columns
}
)
else:
_DRT_DF = pd.DataFrame()
except:
_DRT_DF = pd.DataFrame()
# return _DRT_DF
_DRT_DF = _DRT_DF.assign(**_uniq_id)
return _DRT_DF
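# _GP_DRT_load (above) looks next to the fitted-spectrum file for a companion
# pickle whose name contains the given DRTsuffix ("DRT_Z_star" appears to hold
# the GP-DRT quantities on the star grid, "Z_exp" the experimental impedance
# points), loads it, suffixes its columns with the current pre/post status and
# tags it with the group identifiers in _uniq_id; if no file matches, an empty
# DataFrame is returned so the caller can merge unconditionally.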
_DRT_star = _GP_DRT_load(
_srcfls_fit, "DRT_Z_star", _uniq_id, status
)
_DRT_exp = _GP_DRT_load(
_srcfls_fit, "Z_exp", _uniq_id, status
)
if not _DRT_star.empty and not _DRT_exp.empty:
_DRT_star = pd.merge_asof(
_DRT_star,
_DRT_exp,
left_on=f"freq_vec_star_{status}",
right_on=f"freq_vec_{status}",
suffixes=["", "_exp"],
)
else:
_DRT_star = pd.merge(_DRT_star, _DRT_exp)
else:
_DRT_star = pd.DataFrame()
if _srcfls_fit:
_mcols = set(_mcols).union(list(_uniq_id.keys()))
_src_data_raw = _read_excel_df(
_srcfls_raw,
_mcols,
_setAST,
pAST,
ref_dict=_uniq_id,
)
_src_data_fit = _read_excel_df(
_srcfls_fit,
_mcols,
_setAST,
pAST,
ref_dict=_uniq_id,
).query(f'Model_EEC_{status} == "{model_select}"')
_src_data_source = _read_excel_df(
_srcfls_source,
_mcols,
_setAST,
pAST,
ref_dict=_uniq_id,
).query(f'Model_EEC_{status} == "{model_select}"')
_st_comb.append(
(
_src_data_raw,
_src_data_fit,
_src_data_source,
_DRT_star,
)
)
# _test_plot_EIS_spectra(_src_data_fit,_DRT_star, status)
# pd.concat([pd.read_excel(i, index_col=[0]) for i in _sourcedfls],sort=False,ignore_index=True).dropna(axis=1,how='all')
# _stCVdata = _stCVdata.assign(**{'postAST' :_setAST, 'postAST_post' : pAST })
# _stCVdata = _stCVdata.rename(columns = {i : f'{i}_{status}' for i in _stCVdata.columns if i not in _mcols})
# _st.append(_stCVdata)
# if _srcfls_raw:
EIS_data_raw = pd.concat([i[0] for i in _st_comb])
save_DF_pkl(f"{ECname}_{model_select}_data_raw", EIS_data_raw)
EIS_data_fit = pd.concat([i[1] for i in _st_comb])
save_DF_pkl(f"{ECname}_{model_select}_data_fit", EIS_data_fit)
EIS_data_source = pd.concat([i[2] for i in _st_comb])
save_DF_pkl(f"{ECname}_{model_select}_data_source", EIS_data_source)
EIS_data_DRT = pd.concat([i[3] for i in _st_comb])
save_DF_pkl(f"{ECname}_{model_select}_data_DRT", EIS_data_DRT)
else:
EIS_data_raw = read_load_pkl(f"{ECname}_{model_select}_data_raw")
EIS_data_fit = read_load_pkl(f"{ECname}_{model_select}_data_fit")
EIS_data_source = read_load_pkl(f"{ECname}_{model_select}_data_source")
EIS_data_DRT = read_load_pkl(f"{ECname}_{model_select}_data_DRT")
_plt_grp_cols = ["SampleID", "AST_row", "Gas", "E_RHE"]
EIS_data_DRT_grp = EIS_data_DRT.groupby(_plt_grp_cols)
for n, gr in EIS_data_fit.groupby(_plt_grp_cols):
n, gr
"postAST_post"
if n in EIS_data_DRT_grp.groups:
_DRT_grp = EIS_data_DRT_grp.get_group(n)
else:
_DRT_grp = pd.DataFrame()
_ndir = "_".join(
[
str(int(1000 * i)) if "float" in str(type(i)) else str(i)
for i in n[0:2]
]
)
# _ndir += f'_{gr.postAST_post.unique()[0]}'
_png_stem = "_".join(
[
str(int(1000 * i)) if "float" in str(type(i)) else str(i)
for i in n
]
)
_test_plot_EIS_spectra(
gr, _DRT_grp, save_path=_raw_data_folder.joinpath(_ndir, _png_stem)
)
# TODO check for missing status 'pre' DRT plots ...
# TODO change plotting loop to include post and pre
# TODO plot Pars vs E_RHE with pre and post combos besides raw data
_plt_grp_cols_noE = ["SampleID", "AST_row"]
for n, _pargrp in EIS_merged["AST_diff"].groupby(_plt_grp_cols_noE):
n, _pargrp
_ndir = "_".join(
[
str(int(1000 * i)) if "float" in str(type(i)) else str(i)
for i in n
]
)
# _ndir += f'_{_pargrp.postAST_post.unique()[0]}'
_test_plot_EIS_Pars(_pargrp, save_path=_raw_data_folder.joinpath(_ndir))
# 'postAST_post'
# if n in EIS_data_DRT_grp.groups:
# _DRT_grp = EIS_data_DRT_grp.get_group(n)
# else:
# _DRT_grp = pd.DataFrame()
# fig,axes = plt.subplots(2,2,figsize=(10,10))
# gr.plot(x=EvRHE,y=['Jcorr_pre','Jcorr_post'],ax=axes[1][0])
# gr.plot(x=EvRHE,y=['Frac_H2O2_pre','Frac_H2O2_post'],ylim=(0,50),ax=axes[0][0])
# gr.plot(x=EvRHE,y=['Jkin_min_pre','Jkin_min_post'],ylim=(0,3),xlim=(0.5,0.9),ax=axes[1][1])
# fig.suptitle(f'{n}')
# [ax.legend(fontsize=14) for ax1 in axes for ax in ax1]
# _name= '_'.join([str(i) for i in (*_grpcols,*n)])
# plt.savefig(_raw_data_folder.joinpath(f'{_name}.png'),dpi=100,bbox_inches='tight')
# plt.close()
# gr.to_excel(_raw_data_folder.joinpath(f'{_name}.xlsx'))
if "HER" in EC_merged_dict.keys():
print("export HER raw data")
# TODO HER
if "OER" in EC_merged_dict.keys():
print("export OER raw data")
# TODO OER
# _src_data_fit, _DRT_star, status_opts = gr,EIS_data_DRT_grp.get_group(n), ['pre','post']
def _test_plot_EIS_spectra(
_src_data_fit, _DRT_star, status_opts=["pre", "post"], save_path=""
):
fig, axes = plt.subplots(ncols=1, nrows=4, figsize=(8, 10))
ax1, ax2, ax3, ax4 = axes
_status = {}
for status in status_opts:
c_exp = "green" if "pre" in status else "orange"
_src_data_fit.plot(
x=f"FIT_Zre_{status}", y=f"FIT_-Zim_{status}", kind="line", ax=ax1
)
_src_data_fit.plot(
x=f"DATA_Zre_{status}",
y=f"DATA_-Zim_{status}",
kind="scatter",
c=c_exp,
ax=ax1,
)
_src_data_fit.plot(
x=f"FIT_Yre_{status}", y=f"FIT_Yim_{status}", kind="line", ax=ax2
)
_src_data_fit.plot(
x=f"DATA_Yre_{status}",
y=f"DATA_Yim_{status}",
kind="scatter",
c=c_exp,
ax=ax2,
)
_status.update(
{
status: _src_data_fit.dropna(
subset=[f"DATA_Zre_{status}"]
).postAST.unique()[0]
}
)
if not _DRT_star.empty:
_DRT_star.plot(
x=f"freq_vec_star_{status}",
y=f"gamma_vec_star_{status}",
logx=True,
ax=ax4,
)
_DRT_star = _DRT_star.assign(
**{
f"-1_Z_exp_imag_{status}": -1
* _DRT_star[f"Z_exp_imag_{status}"],
f"-1_Z_im_vec_star_{status}": -1
* _DRT_star[f"Z_im_vec_star_{status}"],
}
)
_DRT_star.plot(
x=f"freq_vec_{status}",
y=f"-1_Z_exp_imag_{status}",
logx=True,
ax=ax3,
kind="scatter",
c=c_exp,
)
_DRT_star.plot(
x=f"freq_vec_star_{status}",
y=f"-1_Z_im_vec_star_{status}",
logx=True,
ax=ax3,
)
# TODO Add plot for -Zim vs Freq for DRT_fitting and Experimental data points...
# _src_data_fit.plot(x=f'FIT_Yre_{status}', y=f'FIT_Yim_{status}',kind='line',ax=axes[1][1])
# _src_data_fit.plot(x=f'DATA_Yre_{status}', y=f'DATA_Yim_{status}',kind='scatter', c='r',ax=axes[1][1])
_titletxt = dict(
[
(i, _src_data_fit[i].unique()[0])
for i in _src_data_fit.columns
if _src_data_fit[i].nunique() == 1
and not any([opt in i for opt in status_opts])
and i in Load_from_Indexes.EC_label_cols[1::]
]
)
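# _titletxt collects the metadata columns that are constant within this group
# (not pre/post specific and part of the EC label columns) so they can be
# shown in the figure suptitle below.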
plt.suptitle(f'{", ".join([str(i) for i in _titletxt.values()])},\n {_status}')
_newdir = f'{save_path.parent.name}_{_status["post"]}'
mkfolder(save_path.parent.parent.joinpath(_newdir))
if save_path:
plt.savefig(
save_path.parent.parent.joinpath(_newdir, save_path.stem).with_suffix(
".png"
),
dpi=200,
bbox_inches="tight",
)
else:
plt.show()
plt.close()
def _test_plot_EIS_Pars(_pargrp, status_opts=["pre", "post"], save_path=""):
_pargrp.EIS_lmfit_var_names_pre
# ax1, ax2, ax3,ax4 = axes
_varsgrp = [
[
a
for i in _pargrp[f"EIS_lmfit_var_names_{_st}"].unique()
for a in i.split(", ")
]
for _st in status_opts
]
_vars = set([a for i in _varsgrp for a in i])
# mkfolder(save_path)
for _par in _vars:
_ps = eisplot(_par)
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(10, 6))
_status = {}
_valsmax = []
for status in status_opts:
gas_pars = [
i
for i in _pargrp.columns
if i.startswith(f"EIS_{_par}")
and i.endswith(status)
and not "stderr" in i
and not _pargrp[i].isna().all()
and len(i.split("_")) == 4
and i.split("_")[1] == _par
]
if "_" in _par:
gas_pars = [
i
for i in _pargrp.columns
if i.startswith(f"EIS_{_par}")
and i.endswith(status)
and not "stderr" in i
and not _pargrp[i].isna().all()
and len(i.split("_")) == 5
and "_".join(i.split("_")[1:3]) == _par
]
_status.update({status: _pargrp[f"postAST_{status}"].unique()[0]})
for gp in gas_pars:
_stderr = [
i
for i in _pargrp.columns
if i.startswith(f'{"_".join(gp.split("_")[0:2])}')
and i.endswith(status)
and "stderr" in i
]
c_exp = "green" if "pre" in status else "orange"
ms = "s" if not "guess" in gp else "*"
# _axnum = 0 if 'N2' in gp else 1
# print(gp,c_exp,_axnum)
_pargrp.plot(
x="E_RHE",
y=gp,
kind="scatter",
yerr=_stderr[0],
c=c_exp,
ax=ax,
logy=_ps.logy,
label=gp,
s=80,
marker=ms,
)
_clvals = [
i for i in _pargrp[gp].dropna().values if i > 0 and i < 1e6
]
_vals_indx = [
n
for n, i in enumerate(zscore(_clvals))
if np.abs(i) < 3 and _clvals[n] > 0
]
if _vals_indx:
_valsmax.append(np.array(_clvals)[_vals_indx].max())
# _vals[_vals_indx].min()
_ylim = _ps.ylim # if not _par == 'Cdlp' else (0,0.002)
if _valsmax:
_ylim = (_ylim[0], np.min([_ylim[1], np.max(_valsmax).max() * 2]))
ax.set_ylim(_ylim)
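# The y-limits above are data driven: values outside (0, 1e6) are dropped,
# remaining values with |z-score| >= 3 are treated as outliers, and the upper
# plot limit is capped at twice the largest retained value (never above the
# default eisplot limit for this parameter).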
ax.set_ylabel(_par)
_titletxt = dict(
[
(i, _pargrp[i].unique()[0])
for i in _pargrp.columns
if _pargrp[i].nunique() == 1
and not any([opt in i for opt in status_opts])
and i in Load_from_Indexes.EC_label_cols[1::]
]
)
plt.suptitle(
f'{", ".join([str(i) for i in _titletxt.values()])},\n {_status}'
)
_newdir = save_path.parent.joinpath(f'{save_path.name}_{_status["post"]}')
mkfolder(_newdir)
_stem = f'{_par}_{_titletxt["Gas"]}_{_titletxt["SampleID"]}_{_titletxt["RPM_DAC"]}_{_pargrp.AST_row.unique()[0]}'
if save_path:
plt.savefig(
_newdir.joinpath(_stem).with_suffix(".png"),
dpi=200,
bbox_inches="tight",
)
else:
plt.show()
plt.close()
def _test_plots():
# N2CV plot test
_x = [["IndividualLabel"], "SampleID"][0][0]
N2_AST_diff = EC_merged_dict.get("N2CV").get("AST_diff_filter")
ECname = "N2"
_raw_data_folder = mkfolder(EC_folder.joinpath(f"{ECname}_raw_data"))
_y = (["N2_Cdl_Fcm-2_600_diff_perc"], (-80, 100))
_y = (["N2_Cdl_Fcm-2_600_pre", "N2_Cdl_Fcm-2_600_post"], (0, 50e-3))
N2_pltqry = N2_AST_diff.groupby(
["postAST_post", "Sweep_Type", "pH", "Loading_cm2"]
)
n = ("postAST_LC", "cathodic", 1.0, 0.379)
gr = N2_pltqry.get_group(n)
for n, gr in N2_pltqry: # .groupby(['postAST_post', 'Sweep_Type','pH']
n, gr
_title = "_".join([str(i) for i in n])
fig, ax = plt.subplots()
gr.set_index(_x).sort_values(by=_x).plot.bar(
y=_y[0], ylim=_y[1], rot=0, title=_title, ax=ax
)
plt.savefig(
_raw_data_folder.joinpath(f"bar_Cdl_{_title}.png"),
dpi=200,
bbox_inches="tight",
)
plt.close()
N2_AST_diff.groupby(["postAST_post"]).plot(
x="SampleID", y="N2_Cdl_Fcm-2_400_diff_abs", ylim=(-200, 200)
)
tt3 = EC_index.loc[
(EC_index.PAR_date_day_dt.isin(set([a for i in AST_days for a in i])))
& (EC_index.PAR_exp == "N2_act")
]
gg = ("postAST_LC", "cathodic", 1.0, 0.379)
ASTgrp = N2_pltqry.get_group(gg)
# ORR
_AST_diff = "AST_diff_filter"
ORR_AST_diff = EC_merged_dict.get("ORR").get(_AST_diff).reset_index()
ECname = "ORR"
list(ORR_AST_diff.columns)
_raw_data_folder = mkfolder(EC_folder.joinpath(f"{ECname}_raw_data"))
_y = "ORR_Frac_H2O2_500_diff_abs"
_y = (["ORR_Frac_H2O2_500_pre", "ORR_Frac_H2O2_500_post"], (0, 40), False)
_y = (["ORR_Jkin_min_750_pre", "ORR_Jkin_min_750_post"], (0.01, 10), True)
_y = (["ORR_Jkin_min_650_pre", "ORR_Jkin_min_650_post"], (0.1, 10), True)
_y = (["ORR_E_half_pre", "ORR_E_half_post"], (0.35, 0.8), False)
_y = (["ORR_E_onset_pre", "ORR_E_onset_post"], (0.65, 1), False)
ORR_pltqry = ORR_AST_diff.query("RPM_DAC_uni > 1000").groupby(
["postAST_post", "RPM_DAC_uni", "Sweep_Type", "pH", "Loading_cm2"]
)
# ORR_AST.query('RPM_DAC_uni > 1000').groupby(['post_postAST', 'RPM_DAC_uni','Sweep_Type','pH']).groups
ORR_pltqry.groups
# oldORR.groupby(['postAST_post', 'RPM_DAC','Sweep_Type','pH'])
[
gr.plot(x=_x, y=_y[0], ylim=_y[1], title=", ".join(str(i) for i in n))
for n, gr in ORR_pltqry
]
for n, gr in ORR_pltqry:
if n[1] > 1000 and "mean" in n[2]:
fig, ax = plt.subplots()
_title = "_".join([str(i) for i in n])
gr.sort_values(by="IndividualLabel").plot.bar(
x=_x, y=_y[0], ylim=_y[1], logy=_y[2], title=_title, rot=60, ax=ax
)
_ylbl = _y[0][0].split("_pre")[0] if "pre" in _y[0][0] else _y[0][0]
ax.set_ylabel(_ylbl)
ax.legend(ncol=1, fontsize=10)
plt.savefig(
_raw_data_folder.joinpath(f"bar_{ECname}_{_title}_{_ylbl}.png"),
dpi=200,
bbox_inches="tight",
)
# [gr.sort_values(by='IndividualLabel').plot.bar(x= _x , y=_y[0],ylim=_y[1],logy=_y[2], title=', '.join([str(i) for i in n]), rot=60)
# for n,gr in ORR_pltqry if n[1] > 1000 and 'mean' in n[2]]
[
(
n,
"Pre",
gr["PAR_file_pre"].unique(),
"Post",
gr["PAR_file_post"].unique(),
)
for n, gr in ORR_pltqry
]
ORR_pltqry.get_group(("postAST_sHA", 1500.0, "cathodic", 1.0))[
["SampleID", "ORR_E_half_post"]
]
ORR_pltqry.get_group((None, 1500.0, "cathodic", 1.0))[
["SampleID", *_y[0], "PAR_file_post"]
]
ORR_pltqry.get_group(list(ORR_pltqry.groups.keys())[0])
# ORR KL
KL_AST_diff = EC_merged_dict.get("KL").get(_AST_diff).reset_index()
ECname = "ORR"
_raw_data_folder = mkfolder(EC_folder.joinpath(f"{ECname}_raw_data"))
_y = (["ORR_nElectrons_diff_perc"], (-0, 0.51))
_y = (["ORR_nElectrons_pre", "ORR_nElectrons_post"], (0, 10))
_grps = KL_AST_diff.groupby(
["postAST_post", "Electrode", "Sweep_Type", "pH", "ORR_E_AppV_RHE"]
)
_Etest = [
(n, len(gr.dropna(subset=_y[0], how="any", axis=0)))
for n, gr in _grps
if n[1] == "KL_I_Disk" and len(gr) > 2
]
_Etest = sorted(_Etest, key=lambda x: x[0][-1])
E_KL = 0.5
KL_qry = KL_AST_diff.loc[
np.isclose(KL_AST_diff.ORR_E_AppV_RHE, E_KL, atol=0.01)
].groupby(["postAST_post", "Electrode", "Sweep_Type", "pH"])
KL_AST_diff.query("ORR_E_AppV_RHE ==@E_KL")
for n, gr in KL_qry:
if "Disk" in n[1] and "mean" in n[-2]:
fig, ax = plt.subplots()
_title = "_".join([str(i) for i in n + (f"E={E_KL}",)])
_ylbl = _y[0][0].split("_pre")[0] if "pre" in _y[0][0] else _y[0][0]
ax.set_ylabel(_ylbl)
ax.legend(ncol=1, fontsize=10)
# _title = '_'.join([str(i) for i in n])
gr.sort_values(by="IndividualLabel").plot.bar(
x=_x, y=_y[0], ylim=_y[1], title=_title, rot=60, ax=ax
)
plt.savefig(
_raw_data_folder.joinpath(f"bar_{ECname}_KL__{_title}_{_ylbl}.png"),
dpi=200,
bbox_inches="tight",
)
[
gr.sort_values(by="IndividualLabel").plot.bar(
x=_x,
y=_y[0],
ylim=_y[1],
title=", ".join([str(i) for i in n + (E_KL,)]),
rot=60,
)
for n, gr in KL_qry
if "Disk" in n[1]
]
[
gr.sort_values(by="IndividualLabel").plot.bar(
x=_x,
y=_y[0],
ylim=_y[1],
title=", ".join([str(i) for i in n + (E_KL,)]),
rot=60,
)
for n, gr in KL_qry
if "Disk" in n[1] and "mean" in n[2]
]
# EIS
_y = (["EIS_Rct_O2_pre", "EIS_Rct_O2_post"], (0, 600), (0.55, 0.65))
EIS_qry = EIS_AST_diff.query(
'E_RHE > @_y[2][0] & E_RHE < @_y[2][1] & Sweep_Type == "cathodic"'
).groupby(["postAST_post", "Sweep_Type", "pH", "E_RHE"])
[
gr.sort_values(by="IndividualLabel")
.dropna(axis=1, how="all")
.plot.bar(
x=_x, y=_y[0], ylim=_y[1], title=", ".join([str(i) for i in n]), rot=60
)
for n, gr in EIS_qry
if not gr.dropna(axis=1, how="all").empty
]
# HER test plot
HER_AST_diff_E = EC_merged_dict["HER_E_slice"]
_y = (["HER_J_upper_pre", "HER_J_upper_post"], (-2, 2), (-0.49, -0.52))
_y = (["HER_Tafel_slope_pre", "HER_Tafel_slope_post"], (0, 1e3), (-0.39, -0.42))
E_350mV_slice = HER_AST_diff_E.loc[(HER_AST_diff_E["HER_Segnum"] > 1)].query(
'(HER_type == "E_slice") & (HER_at_E_slice < @_y[2][0]) & (HER_at_E_slice > @_y[2][1])'
)
HER_pltqry = E_350mV_slice.groupby(
["postAST", "Sweep_Type", "pH", "Loading_cm2"]
)
HER_pltqry = E_350mV_slice.groupby(
["SampleID", "Sweep_Type", "pH", "Loading_cm2"]
)
[
gr.sort_values(by="IndividualLabel").plot.bar(
x=_x, y=_y[0], ylim=_y[1], title=", ".join([str(i) for i in n]), rot=60
)
for n, gr in HER_pltqry
]
E_350mV_slice = HER_AST_diff.loc[(HER_AST_diff["HER_Segment #_pre"] > 1)].query(
'(HER_type_pre == "E_slice") & (HER_at_E_slice_pre < -0.29) & (HER_at_E_slice_pre > -0.33)'
)
jmA2_slice = HER_AST_diff.loc[(HER_AST_diff["Segment #"] > 1)].query(
'(HER_type == "j_slice_onset") & (HER_at_J_slice == -2)'
)
# _Dm.plot(x='E_RHE',y=['EIS_Rct_O2_pre', 'EIS_Rct_O2_post', 'EIS_Rct_O2_diff_abs']) # test plotting
#%% == NOT_postChar ==
class NOT_postChar:
suffixes = ["R", "P", "EA", "B"]
def __init__(self):
self.folder = FindExpFolder("PorphSiO2").folder
# FindExpFolder().TopDir.joinpath(Path('Preparation-Thesis\SiO2_projects\SiO2_Me_EC+Struc'))
self.raman = self.postRaman
self.bet = self.postBET
self.ea = self.postEA
self.prep = self.postPrep  # used by self.merged(), which merges on self.prep
self.prec_ea_ratios()
self.merged = self.merged()
def merged(self):
cols = list(PorphSiO2_template().columns)
merged_out = pd.merge(
self.prep,
pd.merge(self.ea, pd.merge(self.raman, self.bet, on=cols), on=cols),
on=cols,
)
return merged_out
def decorator(func):
@functools.wraps(func)
def wrapper_decorator(*args, **kwargs):
# Do something before
value = postChar.slice_out(*args, slice_lst=postChar.template().SampleID)
# Do something after
return value
return wrapper_decorator
def slice_out(func, template=PorphSiO2_template()):
pars, suffx = func()
try:
slice_lst = template.SampleID
pars_out = pars.loc[pars.SampleID.isin(slice_lst)]
pars_out = pd.merge(template, pars_out, on="SampleID")
if suffx != "":
cols = [i for i in pars_out.columns if i not in template.columns]
pars_out = pars_out.rename(
columns=dict(zip(cols, [f"{suffx}_" + i for i in cols]))
)
except:
pars_out = pars
print("No slice out for {func.__name__} ")
return pars_out
@slice_out
def postRaman(peak_model="5peaks", plot_resid=False):
ramandir = FindExpFolder("PorphSiO2").folder / "SiO2_Me_EC+Struc" / "Raman"
raman_files = ramandir.rglob("*xlsx")
fitpars_fls = [
i
for i in raman_files
if all([a in i.stem for a in ["FitParameters_Model", "peaks"]])
]
FitPars_raw = pd.concat(
[
pd.read_excel(i).assign(**{"Model": i.stem.split("Model_")[-1]})
for i in fitpars_fls
],
sort=False,
)
FitPars = FitPars_raw.loc[
FitPars_raw.SampleID.isin(PorphSiO2_template().SampleID.values)
]
if plot_resid:
Model_redchi = pd.DataFrame(
[
(n, np.abs(gr.redchi).mean(), np.abs(gr.redchi).sum())
for n, gr in FitPars.groupby("Model")
],
columns=["Model", "redchi_mean", "redchi_sum"],
).set_index("Model")
Model_chi = pd.DataFrame(
[
(n, gr.chisqr.mean(), gr.chisqr.sum())
for n, gr in FitPars.groupby("Model")
],
columns=["Model", "redchi_mean", "redchi_sum"],
).set_index("Model")
Model_redchi.plot.bar()
Model_chi.plot.bar()
if peak_model:
FPars_out_1st = FitPars.loc[FitPars.Model.isin([peak_model])]
else:
FPars_out_1st = FitPars
t2nd_mod = "2ndOrder_4peaks"
if t2nd_mod in FitPars_raw.Model.unique():
FPars_out_2nd = FitPars.loc[FitPars.Model == t2nd_mod].dropna(axis=1)
flt2nd = get_float_cols(FPars_out_2nd)
FPars_out_2nd = FPars_out_2nd.rename(
columns=dict(zip(flt2nd, [f"2nd_{i}" for i in flt2nd]))
)
FPars_out = pd.merge(
FPars_out_1st.dropna(axis=1),
FPars_out_2nd,
on="SampleID",
how="left",
suffixes=["_1st", "_2nd"],
)
else:
FPars_out = FPars_out_1st
return FPars_out, "R"
@slice_out
def postBET():
betdir = FindExpFolder("PorphSiO2").folder.joinpath("SiO2_Me_EC+Struc/BET")
betdir = FindExpFolder("BET").DestDir
bet_files = betdir.rglob("BET_pars_index*pkl")
BET_ovv = pd.concat([pd.read_pickle(i) for i in bet_files])
BET_ovv_template = BET_ovv
# .loc[BET_ovv.SampleID.isin(PorphSiO2_template().SampleID.unique())]
BET_ovv_template = BET_ovv_template.loc[
BET_ovv_template.fullPath.str.endswith("RAW")
& ~BET_ovv_template.fullPath.str.contains("_FAIL")
]
return BET_ovv_template, ""
@slice_out
def postEA():
EAcols = [
"SampleID",
"C/N_ratio",
"N_content",
"C_content",
"H_content",
"100-CHN",
]
EA_results = pd.read_excel(list(FindExpFolder("EA").DestDir.rglob("*xlsx"))[0])[
EAcols
]
EA_results["C/H_ratio"] = EA_results["C_content"] / EA_results["H_content"]
return EA_results, "EA"
@slice_out
def postPrep():
Prep_Porph_SiO2 = {
"SampleID": ("JOS1", "JOS2", "JOS3", "JOS4", "JOS5"),
"WL_precursor": (31.0, 56.0, 53.0, 38.0, 41.0),
"Precursor_type": (
"FeTMPPCl",
"CoTMPPCl",
"MnTPPCl",
"FeTPPCl",
"H2TMPPCl",
),
"MW_g-mol": (824.12, 791.67, 703.11, 704.02, 734.84),
"Metal_element": (26.0, 27.0, 25.0, 26.0, 1.0),
}
Prep_Porph = pd.DataFrame(Prep_Porph_SiO2)
from pathlib import Path
import copy
import pickle as pkl
from mmap import mmap
from scipy import stats as st
from scipy.stats._continuous_distns import FitDataError
import torch
from sklearn import svm
from sklearn import linear_model
import pandas as pd
import seaborn as sns
import warnings
import numpy as np
import os
import matplotlib.colors as mcolors
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
from mpl_toolkits.mplot3d.art3d import juggle_axes
from matplotlib.ticker import MaxNLocator
from joblib import Memory
import math
import lyap
import model_loader_utils as loader
import initialize_and_train as train
import utils
memory = Memory(location='./memoization_cache', verbose=2)
# memory.clear()
## Functions for computing means and error bars for the plots. 68% confidence
# intervals and means are currently
# implemented in this code. The commented out code is for using a gamma
# distribution to compute these, but uses a
# custom version of seaborn plotting library to plot.
def orth_proj(v):
n = len(v)
vv = v.reshape(-1, 1)
return torch.eye(n) - (vv@vv.T)/(v@v)
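# Quick sanity check for orth_proj (hypothetical values, kept as a comment so
# the module-level code is unchanged): the returned projector P should
# annihilate v and be idempotent.
# _v = torch.randn(5)
# _P = orth_proj(_v)
# assert torch.allclose(_P @ _v, torch.zeros(5), atol=1e-5)
# assert torch.allclose(_P @ _P, _P, atol=1e-5)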
USE_ERRORBARS = True
# USE_ERRORBARS = False
LEGEND = False
# LEGEND = True
folder_root = '../results/figs/'
def ci_acc(vals):
median, bounds = median_and_bound(vals, perc_bound=0.75, loc=1.,
shift=-.0001, reflect=True)
return bounds[1], bounds[0]
# ci_acc = 68
# ci_acc = 95
def est_acc(vals):
median, bounds = median_and_bound(vals, perc_bound=0.75, loc=1.,
shift=-.0001, reflect=True)
return median
# est_acc = "mean"
def ci_dim(vals):
median, bounds = median_and_bound(vals, perc_bound=0.75, loc=.9999)
return bounds[1], bounds[0]
# ci_dim = 68
# ci_dim = 95
def est_dim(vals):
median, bounds = median_and_bound(vals, perc_bound=0.75, loc=.9999)
return median
# est_dim = "mean"
def point_replace(a_string):
a_string = str(a_string)
return a_string.replace(".", "p")
def get_color(x, cmap=plt.cm.plasma):
"""Get normalized color assignments based on input data x and colormap
cmap."""
mag = torch.max(x) - torch.min(x)
x_norm = (x.float() - torch.min(x))/mag
return cmap(x_norm)
def median_and_bound(samples, perc_bound, dist_type='gamma', loc=0., shift=0,
reflect=False):
"""Get median and probability mass intervals for a gamma distribution fit
of samples."""
samples = np.array(samples)
def do_reflect(x, center):
return -1*(x - center) + center
if dist_type == 'gamma':
if np.sum(samples[0] == samples) == len(samples):
median = samples[0]
interval = [samples[0], samples[0]]
return median, interval
if reflect:
samples_reflected = do_reflect(samples, loc)
shape_ps, loc_fit, scale = st.gamma.fit(samples_reflected,
floc=loc + shift)
median_reflected = st.gamma.median(shape_ps, loc=loc, scale=scale)
interval_reflected = np.array(
st.gamma.interval(perc_bound, shape_ps, loc=loc, scale=scale))
median = do_reflect(median_reflected, loc)
interval = do_reflect(interval_reflected, loc)
else:
shape_ps, loc, scale = st.gamma.fit(samples, floc=loc + shift)
median = st.gamma.median(shape_ps, loc=loc, scale=scale)
interval = np.array(
st.gamma.interval(perc_bound, shape_ps, loc=loc, scale=scale))
else:
raise ValueError("Distribution option (dist_type) not recognized.")
return median, interval
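# Usage sketch for median_and_bound (sample values are hypothetical):
# med, (lo, hi) = median_and_bound(np.random.gamma(2., 1., size=50),
#                                  perc_bound=0.75)
# returns the median and the central 75% probability-mass interval of the
# fitted gamma distribution; ci_acc/est_acc call it with reflect=True and
# loc=1. so that accuracies, which are bounded above by 1, are reflected
# about 1 before fitting.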
## Set parameters for figure aesthetics
plt.rcParams['font.size'] = 6
plt.rcParams['lines.markersize'] = 1
plt.rcParams['lines.linewidth'] = 1
plt.rcParams['axes.labelsize'] = 7
plt.rcParams['axes.spines.right'] = False
plt.rcParams['axes.spines.top'] = False
plt.rcParams['axes.titlesize'] = 8
# Colormaps
class_style = 'color'
cols11 = np.array([90, 100, 170])/255
cols12 = np.array([37, 50, 120])/255
cols21 = np.array([250, 171, 62])/255
cols22 = np.array([156, 110, 35])/255
cmap_activation_pnts = mcolors.ListedColormap([cols11, cols21])
cmap_activation_pnts_edge = mcolors.ListedColormap([cols12, cols22])
rasterized = False
dpi = 800
ext = 'pdf'
# Default figure size
figsize = (1.5, 1.2)
ax_pos = (0, 0, 1, 1)
def make_fig(figsize=figsize, ax_pos=ax_pos):
"""Create figure."""
fig = plt.figure(figsize=figsize)
ax = fig.add_axes(ax_pos)
return fig, ax
def out_fig(fig, figname, subfolder='', show=False, save=True, axis_type=0,
name_order=0, data=None):
""" Save figure."""
folder = Path(folder_root)
figname = point_replace(figname)
# os.makedirs('../results/figs/', exist_ok=True)
os.makedirs(folder, exist_ok=True)
ax = fig.axes[0]
ax.set_xlabel('')
ax.set_ylabel('')
ax.set_rasterized(rasterized)
if axis_type == 1:
ax.tick_params(axis='both', which='both',
# both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
left=False, top=False,
# ticks along the top edge are off
labelbottom=False,
labelleft=False) # labels along the bottom edge are off
elif axis_type == 2:
ax.axis('off')
if name_order == 0:
fig_path = folder/subfolder/figname
else:
fig_path = folder/subfolder/figname
if save:
os.makedirs(folder/subfolder, exist_ok=True)
fig_file = fig_path.with_suffix('.' + ext)
print(f"Saving figure to {fig_file}")
fig.savefig(fig_file, dpi=dpi, transparent=True, bbox_inches='tight')
if show:
fig.tight_layout()
fig.show()
if data is not None:
os.makedirs(folder/subfolder/'data/', exist_ok=True)
with open(folder/subfolder/'data/{}_data'.format(figname),
'wb') as fid:
pkl.dump(data, fid, protocol=4)
plt.close('all')
def autocorrelation(train_params, figname='autocorrelation'):
train_params_loc = train_params.copy()
model, params, run_dir = train.initialize_and_train(**train_params_loc)
class_datasets = params['datasets']
# val_loss = params['history']['losses']['val']
# val_losses[i0, i1] = val_loss
# val_acc = params['history']['accuracies']['val']
# val_accs[i0, i1] = val_acc
train_samples_per_epoch = len(class_datasets['train'])
class_datasets['train'].max_samples = 10
torch.manual_seed(params['model_seed'])
X = class_datasets['train'][:][0]
T = 0
if T > 0:
X = utils.extend_input(X, T)
X0 = X[:, 0]
elif train_params_loc['network'] != 'feedforward':
X0 = X[:, 0]
else:
X0 = X
# X = utils.extend_input(X, 10)
loader.load_model_from_epoch_and_dir(model, run_dir, -1)
hid = []
hid += model.get_post_activations(X)[:-1]
# auto_corr_mean = []
# auto_corr_var = []
auto_corr_table = pd.DataFrame(columns=['t_next', 'autocorr'])
h = hid[0]
for i0 in range(len(hid)):
h_next = hid[i0]
overlap = torch.sum(h*h_next, dim=1)
norms_h = torch.sqrt(torch.sum(h**2, dim=1))
norms_h_next = torch.sqrt(torch.sum(h_next**2, dim=1))
corrs = overlap/(norms_h*norms_h_next)
avg_corr = torch.mean(corrs)
d = {'t_next': i0, 'autocorr': corrs}
auto_corr_table = auto_corr_table.append(pd.DataFrame(d),
ignore_index=True)
fig, ax = make_fig(figsize)
sns.lineplot(ax=ax, x='t_next', y='autocorr', data=auto_corr_table)
out_fig(fig, figname)
def snapshots_through_time(train_params, figname="snap", subdir="snaps"):
"""
Plot PCA snapshots of the representation through time.
Parameters
----------
train_params : dict
Dictionary of training parameters that specify the model and dataset
to use for training.
"""
subdir = Path(subdir)
X_dim = train_params['X_dim']
FEEDFORWARD = train_params['network'] == 'feedforward'
num_pnts_dim_red = 800
num_plot = 600
train_params_loc = copy.deepcopy(train_params)
model, params, run_dir = train.initialize_and_train(**train_params_loc)
class_datasets = params['datasets']
class_datasets['train'].max_samples = num_pnts_dim_red
torch.manual_seed(train_params_loc['model_seed'])
X, Y = class_datasets['train'][:]
if FEEDFORWARD:
T = 10
y = Y
X0 = X
else:
T = 30
# T = 100
X = utils.extend_input(X, T + 2)
X0 = X[:, 0]
y = Y[:, -1]
loader.load_model_from_epoch_and_dir(model, run_dir, 0, 0)
hid_0 = [X0]
hid_0 += model.get_post_activations(X)[:-1]
loader.load_model_from_epoch_and_dir(model, run_dir,
train_params_loc['num_epochs'], 0)
hid = [X0]
hid += model.get_post_activations(X)[:-1]
if FEEDFORWARD:
r = model.layer_weights[-1].detach().clone().T
else:
r = model.Wout.detach().clone()
# r0_n = r[0] / torch.norm(r[0])
# r1_n = r[1] / torch.norm(r[1])
#
# r0_n_v = r0_n.reshape(r0_n.shape[0], 1)
# r1_n_v = r1_n.reshape(r1_n.shape[0], 1)
# r0_orth = torch.eye(len(r0_n)) - r0_n_v @ r0_n_v.T
# r1_orth = torch.eye(len(r1_n)) - r1_n_v @ r1_n_v.T
# h = hid[10]
# # h_proj = h @ r_orth
# u, s, v = torch.svd(h)
# v0 = v[:, 0]
# def orth_projector(v):
# n = len(v)
# return (torch.eye(n) - v.reshape(n, 1)@v.reshape(1, n))/(v@v)
# v0_orth = (torch.eye(n) - v0.reshape(n,1)@v0.reshape(1,n))/(v0@v0)
# h_v0_orth = h @ v0_orth
# r0_e_p = orth_projector(r0_e)
# r1_e_p = orth_projector(r1_e)
# h_r0_e_p0 = h[y] @ r0_e_p
# h_r0_e_p1 = h[y] @ r1_e_p
coloring = get_color(y, cmap_activation_pnts)[:num_plot]
edge_coloring = get_color(y, cmap_activation_pnts_edge)[:num_plot]
## Now get principal components (pcs) and align them from time point to
# time point
pcs = []
p_track = 0
norm = np.linalg.norm
projs = []
for i1 in range(1, len(hid)):
# pc = utils.get_pcs_covariance(hid[i1], [0, 1])
out = utils.get_pcs_covariance(hid[i1], [0, 1], return_extra=True)
pc = out['pca_projection']
mu = out['mean']
proj = out['pca_projectors']
mu_proj = mu@proj[:, :2]
if i1 > 0:
# Check for the best alignment
pc_flip_x = pc.clone()
pc_flip_x[:, 0] = -pc_flip_x[:, 0]
pc_flip_y = pc.clone()
pc_flip_y[:, 1] = -pc_flip_y[:, 1]
pc_flip_both = pc.clone()
pc_flip_both[:, 0] = -pc_flip_both[:, 0]
pc_flip_both[:, 1] = -pc_flip_both[:, 1]
difference0 = norm(p_track - pc)
difference1 = norm(p_track - pc_flip_x)
difference2 = norm(p_track - pc_flip_y)
difference3 = norm(p_track - pc_flip_both)
amin = np.argmin(
[difference0, difference1, difference2, difference3])
if amin == 1:
pc[:, 0] = -pc[:, 0]
proj[:, 0] = -proj[:, 0]
elif amin == 2:
pc[:, 1] = -pc[:, 1]
proj[:, 1] = -proj[:, 1]
elif amin == 3:
pc[:, 0] = -pc[:, 0]
pc[:, 1] = -pc[:, 1]
proj[:, 0] = -proj[:, 0]
proj[:, 1] = -proj[:, 1]
pc = pc + mu_proj
p_track = pc.clone()
pcs.append(pc[:num_plot])
projs.append(proj)
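    # The sign flips above resolve the sign ambiguity of PCA between
    # consecutive timepoints: among the four sign combinations of the two
    # leading PCs, the one closest (in Frobenius norm) to the previous
    # timepoint's projection is kept, so the snapshots do not flip orientation
    # from frame to frame.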
def take_snap(i0, scats, fig, dim=2, border=False):
# ax = fig.axes[0]
hid_pcs_plot = pcs[i0][:, :dim].numpy()
xm = np.min(hid_pcs_plot[:, 0])
xM = np.max(hid_pcs_plot[:, 0])
ym = np.min(hid_pcs_plot[:, 1])
yM = np.max(hid_pcs_plot[:, 1])
xc = (xm + xM)/2
yc = (ym + yM)/2
hid_pcs_plot[:, 0] = hid_pcs_plot[:, 0] - xc
hid_pcs_plot[:, 1] = hid_pcs_plot[:, 1] - yc
v = projs[i0]
# u, s, v = torch.svd(h)
if r.shape[0] == 2:
r0_p = r[0]@v
r1_p = r[1]@v
else:
r0_p = r.flatten()@v
r1_p = -r.flatten()@v
if class_style == 'shape':
scats[0][0].set_offsets(hid_pcs_plot)
else:
if dim == 3:
scat._offsets3d = juggle_axes(*hid_pcs_plot[:, :dim].T, 'z')
scat._offsets3d = juggle_axes(*hid_pcs_plot[:, :dim].T, 'z')
else:
scats[0].set_offsets(hid_pcs_plot)
scats[1].set_offsets(r0_p[:2].reshape(1, 2))
scats[2].set_offsets(r1_p[:2].reshape(1, 2))
xm = np.min(hid_pcs_plot[:, 0])
xM = np.max(hid_pcs_plot[:, 0])
ym = np.min(hid_pcs_plot[:, 1])
yM = np.max(hid_pcs_plot[:, 1])
max_extent = max(xM - xm, yM - ym)
max_extent_arg = xM - xm > yM - ym
if dim == 2:
x_factor = .4
if max_extent_arg:
ax.set_xlim(
[xm - x_factor*max_extent, xM + x_factor*max_extent])
ax.set_ylim([xm - .1*max_extent, xM + .1*max_extent])
else:
ax.set_xlim(
[ym - x_factor*max_extent, yM + x_factor*max_extent])
ax.set_ylim([ym - .1*max_extent, yM + .1*max_extent])
else:
if max_extent_arg:
ax.set_xlim([xm - .1*max_extent, xM + .1*max_extent])
ax.set_ylim([xm - .1*max_extent, xM + .1*max_extent])
ax.set_zlim([xm - .1*max_extent, xM + .1*max_extent])
else:
ax.set_xlim([ym - .1*max_extent, yM + .1*max_extent])
ax.set_ylim([ym - .1*max_extent, yM + .1*max_extent])
ax.set_zlim([ym - .1*max_extent, yM + .1*max_extent])
# ax.plot([r0_p[0]], [r0_p[1]], 'x', markersize=3, color='black')
# ax.plot([r1_p[0]], [r1_p[1]], 'x', markersize=3, color='black')
ax.set_ylim([-4, 4])
if dim == 3:
out_fig(fig, f"{figname}_{i0}",
subfolder=subdir, axis_type=0,
name_order=1)
else:
out_fig(fig, f"{figname}_{i0}",
subfolder=subdir, axis_type=0,
name_order=1)
return scats,
dim = 2
hid_pcs_plot = pcs[0]
if dim == 3:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim([-10, 10])
ax.set_ylim([-10, 10])
ax.set_zlim([-10, 10])
else:
fig, ax = make_fig()
ax.grid(False)
scat1 = ax.scatter(*hid_pcs_plot[:num_plot, :dim].T, c=coloring,
edgecolors=edge_coloring, s=10, linewidths=.65)
ax.plot([0], [0], 'x', markersize=7)
scat2 = ax.scatter([0], [0], marker='x', s=3, c='black')
scat3 = ax.scatter([0], [0], marker='x', s=3, color='black')
scats = [scat1, scat2, scat3]
# ax.plot([0], [0], 'o', markersize=10)
if FEEDFORWARD:
snap_idx = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
else:
snap_idx = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16, 21, 26,
31]) # snap_idx = list(range(T + 1))
for i0 in snap_idx:
take_snap(i0, scats, fig, dim=dim, border=False)
print
def _cluster_holdout_test_acc_stat_fun(h, y, clust_identity,
classifier_type='logistic_regression',
num_repeats=5, train_ratio=0.8, seed=11):
np.random.seed(seed)
num_clusts = np.max(clust_identity) + 1
num_clusts_train = int(round(num_clusts*train_ratio))
num_samples = h.shape[0]
test_accs = np.zeros(num_repeats)
train_accs = np.zeros(num_repeats)
for i0 in range(num_repeats):
permutation = np.random.permutation(np.arange(len(clust_identity)))
perm_inv = np.argsort(permutation)
clust_identity_shuffled = clust_identity[permutation]
train_idx = clust_identity_shuffled <= num_clusts_train
test_idx = clust_identity_shuffled > num_clusts_train
hid_train = h[train_idx[perm_inv]]
y_train = y[train_idx[perm_inv]]
y_test = y[test_idx[perm_inv]]
hid_test = h[test_idx[perm_inv]]
if classifier_type == 'svm':
classifier = svm.LinearSVC(random_state=3*i0 + 1)
else:
classifier = linear_model.LogisticRegression(random_state=3*i0 + 1,
solver='lbfgs')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
classifier.fit(hid_train, y_train)
train_accs[i0] = classifier.score(hid_train, y_train)
test_accs[i0] = classifier.score(hid_test, y_test)
return train_accs, test_accs
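# Usage sketch (shapes are hypothetical): for activations h of shape
# (num_samples, num_units), labels y of shape (num_samples,) and integer
# cluster ids clust_identity of length num_samples, the function holds out the
# clusters above the train_ratio cutoff, fits a linear classifier on the
# remaining clusters and returns per-repeat train/test accuracies:
# train_accs, test_accs = _cluster_holdout_test_acc_stat_fun(h, y, clust_identity)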
def clust_holdout_over_layers(seeds, gs, train_params,
figname="clust_holdout_over_layers"):
"""
Logistic regression training and testing error on the representation
through the layers. Compares networks trained
with different choices of g_radius (specified by input parameter gs).
Parameters
----------
seeds : List[int]
List of random number seeds to use for generating instantiations of
the model and dataset. Variation over
these seeds is used to plot error bars.
gs : List[float]
Values of g_radius to iterate over.
train_params : dict
Dictionary of training parameters that specify the model and dataset
to use for training. Value of g_radius
is overwritten by values in gs.
figname : str
Name of the figure to save.
"""
if not hasattr(gs, '__len__'):
gs = [gs]
layer_label = 'layer'
@memory.cache
def generate_data_table_clust(seeds, gs, train_params):
layer_label = 'layer'
clust_acc_table = pd.DataFrame(
columns=['seed', 'g_radius', 'training', layer_label, 'LR training',
'LR testing'])
train_params_loc = copy.deepcopy(train_params)
for i0, seed in enumerate(seeds):
for i1, g in enumerate(gs):
train_params_loc['g_radius'] = g
train_params_loc['model_seed'] = seed
num_pnts_dim_red = 500
model, params, run_dir = train.initialize_and_train(
**train_params_loc)
class_datasets = params['datasets']
num_train_samples = len(class_datasets['train'])
class_datasets['train'].max_samples = num_pnts_dim_red
torch.manual_seed(params['model_seed'])
X, Y = class_datasets['train'][:]
if train_params_loc['network'] == 'feedforward':
X0 = X
else:
X0 = X[:, 0]
for epoch, epoch_label in zip([0, -1], ['before', 'after']):
loader.load_model_from_epoch_and_dir(model, run_dir, epoch)
hid = [X0]
hid += model.get_post_activations(X)[:-1]
if len(Y.shape) > 1:
Y = Y[:, -1]
cluster_identity = class_datasets['train'].cluster_identity
ds = []
for lay, h in enumerate(hid):
stat = _cluster_holdout_test_acc_stat_fun(h.numpy(),
Y.numpy(),
cluster_identity)
ds.extend([{
'seed': seed, 'g_radius': g,
'training': epoch_label, layer_label: lay,
'LR training': stat[0][k], 'LR testing': stat[1][k]
} for k in range(len(stat[0]))])
clust_acc_table = clust_acc_table.append(pd.DataFrame(ds),
ignore_index=True)
clust_acc_table['seed'] = clust_acc_table['seed'].astype('category')
clust_acc_table['g_radius'] = clust_acc_table['g_radius'].astype(
'category')
clust_acc_table['training'] = clust_acc_table['training'].astype(
'category')
return clust_acc_table
clust_acc_table = generate_data_table_clust(seeds, gs, train_params)
layers = set(clust_acc_table[layer_label])
for stage in ['LR training', 'LR testing']:
if stage == 'LR training':
clust_acc_table_stage = clust_acc_table.drop(columns=['LR testing'])
else:
clust_acc_table_stage = clust_acc_table.drop(
columns=['LR training'])
fig, ax = make_fig((1.5, 1.2))
if USE_ERRORBARS:
g = sns.lineplot(ax=ax, x=layer_label, y=stage,
data=clust_acc_table_stage, estimator=est_acc,
ci=ci_acc, style='training',
style_order=['after', 'before'], hue='g_radius')
else:
g1 = sns.lineplot(ax=ax, x=layer_label, y=stage,
data=clust_acc_table_stage, estimator=None,
units='seed', style='training',
style_order=['after', 'before'], hue='g_radius',
alpha=0.6)
g2 = sns.lineplot(ax=ax, x=layer_label, y=stage,
data=clust_acc_table_stage, estimator='mean',
ci=None, style='training',
style_order=['after', 'before'], hue='g_radius')
if g1.legend_ is not None:
g1.legend_.remove()
if not LEGEND and g2.legend_ is not None:
g2.legend_.remove()
if not LEGEND and g.legend_ is not None:
g.legend_.remove()
ax.set_ylim([-.01, 1.01])
ax.set_xticks(range(len(layers)))
out_fig(fig, figname + '_' + stage, subfolder=train_params[
'network'] +
'/clust_holdout_over_layers/',
show=False, save=True, axis_type=0, name_order=0,
data=clust_acc_table)
plt.close('all')
def get_stats(stat_fun, train_params_list_hue, train_params_list_style=None,
seeds=None, hue_key=None, style_key=None, *args, **kwargs):
train_params_list_hue = [copy.deepcopy(t) for t in train_params_list_hue]
style_bool = train_params_list_style is not None
if style_bool:
train_params_list_style = [copy.deepcopy(t) for t in
train_params_list_style]
style_bool = train_params_list_style is not None
if style_bool and style_key is None:
raise ValueError("Please specify a style_key.")
hue_bool = len(train_params_list_hue) > 1
if hue_bool and hue_key is None:
raise ValueError("Please specify a hue_key.")
if seeds is None:
seeds = [train_params_list_hue[0]['model_seed']]
params_cat = [[], []]
params_cat[0] = train_params_list_hue
if style_bool:
params_cat[1] = train_params_list_style
else:
params_cat[1] = [None]
table = pd.DataFrame()
if hue_bool:
table = table.reindex(columns=table.columns.tolist() + [hue_key])
if style_bool:
table = table.reindex(columns=table.columns.tolist() + [style_key])
for i0 in range(len(params_cat)): # hue params
for i1 in range(len(params_cat[i0])):
params = params_cat[i0][i1]
table_piece = stat_fun(params, hue_key, style_key, seeds,
*args, **kwargs)
table = table.append(table_piece, ignore_index=True)
if hue_key is not None:
table[hue_key] = table[hue_key].astype('category')
if style_key is not None:
table[style_key] = table[style_key].astype('category')
return table
def dim_through_training(train_params_list_hue, train_params_list_style=None,
seeds=None, hue_key=None, style_key=None, figname='',
subdir=None, multiprocess_lock=None):
if subdir is None:
subdir = train_params_list_hue[0][
'network'] + '/' + 'dim_over_training' + '/'
@memory.cache
def compute_dim_through_training(params, hue_key, style_key, seeds):
num_pnts_dim_red = 500
table_piece = pd.DataFrame()
if params is not None:
if hue_key is not None:
hue_value = params[hue_key]
if style_key is not None:
style_value = params[style_key]
for seed in seeds:
params['model_seed'] = seed
model, returned_params, run_dir = train.initialize_and_train(
**params, multiprocess_lock=multiprocess_lock)
class_datasets = returned_params['datasets']
class_datasets['train'].max_samples = num_pnts_dim_red
torch.manual_seed(int(params['model_seed']))
np.random.seed(int(params['model_seed']))
X, Y = class_datasets['train'][:]
T = 0
if T > 0:
X = utils.extend_input(X, T)
X0 = X[:, 0]
elif params['network'] != 'feedforward':
X0 = X[:, 0]
else:
X0 = X
epochs, saves = loader.get_epochs_and_saves(run_dir)
epochs = [epoch for epoch in epochs if
epoch <= params['num_epochs']]
for i_epoch, epoch in enumerate(epochs):
loader.load_model_from_epoch_and_dir(model, run_dir, epoch)
hid = [X0]
hid += model.get_post_activations(X)[:-1]
try:
dim = utils.get_effdim(hid[-1],
preserve_gradients=False).item()
except RuntimeError:
print("Dim computation didn't converge.")
dim = np.nan
num_updates = int(
params['num_train_samples_per_epoch']/params[
'batch_size'])*epoch
d = {
'effective_dimension': dim, 'seed': seed,
'epoch_index': i_epoch, 'epoch': epoch,
'num_updates': num_updates
}
if hue_key is not None:
d.update({hue_key: hue_value})
if style_key is not None:
d.update({style_key: style_value})
# casting d to DataFrame necessary to preserve type
table_piece = table_piece.append(pd.DataFrame(d, index=[0]),
ignore_index=True)
return table_piece
table = get_stats(compute_dim_through_training, train_params_list_hue,
train_params_list_style, seeds, hue_key, style_key)
table = table.replace([np.inf, -np.inf], np.nan)
table = table.dropna()
fig, ax = make_fig((1.5, 1.2))
if USE_ERRORBARS:
g = sns.lineplot(ax=ax, x='epoch_index', y='effective_dimension',
data=table, estimator=est_dim, ci=ci_dim, hue=hue_key,
style=style_key)
if not LEGEND and g.legend_ is not None:
g.legend_.remove()
else:
g1 = sns.lineplot(ax=ax, x='epoch_index', y='effective_dimension',
data=table, estimator=None, units='seed', hue=hue_key,
style=style_key, alpha=.6)
g2 = sns.lineplot(ax=ax, x='epoch_index', y='effective_dimension',
data=table, estimator='mean', ci=None, hue=hue_key,
style=style_key)
if g1.legend_ is not None:
g1.legend_.remove()
if not LEGEND and g2.legend_ is not None:
g2.legend_.remove()
ax.xaxis.set_major_locator(plt.MaxNLocator(integer=True))
ax.set_ylim([0, None])
out_fig(fig, figname, subfolder=subdir, show=False, save=True, axis_type=0,
data=table)
plt.close('all')
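# Hedged sketch (an assumption about utils.get_effdim, not its actual code):
# the "effective_dimension" plotted here is typically the participation ratio
# of the activation covariance spectrum, dim = (sum_i l_i)**2 / (sum_i l_i**2)
# for covariance eigenvalues l_i. A minimal reference implementation:
def _participation_ratio_dim(hid):
    # hid: (num_points, num_units) array of activations for one layer
    # (detach and move torch tensors to CPU before calling)
    hid = np.asarray(hid, dtype=np.float64)
    hid = hid - hid.mean(axis=0, keepdims=True)
    cov = hid.T @ hid/max(hid.shape[0] - 1, 1)
    eigs = np.clip(np.linalg.eigvalsh(cov), 0, None)
    denom = np.sum(eigs**2)
    return float(np.sum(eigs)**2/denom) if denom > 0 else np.nan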
def dim_over_layers(train_params_list_hue, train_params_list_style=None,
seeds=None, hue_key=None, style_key=None,
figname="dim_over_layers", subdir=None, T=0,
multiprocess_lock=None, use_error_bars=None, **plot_kwargs):
"""
Effective dimension measured over layers (or timepoints if looking at an
RNN) of the network, before and after
training.
Parameters
----------
seeds : List[int]
List of random number seeds to use for generating instantiations of
the model and dataset. Variation over
these seeds is used to plot error bars.
gs : List[float]
Values of g_radius to iterate over.
train_params : dict
Dictionary of training parameters that specify the model and dataset
to use for training. Value of g_radius
is overwritten by values in gs.
figname : str
Name of the figure to save.
T : int
Final timepoint to plot (if looking at an RNN). If 0, disregard this
parameter.
"""
if subdir is None:
subdir = train_params_list_hue[0]['network'] + '/dim_over_layers/'
if use_error_bars is None:
use_error_bars = USE_ERRORBARS
train_params_list_hue = [copy.deepcopy(t) for t in train_params_list_hue]
style_bool = train_params_list_style is not None
if style_bool:
train_params_list_style = [copy.deepcopy(t) for t in
train_params_list_style]
@memory.cache
def compute_dim_over_layers(params, hue_key, style_key, seeds):
num_pnts_dim_red = 500
table_piece = pd.DataFrame()
if params is not None:
if hue_key is not None:
hue_value = params[hue_key]
if style_key is not None:
style_value = params[style_key]
for seed in seeds:
params['model_seed'] = seed
model, returned_params, run_dir = train.initialize_and_train(
**params, multiprocess_lock=multiprocess_lock)
class_datasets = returned_params['datasets']
class_datasets['train'].max_samples = num_pnts_dim_red
torch.manual_seed(int(params['model_seed']))
np.random.seed(int(params['model_seed']))
X, Y = class_datasets['train'][:]
            T = 15  # fixed here; overrides the function's T argument
if T > 0:
X = utils.extend_input(X, T)
X0 = X[:, 0]
elif params['network'] != 'feedforward':
X0 = X[:, 0]
else:
X0 = X
# epochs, saves = loader.get_epochs_and_saves(run_dir)
# for i_epoch, epoch in enumerate([0, -1]):
loader.load_model_from_epoch_and_dir(model, run_dir,
params['num_epochs'])
hid = [X0]
hid += model.get_post_activations(X)[:-1]
dims = []
for h in hid:
try:
dims.append(utils.get_effdim(h,
preserve_gradients=False).item())
except RuntimeError:
dims.append(np.nan)
d = {
'effective_dimension': dims,
'layer': list(range(len(dims))), 'seed': seed
}
if hue_key is not None:
d.update({hue_key: hue_value})
if style_key is not None:
d.update({style_key: style_value})
# casting d to DataFrame necessary to preserve type
table_piece = table_piece.append(pd.DataFrame(d),
ignore_index=True)
return table_piece
table = get_stats(compute_dim_over_layers, train_params_list_hue,
train_params_list_style, seeds, hue_key, style_key)
table = table.replace([np.inf, -np.inf], np.nan)
table = table.dropna()
# breakpoint()
# print(table)
fig, ax = make_fig((1.5, 1.2))
# table['g_radius'] = table['g_radius'].astype('float64')
# norm = plt.Normalize(table['g_radius'].min(), table['g_radius'].max())
# sm = plt.cm.ScalarMappable(cmap="viridis", norm=norm)
# sm.set_array([])
# try:
if use_error_bars:
g = sns.lineplot(ax=ax, x='layer', y='effective_dimension',
data=table, estimator=est_dim, ci=ci_dim,
style=style_key, hue=hue_key, **plot_kwargs)
# g.figure.colorbar(sm)
if not LEGEND and g.legend_ is not None:
g.legend_.remove()
else:
g1 = sns.lineplot(ax=ax, x='layer', y='effective_dimension',
data=table, estimator=None, units='seed',
style=style_key, hue=hue_key, alpha=0.6,
**plot_kwargs)
g2 = sns.lineplot(ax=ax, x='layer', y='effective_dimension',
data=table, estimator='mean', ci=None,
style=style_key, hue=hue_key, **plot_kwargs)
if g1.legend_ is not None:
g1.legend_.remove()
if not LEGEND and g2.legend_ is not None:
g2.legend_.remove()
# except FitDataError:
# print("Plotting data invalid.")
layers = set(table['layer'])
if len(layers) < 12:
ax.set_xticks(range(len(layers)))
else:
ax.xaxis.set_major_locator(plt.MaxNLocator(
integer=True)) # ax.xaxis.set_major_locator(plt.MaxNLocator(10))
# ax.set_ylim([0, None])
# ax.set_ylim([0, 15])
out_fig(fig, figname, subfolder=subdir, show=False, save=True, axis_type=0,
data=table)
plt.close('all')
def orth_compression_through_layers(train_params_list_hue,
train_params_list_style=None, seeds=None,
hue_key=None, style_key=None,
figname="orth_compression_through_layers",
subdir=None, multiprocess_lock=None,
**plot_kwargs):
"""
"""
# if train_params_list_hue[0]['loss'] != 'mse_scalar':
# raise ValueError("Expected scalar mse loss.")
if subdir is None:
subdir = train_params_list_hue[0][
'network'] + '/orth_compression_through_layers/'
train_params_list_hue = [copy.deepcopy(t) for t in train_params_list_hue]
style_bool = train_params_list_style is not None
if style_bool:
train_params_list_style = [copy.deepcopy(t) for t in
train_params_list_style]
@memory.cache
def compute_orth_compression_through_layers(params, hue_key, style_key,
seeds):
num_pnts = 500
# num_dims = 2
table_piece =
| pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# vim:fileencoding=utf-8
# Author: <NAME>
# Created: 2017-09-26
import pandas as pd
import numpy as np
from logging import getLogger, INFO, Formatter, StreamHandler
def get_logger(name=None):
if name is None:
logger = getLogger(__name__)
else:
logger = getLogger(name)
logger.setLevel(INFO)
log_fmt = '%(asctime)s : %(name)s : %(levelname)s : %(message)s'
formatter = Formatter(log_fmt)
stream_handler = StreamHandler()
stream_handler.setLevel(INFO)
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
return logger
logger = get_logger()
def load_depth_file(depth_file_path: str):
df = pd.read_csv(depth_file_path,
sep="\t",
names=["genome", "location", "depth"])
if len(df) == 0:
msg = "File {0} is empty".format(depth_file_path)
raise ValueError(msg)
return df
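# Hedged usage sketch: load_depth_file expects a headerless, tab-separated
# depth table (e.g. the output of `samtools depth`) with genome, position and
# depth columns; the file path below is hypothetical.
# df = load_depth_file("sample1_depth.tsv")
# logger.info("loaded %d positions from %d genome(s)",
#             len(df), df["genome"].nunique())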
def load_multiple_depth_file(depth_file_path: list):
list_ = []
for i, f in enumerate(depth_file_path):
df = load_depth_file(f)
df["subject"] = i+1
list_.append(df)
c_df =
| pd.concat(list_) | pandas.concat |
# census_acs1519.py
# collects data from census ACS, using Census APIs, for use in CareerOneStop's Available Workforce, Area Profile
# 50 states plus DC and PR, plus counties, plus metro areas (but not micro areas)
# Some data we want is "DT", some is "DP", from the perspective of the Census -- different APIs.
# The "year" we want is 2015-2019. They don't call it the average, but something like that.
# Further, some of the PR data is part of the mainstream data, while some needs a PR-specific API call.
#
# I welcome ideas for improvement! <NAME>, <EMAIL>
if __name__ == '__main__':
#===== DP STATE data ========================
# DP because this program loads only the "Data Profile" var's, not the DT's
# States, not metros and not counties
import requests
import pandas as pd
response = requests.get(
'https://api.census.gov/data/2019/acs/acs5/profile?get=GEO_ID,NAME,DP02_0087E,DP02_0070PE,DP02_0067PE,DP02_0068PE,DP03_0003E,DP03_0004E,DP03_0088E,DP03_0025E,DP03_0062E,DP03_0009PE,DP03_0128PE,DP04_0089E,DP04_0134E&for=state:*')
DPStates_list = response.json()
# Get PR data that is missing from the above
response = requests.get(
'https://api.census.gov/data/2019/acs/acs5/profile?get=GEO_ID,NAME,DP02PR_0087E,DP02PR_0070PE,DP02PR_0067PE,DP02PR_0068PE&for=state:72') # noqa
DPPRState_list = response.json()
# Strip the column titles off the top
DPStates_list = DPStates_list[1:]
# Now correct the PR data in the initial response
DPStates_list.sort() #sorts by first field, which is geo_id, which puts PR last
DPStates_list[-1][1:6] = DPPRState_list[1][1:6] # replaces values in the last record
    DP_column_names_17 = (  # 16 columns: GEO_ID, NAME, 13 ACS variables, state fips
        'geo_id', 'areaname_unused', 'population', 'vets', 'hs', 'bach', 'lf', 'employed', 'pcincome', 'travel', 'mincome',
        'unemp', 'poverty', 'housevalue', 'grossrent', 'stfips')
DPStates_df = pd.DataFrame(DPStates_list, columns=DP_column_names_17)
#so it matches final product, fill it out
# assign stfips value to new column acs_geo_id2.
DPStates_df.insert(1, "acs_geo_id2", DPStates_df['stfips'].str.zfill(2))
# assign constant state type id. Future improvement: force this to be string, len 2.
DPStates_df.insert(3,"areatype","01")
# assign stfips value to new column area
DPStates_df.insert(4, "areacode", DPStates_df['stfips'].str.zfill(6))
#DPStates_df.to_csv("DPStates3.csv")
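    # Hedged refactoring sketch (not in the original script): every ACS request
    # in this file follows the same pattern -- the API returns a list of rows
    # with the header row first, which is stripped before building a DataFrame
    # with our own column names. A small wrapper would make that reusable (the
    # PR fix-up above would still be applied separately):
    # def acs_profile_to_df(url, column_names):
    #     rows = requests.get(url).json()
    #     return pd.DataFrame(rows[1:], columns=column_names)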
# ===== DP COUNTY data ========================
response = requests.get(
'https://api.census.gov/data/2019/acs/acs5/profile?get=GEO_ID,NAME,DP02_0087E,DP02_0070PE,DP02_0067PE,DP02_0068PE,DP03_0003E,DP03_0004E,DP03_0088E,DP03_0025E,DP03_0062E,DP03_0009PE,DP03_0128PE,DP04_0089E,DP04_0134E&for=county:*')
DPC_l = response.json() # Data Profiles, Counties, List
    DP_cols_16 = (  # 17 columns: GEO_ID, NAME, 13 ACS variables, state fips, county fips
        'geo_id', 'areaname_unused', 'population', 'vets', 'hs', 'bach', 'lf', 'employed', 'pcincome', 'travel', 'mincome',
        'unemp', 'poverty', 'housevalue', 'grossrent', 'stfips', 'areacode')
DPC_l = DPC_l[1:]
DPC_df =
| pd.DataFrame(DPC_l, columns=DP_cols_16) | pandas.DataFrame |
import datetime as dt
from functools import partial
from io import BytesIO, StringIO
from fastapi import HTTPException
import numpy as np
import pandas as pd
import pyarrow as pa
from pyarrow import feather
import pytest
from solarperformanceinsight_api import utils, models
httpfail = partial(
pytest.param, marks=pytest.mark.xfail(strict=True, raises=HTTPException)
)
@pytest.mark.parametrize(
"inp,typ,exp",
(
(
"time,datas\n2020-01-01T00:00Z,8.9",
StringIO,
pd.DataFrame({"time": [pd.Timestamp("2020-01-01T00:00Z")], "datas": [8.9]}),
),
(
b"time,datas\n2020-01-01T00:00Z,8.9",
BytesIO,
pd.DataFrame({"time": [pd.Timestamp("2020-01-01T00:00Z")], "datas": [8.9]}),
),
(
b"time,datas\n2020-01-01T00:00,8.9\n2020-01-02T00:00,-999",
BytesIO,
pd.DataFrame(
{
"time": [
pd.Timestamp("2020-01-01T00:00"),
pd.Timestamp("2020-01-02T00:00"),
],
"datas": [8.9, None],
}
),
),
# not valid later, but rely on dataframe validation to check dtypes
(
b"multi,header\ntime,datas\n2020-01-01T00:00,8.9\n2020-01-02T00:00,-999",
BytesIO,
pd.DataFrame(
{
"multi": ["time", "2020-01-01T00:00", "2020-01-02T00:00"],
"header": ["datas", "8.9", np.nan],
}
),
),
# no header row
httpfail(
b"2020-01-01T00:00,8.9\n2020-01-02T00:00,-999",
BytesIO,
None,
),
httpfail(
"",
StringIO,
None,
),
httpfail(
"empty",
StringIO,
None,
),
httpfail(
"notenoughheaders,\na,b",
StringIO,
None,
),
httpfail(
"a,b\n0,1,2\n0,1,3,4,5,6",
StringIO,
None,
),
),
)
def test_read_csv(inp, typ, exp):
out = utils.read_csv(typ(inp))
pd.testing.assert_frame_equal(out, exp)
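# The -999 rows above rely on utils.read_csv treating -999 as a missing-data
# sentinel. A rough stand-in with that behaviour (an assumption; the real
# implementation also raises HTTPException on malformed input) would be:
# def _example_read_csv(buffer):
#     return pd.read_csv(buffer, na_values=[-999])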
@pytest.mark.parametrize(
"tbl,exp",
(
(
pa.Table.from_arrays([[1.0, 2, 3], [4.0, 5, 6]], ["a", "b"]),
pd.DataFrame({"a": [1, 2, 3.0], "b": [4, 5, 6.0]}),
),
# complex types to test to_pandas
(
pa.Table.from_arrays(
[pa.array([1.0, 2, 3]), pa.array([[], [5, 6], [7, 8]])], ["a", "b"]
),
pd.DataFrame({"a": [1, 2, 3.0], "b": [[], [5, 6], [7, 8]]}),
),
httpfail(
b"notanarrowfile",
None,
),
),
)
def test_read_arrow(tbl, exp):
if isinstance(tbl, bytes):
tblbytes = BytesIO(tbl)
else:
tblbytes = BytesIO(utils.dump_arrow_bytes(tbl))
out = utils.read_arrow(tblbytes)
pd.testing.assert_frame_equal(out, exp)
@pytest.mark.parametrize(
"inp,exp",
(
("text/csv", utils.read_csv),
("application/vnd.ms-excel", utils.read_csv),
("application/vnd.apache.arrow.file", utils.read_arrow),
("application/octet-stream", utils.read_arrow),
httpfail("application/json", None),
),
)
def test_verify_content_type(inp, exp):
out = utils.verify_content_type(inp)
assert out == exp
@pytest.mark.parametrize(
"inp,cols,exp",
(
(pd.DataFrame({"a": [0, 1], "b": [1, 2]}), ["a", "b"], set()),
(
pd.DataFrame(
{"time": [pd.Timestamp("2020-01-01")], "b": [0.8], "c": ["notnumeric"]}
),
["time", "b"],
{"c"},
),
httpfail(
pd.DataFrame({"time": [pd.Timestamp("2020-01-01")], "b": ["willfail"]}),
["time", "b"],
set(),
),
httpfail(pd.DataFrame({"a": [0, 1], "b": [1, 2]}), ["c"], {"a", "b"}),
httpfail(pd.DataFrame({"time": [0, 1], "b": [1, 2]}), ["time", "b"], set()),
(
pd.DataFrame(
{
"time": [
pd.Timestamp.now(),
pd.Timestamp("2020-01-01T00:00:01.09230"),
],
"b": [1, 2],
}
),
["time", "b"],
set(),
),
httpfail(
pd.DataFrame(
{
"time": [pd.Timestamp("2020-01-01"), pd.Timestamp("2020-01-01")],
"b": [0.8, 1],
},
),
["time", "b"],
set(),
),
(
pd.DataFrame(
{
"month": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
"other": list(range(12)),
}
),
["month", "other"],
set(),
),
(
pd.DataFrame(
{
"month": [
1.0,
2.0,
3.0,
4.0,
5.0,
6.0,
7.0,
8.0,
9.0,
10.0,
11.0,
12.0,
],
"other": list(range(12)),
}
),
["month", "other"],
set(),
),
(
pd.DataFrame(
{
"month": [f"{i}." for i in range(1, 13)],
"other": list(range(12)),
}
),
["month", "other"],
set(),
),
(
pd.DataFrame(
{
"month": [str(i) for i in range(1, 13)],
"other": list(range(12)),
}
),
["month", "other"],
set(),
),
(
pd.DataFrame(
{
"month": [f"{i}.0" for i in range(1, 13)],
"other": list(range(12)),
}
),
["month", "other"],
set(),
),
(
pd.DataFrame(
{
"month": [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
],
"other": list(range(12)),
}
),
["month"],
{"other"},
),
(
pd.DataFrame(
{
"month": [
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December",
],
}
),
["month"],
set(),
),
(
pd.DataFrame(
{
"month": [
"jan.",
"feb.",
"mar.",
"apr.",
"may",
"jun.",
"jul.",
"aug.",
"sep.",
"oct.",
"nov.",
"dec.",
],
"other": list(range(12)),
}
),
["month"],
{"other"},
),
(
pd.DataFrame(
{
"month": [
"January",
"february",
"march",
"april",
"may",
"june",
"july",
"August",
"september",
"October",
"November",
"December",
],
}
),
["month"],
set(),
),
httpfail(
pd.DataFrame(
{
"month": [
"October",
"November",
"December",
],
}
),
["month"],
set(),
),
httpfail(
pd.DataFrame({"month": range(0, 13)}),
["month"],
set(),
),
httpfail(
pd.DataFrame(
{
"month": [
"January",
"february",
"march",
"april",
"may",
"june",
"julio", # bad
"August",
"september",
"October",
"November",
"December",
],
}
),
["month"],
set(),
),
),
)
def test_validate_dataframe(inp, cols, exp):
out = utils.validate_dataframe(inp, cols)
assert out == exp
@pytest.mark.parametrize(
"inp,slc",
(
(
pd.DataFrame(
{
"month": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
"other": list(range(12)),
}
),
slice(None),
),
(
pd.DataFrame(
{
"month": [
1.0,
2.0,
3.0,
4.0,
5.0,
6.0,
7.0,
8.0,
9.0,
10.0,
11.0,
12.0,
],
"other": list(range(12)),
}
),
slice(None),
),
(
pd.DataFrame(
{
"month": [str(i) for i in range(1, 13)],
"other": list(range(12)),
}
),
slice(None),
),
(
pd.DataFrame(
{
"month": [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
],
"other": list(range(12)),
}
),
slice(None),
),
(
pd.DataFrame(
{
"month": [
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December",
],
}
),
slice(None),
),
(
pd.DataFrame(
{
"month": [
"jan.",
"feb.",
"mar.",
"apr.",
"may",
"jun.",
"jul.",
"aug.",
"sep.",
"oct.",
"nov.",
"dec.",
],
"other": list(range(12)),
}
),
slice(None),
),
(
pd.DataFrame(
{
"month": [
"January",
"february",
"march",
"april",
"may",
"june",
"july",
"August",
"september",
"October",
"November",
"December",
],
}
),
slice(None),
),
(
pd.DataFrame(
{
"month": [
"October",
"November",
"December",
],
},
index=[9, 10, 11],
),
slice(9, None),
),
),
)
def test_standardize_months(inp, slc):
exp = pd.Series(
[
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December",
],
name="month",
)
out = utils.standardize_months(inp)["month"]
pd.testing.assert_series_equal(out, exp[slc])
def test_standardize_months_fail():
out0 = utils.standardize_months(pd.DataFrame({"month": range(0, 13)}))["month"]
assert not pd.isna(out0[1:]).any()
assert pd.isna(out0[:1]).all()
out1 = utils.standardize_months(
pd.DataFrame(
{
"month": [
"January",
"february",
"march",
"april",
"may",
"june",
"julio", # bad
"August",
"september",
"October",
"November",
"December",
],
}
)
)
pd.testing.assert_series_equal(
out1["month"],
pd.Series(
[
"January",
"February",
"March",
"April",
"May",
"June",
None,
"August",
"September",
"October",
"November",
"December",
],
name="month",
),
)
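# Hedged sketch of the behaviour exercised above (an assumption about what
# utils.standardize_months accepts, not its implementation): numeric months
# (1, "1", 1.0, "1.0"), three-letter abbreviations with or without a trailing
# dot, and full names in any case map to the capitalized full month name, and
# anything unrecognized (0, "julio") becomes missing.
def _example_standardize_one_month(value):
    import calendar
    names = list(calendar.month_name)  # names[1:] == January .. December
    try:
        number = int(float(value))
        return names[number] if 1 <= number <= 12 else None
    except (TypeError, ValueError):
        pass
    key = str(value).strip().rstrip(".").lower()
    for name in names[1:]:
        if key in (name.lower(), name[:3].lower()):
            return name
    return None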
@pytest.mark.parametrize(
"df,tbl",
(
(
pd.DataFrame({"a": [0.1, 0.2]}, dtype="float64"),
pa.Table.from_arrays(
[pa.array([0.1, 0.2], type=pa.float32())], names=["a"]
),
),
(
pd.DataFrame({"a": [0.1, 0.2]}, dtype="float32"),
pa.Table.from_arrays(
[pa.array([0.1, 0.2], type=pa.float32())], names=["a"]
),
),
(
pd.DataFrame(
{
"a": [0.1, 0.2],
"time": [
pd.Timestamp("2020-01-01T00:00Z"),
pd.Timestamp("2020-01-02T00:00Z"),
],
},
),
pa.Table.from_arrays(
[
pa.array([0.1, 0.2], type=pa.float32()),
pa.array(
[
dt.datetime(2020, 1, 1, tzinfo=dt.timezone.utc),
dt.datetime(2020, 1, 2, tzinfo=dt.timezone.utc),
],
type=pa.timestamp("s", tz="UTC"),
),
],
names=["a", "time"],
),
),
(
pd.DataFrame(
{
"b": [-999, 129],
"time": [
pd.Timestamp("2020-01-01T00:00Z"),
pd.Timestamp("2020-01-02T00:00Z"),
],
"a": [0.1, 0.2],
},
),
pa.Table.from_arrays(
[
pa.array([-999, 129], type=pa.int64()),
pa.array(
[
dt.datetime(2020, 1, 1, tzinfo=dt.timezone.utc),
dt.datetime(2020, 1, 2, tzinfo=dt.timezone.utc),
],
type=pa.timestamp("s", tz="UTC"),
),
pa.array([0.1, 0.2], type=pa.float32()),
],
names=["b", "time", "a"],
),
),
(
pd.DataFrame(
{"a": [0.1, 0.2], "time": ["one", "two"]},
),
pa.Table.from_arrays(
[
pa.array([0.1, 0.2], type=pa.float32()),
pa.array(["one", "two"]),
],
names=["a", "time"],
),
),
# non-localized ok
(
pd.DataFrame(
{
"b": [-999, 129],
"time": [
pd.Timestamp("2020-01-01T00:00"),
pd.Timestamp("2020-01-02T00:00"),
],
"a": [0.1, 0.2],
},
),
pa.Table.from_arrays(
[
pa.array([-999, 129], type=pa.int64()),
pa.array(
[
dt.datetime(2020, 1, 1),
dt.datetime(2020, 1, 2),
],
type=pa.timestamp("s"),
),
pa.array([0.1, 0.2], type=pa.float32()),
],
names=["b", "time", "a"],
),
),
(
pd.DataFrame(
{"nanfloat": [None, 1.0], "nans": [pd.NA, pd.NA], "str": ["a", "b"]}
),
pa.Table.from_arrays(
[
pa.array([None, 1.0], type=pa.float32()),
pa.array([None, None], type=pa.null()),
pa.array(["a", "b"], type=pa.string()),
],
names=["nanfloat", "nans", "str"],
),
),
httpfail(
pd.DataFrame(
{
"nanint": [pd.NA, 3], # arrow doesn't like this
}
),
None,
),
httpfail(
pd.DataFrame(
{
"nanstr": [pd.NA, "string"],
}
),
None,
),
),
)
def test_convert_to_arrow(df, tbl):
out = utils.convert_to_arrow(df)
assert out == tbl
@pytest.mark.parametrize(
"df",
(
pd.DataFrame(),
pd.DataFrame({"a": [0, 1992.9]}),
pd.DataFrame(
{
"b": [-999, 129],
"time": [
pd.Timestamp("2020-01-01T00:00"),
pd.Timestamp("2020-01-02T00:00"),
],
"a": [0.1, 0.2],
},
),
pd.DataFrame(
{
"b": [-999, 129],
"time": [
pd.Timestamp("2020-01-01T00:00Z"),
pd.Timestamp("2020-01-02T00:00Z"),
],
"a": [0.1, 0.2],
},
),
),
)
def test_dump_arrow_bytes(df):
tbl = pa.Table.from_pandas(df)
out = utils.dump_arrow_bytes(tbl)
assert isinstance(out, bytes)
new = feather.read_feather(BytesIO(out))
pd.testing.assert_frame_equal(df, new)
@pytest.mark.parametrize(
"inp,jti,exp_df,exp_extra,exp_missing",
[
(
pd.DataFrame(
{
"time": pd.date_range(
start="2020-01-01T00:00Z", freq="5min", periods=10
),
"col0": [0, 1, 2, 3, 4.0, 5, 6, 7, 8, 9],
}
),
{
"start": "2020-01-01T00:00Z",
"end": "2020-01-01T00:50Z",
"step": "05:00",
"timezone": None,
},
pd.DataFrame(
{
"time": pd.date_range(
start="2020-01-01T00:00Z", freq="5min", periods=10
),
"col0": [0, 1, 2, 3, 4.0, 5, 6, 7, 8, 9],
}
),
[],
[],
),
(
pd.DataFrame(
{
"time": [
pd.Timestamp("2020-01-01T00:00:00.00877Z"),
pd.Timestamp("2020-01-01T00:14:59.9871Z"),
pd.Timestamp("2020-01-01T00:30Z"),
pd.Timestamp("2020-01-01T00:45Z"),
],
"col0": [0, 1, 2, 3.0],
}
),
{
"start": "2020-01-01T00:00Z",
"end": "2020-01-01T00:50Z",
"step": "15:00",
"timezone": None,
},
pd.DataFrame(
{
"time": [
pd.Timestamp("2020-01-01T00:00Z"),
pd.Timestamp("2020-01-01T00:15Z"),
pd.Timestamp("2020-01-01T00:30Z"),
pd.Timestamp("2020-01-01T00:45Z"),
],
"col0": [0, 1, 2, 3.0],
}
),
[],
[],
),
(
pd.DataFrame(
{
"time": pd.date_range(
start="2020-01-01T00:00Z", freq="5min", periods=10
),
"col0": [0, 1, 2, 3, 4.0, 5, 6, 7, 8, 9],
}
),
{
"start": "2020-01-01T00:00Z",
"end": "2020-01-01T00:45Z",
"step": "05:00",
"timezone": None,
},
pd.DataFrame(
{
"time": pd.date_range(
start="2020-01-01T00:00Z", freq="5min", periods=9
),
"col0": [0, 1, 2, 3, 4.0, 5, 6, 7, 8],
}
),
[pd.Timestamp("2020-01-01T00:45Z")],
[],
),
(
pd.DataFrame({"time": [pd.Timestamp("2020-01-01T00:15Z")], "other": [0.0]}),
{
"start": "2020-01-01T00:00Z",
"end": "2020-01-01T01:00Z",
"step": "15:00",
"timezone": "UTC",
},
pd.DataFrame(
{
"time": pd.date_range(
start="2020-01-01T00:00Z", freq="15min", periods=4
),
"other": [np.nan, 0.0, np.nan, np.nan],
}
),
[],
[
pd.Timestamp("2020-01-01T00:00Z"),
pd.Timestamp("2020-01-01T00:30Z"),
pd.Timestamp("2020-01-01T00:45Z"),
],
),
(
pd.DataFrame(
{
"time": [
pd.Timestamp("2020-01-01T00:15"),
pd.Timestamp("2020-01-01T01:00"),
],
"other": [0.0, 1.0],
}
),
{
"start": "2020-01-01T00:00",
"end": "2020-01-01T01:00",
"step": "15:00",
"timezone": "UTC",
},
pd.DataFrame(
{
"time": pd.date_range(
start="2020-01-01T00:00Z", freq="15min", periods=4
),
"other": [np.nan, 0.0, np.nan, np.nan],
}
),
[pd.Timestamp("2020-01-01T01:00Z")],
[
| pd.Timestamp("2020-01-01T00:00Z") | pandas.Timestamp |