import os
import csv
import shutil
import hashlib
import tempfile
import numpy as np
import pandas as pd
from rdkit import Chem
from rdkit.Chem import AllChem, MACCSkeys
from rdkit.Chem import MolFromSmiles
from padelpy import padeldescriptor # required to calculate KlekotaRothFingerPrint
from metstab_shap.config import csv_section, utils_section
DATA = 'DATA'
test = 'test'
def load_data(data_config, fingerprint, morgan_nbits=None):
datasets = []
indices = []
this_start = 0
for path in sorted(data_config[DATA].values()):
x, y, smiles = preprocess_dataset(path=path, data_config=data_config,
fingerprint=fingerprint, morgan_nbits=morgan_nbits)
datasets.append((x, y, smiles))
indices.append((this_start, this_start+len(y)))
this_start += len(y)
x = np.vstack([el[0] for el in datasets])
y = np.hstack([el[1] for el in datasets])
smiles = np.hstack([el[2] for el in datasets])
cv_split = get_cv_split(indices)
# test set
test_x, test_y, test_smiles = preprocess_dataset(path=data_config[utils_section][test],
data_config=data_config,
fingerprint=fingerprint,
morgan_nbits=morgan_nbits)
return x, y, cv_split, test_x, test_y, smiles, test_smiles
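# Illustrative usage sketch (not part of the original module): assumes `data_config` is a
# parsed config whose DATA section maps names to CSV paths and whose utils section has a
# 'test' entry, and that 'morgan' is one of the fingerprint names preprocess_dataset accepts.
#
# >>> x, y, cv_split, test_x, test_y, smiles, test_smiles = load_data(
# ...     data_config, fingerprint='morgan', morgan_nbits=1024)
# >>> x.shape[0] == len(y) == len(smiles)
# True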
def load_data_from_df(dataset_paths, smiles_index, y_index, skip_line=False, delimiter=',', scale=None, average=None):
"""
Load data from multiple CSV files, concatenate them, and return SMILES strings and labels
:param dataset_paths: list: paths to csv files with data
:param smiles_index: int: index of the column with smiles
:param y_index: int: index of the column with the label
:param skip_line: boolean: True if the first line of the file contains column names, False otherwise
:param delimiter: delimiter used in the csv files
:param scale: should y be scaled? (useful with skewed distributions of y)
:param average: if the same SMILES appears multiple times, how should its values be averaged?
:return: (smiles, labels) - np.arrays
"""
# column names present in files?
header = 0 if skip_line else None
# load all files
dfs = []
for data_path in dataset_paths:
dfs.append(pd.read_csv(data_path, delimiter=delimiter, header=header))
# merge
data_df = pd.concat(dfs)
import pandas as pd
from pandas import DataFrame
import logging
logger = logging.getLogger('jsonml')
class MDataFrame:
'''
DataFrame-based data-processing class; the constructor takes a single DataFrame as its only argument.
'''
def __init__(self, data=None):
if isinstance(data, DataFrame):
self.data = data
else:
raise Exception("error data type, must be DataFrame")
def columns(self):
'''
All column names
:return: list of column names
'''
return self.data.columns.values.tolist()
def size_column(self):
'''
Number of columns
:return: int, number of columns
'''
return len(self.data.columns)
def size_row(self):
'''
Number of data rows
:return: int, number of rows
'''
return len(self.data)
def select(self, columns):
'''
Select a subset of columns
:param columns: list of column names to select
:return: this object with only the selected columns (the same object as self)
'''
self.data = self.data[[column for column in columns]]
return self
def drop(self, columns):
'''
Drop some columns
:param columns: list of column names to drop
:return: this object with the columns removed (the same object as self)
'''
self.data.drop(columns, axis=1, inplace=True)
return self
def copy_column(self, input_columns, output_columns):
'''
Copy columns
:param input_columns: columns to copy from
:param output_columns: names for the copied columns
:return: this object with the copied columns added (the same object as self)
'''
for index, input in enumerate(input_columns):
self.data[output_columns[index]] = self.data[input]
return self
def add_column(self, columns, value):
'''
Add columns
:param columns: column name(s), list or str
:param value: value used to fill the new column(s)
:return:
'''
if isinstance(columns, (list, set)):
for column in columns:
self.data[column] = value
else:
self.data[columns] = value
def merge(self, mDataFrame):
'''
Column-wise merge with another object, equivalent to pandas concat([], axis=1)
:param mDataFrame: its column names must not overlap with this object's and its row count must match, otherwise an error is raised
:return: the merged object (the same object as self)
'''
self.data = pd.concat([self.data, mDataFrame.datas()], axis=1)
return self
def join(self, MDataFrame, columns, type='inner'):
'''
Row-wise join (cross product on the join columns) with another object
:param MDataFrame: the other object; apart from the join columns, its column names must not overlap with this object's, otherwise an error is raised
:param columns: column names to join on
:param type: join type, one of 'inner', 'outer', 'left_outer', 'right_outer'
:return: the joined object (the same object as self)
'''
return self
def rename(self, columns):
'''
Rename columns
:param columns: supports dict or list; a list must match the current number of columns, and with a dict the new names must not be empty
:return: the object with renamed columns (the same object as before)
'''
if isinstance(columns, dict):
self.data.rename(columns=columns, inplace=True)
else:
self.data.columns = columns
return self
def order_column(self, columns):
'''
Reorder the columns into the specified order
:param columns: the desired column order
:return: the object with reordered columns (the same object as before)
'''
self.data = self.data[columns]
return self
def process_udf(self, udf, columns_input, columns_output=None, keep_input_columns=False):
'''
Row-wise processing over one or more columns: one-to-one, one-to-many, many-to-one and many-to-many transformations
:param udf: object implementing the custom processing method
:param columns_input: list[list] of input column names; the second dimension lets the same udf process several column groups, and the first dimension must match the order and number of the udf's parameters
:param columns_output: list[list] of output column names; may be omitted, in which case default names are filled in. If the first dimension has size 1 and the udf returns a list, the list is expanded and suffixed names are generated automatically
:param keep_input_columns: boolean, whether to keep the input columns (dropped by default)
:return: the processed object (the same object as before)
'''
if len(columns_input) == 0:
raise Exception('columns can not be empty')
# Handle one-dimensional (non-nested) column lists
if not isinstance(columns_input[0], list):
columns_tmp = []
for columns in columns_input:
columns_tmp.append([columns])
columns_input = columns_tmp
if not isinstance(columns_output[0], list):
columns_tmp = []
for columns in columns_output:
columns_tmp.append([columns])
columns_output = columns_tmp
for index, columns in enumerate(columns_input):
output_column = columns_output[index]
logger.debug('input = ' + str(columns))
logger.debug('output = ' + str(output_column))
result_list = [udf.process(*x) for x in zip(self.data[columns[0]])] if len(columns) == 1 \
else [udf.process(*x) for x in zip(*tuple([self.data[column] for column in columns]))]
if not keep_input_columns:
self.drop(columns)
else:
self.drop([column for column in columns if column in output_column])
if len(output_column) > 1:
result_df = DataFrame(result_list, columns=output_column)
self.data = pd.concat([self.data, result_df], axis=1)
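# Illustrative usage sketch for MDataFrame (the column names are made up):
#
# >>> mdf = MDataFrame(pd.DataFrame({'a': [1, 2], 'b': [3, 4]}))
# >>> mdf.copy_column(['a'], ['a_copy']).rename({'a_copy': 'c'}).columns()
# ['a', 'b', 'c']
# >>> mdf.select(['a', 'c']).size_column()
# 2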
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 21 23:24:11 2021
@author: rayin
"""
import os, sys
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import math
import re
import random
from collections import Counter
from pprint import pprint
os.chdir("/Users/rayin/Google Drive/Harvard/5_data/UDN/work")
case_gene_update = pd.read_csv("data/processed/variant_clean.csv", index_col=0)
aa_variant = list(case_gene_update['\\12_Candidate variants\\09 Protein\\'])
#pd.DataFrame(aa_variant).to_csv('aa_variant.csv')
#aa_variant_update = pd.read_csv("data/processed/aa_variant_update.csv", index_col=0)
#aa_variant_update = list(aa_variant_update['\\12_Candidate variants\\09 Protein\\'])
amino_acid = {'CYS': 'C', 'ASP': 'D', 'SER': 'S', 'GLN': 'Q', 'LYS': 'K', 'ILE': 'I', 'PRO': 'P', 'THR': 'T', 'PHE': 'F', 'ASN': 'N',
'GLY': 'G', 'HIS': 'H', 'LEU': 'L', 'ARG': 'R', 'TRP': 'W', 'ALA': 'A', 'VAL':'V', 'GLU': 'E', 'TYR': 'Y', 'MET': 'M', 'TER': 'X'}
aa_3 = []
aa_1 = []
for i in amino_acid.keys():
aa_3.append(i)
aa_1.append(amino_acid[i])
for i in range(0, len(aa_variant)):
for j in range(len(aa_3)):
if isinstance(aa_variant[i], float):
break
aa_variant[i] = str(aa_variant[i].upper())
if aa_3[j] in aa_variant[i]:
aa_variant[i] = aa_variant[i].replace(aa_3[j], aa_1[j])
#extracting aa properties from aaindex
#https://www.genome.jp/aaindex/
aa = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V']
#RADA880108
polarity = [-0.06, -0.84, -0.48, -0.80, 1.36, -0.73, -0.77, -0.41, 0.49, 1.31, 1.21, -1.18, 1.27, 1.27, 0.0, -0.50, -0.27, 0.88, 0.33, 1.09]
aa_polarity = pd.concat([pd.Series(aa), pd.Series(polarity)], axis=1)
aa_polarity = aa_polarity.rename(columns={0:'amino_acid', 1: 'polarity_value'})
#KLEP840101
net_charge = [0, 1, 0, -1, 0, 0, -1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]
aa_net_charge = pd.concat([pd.Series(aa), pd.Series(net_charge)], axis=1)
aa_net_charge = aa_net_charge.rename(columns={0:'amino_acid', 1: 'net_charge_value'})
#CIDH920103
hydrophobicity = [0.36, -0.52, -0.90, -1.09, 0.70, -1.05, -0.83, -0.82, 0.16, 2.17, 1.18, -0.56, 1.21, 1.01, -0.06, -0.60, -1.20, 1.31, 1.05, 1.21]
aa_hydrophobicity = pd.concat([pd.Series(aa), pd.Series(hydrophobicity)], axis=1)
aa_hydrophobicity = aa_hydrophobicity.rename(columns={0:'amino_acid', 1: 'hydrophobicity_value'})
#FAUJ880103 -- Normalized van der Waals volume
normalized_vdw = [1.00, 6.13, 2.95, 2.78, 2.43, 3.95, 3.78, 0.00, 4.66, 4.00, 4.00, 4.77, 4.43, 5.89, 2.72, 1.60, 2.60, 8.08, 6.47, 3.00]
aa_normalized_vdw = pd.concat([pd.Series(aa), pd.Series(normalized_vdw)], axis=1)
aa_normalized_vdw = aa_normalized_vdw.rename(columns={0:'amino_acid', 1: 'normalized_vdw_value'})
#CHAM820101
polarizability = [0.046, 0.291, 0.134, 0.105, 0.128, 0.180, 0.151, 0.000, 0.230, 0.186, 0.186, 0.219, 0.221, 0.290, 0.131, 0.062, 0.108, 0.409, 0.298, 0.140]
aa_polarizability = pd.concat([pd.Series(aa), pd.Series(polarizability)], axis=1)
aa_polarizability = aa_polarizability.rename(columns={0:'amino_acid', 1: 'polarizability_value'})
#JOND750102
pK_COOH = [2.34, 1.18, 2.02, 2.01, 1.65, 2.17, 2.19, 2.34, 1.82, 2.36, 2.36, 2.18, 2.28, 1.83, 1.99, 2.21, 2.10, 2.38, 2.20, 2.32]
aa_pK_COOH = pd.concat([pd.Series(aa), pd.Series(pK_COOH)], axis=1)
aa_pK_COOH = aa_pK_COOH.rename(columns={0:'amino_acid', 1: 'pK_COOH_value'})
#FASG760104
pK_NH2 = [9.69, 8.99, 8.80, 9.60, 8.35, 9.13, 9.67, 9.78, 9.17, 9.68, 9.60, 9.18, 9.21, 9.18, 10.64, 9.21, 9.10, 9.44, 9.11, 9.62]
aa_pK_NH2 = pd.concat([pd.Series(aa), pd.Series(pK_NH2)], axis=1)
aa_pK_NH2 = aa_pK_NH2.rename(columns={0:'amino_acid', 1: 'pK_NH2_value'})
#ROBB790101 Hydration free energy
hydration = [-1.0, 0.3, -0.7, -1.2, 2.1, -0.1, -0.7, 0.3, 1.1, 4.0, 2.0, -0.9, 1.8, 2.8, 0.4, -1.2, -0.5, 3.0, 2.1, 1.4]
aa_hydration = pd.concat([pd.Series(aa), pd.Series(hydration)], axis=1)
aa_hydration = aa_hydration.rename(columns={0:'amino_acid', 1: 'hydration_value'})
#FASG760101
molecular_weight = [89.09, 174.20, 132.12, 133.10, 121.15, 146.15, 147.13, 75.07, 155.16, 131.17, 131.17, 146.19, 149.21, 165.19,
115.13, 105.09, 119.12, 204.24, 181.19, 117.15]
aa_molecular_weight = pd.concat([pd.Series(aa), pd.Series(molecular_weight)], axis=1)
# -*- coding: utf-8 -*-
"""
Tests dtype specification during parsing
for all of the parsers defined in parsers.py
"""
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex, Categorical
from pandas.compat import StringIO
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.errors import ParserWarning
class DtypeTests(object):
def test_passing_dtype(self):
# see gh-6607
df = DataFrame(np.random.rand(5, 2).round(4), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# see gh-3795: passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
expected = df.astype(str)
tm.assert_frame_equal(result, expected)
# for parsing, interpret object as str
result = self.read_csv(path, dtype=object, index_col=0)
tm.assert_frame_equal(result, expected)
# we expect all object columns, so need to
# convert to test for equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
pytest.raises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# see gh-12048: empty frame
actual = self.read_csv(StringIO('A,B'), dtype=str)
expected = DataFrame({'A': [], 'B': []}, index=[], dtype=str)
tm.assert_frame_equal(actual, expected)
def test_pass_dtype(self):
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'})
assert result['one'].dtype == 'u1'
assert result['two'].dtype == 'object'
def test_categorical_dtype(self):
# GH 10153
data = """a,b,c
1,a,3.4
1,a,3.4
2,b,4.5"""
expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
'b': Categorical(['a', 'a', 'b']),
'c': Categorical(['3.4', '3.4', '4.5'])})
actual = self.read_csv(StringIO(data), dtype='category')
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype=CategoricalDtype())
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype={'a': 'category',
'b': 'category',
'c': CategoricalDtype()})
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype={'b': 'category'})
expected = pd.DataFrame({'a': [1, 1, 2],
'b': Categorical(['a', 'a', 'b']),
'c': [3.4, 3.4, 4.5]})
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype={1: 'category'})
tm.assert_frame_equal(actual, expected)
# unsorted
data = """a,b,c
1,b,3.4
1,b,3.4
2,a,4.5"""
expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
'b': Categorical(['b', 'b', 'a']),
'c': Categorical(['3.4', '3.4', '4.5'])})
actual = self.read_csv(StringIO(data), dtype='category')
tm.assert_frame_equal(actual, expected)
# missing
data = """a,b,c
1,b,3.4
1,nan,3.4
2,a,4.5"""
expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
'b': Categorical(['b', np.nan, 'a']),
'c': Categorical(['3.4', '3.4', '4.5'])})
actual = self.read_csv(StringIO(data), dtype='category')
tm.assert_frame_equal(actual, expected)
@pytest.mark.slow
def test_categorical_dtype_high_cardinality_numeric(self):
# GH 18186
data = np.sort([str(i) for i in range(524289)])
expected = DataFrame({'a': Categorical(data, ordered=True)})
actual = self.read_csv(StringIO('a\n' + '\n'.join(data)),
dtype='category')
actual["a"] = actual["a"].cat.reorder_categories(
np.sort(actual.a.cat.categories), ordered=True)
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_encoding(self):
# GH 10153
pth = tm.get_data_path('unicode_series.csv')
encoding = 'latin-1'
expected = self.read_csv(pth, header=None, encoding=encoding)
expected[1] = Categorical(expected[1])
actual = self.read_csv(pth, header=None, encoding=encoding,
dtype={1: 'category'})
tm.assert_frame_equal(actual, expected)
pth = tm.get_data_path('utf16_ex.txt')
encoding = 'utf-16'
expected = self.read_table(pth, encoding=encoding)
expected = expected.apply(Categorical)
actual = self.read_table(pth, encoding=encoding, dtype='category')
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_chunksize(self):
# GH 10153
data = """a,b
1,a
1,b
1,b
2,c"""
expecteds = [pd.DataFrame({'a': [1, 1],
'b': Categorical(['a', 'b'])}),
pd.DataFrame({'a': [1, 2],
'b': Categorical(['b', 'c'])},
index=[2, 3])]
actuals = self.read_csv(StringIO(data), dtype={'b': 'category'},
chunksize=2)
for actual, expected in zip(actuals, expecteds):
tm.assert_frame_equal(actual, expected)
@pytest.mark.parametrize('ordered', [False, True])
@pytest.mark.parametrize('categories', [
['a', 'b', 'c'],
['a', 'c', 'b'],
['a', 'b', 'c', 'd'],
['c', 'b', 'a'],
])
def test_categorical_categoricaldtype(self, categories, ordered):
data = """a,b
1,a
1,b
1,b
2,c"""
expected = pd.DataFrame({
"a": [1, 1, 1, 2],
"b": Categorical(['a', 'b', 'b', 'c'],
categories=categories,
ordered=ordered)
})
dtype = {"b": CategoricalDtype(categories=categories,
ordered=ordered)}
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_categoricaldtype_unsorted(self):
data = """a,b
1,a
1,b
1,b
2,c"""
dtype = CategoricalDtype(['c', 'b', 'a'])
expected = pd.DataFrame({
'a': [1, 1, 1, 2],
'b': Categorical(['a', 'b', 'b', 'c'], categories=['c', 'b', 'a'])
})
result = self.read_csv(StringIO(data), dtype={'b': dtype})
tm.assert_frame_equal(result, expected)
def test_categoricaldtype_coerces_numeric(self):
dtype = {'b': CategoricalDtype([1, 2, 3])}
data = "b\n1\n1\n2\n3"
expected = pd.DataFrame({'b': Categorical([1, 1, 2, 3])})
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categoricaldtype_coerces_datetime(self):
dtype = {
'b': CategoricalDtype(pd.date_range('2017', '2019', freq='AS'))
}
data = "b\n2017-01-01\n2018-01-01\n2019-01-01"
expected = pd.DataFrame({'b': Categorical(dtype['b'].categories)})
result = self.read_csv(StringIO(data), dtype=dtype)
import requests
import pandas as pd
from bs4 import BeautifulSoup
import time
import re
from data_loader.accent_data_loader import AccentDataLoader
from accent_dataset.audio_downloader import AudioDownloader
ROOT_URL = 'http://accent.gmu.edu/'
BROWSE_LANGUAGE_URL = 'browse_language.php?function=find&language={}'
# WAIT = 1.2
WAIT = 0.1
DEBUG = True
def get_htmls(urls):
'''
Retrieves HTML in text form from the given URLs
:param urls (list): List of urls from which to retrieve html
:return (list): list of HTML strings
'''
htmls = []
for url in urls:
if DEBUG:
print('downloading from {}'.format(url))
htmls.append(requests.get(url).text)
time.sleep(WAIT)
return (htmls)
def build_search_urls(languages):
'''
Creates search URLs from ROOT_URL and the given languages
:param languages (list): List of languages
:return (list): List of urls
'''
return ([ROOT_URL + BROWSE_LANGUAGE_URL.format(language) for language in languages])
def parse_p(p_tag):
'''
Extracts the link and speaker text fields from an HTML <p> tag
:param p_tag: BeautifulSoup <p> tag element
:return (list): [url, first text field, second text field]
'''
text = p_tag.text.replace(' ', '').split(',')
return ([ROOT_URL + p_tag.a['href'], text[0], text[1]])
def get_bio(hrefs):
'''
Retrieves HTML from list of hrefs and returns bio information
:param hrefs (list): list of hrefs
:return (DataFrame): Pandas DataFrame with bio information
'''
htmls = get_htmls(hrefs)
bss = [BeautifulSoup(html, 'html.parser') for html in htmls]
rows = []
bio_row = []
for bs in bss:
rows.append([li.text for li in bs.find('ul', 'bio').find_all('li')])
for row in rows:
bio_row.append(parse_bio(row))
return (pd.DataFrame(bio_row))
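# Illustrative scraping sketch ('mandarin' is an arbitrary example language; the calls hit
# the live accent.gmu.edu site and assume its current page structure stays as parse_p expects):
#
# >>> search_urls = build_search_urls(['mandarin'])
# >>> result_pages = [BeautifulSoup(h, 'html.parser') for h in get_htmls(search_urls)]
# >>> speaker_rows = [parse_p(p) for page in result_pages for p in page.find_all('p') if p.a]
# >>> bios = get_bio([row[0] for row in speaker_rows])   # DataFrame of bio fields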
import os
import datetime as dt
import pandas as pd
import numpy as np
from IPython.display import display
from timeit import default_timer as timer # https://stackoverflow.com/questions/7370801/how-to-measure-elapsed-time-in-python
from sklearn.model_selection import train_test_split
from hommmer.charts import accuracy
from hommmer.helpers import check_metric
class Model():
def __init__(self, y, X, media_labels, settings, model):
# set timestamp
self.timestamp = dt.datetime.today().strftime('%Y-%m-%d %H:%M')
# train-test split
if settings['split']:
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=settings['split'], random_state=0)
else:
X_train, X_test, y_train, y_test = X, X, y, y
self.settings = settings
self.model = model
self.runtime = None
# assign X and y
self.X_actual = X
self.y_actual = y
self.X_train = X_train
self.y_train = y_train
self.X_test = X_test
self.y_test = y_test
self.media_labels = media_labels
# placeholders
self.coefficients = []
def _fit(self, y, X):
return None
def results(self):
results_df = pd.DataFrame(self.contribution().sum(), columns=['contribution'])
results_df['share'] = results_df['contribution'] / results_df['contribution'].sum() * 100
results_df['coefficient'] = self.coefficients
results_df['pvalue'] = self._pvalues()
results_df = pd.concat([results_df, self._confidence_intervals()], axis=1)
return np.around(results_df, 3)
def contribution(self, X=None):
if (X) is None:
X = self.X_actual
coef_df = pd.DataFrame({'coefficient': self.coefficients}, index=X.columns)
data = []
for x in list(X.columns):
contrib = coef_df['coefficient'].loc[x] * X[x]
data.append(contrib)
contrib_df = pd.DataFrame(data)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.manifold import TSNE
import pandas as pd
import seaborn as sns
def reduce_dimensions(embeddings, fraction=None):
# Dimensionality reduction with t-SNE
# Source: https://www.kaggle.com/ferdzso/knowledge-graph-analysis-with-node2vec
number_of_embeddings = len(embeddings)
if fraction is not None:
idx = np.random.randint(number_of_embeddings, size=int(number_of_embeddings * fraction))
x = embeddings[idx, :]
print(f'Number of embeddings: {len(idx)} (fractioned by factor {fraction})')
else:
x = embeddings
print(f'Number of embeddings: {len(x)}')
# Perform 2D t-SNE dimensionality reduction
x_embedded = TSNE(n_components=2, random_state=1).fit_transform(x)
print(f't-SNE object was trained with {x.shape[0]} items')
return x_embedded
def visualize(embeddings, filename=None, labels=None):
if labels is not None:
# Generate random colors for each label
show_legend = False
if len(set(labels)) == 4:
colors = ["r", "b", "g", "y"]
show_legend = True
if len(set(labels)) == 8:
colors = ["r", "b", "g", "y", "c", "m", "k", "burlywood"]
show_legend = True
else:
colors = np.random.rand(len(set(labels)), 3)
label_map = {}
for i, l in enumerate(labels):
if l not in label_map:
label_map[l] = []
label_map[l].append(i)
fig, ax = plt.subplots(figsize=(15, 15))
# Layout
fig.suptitle(f'Number of labels: {len(set(labels))}')
fig.tight_layout()
for i, lab in enumerate(label_map.keys()):
idx = label_map[lab]
x = list(embeddings[idx, 0])
y = list(embeddings[idx, 1])
ax.scatter(x, y, s=150, color=colors[i], label=lab, alpha=0.5, edgecolors='none')
if show_legend:
plt.legend(loc=0, fontsize=10)
else:
plt.figure(figsize=(15, 15))
x = list(embeddings[:, 0])
y = list(embeddings[:, 1])
plt.scatter(x, y, alpha=0.5)
# Save or show graph
if filename is None:
plt.show()
else:
plt.savefig(filename)
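# Minimal sketch with random embeddings (purely illustrative; real use would pass
# node2vec or other learned embeddings):
#
# >>> embeddings = np.random.rand(200, 64)
# >>> reduced = reduce_dimensions(embeddings, fraction=0.5)
# >>> visualize(reduced)                       # show a scatter plot
# >>> visualize(reduced, filename='tsne.png')  # or write it to disk instead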
def visualize_highlight(embeddings, id1, id2, label, filename=None, labels=None, colors=None):
# Generate random colors for each label
if colors is None:
colors = np.random.rand(len(set(labels)), 3)
label_map = {}
for i, l in enumerate(labels):
if l not in label_map:
label_map[l] = []
label_map[l].append(i)
fig, ax = plt.subplots(figsize=(15, 15))
# Layout
fig.suptitle(f'Number of labels: {len(set(labels))}')
fig.tight_layout()
df_array = []
for i, lab in enumerate(label_map.keys()):
idx = label_map[lab]
x = list(embeddings[idx, 0])
y = list(embeddings[idx, 1])
assert len(x) == len(y)
for index, emb in enumerate(x):
df_array.append([lab, x[index], y[index]])
print(colors)
dataframe = pd.DataFrame(df_array)
import os
import re
import sys
import pickle
import csv
import gzip
import dill
import numpy as np
from collections import Counter
from itertools import chain
from bs4 import BeautifulSoup
import pandas as pd
from gensim import corpora
from gensim.parsing.preprocessing import STOPWORDS
from collections import defaultdict
from joblib import Parallel, delayed
def tokenize_cnn(inputdir, inputfile, outputdir, maxtokens=10000):
df = pd.read_csv(os.path.join(inputdir, inputfile))
# Clean up summaries
df['true_summary'] = df['true_summary'].str.replace('[^A-Za-z0-9]+', ' ').str.strip().fillna("")
df['sentence'] = df['sentence'].str.replace('[^A-Za-z0-9]+', ' ').str.strip().fillna("")
df['query'] = df['query'].str.replace('[^A-Za-z0-9]+', ' ').str.strip().fillna("")
frequency = defaultdict(int)
n = df.shape[0]
div = n // 10
qtokens, stokens, tstokens = [], [], []
for i, row in df.iterrows():
qtokens+= [row['query'].split(" ")]
stokens+= [row['sentence'].split(" ")]
tstokens+= [row['true_summary'].split(" ")]
if ((i + 1) % div) == 0:
print("%i/%i (%i%%) complete rows." % (i + 1, n, (i + 1) / float(n) * 100 ))
# Getting the dictionary with token info
dictionary = corpora.Dictionary(stokens + qtokens + tstokens )
# Mapping to numeric list -- adding plus one to tokens
dictionary.token2id = {k: v + 1 for k,v in dictionary.token2id.items()}
dictionary.id2token = {v:k for k,v in dictionary.token2id.items()}
print("Exporting word to index and dictionary to word indices")
output = open(os.path.join(outputdir,'LSTMDQN_Dic_token2id_cnn.pkl'), 'ab+')
pickle.dump(dictionary.token2id, output)
output.close()
output = open(os.path.join(outputdir,'LSTMDQN_Dic_id2token_cnn.pkl'), 'ab+')
pickle.dump(dictionary.id2token, output)
output.close()
odf0 = pd.DataFrame.from_dict(dictionary.dfs, orient='index')
# -*- coding: utf-8 -*-
import pytest
import os
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
import numpy.testing as npt
from numpy.linalg import norm, lstsq
from numpy.random import randn
from flaky import flaky
from lifelines import CoxPHFitter, WeibullAFTFitter, KaplanMeierFitter, ExponentialFitter
from lifelines.datasets import load_regression_dataset, load_larynx, load_waltons, load_rossi
from lifelines import utils
from lifelines import exceptions
from lifelines.utils.sklearn_adapter import sklearn_adapter
from lifelines.utils.safe_exp import safe_exp
def test_format_p_values():
assert utils.format_p_value(2)(0.004) == "<0.005"
assert utils.format_p_value(3)(0.004) == "0.004"
assert utils.format_p_value(3)(0.000) == "<0.0005"
assert utils.format_p_value(3)(0.005) == "0.005"
assert utils.format_p_value(3)(0.2111) == "0.211"
assert utils.format_p_value(3)(0.2119) == "0.212"
def test_ridge_regression_with_penalty_is_less_than_without_penalty():
X = randn(2, 2)
Y = randn(2)
assert norm(utils.ridge_regression(X, Y, c1=2.0)[0]) <= norm(utils.ridge_regression(X, Y)[0])
assert norm(utils.ridge_regression(X, Y, c1=1.0, c2=1.0)[0]) <= norm(utils.ridge_regression(X, Y)[0])
def test_ridge_regression_with_extreme_c1_penalty_equals_close_to_zero_vector():
c1 = 10e8
c2 = 0.0
offset = np.ones(2)
X = randn(2, 2)
Y = randn(2)
assert norm(utils.ridge_regression(X, Y, c1, c2, offset)[0]) < 10e-4
def test_ridge_regression_with_extreme_c2_penalty_equals_close_to_offset():
c1 = 0.0
c2 = 10e8
offset = np.ones(2)
X = randn(2, 2)
Y = randn(2)
assert norm(utils.ridge_regression(X, Y, c1, c2, offset)[0] - offset) < 10e-4
def test_lstsq_returns_similar_values_to_ridge_regression():
X = randn(2, 2)
Y = randn(2)
expected = lstsq(X, Y, rcond=None)[0]
assert norm(utils.ridge_regression(X, Y)[0] - expected) < 10e-4
def test_lstsq_returns_correct_values():
X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
beta, V = utils.ridge_regression(X, y)
expected_beta = [-0.98684211, -0.07894737]
expected_v = [
[-0.03289474, -0.49342105, 0.06578947, 0.03289474, 0.49342105],
[-0.30263158, 0.46052632, -0.39473684, 0.30263158, -0.46052632],
]
assert norm(beta - expected_beta) < 10e-4
for V_row, e_v_row in zip(V, expected_v):
assert norm(V_row - e_v_row) < 1e-4
def test_unnormalize():
df = load_larynx()
m = df.mean(0)
s = df.std(0)
ndf = utils.normalize(df)
npt.assert_almost_equal(df.values, utils.unnormalize(ndf, m, s).values)
def test_normalize():
df = load_larynx()
n, d = df.shape
npt.assert_almost_equal(utils.normalize(df).mean(0).values, np.zeros(d))
npt.assert_almost_equal(utils.normalize(df).std(0).values, np.ones(d))
def test_median():
sv = pd.DataFrame(1 - np.linspace(0, 1, 1000))
assert utils.median_survival_times(sv) == 500
def test_median_accepts_series():
sv = pd.Series(1 - np.linspace(0, 1, 1000))
assert utils.median_survival_times(sv) == 500
def test_qth_survival_times_with_varying_datatype_inputs():
sf_list = [1.0, 0.75, 0.5, 0.25, 0.0]
sf_array = np.array([1.0, 0.75, 0.5, 0.25, 0.0])
sf_df_no_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0])
sf_df_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])
sf_series_index = pd.Series([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])
sf_series_no_index = pd.Series([1.0, 0.75, 0.5, 0.25, 0.0])
q = 0.5
assert utils.qth_survival_times(q, sf_list) == 2
assert utils.qth_survival_times(q, sf_array) == 2
assert utils.qth_survival_times(q, sf_df_no_index) == 2
assert utils.qth_survival_times(q, sf_df_index) == 30
assert utils.qth_survival_times(q, sf_series_index) == 30
assert utils.qth_survival_times(q, sf_series_no_index) == 2
def test_qth_survival_times_multi_dim_input():
sf = np.linspace(1, 0, 50)
sf_multi_df = pd.DataFrame({"sf": sf, "sf**2": sf ** 2})
medians = utils.qth_survival_times(0.5, sf_multi_df)
assert medians["sf"].loc[0.5] == 25
assert medians["sf**2"].loc[0.5] == 15
def test_qth_survival_time_returns_inf():
sf = pd.Series([1.0, 0.7, 0.6])
assert utils.qth_survival_time(0.5, sf) == np.inf
def test_qth_survival_time_accepts_a_model():
kmf = KaplanMeierFitter().fit([1.0, 0.7, 0.6])
assert utils.qth_survival_time(0.8, kmf) > 0
def test_qth_survival_time_with_dataframe():
sf_df_no_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Retrieve bikeshare trips data."""
# pylint: disable=invalid-name
import os
import re
from glob import glob
from typing import Dict, List
from zipfile import ZipFile
import pandas as pd
import pandera as pa
import requests
from src.utils import log_prefect
trips_schema = pa.DataFrameSchema(
columns={
"TRIP_ID": pa.Column(pa.Int),
"TRIP__DURATION": pa.Column(pa.Int),
"START_STATION_ID": pa.Column(
pa.Int,
nullable=True,
),
"START_TIME": pa.Column(
pa.Timestamp,
checks=[pa.Check(lambda s: s.dt.year.isin([2021, 2022]))],
),
"START_STATION_NAME": pa.Column(pd.StringDtype()),
"END_STATION_ID": pa.Column(
pa.Int,
nullable=True,
),
"END_TIME": pa.Column(
pa.Timestamp,
checks=[pa.Check(lambda s: s.dt.year.isin([2021, 2022]))],
),
"END_STATION_NAME": pa.Column(pd.StringDtype()),
"BIKE_ID": pa.Column(pa.Int, nullable=True),
"USER_TYPE": pa.Column(
pd.StringDtype(),
checks=[
pa.Check(
lambda s: s.isin(["Annual Member", "Casual Member"]),
)
],
),
},
index=pa.Index(pa.Int),
)
urls_schema = pa.DataFrameSchema(
columns={
"url": pa.Column(pd.StringDtype()),
"name": pa.Column(pd.StringDtype()),
"format": pa.Column(pd.StringDtype()),
"state": pa.Column(pd.StringDtype()),
},
index=pa.Index(pa.Int),
)
get_data_status_schema = pa.DataFrameSchema(
columns={
"trips_file_name": pa.Column(pd.StringDtype()),
"last_modified_opendata": pa.Column(
pd.DatetimeTZDtype(tz="America/Toronto")
),
"parquet_file_exists": pa.Column(pd.BooleanDtype()),
"parquet_file_outdated": pa.Column(pd.BooleanDtype()),
"downloaded_file": pa.Column(pd.BooleanDtype()),
},
index=pa.Index(pa.Int),
)
raw_trips_schema = pa.DataFrameSchema(
columns={
"TRIP_ID": pa.Column(pa.Int),
"TRIP__DURATION": pa.Column(pa.Int),
"START_STATION_ID": pa.Column(pa.Int, nullable=True),
"START_STATION_NAME": pa.Column(pd.StringDtype()),
"START_TIME": pa.Column(
pa.Timestamp,
checks=[pa.Check(lambda s: s.dt.year.isin([2021, 2022]))],
),
"USER_TYPE": pa.Column(
pd.StringDtype(),
checks=[
pa.Check(
lambda s: s.isin(["Annual Member", "Casual Member"]),
)
],
),
},
index=pa.Index(pa.Int),
)
def get_local_csv_list(
raw_data_dir: str, years_wanted: List[int], use_prefect: bool = False
) -> List[str]:
"""Getting list of local CSV data files."""
log_prefect("Getting list of local CSV data files...", True, use_prefect)
files_by_year = [glob(f"{raw_data_dir}/*{y}*.csv") for y in years_wanted]
csvs = sorted([f for files_list in files_by_year for f in files_list])
log_prefect("Done.", False, use_prefect)
return csvs
def get_ridership_data(
raw_data_dir: str,
url: str,
last_modified_timestamp: pd.Timestamp,
use_prefect: bool = False,
) -> Dict[str, str]:
"""Download bikeshare trips data."""
# Split URL to get the file name
file_name = os.path.basename(url)
year = os.path.splitext(file_name)[0].split("-")[-1]
zip_filepath = os.path.join(raw_data_dir, file_name)
destination_dir = os.path.abspath(os.path.join(zip_filepath, os.pardir))
# Check if previously downloaded contents are up-to-date
parquet_data_filepath = os.path.join(raw_data_dir, "agg_data.parquet.gzip")
has_parquet = os.path.exists(parquet_data_filepath)
if has_parquet:
parquet_file_modified_time = (
pd.read_parquet(parquet_data_filepath)
.iloc[0]
.loc["last_modified_timestamp"]
)
# print(last_modified_timestamp, parquet_file_modified_time)
parquet_file_outdated_check = (
os.path.exists(zip_filepath)
and parquet_file_modified_time < last_modified_timestamp
)
else:
parquet_file_modified_time = pd.NaT
parquet_file_outdated_check = True
if not has_parquet or parquet_file_outdated_check:
log_prefect(
f"Downloading raw data file {file_name}...", True, use_prefect
)
# print(zip_filepath, destination_dir, dest_filepath, existing_file)
if not os.path.exists(zip_filepath):
r = requests.get(url)
# Writing the file to the local file system
with open(zip_filepath, "wb") as output_file:
output_file.write(r.content)
csv_files = glob(f"{raw_data_dir}/*{year}-*.csv")
# print(csv_files)
if not csv_files:
with ZipFile(zip_filepath, "r") as zipObj:
# Extract all the contents of zip file
zipObj.extractall(destination_dir)
status_dict = dict(
trips_file_name=file_name,
last_modified_opendata=last_modified_timestamp,
parquet_file_exists=has_parquet,
parquet_file_data_last_modified=parquet_file_modified_time,
parquet_file_outdated=parquet_file_outdated_check,
downloaded_file=True,
)
log_prefect("Done downloading raw data.", False, use_prefect)
else:
log_prefect(
f"Found the most recent version of {file_name} locally. "
"Did nothing.",
True,
use_prefect,
)
status_dict = dict(
trips_file_name=file_name,
last_modified_opendata=last_modified_timestamp,
parquet_file_exists=has_parquet,
parquet_file_data_last_modified=parquet_file_modified_time,
parquet_file_outdated=parquet_file_outdated_check,
downloaded_file=False,
)
return status_dict
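# Illustrative call sketch (the URL and timestamp below are placeholders, not a real resource):
#
# >>> status = get_ridership_data(
# ...     raw_data_dir='data/raw',
# ...     url='https://example.com/bikeshare-ridership-2022.zip',
# ...     last_modified_timestamp=pd.Timestamp('2022-06-01', tz='America/Toronto'),
# ... )
# >>> status['downloaded_file']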
@pa.check_output(urls_schema)
def get_file_urls(
main_dataset_url: str,
dataset_params: Dict,
years_wanted: Dict[int, List],
use_prefect: bool = False,
) -> pd.DataFrame:
"""Get list of ridership file URLs."""
log_prefect("Retrieving data URLs...", True, use_prefect)
package = requests.get(main_dataset_url, params=dataset_params).json()
resources = package["result"]["resources"]
df = (
pd.DataFrame.from_records(resources)[
["last_modified", "url", "name", "format", "state"]
]
.rename(columns={"last_modified": "last_modified_opendata"})
.astype(
{
"url": pd.StringDtype(),
"name": pd.StringDtype(),
"format": pd.StringDtype(),
"state": | pd.StringDtype() | pandas.StringDtype |
from flask import Flask, request, jsonify, g, render_template
from flask_json import FlaskJSON, JsonError, json_response, as_json
import plotly.graph_objects as go
from datetime import datetime
from datetime import timedelta
import glob
import requests
from app import db
from app.models import *
from app.plots import bp
import pandas as pd
import io
from app.api import vis
from sqlalchemy import sql
import numpy as np
from app.tools.curvefit.core.model import CurveModel
from app.tools.curvefit.core.functions import gaussian_cdf, gaussian_pdf
PHU = {'the_district_of_algoma':'The District of Algoma Health Unit',
'brant_county':'Brant County Health Unit',
'durham_regional':'Durham Regional Health Unit',
'grey_bruce':'Grey Bruce Health Unit',
'haldimand_norfolk':'Haldimand-Norfolk Health Unit',
'haliburton_kawartha_pine_ridge_district':'Haliburton, Kawartha, Pine Ridge District Health Unit',
'halton_regional':'Halton Regional Health Unit',
'city_of_hamilton':'City of Hamilton Health Unit',
'hastings_and_prince_edward_counties':'Hastings and Prince Edward Counties Health Unit',
'huron_county':'Huron County Health Unit',
'chatham_kent':'Chatham-Kent Health Unit',
'kingston_frontenac_and_lennox_and_addington':'Kingston, Frontenac, and Lennox and Addington Health Unit',
'lambton':'Lambton Health Unit',
'leeds_grenville_and_lanark_district':'Leeds, Grenville and Lanark District Health Unit',
'middlesex_london':'Middlesex-London Health Unit',
'niagara_regional_area':'Niagara Regional Area Health Unit',
'north_bay_parry_sound_district':'North Bay Parry Sound District Health Unit',
'northwestern':'Northwestern Health Unit',
'city_of_ottawa':'City of Ottawa Health Unit',
'peel_regional':'Peel Regional Health Unit',
'perth_district':'Perth District Health Unit',
'peterborough_county_city':'Peterborough County–City Health Unit',
'porcupine':'Porcupine Health Unit',
'renfrew_county_and_district':'Renfrew County and District Health Unit',
'the_eastern_ontario':'The Eastern Ontario Health Unit',
'simcoe_muskoka_district':'Simcoe Muskoka District Health Unit',
'sudbury_and_district':'Sudbury and District Health Unit',
'thunder_bay_district':'Thunder Bay District Health Unit',
'timiskaming':'Timiskaming Health Unit',
'waterloo':'Waterloo Health Unit',
'wellington_dufferin_guelph':'Wellington-Dufferin-Guelph Health Unit',
'windsor_essex_county':'Windsor-Essex County Health Unit',
'york_regional':'York Regional Health Unit',
'southwestern':'Southwestern Public Health Unit',
'city_of_toronto':'City of Toronto Health Unit',
'huron_perth_county':'Huron Perth Public Health Unit'}
def get_dir(data, today=datetime.today().strftime('%Y-%m-%d')):
source_dir = 'data/' + data['classification'] + '/' + data['stage'] + '/'
load_dir = source_dir + data['source_name'] + '/' + data['table_name']
file_name = data['table_name'] + '_' + today + '.' + data['type']
file_path = load_dir + '/' + file_name
return load_dir, file_path
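# Example of the directory layout get_dir builds (hypothetical metadata dict):
#
# >>> meta = {'classification': 'public', 'stage': 'raw', 'source_name': 'phac',
# ...         'table_name': 'cases', 'type': 'csv'}
# >>> get_dir(meta, today='2020-05-01')
# ('data/public/raw/phac/cases', 'data/public/raw/phac/cases/cases_2020-05-01.csv')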
def get_file(data):
load_dir, file_path = get_dir(data)
files = glob.glob(load_dir + "/*." + data['type'])
files = [file.split('_')[-1] for file in files]
files = [file.split('.csv')[0] for file in files]
dates = [datetime.strptime(file, '%Y-%m-%d') for file in files]
max_date = max(dates).strftime('%Y-%m-%d')
load_dir, file_path = get_dir(data, max_date)
return file_path
## Tests
def new_tests_plot():
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
temp = df.loc[df['New tests'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['New tests'].tail(1).values[0],number = {'font': {'size': 60}},))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['New tests'],line=dict(color='#FFF', dash='dot'), visible=True, opacity=0.5, name="Value"))
fig.add_trace(go.Scatter(x=df.Date,y=temp['New tests'].rolling(7).mean(),line=dict(color='red',width=3), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['New tests'].iloc[-2],
'increasing': {'color':'green'},
'decreasing': {'color':'red'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"New Tests<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",)
div = fig.to_json()
p = Viz.query.filter_by(header="new tests").first()
p.html = div
db.session.add(p)
db.session.commit()
return
def total_tests_plot():
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
temp = df.loc[df['Total tested'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['Total tested'].tail(1).values[0],
number = {'font': {'size': 60}},
))
# fig.add_trace(go.Scatter(x=temp.Date,y=temp['Total tested'],line=dict(color='#5E5AA1',dash='dot'), visible=True, opacity=0.5, name="Value"))
fig.add_trace(go.Scatter(x=df.Date,y=df['Total tested'].rolling(7).mean(),line=dict(color='red', width=3), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['Total tested'].iloc[-2],
'increasing': {'color':'green'},
'decreasing': {'color':'red'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True,'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"Total Tested<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
div = fig.to_json()
p = Viz.query.filter_by(header="tests").first()
p.html = div
db.session.add(p)
db.session.commit()
return
def tested_positve_plot():
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
temp = df.loc[df['New Positive pct'].notna()]
temp = df.loc[df['New Positive pct'] > 0]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['New Positive pct'].tail(1).values[0]*100,
number = {'font': {'size': 60}}
))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['New Positive pct'],line=dict(color='#FFF', dash='dot'),visible=True, opacity=0.5, name="Value"))
fig.add_trace(go.Scatter(x=df.Date,y=temp['New Positive pct'].rolling(7).mean(),line=dict(color='red',width=3), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['New Positive pct'].iloc[-2]*100,
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text': f"Percent Positivity<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
div = fig.to_json()
p = Viz.query.filter_by(header="tested positive").first()
p.html = div
db.session.add(p)
db.session.commit()
return
def under_investigation_plot():
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
temp = df.loc[df['Total tested'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['Under Investigation'].tail(1).values[0],number = {'font': {'size': 60}},))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['Under Investigation'],line=dict(color='#FFF', dash='dot'), visible=True, opacity=0.5, name="Value"))
fig.add_trace(go.Scatter(x=df.Date,y=temp['Under Investigation'].rolling(7).mean(),line=dict(color='red',width=3), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['Under Investigation'].iloc[-2],
'increasing': {'color':'grey'},
'decreasing': {'color':'grey'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"Under Investigation<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",)
div = fig.to_json()
p = Viz.query.filter_by(header="under investigation").first()
p.html = div
db.session.add(p)
db.session.commit()
return
## Hospital
def in_hospital_plot(region='ontario'):
if region=='ontario':
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
temp = df.loc[df['Hospitalized'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = temp['Hospitalized'].tail(1).values[0],number = {'font': {'size': 60}},))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['Hospitalized'],line=dict(color='red', width=3),visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=temp.Date,y=temp['ICU'].rolling(7).mean(),line=dict(color='#FFF', dash='dot'), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['Hospitalized'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"COVID-19 Patients In Hospital<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",)
div = fig.to_json()
p = Viz.query.filter_by(header="in hospital", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
def in_icu_plot(region='ontario'):
if region=='ontario':
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
'''
Function library for using PyTorch to run representational similarity analysis on convolutional neural networks.
This library contains a collection of functions for easily conducting representational similarity analysis (RSA) and related
analyses on convolutional neural networks (CNNs), serving as a wrapper for various PyTorch functions. There are also
many utility functions to improve quality of life.
'''
import torch
import torchvision.models as models
import torchvision.transforms as transforms
from torch.utils import model_zoo
import torchvision
import numpy as np
from os.path import join as opj
from collections import OrderedDict
from collections import defaultdict
import ipdb
import os
from PIL import Image, ImageOps
import copy
import random
from scipy import stats
import itertools as it
from sklearn import manifold
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.image import BboxImage
from matplotlib.transforms import Bbox, TransformedBbox
import matplotlib as mpl
import ipympl
import cv2
import pickle
import seaborn as sns
import pandas as pd
from sklearn import svm
from joblib import Parallel, delayed
import multiprocessing
import sys
from importlib import reload
import time
import statsmodels.api as sm
from scipy import math
import cornet
import pathos
###################################################
################ UTILITY FUNCTIONS ################
###################################################
def parallel_process(func,arg_dict,num_cores,method='pathos',make_combinations=False):
'''
Wrapper to streamline running a function many times in parallel.
Utility function to run a function many times in parallel, with many different combinations of arguments.
Provide the function, along with a dictionary specifying the arguments to feed into the function.
Can run either with the pathos or multiprocessing package.
Also contains a flag, make_combinations, to do all combinations of input arguments if desired.
Args:
func:
The function you want to run many times.
arg_dict:
The structure depends on the make_combinations flag. If this flag is set to false, input a list of dictionaries, where each dictionary contains a set of arguments for one call of the function (i.e., you manually specify each set of arguments for each function call); and each key is an argument name, with the corresponding value being an argument value. If make_combinations is set to false, input a single dictionary of lists, where each key is an argument name, and each value is a list of values to put in for that argument. In the latter case, the function will run every possible permutation of argument values from the different lists.
num_cores:
The number of cores to run in parallel
method:
Which Python module to use for the multiprocessing; either pathos or multiprocessing.
make_combinations:
Whether arg_dict is a list of dictionaries containing each set of arguments to put in (in this case put False), or a single dictionary of lists (in this case put True)
Returns:
List containing the results of all calls to the function.
Examples:
>>> def mult(x,y):
return x*y
>>> parallel_process(func = mult,
arg_dict = [{'x':2,'y':4},{'x':5,'y':3}],
num_cores = 2,
method = 'pathos',
make_combinations = False)
[8,15]
>>> parallel_process(func = mult,
arg_dict = {'x':[2,1,3],'y':[6,2,4]},
num_cores=2,
method='pathos',
make_combinations=True)
[12, 4, 8, 6, 2, 4, 18, 6, 12]
'''
# Depending on which method is used, different input structure is needed.
if make_combinations:
for arg in arg_dict:
if type(arg_dict[arg]) != list:
arg_dict[arg] = [arg_dict[arg]]
arg_dict_list = [dict(zip(arg_dict.keys(), values)) for values in it.product(*arg_dict.values())]
else:
arg_dict_list = arg_dict
if method=='pathos':
pool = pathos.multiprocessing.ProcessPool(nodes=num_cores)
worker_list = [pool.apipe(func,**kwd_dict) for kwd_dict in arg_dict_list]
results = [p.get() for p in worker_list]
pool.close()
pool.join()
pool.clear()
if method=='multiprocessing':
pool = multiprocessing.Pool(processes=num_cores)
worker_list = [pool.apply_async(func,kwds=kwd_dict) for kwd_dict in arg_dict_list]
results = [p.get() for p in worker_list]
pool.close()
pool.join()
return results
def corr_dissim(x,y):
"""
Convenience function for computing 1-r for two vectors (to turn correlation into a dissimilarity metric).
Function that returns 1-r for two vectors x and y.
Args:
x: First vector (numpy array)
y: Second vector (numpy array)
Returns:
1-r
"""
if np.max(np.abs(x-y))==0:
r = 0
elif (np.sum(np.abs(x))==0) or (np.sum(np.abs(y))==0):
r = np.nan
else:
r = 1-np.corrcoef(x,y)[0,1]
return r
# Definition of the default dissimilarity functions.
dissim_defs = {'corr':corr_dissim,
'euclidean_distance':lambda x,y:np.linalg.norm(x-y)}
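# Quick sanity check of the default dissimilarity functions (illustrative only):
#
# >>> a = np.array([1.0, 2.0, 3.0])
# >>> b = np.array([3.0, 2.0, 1.0])
# >>> dissim_defs['corr'](a, b)                 # perfectly anticorrelated -> 1 - (-1) = 2
# 2.0
# >>> round(dissim_defs['euclidean_distance'](a, b), 3)
# 2.828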
def stderr(array):
'''
Convenience function that computes the standard error of an array (STD/sqrt(n))
Args:
array: 1D numpy array
Returns:
The standard error of the array.
'''
output = np.std(array)/(float(len(array))**.5)
return output
def almost_equals(x,y,thresh=5):
'''
Convenience function for determining whether two 1D arrays (x and y) are approximately equal to each other (within a range of thresh for each element)
Args:
x: 1D numpy array to compare to y
y: 1D numpy array to compare to x
thresh: how far apart any element of x can be from the corresponding element of y
Returns:
True if each element of x is within thresh of each corresponding element of y, false otherwise.
'''
return all(abs(x[i] - y[i]) < thresh for i in range(len(x)))
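# Example of the intended behaviour (values chosen arbitrarily):
#
# >>> almost_equals((100, 150, 200), (102, 148, 203), thresh=5)
# True
# >>> almost_equals((100, 150, 200), (110, 148, 203), thresh=5)
# False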
def filedelete(fname):
"""
Convenience function to delete files and directories without fuss. Deletes the file or directory if it exists, does nothing otherwise.
Args:
fname: path of the file or directory
Returns:
Returns nothing.
"""
if os.path.exists(fname):
try:
if os.path.isdir(fname):
# delete folder
shutil.rmtree(fname)
return
else:
# delete file
os.remove(fname)
return
except:
return
else:
return
def pickle_read(fname):
'''
Convenience function to read in a pickle file.
Args:
fname: file path of the pickle file
Returns:
The Python object stored in the pickle file.
'''
output = pickle.load(open(fname,'rb'))
return output
def pickle_dump(obj,fname):
'''
Convenience function to dump a Python object to a pickle file.
Args:
obj: Any pickle-able Python object.
fname: The file path to dump the pickle file to.
Returns:
Nothing.
'''
pickle.dump(obj,open(fname,"wb"))
###################################################
################ IMAGE PREPARATION ################
###################################################
def nonwhite_pixel_mask(image,thresh=230):
'''
Takes in a 3D numpy array representing an image (m x n x 3), and returns a 2D boolean mask indicating every non-white pixel. Useful for making a mask indicating the stimulus footprint on a white background.
Args:
image: 3D numpy array (width m, height n, 3 pixel channels)
thresh: RGB cutoff to count as white; if all three of R, G, and B exceed this value, the pixel is counted as white.
Returns:
2D numpy array with a 0 for every white pixel and a 1 for every non-white pixel.
'''
x,y = image.shape[0],image.shape[1]
mask = np.zeros((x,y))
for i in range(x):
for j in range(y):
if all(image[i,j,:]>thresh):
mask[i,j] = 0
else:
mask[i,j] = 1
return mask
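# Tiny synthetic example: a 1x2 image with one white and one red pixel.
#
# >>> img = np.array([[[255, 255, 255], [255, 0, 0]]])
# >>> nonwhite_pixel_mask(img, thresh=230)
# array([[0., 1.]])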
def save_mask(mask,fname):
'''
Takes in a 2D numpy array with a binary mask (1s and 0s), and saves it as an RGB JPEG image.
Args:
mask: 2D numpy array
fname: Filename to save the image.
Returns:
The Image object (in addition to saving it at specified filename)
'''
new_mask = Image.fromarray((np.dstack([mask,mask,mask])*255).astype('uint8'),'RGB')
new_mask.save(fname,format='jpeg')
return(new_mask)
def shrink_image(input_image,shrink_ratio,pad_color=(255,255,255),output_size=(224,224)):
'''
Put in a PIL image, and it'll shrink the image, while keeping the same resolution as original image and padding the sides with the desired color.
Args:
input_image: PIL image you want to shrink.
shrink_ratio: how much to shrink each dimension of the image by (e.g., .5 to halve the length and width)
pad_color: RGB tuple indicating the color to fill the margins of the image with
output_size: desired dimensions of the output image
Returns:
PIL image, shrunk to desired dimensions.
'''
orig_size = input_image.size
pad_amount = int(round(orig_size[0]*(1-shrink_ratio)/(2*shrink_ratio)))
new_size = (orig_size[0]+pad_amount*2,orig_size[1]+pad_amount*2)
output_image = Image.new("RGB",new_size,color=pad_color)
output_image.paste(input_image,(pad_amount,pad_amount))
output_image = output_image.resize(output_size,Image.ANTIALIAS)
return(output_image)
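# Illustrative call ('stimulus.jpg' is a placeholder path):
#
# >>> im = Image.open('stimulus.jpg').convert('RGB')
# >>> small = shrink_image(im, shrink_ratio=0.5)   # half-size stimulus on white padding
# >>> small.size
# (224, 224)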
def alphatize_image(im,alpha_target=(255,255,255),alpha_range=5):
'''
Takes in a PIL image, and makes RGB values within a given range transparent. Any pixel all of whose RGB values are within alpha_range of alpha_target will be made transparent. Used in MDS plotting function.
Args:
im: Image in PIL format
alpha_target: RGB triplet indicating which values to make transparent.
alpha_range: how far away a particular RGB value can be from alpha_target and still
make it transparent.
Returns:
PIL image with all pixels in range of alpha_target made transparent.
'''
image_alpha = im.convert('RGBA')
pixel_data = list(image_alpha.getdata())
for i,pixel in enumerate(pixel_data):
if almost_equals(pixel[:3],alpha_target,alpha_range):
pixel_data[i] = (255,255,255,0)
image_alpha.putdata(pixel_data)
return image_alpha
# Standard transformation to apply to images before reading them in
standard_transform = transforms.Compose(
[transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
def preprocess_image(input_image,transform=standard_transform):
'''
Preprocesses an image to prepare it for running through a network; resizes it, turns it into a tensor, standardizes the mean and SD of the pixels, and pads it to be 4D, as required by many networks (one "dummy" dimension).
Args:
input_image: PIL image
Returns:
Preprocessed image
'''
output_image = transform(input_image).unsqueeze(0)
return output_image
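# Illustrative call ('stimulus.jpg' is a placeholder path); yields a 4D tensor with a
# leading batch dimension, ready to pass through a torchvision model:
#
# >>> im = Image.open('stimulus.jpg').convert('RGB')
# >>> batch = preprocess_image(im)
# >>> batch.shape[:2]
# torch.Size([1, 3])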
###################################################
############# DEALING WITH MODELS #################
###################################################
def get_layer_activation(module,input_,output_):
'''
Utility function that is attached as a "forward hook" to each model layer to store the activations as a numpy array in the dictionary layer_activations.
'''
layer_activations[module.layer_name] = output_.cpu().detach().numpy()
def index_nested(the_item,indices):
'''
Utility function to help with retrieving model layers that are buried in many levels of indexing that can be either numerical (array) or an attribute.
args:
the_item: top-level item you want to pull something out of
indices: list of indices; each element is either a number (if that level of indexing is array-based), or a string if that level is indexing based on attribute.
return:
Whatever you've pulled out of the_item following the given indices.
Examples:
>>> import torchvision.models as models
>>> alexnet = models.alexnet()
>>> index_nested(alexnet._modules,['features', 11])
ReLU(inplace=True)
'''
for ind in indices:
if type(ind)!=tuple:
the_item = the_item[ind]
else:
the_item = getattr(the_item,ind[1])
return the_item
def fetch_layers_internal(current_layer,layer_pointers,layer_indices,layer_counter):
'''
Internal helper function that recursively crawls through all layers and sublayers of a network and pulls out their addresses for easy reference.
'''
layer_type = type(current_layer)
if layer_type==torch.nn.modules.container.Sequential:
for i,sub_layer in enumerate(current_layer):
layer_pointers,layer_counter = fetch_layers_internal(current_layer[i],layer_pointers,layer_indices+[i],layer_counter)
return(layer_pointers,layer_counter)
if layer_type==torchvision.models.resnet.Bottleneck:
sub_layer_list = current_layer.named_children()
sub_layer_list = [sl[0] for sl in sub_layer_list]
for sub_layer in sub_layer_list:
layer_pointers,layer_counter = fetch_layers_internal(getattr(current_layer,sub_layer),layer_pointers,layer_indices+[('attr',sub_layer)],layer_counter)
return(layer_pointers,layer_counter)
elif layer_type.__name__=='BasicConv2d':
sub_layer_list = current_layer.named_children()
sub_layer_list = [sl[0] for sl in sub_layer_list]
for sub_layer in sub_layer_list:
layer_pointers,layer_counter = fetch_layers_internal(getattr(current_layer,sub_layer),layer_pointers,layer_indices+[('attr',sub_layer)],layer_counter)
return(layer_pointers,layer_counter)
elif 'Inception' in str(layer_type):
sub_layer_list = current_layer.named_children()
sub_layer_list = [sl[0] for sl in sub_layer_list]
for sub_layer in sub_layer_list:
layer_pointers,layer_counter = fetch_layers_internal(getattr(current_layer,sub_layer),layer_pointers,layer_indices+[('attr',sub_layer)],layer_counter)
return(layer_pointers,layer_counter)
elif 'CORblock_Z' in str(layer_type):
sub_layer_list = current_layer.named_children()
sub_layer_list = [sl[0] for sl in sub_layer_list]
for sub_layer in sub_layer_list:
layer_pointers,layer_counter = fetch_layers_internal(getattr(current_layer,sub_layer),layer_pointers,layer_indices+[('attr',sub_layer)],layer_counter)
return(layer_pointers,layer_counter)
elif 'CORblock_S' in str(layer_type):
sub_layer_list = current_layer.named_children()
sub_layer_list = [sl[0] for sl in sub_layer_list]
for sub_layer in sub_layer_list:
layer_pointers,layer_counter = fetch_layers_internal(getattr(current_layer,sub_layer),layer_pointers,layer_indices+[('attr',sub_layer)],layer_counter)
return(layer_pointers,layer_counter)
elif 'Conv2d' in str(layer_type):
num_convs = len([layer for layer in layer_pointers if 'conv' in layer[0]])
layer_pointers[('conv'+str(num_convs+1),layer_counter)] = layer_indices
layer_counter += 1
return(layer_pointers,layer_counter)
elif 'Linear' in str(layer_type):
num_linear = len([layer for layer in layer_pointers if 'fc' in layer[0]])
layer_pointers[('fc'+str(num_linear+1),layer_counter)] = layer_indices
layer_counter += 1
return(layer_pointers,layer_counter)
elif 'MaxPool2d' in str(layer_type):
num_maxpools = len([layer for layer in layer_pointers if 'maxpool' in layer[0]])
layer_pointers[('maxpool'+str(num_maxpools+1),layer_counter)] = layer_indices
layer_counter += 1
return(layer_pointers,layer_counter)
elif 'AvgPool2d' in str(layer_type):
num_avgpools = len([layer for layer in layer_pointers if 'avgpool' in layer[0]])
layer_pointers[('avgpool'+str(num_avgpools+1),layer_counter)] = layer_indices
layer_counter += 1
return(layer_pointers,layer_counter)
elif 'ReLU' in str(layer_type):
num_relu = len([layer for layer in layer_pointers if 'relu' in layer[0]])
layer_pointers[('relu'+str(num_relu+1),layer_counter)] = layer_indices
layer_counter += 1
return(layer_pointers,layer_counter)
elif 'BatchNorm2d' in str(layer_type):
num_batchnorm = len([layer for layer in layer_pointers if 'batchnorm' in layer[0]])
layer_pointers[('batchnorm'+str(num_batchnorm+1),layer_counter)] = layer_indices
layer_counter += 1
return(layer_pointers,layer_counter)
elif 'Dropout' in str(layer_type):
num_dropout = len([layer for layer in layer_pointers if 'dropout' in layer[0]])
layer_pointers[('dropout'+str(num_dropout+1),layer_counter)] = layer_indices
layer_counter += 1
return(layer_pointers,layer_counter)
elif 'Flatten' in str(layer_type):
num_flatten = len([layer for layer in layer_pointers if 'flatten' in layer[0]])
layer_pointers[('flatten'+str(num_flatten+1),layer_counter)] = layer_indices
layer_counter += 1
return(layer_pointers,layer_counter)
elif 'Identity' in str(layer_type):
num_identity = len([layer for layer in layer_pointers if 'identity' in layer[0]])
layer_pointers[('identity'+str(num_identity+1),layer_counter)] = layer_indices
layer_counter += 1
return(layer_pointers,layer_counter)
else:
return(layer_pointers,layer_counter)
def fetch_layers(model):
'''
Takes in a CNN model, and returns "addresses" of all the layers to refer to them easily; this is useful since different CNN models can be structured in different ways, with various chunks, layers, sublayers, etc.
args:
model: a PyTorch CNN model. Currently, at least AlexNet, VGG19, ResNet-50, GoogLeNet, and CORNet-S are supported; no guarantee yet that others will work.
returns:
ordered dictionary where each key is a layer label of format ('relu2',3) (that is, the second relu layer and third layer overall), and each value is the set of indices needed to refer to that layer in the model. The index_nested function can then use those indices to refer to a layer of the model when needed.
Examples:
>>> import torchvision.models as models
>>> alexnet = models.alexnet()
>>> fetch_layers(alexnet)
OrderedDict([(('conv1', 1), ['features', 0]), (('relu1', 2), ['features', 1]), (('maxpool1', 3), ['features', 2]), (('conv2', 4), ['features', 3]), (('relu2', 5), ['features', 4]), (('maxpool2', 6), ['features', 5]), (('conv3', 7), ['features', 6]), (('relu3', 8), ['features', 7]), (('conv4', 9), ['features', 8]), (('relu4', 10), ['features', 9]), (('conv5', 11), ['features', 10]), (('relu5', 12), ['features', 11]), (('maxpool3', 13), ['features', 12]), (('avgpool1', 14), ['avgpool']), (('dropout1', 15), ['classifier', 0]), (('fc1', 16), ['classifier', 1]), (('relu6', 17), ['classifier', 2]), (('dropout2', 18), ['classifier', 3]), (('fc2', 19), ['classifier', 4]), (('relu7', 20), ['classifier', 5]), (('fc3', 21), ['classifier', 6])])
'''
layer_pointers = OrderedDict()
layer_counter = 1
for macro_layer in model._modules:
layer_pointers,layer_counter = fetch_layers_internal(model._modules[macro_layer],layer_pointers,[macro_layer],layer_counter)
return layer_pointers
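# Illustrative sketch: fetch_layers and index_nested are meant to be used together, first mapping
# out the layer "addresses" and then pulling out the actual layer object for one of them.
def _example_fetch_layer_sketch():
    import torchvision.models as tvmodels
    alexnet = tvmodels.alexnet()
    layer_pointers = fetch_layers(alexnet)
    first_label = list(layer_pointers.keys())[0]  # e.g. ('conv1', 1)
    layer = index_nested(alexnet._modules, layer_pointers[first_label])
    return first_label, layer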
def prepare_models(models_dict):
'''
Prepares the models you want to use: loads them with specified weight settings, and registers the forward hooks that allow you to save intermediate activations.
args:
models_dict:
Dictionary specifying the models to prepare, and the weight settings to use. Each key is your internal name you wish to use for your model. Each value is a (base_model,weight_setting), where base_model is the model to use (e.g., "alexnet"), and weight_setting is "trained" for the trained version of the network, "random" for an untrained version of the model with random weights, or a URL linking to the state_dict for the weights to use if you wish to use some custom setting of the weights.
returns:
A dictionary where each key is your internal name of the model, and each value is the model itself.
'''
models_prepped = OrderedDict()
for model_name in models_dict:
base_model,weights_url = models_dict[model_name]
models_prepped[model_name] = prepare_model(base_model,weights_url)
return models_prepped
def prepare_model(which_model,weights_opt='trained'):
'''
Prepares a single model to use: loads them with specified weight settings, and registers the forward hooks that allow you to save intermediate activations. Better to use the prepare_models function, since the output format is assumed by some of the other functions in this library.
args:
which_model: Base model to use (e.g., "alexnet")
weights_opt: Which weights to use. Set to "trained" for trained version of the network, "random" for random weights, or a url linking to the state_dict for the weights if you wish to use some custom setting of the weights.
returns:
The prepared model.
'''
if weights_opt == 'random':
pretrain_opt = False
else:
pretrain_opt = True
if which_model=='googlenet':
if pretrain_opt==True:
model = torch.hub.load('pytorch/vision', 'googlenet', pretrained=pretrain_opt)
else:
model = torch.hub.load('pytorch/vision', 'googlenet', pretrained=pretrain_opt,aux_logits=False)
state_dict = model.state_dict()
state_dict['fc.bias'] = state_dict['fc.bias']*0
model.load_state_dict(state_dict)
elif which_model=='cornet_z':
model = cornet.cornet_z(pretrained=pretrain_opt,map_location='cpu')
elif which_model=='cornet_s':
model = cornet.cornet_s(pretrained=pretrain_opt,map_location='cpu')
else:
model = getattr(models,which_model)(pretrained=pretrain_opt)
model.eval()
if (weights_opt != 'trained') and (weights_opt != 'random'):
checkpoint = model_zoo.load_url(weights_opt,map_location='cpu')
if 'module' in list(checkpoint['state_dict'].keys())[0]:
new_checkpoint_state_dict = OrderedDict()
for key in checkpoint['state_dict']:
new_key = key[7:]
new_checkpoint_state_dict[new_key] = checkpoint['state_dict'][key]
else:
new_checkpoint_state_dict = checkpoint['state_dict']
model.load_state_dict(new_checkpoint_state_dict,strict=True)
# Get the pointers to the layers of interest:
layer_pointers = fetch_layers(model)
# Now, register forward hooks to all these layers:
for layer_name in layer_pointers:
layer_index = layer_pointers[layer_name]
layer = index_nested(model._modules,layer_index)
layer.register_forward_hook(get_layer_activation)
layer.layer_name = layer_name
return model
def get_model_activations_for_image(fname,model):
'''
Takes in the file path of an image, and a prepared model (use the prepare_model or prepare_models function), runs the image through the model, and returns a dictionary containing the model activations in each layer.
args:
fname: file path of the image to run through a model
model: a CNN model object that has had the forward hooks attached to save the intermediate activation (use the prepare_model or prepare_models function to do this)
returns:
Dictionary where each key is a tuple denoting the layer (e.g., (conv3,5) is the third convolutional layer and the fifth layer overall), and each value is a numpy array of the layer activations.
'''
image = Image.open(fname).convert('RGB')
global layer_activations
layer_activations = OrderedDict()
preprocessed_image = preprocess_image(image)
model.forward(preprocessed_image)
return layer_activations
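# Illustrative sketch of the intended pipeline: prepare a hooked model, then pull per-layer
# activations for one image ('example.jpg' is a hypothetical path).
def _example_activation_pipeline_sketch():
    models_prepped = prepare_models({'alexnet': ('alexnet', 'trained')})
    activations = get_model_activations_for_image('example.jpg', models_prepped['alexnet'])
    for layer_label, activation in activations.items():
        print(layer_label, activation.shape)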
###################################################
############# DEALING WITH MATRICES ###############
###################################################
def get_upper_triang(matrix):
'''
Utility function that pulls out the upper triangular values of a matrix and returns them as a 1D vector.
args:
matrix: a 2D square numpy array
returns:
1D numpy vector with the upper triangular elements.
'''
n = matrix.shape[0]
inds = np.triu_indices(n,1)
vals = matrix[inds]
return vals
def correlate_upper_triang(m1,m2,func=lambda x,y:np.corrcoef(x,y)[0,1]):
'''
Utility function that correlates the upper triangular values of two matrices.
args:
m1,m2: the two matrices in question
func: the function to use; defaults to Pearson correlation, put in your own function (e.g., Spearman) if desired.
returns:
Correlation coefficient between the upper triangular values of the two matrices.
'''
out_val = func(get_upper_triang(m1),get_upper_triang(m2))
return out_val
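# Illustrative sketch: comparing two (symmetric) RDMs with the default Pearson metric and with a
# Spearman variant passed in via func. Assumes numpy (np) and scipy.stats (stats) are imported at
# the top of this module, as they are used elsewhere in this file.
def _example_compare_rdms_sketch():
    rdm_a = np.random.rand(10, 10); rdm_a = (rdm_a + rdm_a.T) / 2
    rdm_b = np.random.rand(10, 10); rdm_b = (rdm_b + rdm_b.T) / 2
    pearson_r = correlate_upper_triang(rdm_a, rdm_b)
    spearman_r = correlate_upper_triang(rdm_a, rdm_b, func=lambda x, y: stats.spearmanr(x, y)[0])
    return pearson_r, spearman_r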
def get_array_ranks(array):
'''
Put in array, spits out the same array with the entries replaced by their ranks after being sorted (in ascending order). Ties are sorted arbitrarily.
args:
array: Numpy array of any size.
returns:
Array where each element corresponds to the rank of the element after sorting (e.g., the smallest element in the original array has value 0 in the returned array, the seconds-smallest have value 1, and so on)
'''
return array.ravel().argsort().argsort().reshape(array.shape)
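# For example, get_array_ranks(np.array([[10, 30], [20, 5]])) gives array([[1, 3], [2, 0]]):
# 5 is the smallest value (rank 0), then 10 (rank 1), 20 (rank 2) and 30 (rank 3).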
def get_extreme_dissims_total(input_rdm,num_items='all'):
'''
Takes in a representational dissimilarity matrix and a desired number of items, and returns a subsetted matrix with the items that maximize the total pairwise dissimilarity among all items (more colloquially, the items in the original matrix that are as "different as possible" from each other)
args:
input_rdm: representational dissimilarity matrix you wish to draw from
num_items: how many items to draw from input_rdm
returns:
output_rdm: the subsetted representational similarity matrix with the maximally dissimilar items
inds_to_extract: the indices of the items from the original matrix that ended up being used
'''
if num_items=='all':
num_items = input_rdm.shape[0]
current_rdm = copy.copy(input_rdm)
inds_to_extract = []
remaining_inds = list(range(input_rdm.shape[0]))
init_pair = list(np.argwhere(current_rdm==np.nanmax(current_rdm))[0])
inds_to_extract = copy.copy(init_pair)
for item in range(2,num_items):
sub_matrix = current_rdm[np.sort(inds_to_extract),:].sum(axis=0)
max_ind = 'NA'
max_ind_val = -np.infty
for i in range(len(sub_matrix)):
if i in inds_to_extract:
continue
if sub_matrix[i]>max_ind_val:
max_ind = i
max_ind_val = sub_matrix[i]
if (max_ind_val == -np.infty) or (np.isnan(max_ind_val)):
break
inds_to_extract.append(max_ind)
output_rdm = current_rdm[np.ix_(inds_to_extract,inds_to_extract)]
return output_rdm,inds_to_extract
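# Illustrative sketch: pull the 4 mutually most-dissimilar items out of a random symmetric RDM.
def _example_extreme_dissims_sketch():
    rdm = np.random.rand(10, 10)
    rdm = (rdm + rdm.T) / 2
    np.fill_diagonal(rdm, 0)
    sub_rdm, used_inds = get_extreme_dissims_total(rdm, num_items=4)
    return sub_rdm, used_inds  # 4x4 sub-matrix and the indices of the chosen items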
def get_dissims_that_match(input_rdm,target_val,num_items_final='all'):
'''
Takes in a representational dissimilarity matrix and a desired number of items, and returns a subsetted matrix with the items with a mean pairwise similarity that is as close as possible to a target value (e.g., a set of items with a mean pairwise similarity that is as close as possible to .7)
args:
input_rdm: representational dissimilarity matrix you wish to draw from
target_val: the target similarity value for the output RDM
num_items_final: how many items to draw from input_rdm
returns:
output_rdm: The subsetted representational similarity matrix
inds_to_extract: The indices of the items from the original matrix that ended up being used
mean_dissim: The actual mean dissimilarity (which will hopefully be close to the target value!)
'''
if num_items_final=='all':
num_items_final = input_rdm.shape[0]
current_rdm = copy.copy(input_rdm)
inds_to_extract = []
num_items_init = input_rdm.shape[0]
remaining_inds = list(range(num_items_init))
init_pair = list(np.argwhere(abs(current_rdm-target_val)==np.nanmin(abs(current_rdm-target_val)))[0])
inds_to_extract = copy.copy(init_pair)
current_rdm = input_rdm[np.ix_(inds_to_extract,inds_to_extract)]
for item_count in range(2,num_items_final):
min_ind = 'NA'
min_ind_val = np.infty
# Test every item in the array, look at how close the mean similarity is to the desired value.
for test_item in range(num_items_init):
if test_item in inds_to_extract:
continue
test_matrix_inds = inds_to_extract + [test_item]
test_matrix = input_rdm[np.ix_(test_matrix_inds,test_matrix_inds)]
test_matrix_vals = test_matrix[np.triu_indices(test_matrix.shape[0],k=1)].flatten()
total_diff = abs(np.mean(test_matrix_vals)-target_val)
if total_diff<min_ind_val:
min_ind_val = total_diff
min_ind = test_item
inds_to_extract.append(min_ind)
current_rdm = input_rdm[np.ix_(inds_to_extract,inds_to_extract)]
mean_dissim = np.mean(current_rdm[np.triu_indices(current_rdm.shape[0],k=1)])
return current_rdm,inds_to_extract,mean_dissim
def get_most_uniform_dissims(input_rdm,num_items_final):
'''
Takes in a representational dissimilarity matrix and a desired number of items, and returns a subsetted matrix with the items that produces a maximally UNIFORM range of similarities (some very similar, some very dissimilar).
args:
input_rdm: representational dissimilarity matrix you wish to draw from
num_items: how many items to draw from input_rdm
returns:
output_rdm: the subsetted representational similarity matrix with the maximally uniform range of dissimilarities
inds_to_extract: the indices of the items from the original matrix that ended up being used
'''
if num_items_final=='all':
num_items_final = input_rdm.shape[0]
current_rdm = copy.copy(input_rdm)
inds_to_extract = []
num_items_init = input_rdm.shape[0]
remaining_inds = list(range(num_items_init))
init_pair = list(np.argwhere(current_rdm==current_rdm.max())[0])
inds_to_extract = copy.copy(init_pair)
current_rdm = input_rdm[np.ix_(inds_to_extract,inds_to_extract)]
for item_count in range(2,num_items_final):
max_ind = 'NA'
max_ind_val = -1 # initialize p-value, you want the GREATEST possible p-value (most uniform)
# Test every item in the array, look at the uniformity of the resulting distribution if you add it in.
for test_item in range(num_items_init):
if test_item in inds_to_extract:
continue
test_matrix_inds = inds_to_extract + [test_item]
test_matrix = input_rdm[np.ix_(test_matrix_inds,test_matrix_inds)]
test_matrix_vals = test_matrix[np.triu_indices(test_matrix.shape[0],k=1)].flatten()
trash,p = stats.kstest(test_matrix_vals,stats.uniform(loc=0, scale=1).cdf)
if p>max_ind_val:
max_ind_val = p
max_ind = test_item
inds_to_extract.append(max_ind)
current_rdm = input_rdm[np.ix_(inds_to_extract,inds_to_extract)]
return current_rdm,inds_to_extract
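# Illustrative sketch: subset a random symmetric RDM to 5 items whose pairwise dissimilarities are
# (a) as uniformly spread as possible, and (b) as close as possible to a target mean of 0.5.
def _example_subset_dissims_sketch():
    rdm = np.random.rand(20, 20)
    rdm = (rdm + rdm.T) / 2
    np.fill_diagonal(rdm, 0)
    uniform_rdm, uniform_inds = get_most_uniform_dissims(rdm, 5)
    matched_rdm, matched_inds, mean_dissim = get_dissims_that_match(rdm, 0.5, num_items_final=5)
    return uniform_inds, matched_inds, mean_dissim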
###################################################
############# INPUT AND OUTPUT ####################
###################################################
def create_or_append_csv(df,path):
'''
Utility function that creates a CSV from a pandas dataframe if no CSV with the given filepath exists; else, if the CSV already exists, appends the contents of the pandas dataframe to the CSV at that filepath.
args:
df: A pandas dataframe
path: The desired file path
returns:
Nothing
'''
if not os.path.exists(path):
df.to_csv(path)
else:
df.to_csv(path,mode='a',header=False)
def combine_csvs(path,keys=[]):
'''
Utility function that takes in a list of filenames, and loads them all and combines them into a single pandas dataframe. Alternatively, can take in a path for a directory, and it'll combine all the dataframes in that directory into one. Can also specify "keys": requires substrings of the CSV filename for them to be included in the new CSV (e.g., "cat" if you only want the CSVs with "cat" in the filename)
args:
path: Either a list of filepaths for the CSVs you want to combine, or the path of a directory with the desired CSVs.
keys: List of substrings that must appear in a CSV's filename for it to be included.
returns:
Pandas dataframe consisting of all the CSVs combined.
'''
if type(path)==str:
filepaths = os.listdir(path)
filepaths = [opj(path,f) for f in filepaths if ('.csv' in f) or ('.p' in f)]
else:
filepaths = path
if len(keys)>0:
filepaths = [f for f in filepaths if any([key in f for key in keys])]
df_list = []
for f in filepaths:
if '.csv' in f:
df_list.append(pd.read_csv(f,index_col=0))
elif '.p' in f:
df_list.append(pickle.load(open(f,'rb')))
out_df = pd.concat(df_list)
return(out_df)
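# Illustrative sketch (hypothetical file paths): append per-run results to a CSV as they come in,
# then combine every CSV in the results directory whose filename contains 'run' into one dataframe.
def _example_csv_io_sketch():
    df = pd.DataFrame({'model': ['alexnet'], 'score': [0.9]})
    create_or_append_csv(df, 'results/run1.csv')
    combined = combine_csvs('results', keys=['run'])
    return combined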
###################################################
############# DATAFRAME OPERATIONS ################
###################################################
def df_inds2cols(df):
"""
Utility function to convert all indices in a dataframe to columns, so they can be handled in a more uniform way.
Args:
df: dataframe whose indices you want to convert
Returns:
Dataframe that now has columns where there were once indices.
"""
# make it a dataframe if not already:
if isinstance(df,pd.core.series.Series):
df = df.to_frame()
num_inds = len(df.index.names)
for i in range(0,num_inds):
df.reset_index(level=0,inplace=True)
# This operation reverses the column order, so need to reverse it back:
cols = df.columns.tolist()
cols = cols[num_inds-1::-1] + cols[num_inds:]
df = df[cols]
return df
def df_agg(df,group_vars,data_vars,agg_funcs):
'''
Utility function to easily aggregate values in a Pandas dataframe (e.g., if you want to take the mean and standard deviation within a bunch of subgroups). There's a built-in Pandas function that does this, but this one is more flexible and keeps the new column names as strings rather than tuples, and also doesn't convert things to indices rather than columns.
Args:
df: the dataframe whose data you want to aggregate
group_vars: list of variables you want to aggregate across (e.g., "city" and "gender" to create group averages across combinations of city and gender, collapsing across other variables). Put 'rem' if you want the group_vars to be all variables EXCEPT the specified data_vars.
data_vars: the actual values you want to aggregate (e.g., height if you want the average height). Put 'rem' if you want the data_vars to be all variables EXCEPT the specified group_vars.
agg_funcs: list of functions you want to use for aggregating
Returns:
Dataframe where the data_vars have been aggregated with the agg_funcs within each value of the group_vars.
Examples:
>>> import pandas as pd
>>> df = pd.DataFrame.from_dict({'city':['New York','New York','New York','New York',
'Boston','Boston','Boston','Boston'],
'gender':['Male','Male','Female','Female','Male','Male','Female','Female'],
'height':[70,72,66,65,69,73,64,63],
'income':[50000,100000,80000,150000,120000,90000,70000,110000]})
>>> pfa.df_agg(df,['city','gender'],['height','income'],[np.mean,np.std])
city gender height_mean height_std income_mean income_std
0 Boston Female 63.5 0.707107 90000 28284.271247
1 Boston Male 71.0 2.828427 105000 21213.203436
2 New York Female 65.5 0.707107 115000 49497.474683
3 New York Male 71.0 1.414214 75000 35355.339059
'''
if data_vars == 'rem':
data_vars = []
col_names = df.columns.values
for col in col_names:
if not col in group_vars:
data_vars.append(col)
if group_vars == 'rem':
group_vars = []
col_names = df.columns.values
for col in col_names:
if not col in data_vars:
group_vars.append(col)
groups = df.groupby(group_vars)
agg_df = groups[data_vars].agg(agg_funcs)
if type(agg_funcs)==list:
agg_df.columns = agg_df.columns.map('_'.join)
agg_df = df_inds2cols(agg_df)
return agg_df
def df_subset(df,subset_list):
'''
Takes in a Pandas dataframe and subsets it to the desired values of specified columns.
Args:
df: the dataframe you wish to subset
subset_list: a list of tuples specifying how to subset; each tuple is (column_name,vals_to_use),
where column_name is the name of the column, and vals_to_use is a list of which values
of that column to keep (or can be just a single value if you only want one)
Returns:
Dataframe that is subsetted in the desired way.
Examples:
>>> import pandas as pd
>>> df = pd.DataFrame.from_dict({'city':['New York','New York','Seattle','Seattle',
'Seattle','Boston','Boston','Boston'],
'gender':['Male','Male','Female','Female','Male','Male','Female','Female'],
'height':[70,72,66,65,69,73,64,63],
'income':[50000,100000,80000,150000,120000,90000,70000,110000]})
>>> pfa.df_subset(df,[('city',['New York','Seattle']),
('gender','Male')])
city gender height income
0 New York Male 70 50000
1 New York Male 72 100000
4 Seattle Male 69 120000
'''
for col in subset_list:
if type(col)==tuple:
if type(col[1])==list:
df = df[df[col[0]].isin(col[1])]
else:
df = df[df[col[0]]==col[1]]
elif type(col)==str:
var_value = eval(f"{col}")
if type(var_value)==list:
df = df[df[col].isin(var_value)]
else:
df = df[df[col]==var_value]
return df
def df_filtsort(df,sortlist):
'''
Takes in a Pandas dataframe, and both filters and sorts it based on the values of specified columns.
Args:
df: The dataframe you wish to filter and sort
sortlist: A list of tuples, where each tuple is of format (column_name,sort_vals); column_name is the name of the column to sort by, sort_vals is the desired order of the values in that column. Columns will be sorted by priority based on how they are listed.
Returns:
Dataframe that is subsetted and sorted in the desired way.
Examples
>>> import pandas as pd
>>> df = pd.DataFrame.from_dict({'city':['New York','New York','New York','Seattle',
'Seattle','Seattle','Boston','Boston','Boston'],
'gender':['Male','Male','Female','Female','Female','Male','Male','Female','Female'],
'height':[70,72,65,66,65,69,73,64,63],
'income':[50000,100000,120000,80000,150000,120000,90000,70000,110000]})
>>> pfa.df_filtsort(df,[('city',['Seattle','New York']),('gender',['Female','Male'])])
city gender height income
3 Seattle Female 66 80000
4 Seattle Female 65 150000
5 Seattle Male 69 120000
2 New York Female 65 120000
0 New York Male 70 50000
1 New York Male 72 100000
'''
# filter it first
if type(sortlist)==tuple:
sortlist = [sortlist]
for (sortcol,sort_order) in sortlist:
df = df[df[sortcol].isin(sort_order)]
dummy_col_names = []
for ind,(sortcol,sort_order) in enumerate(sortlist):
recode_dict = {name:num for num,name in enumerate(sort_order)}
df.loc[:,'dummycol'+str(ind)] = df[sortcol].replace(recode_dict)
dummy_col_names.append('dummycol'+str(ind))
df = df.sort_values(by=dummy_col_names)
df = df.drop(dummy_col_names,axis=1)
return df
def df_pivot(df,index_cols,var_cols,value_cols,aggfunc='mean'):
'''
Functionally identical to the default Pandas pivot function, but allows you to have multiple values columns, and automatically converts indices to columns.
Args:
df: the df to turn into a pivot table
index_cols: list of column names to keep as index columns
var_cols: list of columns to pivot
value_cols: list of value columns
aggfunc: how to aggregate values when there are multiple values per cell
Returns:
Dataframe converted to pivot table.
Example:
>>> import pandas as pd
>>> df = pd.DataFrame.from_dict({'city':['New York','New York','New York','Seattle',
'Seattle','Seattle','Boston','Boston','Boston'],
'gender':['Male','Male','Female','Female','Female','Male','Male','Female','Female'],
'height':[70,72,65,66,65,69,73,64,63],
'income':[50000,100000,120000,80000,150000,120000,90000,70000,110000]})
>>> pfa.df_pivot(df,['city'],['gender'],['height','income'])
city Female_height Male_height Female_income Male_income
0 Boston 63.5 73.0 90000 90000
1 New York 65.0 71.0 120000 75000
2 Seattle 65.5 69.0 115000 120000
'''
if type(value_cols) != list:
value_cols = [value_cols]
if type(var_cols) != list:
var_cols = [var_cols]
if type(index_cols) != list:
index_cols = [index_cols]
sub_dfs = []
for value_col in value_cols:
for var_col in var_cols:
new_df = df.pivot_table(value_col,index_cols,var_cols,aggfunc)
if len(value_cols)>1:
colname_dict = {val:val+'_'+value_col for val in list(df[var_col].unique())}
new_df = new_df.rename(columns=colname_dict)
new_df = new_df.reset_index()
new_df.columns.name = ''
sub_dfs.append(new_df)
# Now merge all the dfs together. Start with the first one, then tack on columns from the others.
out_df = sub_dfs[0]
for ind in range(1,len(sub_dfs)):
new_cols = [col for col in sub_dfs[ind].columns.values if col not in out_df.columns.values]
for new_col in new_cols:
out_df[new_col] = sub_dfs[ind][new_col]
out_df = out_df.reset_index(drop=True)
return out_df
###########################################
################# RSA #####################
###########################################
def package_image_sets(image_list,entry_vars,grouping_vars):
'''
This is a convenience function for preparing image sets for representational similarity analysis. The use case is that you want to make multiple RDMs, where certain variables vary WITHIN the entries of each RDM (entry_vars), and some variables vary BETWEEN RDMs (grouping_vars). So, you give it a list of images, specifying the entry_vars and grouping_vars for each image, and this function returns a stack of image sets that can then be fed into the get_rdms function.
Args:
image_list: a list of dictionaries, where each dictionary contains information about one image.
Each dictionary must AT LEAST contain a key called "path", whose value is the file path
of the image. The other keys and values correspond to the other variables and values
associated with the image.
entry_vars: list of variable names that vary across the entries of each RDM.
grouping_vars: list of variable names that vary BETWEEN RDMs.
Returns:
List of image sets, where each image set is intended to be turned into an RDM;
each image set is represented by a tuple that contains:
(
Dictionary of grouping variables for that image set
Dictionary of images for that image set; each key is the value of an entry_var, each value is the
path for that image
Tuple of the names of the entry variables
Blank list (which can specify ways of combining activation patterns from images if desired)
)
This output structure can be directly fed into the get_rdms function to make an RDM for each
image set.
Examples:
>>> image_list = [{'color':'red','shape':'square','path':'red_square.jpg'},
{'color':'blue','shape':'square','path':'blue_square.jpg'},
{'color':'red','shape':'circle','path':'red_circle.jpg'},
{'color':'blue','shape':'circle','path':'blue_square.jpg'}]
>>> pfa.package_image_sets(image_list,['shape'],['color'])
[(OrderedDict([('color', 'red')]),
OrderedDict([(('square',), 'red_square.jpg'),
(('circle',), 'red_circle.jpg')]),
('shape',),
[]),
(OrderedDict([('color', 'blue')]),
OrderedDict([(('square',), 'blue_square.jpg'),
(('circle',), 'blue_square.jpg')]),
('shape',),
[])]
Each of the top-level tuples is an image set.
'''
sample_image = image_list[0]
image_keys = list(sample_image.keys())
if 'path' not in image_keys:
raise Exception("Need to specify the image path!")
image_keys = [key for key in image_keys if key!='path']
if len(image_keys) != len(entry_vars)+len(grouping_vars):
raise Exception("Make sure the image variables match the entry/grouping variables.")
image_sets = []
for image in image_list:
# Check that the image set exists:
image_set_dict = OrderedDict()
for key in image:
if key in grouping_vars:
image_set_dict[key] = image[key]
image_set_exists = False
for i,image_set in enumerate(image_sets):
if image_set[0]==image_set_dict:
image_set_exists=True
which_image_set = i
if not image_set_exists:
new_image_set = []
new_image_set.append(image_set_dict)
new_image_set.append(OrderedDict())
new_image_set.append(tuple(entry_vars))
new_image_set.append([])
new_image_set = tuple(new_image_set)
image_sets.append(new_image_set)
which_image_set = -1
image_key = []
for key in image:
if key in entry_vars:
image_key.append(image[key])
image_key = tuple(image_key)
image_sets[which_image_set][1][image_key] = image['path']
return(image_sets)
def get_image_set_rdm(image_set,
model_name,
models_dict,
out_fname,
rdm_name='rdm_default_name',
which_layers={},
dissim_metrics=['corr'],
kernel_sets=('all','all'),
num_perms=0,
append=True,
verbose=False,
debug_check=False):
'''
Internal helper function for the get_rdms function in order to enable parallel processing. See get_rdms function for full documentation.
'''
image_set_labels,images,entry_types,combination_list = image_set
if append and os.path.exists(out_fname):
append = True
existing_df = ut.pickle_read(out_fname)
subset_opts = [(var,image_set_labels[var]) for var in image_set_labels]
existing_df = ut.df_subset(existing_df,[('model',model_name)])
existing_df = ut.df_subset(existing_df,subset_opts)
else:
append = False
print(f"{','.join(list(image_set_labels.values()))}")
image_names = list(images.keys())
combination_names = [comb[0] for comb in combination_list]
entry_names = image_names + [comb[0] for comb in combination_list] # labels of the rows and columns
# If you want to do different color spaces, fill this in someday. Fill in funcs that take in RGB and spit out desired color space.
color_space_funcs = {}
color_space_list = []
rdms_dict = defaultdict(lambda:[])
perm_list = ['orig_data'] + list(range(1,num_perms+1))
model = models_dict[model_name]
print(f"\tModel {model_name}")
# Get layer activations for images.
print("\t\tComputing image activations...")
obj_activations = OrderedDict()
for image_name in images:
if verbose:
print(image_name)
image = images[image_name]
if type(image)==str: # in case it's a file path.
image = Image.open(image)
preprocessed_image = preprocess_image(image)
obj_activations[image_name] = OrderedDict()
obj_activations[image_name][('original_rgb',0),'unit_level'] = np.array(preprocessed_image).squeeze()
obj_activations[image_name][('original_rgb',0),'feature_means'] = np.array(preprocessed_image).squeeze().mean(axis=(1,2))
# If you want to include different color spaces.
for cs in color_space_list:
converted_image = copy.copy(preprocessed_image)
for i,j in it.product(range(converted_image.shape[0]),range(converted_image.shape[1])):
converted_image[i,j,:] = color_space_funcs[cs](converted_image[i,j,:])
obj_activations[image_name][f'original_{cs}','with_space'] = preprocessed_image.squeeze()
obj_activations[image_name][f'original_{cs}','no_space'] = preprocessed_image.squeeze().mean(axis=(0,1))
global layer_activations
layer_activations = OrderedDict()
model.forward(preprocessed_image)
for layer in layer_activations:
layer_dim = len(layer_activations[layer].squeeze().shape)
if layer_dim == 1:
obj_activations[image_name][layer,'unit_level'] = layer_activations[layer].squeeze()
obj_activations[image_name][layer,'feature_means'] = layer_activations[layer].squeeze()
else:
obj_activations[image_name][layer,'unit_level'] = layer_activations[layer].squeeze()
obj_activations[image_name][layer,'feature_means'] = layer_activations[layer].squeeze().mean(axis=(1,2))
# If you want to combine the patterns in any way.
for comb_name,comb_stim,comb_func,comb_func_args,unpack_args in combination_list:
obj_activations[comb_name] = OrderedDict()
# For different layers and activation types.
keys = list(obj_activations[comb_stim[0]].keys())
for key in keys:
stim_activations = [obj_activations[stim][key] for stim in comb_stim]
if unpack_args:
obj_activations[comb_name][key] = comb_func(*stim_activations,**comb_func_args)
else:
obj_activations[comb_name][key] = comb_func(stim_activations,**comb_func_args)
# Now make the RDM.
print("Making RDMs...")
layer_list = [('original_rgb',0)] + list(layer_activations.keys())
for layer,dm in it.product(layer_list,dissim_metrics):
layer_name = layer[0]
# Check if the layer is one of the specified layers. If it's a model with only specified layers, check
# if the current layer is in it or not.
if model_name in which_layers:
model_layer_list = which_layers[model_name]
if (layer not in model_layer_list) and (layer_name not in model_layer_list) and (layer_name != 'original_rgb'):
continue
print(f"\n\t{layer},{dm}")
activation_types = ['unit_level','feature_means']
if dm in dissim_defs:
dissim_func = dissim_defs[dm]
elif type(dm)==tuple:
dissim_func = dm[1]
dm = dm[0]
for activation_type,ks,which_perm in it.product(activation_types,kernel_sets,perm_list):
print(f"\t\tKernel set {ks[0]}, activation {activation_type}, perm {which_perm}")
ks_name,ks_dict = ks
perm_scramble = list(range(len(entry_names)))
if which_perm != 'orig_data':
random.shuffle(perm_scramble)
perm_dict = {i:perm_scramble[i] for i in range(len(entry_names))}
if (layer,activation_type) not in obj_activations[list(obj_activations.keys())[0]]:
continue
if append:
subset_opts = [('model',model_name),('layer_label',layer),('activation_type',activation_type),('kernel_set_name',ks_name),('dissim_metric',dm),('perm',which_perm)]
for label in image_set_labels:
subset_opts.append((label,image_set_labels[label]))
df_subset = ut.df_subset(existing_df,subset_opts)
if len(df_subset)>0:
print("\t\t\t Already done, skipping...")
continue
sample_image = obj_activations[list(obj_activations.keys())[0]][layer,activation_type]
if ks_dict == 'all':
kernel_inds = list(range(sample_image.shape[0]))
elif (model_name,layer_name) in ks_dict:
kernel_inds = ks_dict[(model_name,layer_name)]
else:
print(f"*****Kernels not specified for {model_name},{layer}; using all kernels.*****")
kernel_inds = list(range(sample_image.shape[0]))
rdm = np.empty((len(entry_names),len(entry_names))) * np.nan
num_pairs = len(entry_names)*len(entry_names)/2
pair_num = 0
next_percent = 0
for (i1,im1) in enumerate(entry_names):
for (i2,im2) in enumerate(entry_names):
if i2<i1:
continue
if verbose:
pair_num += 1
if (pair_num/num_pairs)*100 > next_percent:
print(f"{next_percent}%",end=' ',flush=True)
next_percent=next_percent+1
ind1 = perm_dict[i1]
ind2 = perm_dict[i2]
ind1_im = entry_names[ind1]
ind2_im = entry_names[ind2]
pattern1 = obj_activations[ind1_im][layer,activation_type]
pattern2 = obj_activations[ind2_im][layer,activation_type]
pattern1_final = pattern1[kernel_inds,...].flatten().astype(float)
pattern2_final = pattern2[kernel_inds,...].flatten().astype(float)
dissim = dissim_func(pattern1_final,pattern2_final)
rdm[i1,i2] = dissim
rdm[i2,i1] = dissim
trash,dissim_rankings = get_extreme_dissims_total(rdm,'all')
rdms_dict['df_name'].append(rdm_name)
rdms_dict['model'].append(model_name)
rdms_dict['layer'].append(layer_name)
rdms_dict['layer_num'].append(layer[1])
rdms_dict['layer_label'].append(layer)
rdms_dict['dissim_metric'].append(dm)
rdms_dict['activation_type'].append(activation_type)
rdms_dict['matrix'].append(rdm)
rdms_dict['entry_keys'].append(tuple(entry_names))
rdms_dict['entry_labels'].append(tuple([{entry_types[i]:entry[i] for i in range(len(entry))} for entry in entry_names]))
rdms_dict['kernel_set_name'].append(ks_name)
rdms_dict['kernel_inds'].append(tuple(kernel_inds))
rdms_dict['perm'].append(which_perm)
rdms_dict['perm_scramble'].append(perm_scramble)
rdms_dict['dissim_rankings'].append(tuple(dissim_rankings))
for label in image_set_labels:
rdms_dict[label].append(image_set_labels[label])
if debug_check:
print(pd.DataFrame.from_dict(rdms_dict))
ipdb.set_trace()
debug_check=False # Only check the first one, then continue.
del(obj_activations)
rdms_df = pd.DataFrame.from_dict(rdms_dict)
if append:
rdms_df = pd.concat([rdms_df,existing_df])
return rdms_df
def get_rdms(image_sets,
models_dict,
out_fname,
rdm_name='default_name',
which_layers={},
dissim_metrics=['corr'],
kernel_sets=[('all','all')],
num_perms = 0,
num_cores = 'na',
verbose=False,
debug_check=False,
append=True):
'''
This is the core function of this package: it takes in several (possibly many) image sets, creates an RDM for each one, and stores this stack of RDMs as a Pandas dataframe, which can then be easily fed into other functions to perform higher-order RSA (e.g., to compare RDMs across layers and models), or to visualize the results. The basic idea is that your images can vary according to multiple variables; some of these will vary within an RDM (entry_vars), and some will vary between RDMs (grouping_vars). The only work involved is preparing the inputs to this function; a minimal example is shown at the end of this documentation, and also in the accompanying readme.txt in the GitHub repo.
Args:
image_sets: A list of image sets; an RDM will be computed separately for each one.
Each image_set is a tuple of the format:
(
Dictionary of grouping variables for that image set
Dictionary of images for that image set; each key is the value of an entry_var, each value is the
path for that image
Tuple of the names of the entry variables
Blank list (which can specify ways of combining activation patterns from images if desired)
)
models_dict: a dictionary of models to use, where each key is your name for the model,
and each value is the model itself.
out_fname: the file path to save the dataframe at, stored in Pickle format (so .p suffix)
rdm_name: the name to give to the RDM
which_layers: a dictionary specifying which layers from each model to use
(e.g., {'alexnet':['conv1','fc3']}); if a model is not specified, all layers are assumed
dissim_metrics: a list of which dissimilarity metrics to use for the RDMs. Put 'corr' for 1-correlation; put 'euclidean_distance' for euclidean distance. If you wish to use a different one, have that entry in the list be of format (dissim_func_name,dissim_func), such as ('spearman',stats.spearmanr)
kernel_sets: If you wish to only compute the RDMs over a subset of the kernels, specify it here. This will be a list of tuples. Each tuple is of the form (kernel_set_name,kernel_set_dict). Kernel_set_name is the name of the kernel set. kernel_set_dict is a dictionary specifying which kernels to select from each layer; each key is of format (model,layer_name), and each value is the list of kernel indices to use (so, {('alexnet','conv1'):[1,5,7]}). By default, all kernels are used and this can be ignored.
num_perms:
If you wish to do a permutation test where you shuffle the labels of your RDM entries multiple times, specify how many permutations to do here.
num_cores:
If you wish to use parallel processing, specify how many cores here; put 'na' if you don't want to process in parallel
verbose:
If you want it to give you the progress of each RDM as it's computed, put True, else put False.
debug_check:
Put True if you want to stop and debug with ipdb after each RDM is computed,
put False otherwise.
append:
If you want to append the output dataframe to an existing dataframe, put True and it'll append the results to the dataframe at that filename (and skip any entries that are already in that dataframe). Else, that dataframe will be overwritten.
Returns:
Dataframe where each row is an RDM along with accompanying metadata. The columns of this
dataframe are:
matrix: The actual RDM, as a 2D numpy array.
df_name: The name of the dataframe. Purely cosmetic
model: The name of the model used.
layer: The layer name (e.g., conv1)
layer_num: Where the layer falls in the network (e.g., conv1 is 1)
layer_label: The layer name along with where it is in the network (e.g., (conv1,1) )
entry_keys: tuple of tuples where each tuple indicates the values of the entry variables for the items in the RDM. For example, if the entries of the RDM are square, circle, and triangle, this would be (('square'),('circle'),('triangle')).
entry_labels: Same as entry keys, but it indicates the name of the entry variables.
For example, ({'shape':'square'},{'shape':'circle'},{'shape':'triangle'})
dissim_metric: the dissim metric used in that RDM (e.g., euclidean_distance)
activation_type: Whether the RDM was computed by averaging across space for the convolutional layers ('feature_means'), versus simply vectorizing the spatial dimension ('unit_level'). By default the function does both.
kernel_set_name: The name of the kernel set being used.
kernel_set_inds: The indices of the kernels that are used.
perm: 'orig_data' if it's not permuted, or the permutation number if you specified permuted data.
dissim_rankings: The ranking of the entries that would yield a sub-matrix with the highest dissimilarity. For example, (3,5,6,2) means that a submatrix with entries 3 and 5 from the matrix would yield the highest dissimilarity for two elements, a submatrix with 3,5,6 would yield the highest dissimilarity for three elements, and so on. Additionally, there will be columns specifying the labels for each RDM, which will be whatever you specified in the input image sets.
Examples:
>>> image_list = [{'color':'red','shape':'square','path':'red_square.jpg'},
{'color':'blue','shape':'square','path':'blue_square.jpg'},
{'color':'red','shape':'circle','path':'red_circle.jpg'},
{'color':'blue','shape':'circle','path':'blue_square.jpg'}]
>>> image_sets = package_image_sets(image_list,['shape'],['color'])
>>> models_prepped = prepare_models({'alexnet':('alexnet','trained'),
'vgg19':('vgg19','random')})
>>> get_rdms(image_sets = image_sets, # the stack of image sets
models_dict = models_prepped, # the desired models
which_layers = {'alexnet':['conv1','fc3']}, # which layers to use; if not specified uses all layers
out_fname = 'rdm_df.p', # where to save the output
rdm_name = 'my_rdms', # the name of the output RDM
dissim_metrics = ['corr','euclidean_distance'], # the dissimilarity metrics you want to try
num_cores = 2) # how many cores to use for parallel processing
'''
print("*****Computing RDMs*****")
if num_cores=='na':
rdm_df_list = []
for image_set,model_name in it.product(image_sets,models_dict):
new_df = get_image_set_rdm(image_set,model_name,models_dict,out_fname,rdm_name,which_layers,
dissim_metrics,kernel_sets,num_perms,append,verbose,debug_check)
rdm_df_list.append(new_df)
else:
#rdm_df_list = Parallel(n_jobs=num_cores)(delayed(get_image_set_rdm)(image_set,model_name,models,
#rdm_name,which_layer_types,
#dissim_metrics,kernel_sets,num_perms,debug_check=False) for image_set,model_name in it.product(image_sets,models))
pool = multiprocessing.Pool(processes=num_cores)
rdm_df_list = [pool.apply_async(get_image_set_rdm,args=(image_set,model_name,models_dict,out_fname,rdm_name,which_layers,
dissim_metrics,kernel_sets,num_perms,append,verbose,debug_check)) for image_set,model_name in it.product(image_sets,models_dict)]
rdm_df_list = [p.get() for p in rdm_df_list]
rdms_df = pd.concat(rdm_df_list)
import os
import numpy as np
import pandas as pd
from covid19model.data.utils import convert_age_stratified_property
class QALY_model():
def __init__(self, comorbidity_distribution):
self.comorbidity_distribution = comorbidity_distribution
# Define absolute path
abs_dir = os.path.dirname(__file__)
# Import life table (q_x)
self.life_table = pd.read_csv(os.path.join(abs_dir, '../../../data/interim/QALYs/Life_table_Belgium_2019.csv'),sep=';',index_col=0)
# Compute the vector mu_x and append to life table
self.life_table['mu_x']= self.compute_death_rate(self.life_table['q_x'])
# Define mu_x explictly to enhance readability of the code
self.mu_x = self.life_table['mu_x']
# Load comorbidity QoL scores for the Belgian population from <NAME>
QoL_Van_Wilder=pd.read_excel(os.path.join(abs_dir,"../../../data/interim/QALYs/De_Wilder_QoL_scores.xlsx"),index_col=0,sheet_name='QoL_scores')
QoL_Van_Wilder.columns = ['0','1','2','3+']
QoL_Van_Wilder.index = pd.IntervalIndex.from_tuples([(0,10),(10,20),(20,30),(30,40),(40,50),(50,60),(60,70),(70,80),(80,120)], closed='left')
self.QoL_Van_Wilder = QoL_Van_Wilder
# Define overall Belgian QoL scores
self.QoL_Belgium = pd.Series(index=pd.IntervalIndex.from_tuples([(0,10),(10,20),(20,30),(30,40),(40,50),(50,60),(60,70),(70,80),(80,120)], closed='left'), data=[0.85, 0.85, 0.84, 0.83, 0.805, 0.78, 0.75, 0.72, 0.72])
# Convert Belgian QoL and Van Wilder QoL to age bins of self.comorbidity_distribution
self.QoL_Belgium = convert_age_stratified_property(self.QoL_Belgium, self.comorbidity_distribution.index)
tmp_QoL_Van_Wilder = pd.DataFrame(index=self.comorbidity_distribution.index, columns=self.QoL_Van_Wilder.columns)
for column in self.QoL_Van_Wilder.columns:
tmp_QoL_Van_Wilder[column] = convert_age_stratified_property(self.QoL_Van_Wilder[column], self.comorbidity_distribution.index)
self.QoL_Van_Wilder = tmp_QoL_Van_Wilder
# Compute the QoL scores of the studied population
self.QoL_df = self.build_comorbidity_QoL(self.comorbidity_distribution, self.QoL_Van_Wilder, self.QoL_Belgium)
# Load comorbidity SMR estimates
SMR_pop_df=pd.read_excel(os.path.join(abs_dir,"../../../data/interim/QALYs/De_Wilder_QoL_scores.xlsx"), index_col=0, sheet_name='SMR')
SMR_pop_df.columns = ['0','1','2','3+']
SMR_pop_df.index = pd.IntervalIndex.from_tuples([(0,10),(10,20),(20,30),(30,40),(40,50),(50,60),(60,70),(70,80),(80,120)], closed='left')
self.SMR_pop_df = SMR_pop_df
# Convert comorbidity SMR estimates to age bins of self.comorbidity_distribution
tmp_SMR_pop_df = pd.DataFrame(index=self.comorbidity_distribution.index, columns=self.SMR_pop_df.columns)
for column in self.SMR_pop_df.columns:
tmp_SMR_pop_df[column] = convert_age_stratified_property(self.SMR_pop_df[column], self.comorbidity_distribution.index)
self.SMR_pop_df = tmp_SMR_pop_df
# Compute the SMR of the studied population
self.SMR_df = self.build_comorbidity_SMR(self.comorbidity_distribution, self.SMR_pop_df)
def build_comorbidity_SMR(self, comorbidity_distribution, population_SMR):
""" A function to compute the Standardized Mortality Ratios (SMRs) in a studied population, based on the comorbidity distribution of the studied population and the comorbidity distribution of the Belgian population
Parameters
----------
comorbidity_distribution : pd.Dataframe
A dataframe containing the studied population fraction with x comorbidities.
This dataframe is the input of the comorbidity-QALY model. The studied population are usually recovered or deceased COVID-19 patients in hospitals.
The dataframe must have the age group as its index and make use of a pandas multicolumn, where the first level denotes the population (usually R or D, but the code is written to use n populations).
The second level denotes the number of comorbidities, which must be equal to 0, 1, 2 or 3+.
population_SMR : pd.Dataframe
A dataframe containing the age-stratified SMRs for individuals with 0, 1, 2 or 3+ comorbidities in the general Belgian population.
Computed using the comorbidity distributions for the general Belgian population obtained from <NAME>, and the relative risk of dying by Charlson et al. (computation performed in MS Excel).
Returns
-------
SMR_df: pd.DataFrame
The weighted Standardized Mortality Ratios (SMRs) in the studied population.
An SMR > 1 indicates the studied population is less healthy than the general Belgian population.
"""
# Extract names of populations
populations = list(comorbidity_distribution.columns.get_level_values(0).unique())
# Initialize dataframe
df = pd.DataFrame(index=population_SMR.index, columns=populations)
# Fill dataframe
for idx,age_group in enumerate(df.index):
for jdx,pop in enumerate(populations):
df.loc[age_group, pop] = sum(comorbidity_distribution.loc[age_group, pop]*population_SMR.loc[age_group])
# Append SMR of average Belgian
df.loc[slice(None), 'BE'] = 1
return df
def build_comorbidity_QoL(self, comorbidity_distribution, comorbidity_QoL, average_QoL):
""" A function to compute the QoL scores in a studied population, based on the comorbidity distribution of the studied population and the QoL scores for 0, 1, 2, 3+ comorbidities for the Belgian population
Parameters
----------
comorbidity_distribution : pd.Dataframe
A dataframe containing the studied population fraction with x comorbidities.
This dataframe is the input of the comorbidity-QALY model. The studied population are usually recovered or deceased COVID-19 patients in hospitals.
The dataframe must have the age group as its index and make use of a pandas multicolumn, where the first level denotes the population (usually R or D, but the code is written to use n populations).
The second level denotes the number of comorbidities, which must be equal to 0, 1, 2 or 3+.
comorbidity_QoL : pd.Dataframe
A dataframe containing the age-stratified QoL scores for individuals with 0, 1, 2 or 3+ comorbidities in the general Belgian population.
Obtained from <NAME>.
average_QoL : pd.Series
A series containing the average QoL score for the (Belgian) population
Returns
-------
QoL_df: pd.DataFrame
The comorbidity-weighted QoL scores of the studied population.
"""
# Extract names of populations
populations = list(comorbidity_distribution.columns.get_level_values(0).unique())
# Initialize dataframe
df = pd.DataFrame(index=comorbidity_QoL.index, columns=populations)
# Fill dataframe
for idx,age_group in enumerate(df.index):
for jdx,pop in enumerate(populations):
df.loc[age_group, pop] = sum(comorbidity_distribution.loc[age_group, pop]*comorbidity_QoL.loc[age_group])
# Append SMR of average Belgian
df.loc[slice(None), 'BE'] = average_QoL
return df
def compute_death_rate(self, q_x):
""" A function to compute the force of mortality (instantaneous death rate at age x)
Parameters
----------
q_x : list or np.array
Probability of dying between age x and age x+1.
Returns
-------
mu_x : np.array
Instantaneous death rate at age x
"""
# Pre-allocate
mu_x = np.zeros(len(q_x))
# Compute first entry
mu_x[0] = -np.log(1-q_x[0])
# Loop over remaining entries
for age in range(1,len(q_x)):
mu_x[age] = -0.5*(np.log(1-q_x[age])+np.log(1-q_x[age-1]))
return mu_x
def survival_function(self, SMR=1):
""" A function to compute the probability of surviving until age x
Parameters
----------
self.mu_x : list or np.array
Instantaneous death rate at age x
SMR : float
"Standardized mortality ratio" (SMR) is the ratio of observed deaths in a study group to expected deaths in the general population.
An SMR of 1 corresponds to an average life expectancy, an increase in SMR shortens the expected lifespan.
Returns
-------
S_x : pd.Series
Survival function, i.e. the probability of surviving up until age x
"""
# Pre-allocate as np.array
S_x = np.zeros(len(self.mu_x))
# Survival rate at age 0 is 100%
S_x[0] = 1
# Loop
for age in range(1,len(self.mu_x)):
S_x[age] = S_x[age-1]*np.exp(-SMR*self.mu_x[age])
# Post-allocate as pd.Series object
S_x = pd.Series(index=range(len(self.mu_x)), data=S_x)
S_x.index.name = 'x'
return S_x
def life_expectancy(self,SMR=1):
""" A function to compute the life expectancy at age x
Parameters
----------
SMR : float
"Standardized mortality ratio" (SMR) is the ratio of observed deaths in a study group to expected deaths in the general population.
An SMR of 1 corresponds to an average life expectancy, an increase in SMR shortens the expected lifespan.
Returns
-------
LE_x : pd.Series
Life expectancy at age x
"""
# Compute survival function
S_x = self.survival_function(SMR)
# First compute inner sum
tmp = np.zeros(len(S_x))
for age in range(len(S_x)-1):
tmp[age] = 0.5*(S_x[age]+S_x[age+1])
# Then sum from x to the end of the table to obtain life expectancy
LE_x = np.zeros(len(S_x))
for x in range(len(S_x)):
LE_x[x] = np.sum(tmp[x:])
# Post-allocate to pd.Series object
LE_x = pd.Series(index=range(len(self.mu_x)), data=LE_x)
LE_x.index.name = 'x'
return LE_x
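# Usage sketch (illustrative): for an instantiated QALY_model, life_expectancy(SMR=1) returns the
# remaining life expectancy at every age x implied by the loaded Belgian life table, and a higher
# SMR (a less healthy subgroup) shortens it, e.g. LE = model.life_expectancy(SMR=1.5); LE[60].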
def compute_QALE_x(self, population='BE', SMR_method='convergent'):
""" A function to compute the quality-adjusted life expectancy at age x
Parameters
----------
self.mu_x : list or np.array
Instantaneous death rage at age x
self.SMR_df : pd.Dataframe
"Standardized mortality ratio" (SMR) is the ratio of observed deaths in a study group to expected deaths in the general population.
An SMR of 1 corresponds to an average life expectancy, an increase in SMR shortens the expected lifespan.
self.QoL_df : pd.Dataframe
Quality-of-life utility weights, as imported from `~/data/interim/QALYs/QoL_scores_Belgium_2018_v3.csv`.
Must contain two columns: "group_limit" and "QoL_score"
population : string
Choice of QoL scores. Valid options are 'BE', 'R' and 'D'.
'BE' : Overall QoL scores for the Belgian population by De Wilder et al. and an SMR=1 are applied (this represents average QALY loss)
'R' : QoL scores and SMR for those recovering from COVID-19 in the hospital (most likely higher quality than average)
'D' : QoL scores and SMR for those dying from COVID-19 in the hospital (most likely lower quality than average)
SMR_method : string
Choice of SMR model for remainder of life. Valid options are 'convergent' and 'constant'.
'convergent' : the SMR gradually converges to SMR=1 by the end of the subjects life.
If a person is expected to be healthy (SMR<1), this method represents the heuristic that we do not know how healthy this person will be in the future.
We just assume his "healthiness" converges back to the population average as time goes by.
'constant' : the SMR used to compute the QALEs remains equal to the expected value for the rest of the subjects life.
If a person is expected to be healthy (SMR<1), this method assumes the person will remain equally healthy for his entire life.
Returns
-------
QALE_x : pd.Series
Quality-adjusted ife expectancy at age x
"""
# Pre-allocate results
QALE_x = np.zeros(len(self.mu_x))
# Loop over x
for x in range(len(self.mu_x)):
# Pre-allocate dQALY
dQALE = np.zeros([len(self.mu_x)-x-1])
# Set age-dependent utility weights to lowest possible
j=0
age_limit=self.QoL_df.index[j].right - 1
QoL_x=self.QoL_df[population].values[j]
# Calculate the SMR at age x
if ((SMR_method == 'convergent')|(SMR_method == 'constant')):
k = np.where(self.QoL_df.index.contains(x))[0][-1]
age_limit = self.QoL_df.index[k].right - 1
SMR_x = self.SMR_df[population].values[k]
# Loop over years remaining after year x
for i in range(x,len(self.mu_x)-1):
# Find the right age bin
j = np.where(self.QoL_df.index.contains(i))[0][-1]
age_limit = self.QoL_df.index[j].right - 1
# Choose the right QoL score
QoL_x = self.QoL_df[population].values[j]
# Choose the right SMR
if SMR_method == 'convergent':
# SMR gradually converges to one by end of life
SMR = 1 + (SMR_x-1)*((len(self.mu_x)-1-i)/(len(self.mu_x)-1-x))
elif SMR_method == 'constant':
# SMR is equal to SMR at age x for remainder of life
SMR = SMR_x
# Compute the survival function
S_x = self.survival_function(SMR)
# Then compute the quality-adjusted life years lived between age x and x+1
dQALE[i-x] = QoL_x*0.5*(S_x[i] + S_x[i+1])
# Sum dQALY to obtain QALY_x
QALE_x[x] = np.sum(dQALE)
# Post-allocate to pd.Series object
QALE_x = pd.Series(index=range(len(self.mu_x)), data=QALE_x)
QALE_x.index.name = 'x'
return QALE_x
def compute_QALY_x(self, population='BE', r=0.03, SMR_method='convergent'):
""" A function to compute the quality-adjusted life years remaining at age x
Parameters
----------
self.mu_x : list or np.array
Instantaneous death rage at age x
self.SMR_df : pd.Dataframe
"Standardized mortality ratio" (SMR) is the ratio of observed deaths in a study group to expected deaths in the general population.
An SMR of 1 corresponds to an average life expectancy, an increase in SMR shortens the expected lifespan.
self.QoL_df : pd.Dataframe
Quality-of-life utility weights, as imported from `~/data/interim/QALYs/QoL_scores_Belgium_2018_v3.csv`.
Must contain two columns: "group_limit" and "QoL_score"
population : string
Choice of QoL scores. Valid options are 'BE', 'R' and 'D'.
'BE' : Overall QoL scores for the Belgian population by De Wilder et al. and an SMR=1 are applied (this represents average QALY loss)
'R' : QoL scores and SMR for those recovering from COVID-19 in the hospital (most likely higher quality than average)
'D' : QoL scores and SMR for those dying from COVID-19 in the hospital (most likely lower quality than average)
r : float
Discount rate (default 3%)
SMR_method : string
Choice of SMR model for remainder of life. Valid options are 'convergent' and 'constant'.
'convergent' : the SMR gradually converges to SMR=1 by the end of the subject's life.
If a person is expected to be healthy (SMR<1), this method represents the heuristic that we do not know how healthy this person will be in the future.
We just assume his "healthiness" converges back to the population average as time goes by.
'constant' : the SMR used to compute the QALYs remains equal to the expected value for the rest of the subject's life.
If a person is expected to be healthy (SMR<1), this method assumes the person will remain equally healthy for his entire life.
Returns
-------
QALY_x : pd.Series
Quality-adjusted life years remaining at age x
"""
# Pre-allocate results
QALY_x = np.zeros(len(self.mu_x))
# Loop over x
for x in range(len(self.mu_x)):
# Pre-allocate dQALY
dQALY = np.zeros([len(self.mu_x)-x-1])
# Set age-dependent utility weights to lowest possible
j=0
age_limit=self.QoL_df.index[j].right -1
QoL_x=self.QoL_df[population].values[j]
# Calculate the SMR at age x
if ((SMR_method == 'convergent')|(SMR_method == 'constant')):
k = np.where(self.QoL_df.index.contains(x))[0][-1]
age_limit = self.QoL_df.index[k].right - 1
SMR_x = self.SMR_df[population].values[k]
# Loop over years remaining after year x
for i in range(x,len(self.mu_x)-1):
# Find the right age bin
j = np.where(self.QoL_df.index.contains(i))[0][-1]
age_limit = self.QoL_df.index[j].right - 1
# Choose the right QoL score
QoL_x = self.QoL_df[population].values[j]
# Choose the right SMR
if SMR_method == 'convergent':
# SMR gradually converges to one by end of life
SMR = 1 + (SMR_x-1)*((len(self.mu_x)-1-i)/(len(self.mu_x)-1-x))
elif SMR_method == 'constant':
# SMR is equal to SMR at age x for remainder of life
SMR = SMR_x
# Compute the survival function
S_x = self.survival_function(SMR)
# Then compute the quality-adjusted life years lived between age x and x+1
dQALY[i-x] = QoL_x*0.5*(S_x[i] + S_x[i+1])*(1+r)**(x-i)
# Sum dQALY to obtain QALY_x
QALY_x[x] = np.sum(dQALY)
# Post-allocate to pd.Series object
QALY_x = pd.Series(index=range(len(self.mu_x)), data=QALY_x)
QALY_x.index.name = 'x'
return QALY_x
def bin_QALY_x(self, QALY_x, model_bins=pd.IntervalIndex.from_tuples([(0,12),(12,18),(18,25),(25,35),(35,45),(45,55),(55,65),(65,75),(75,85),(85,120)], closed='left')):
""" A function to bin the vector QALY_x according to the age groups in the COVID-19 SEIQRD
Parameters
----------
QALY_x : np.array
Quality-adjusted life years remaining at age x
model_bins : pd.IntervalIndex
Desired age bins
Returns
-------
QALY_binned: pd.Series
Quality-adjusted life years lost upon death for every age bin of the COVID-19 SEIQRD model
"""
# Pre-allocate results vector
QALY_binned = np.zeros(len(model_bins))
# Loop over model bins
for i in range(len(model_bins)):
# Map QALY_x to model bins
QALY_binned[i] = np.mean(QALY_x[model_bins[i].left:model_bins[i].right-1])
# Post-allocate to pd.Series object
QALY_binned = pd.Series(index=model_bins, data=QALY_binned)
QALY_binned.index.name = 'age_group'
return QALY_binned
def build_binned_QALY_df(self, r=0.03, SMR_method='convergent', model_bins=pd.IntervalIndex.from_tuples([(0,12),(12,18),(18,25),(25,35),(35,45),(45,55),(55,65),(65,75),(75,85),(85,120)], closed='left')):
# Extract names of populations
populations = list(self.SMR_df.columns.get_level_values(0).unique())
# Initialize empty dataframe
df = | pd.DataFrame() | pandas.DataFrame |
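# Illustrative sketch (not part of the source file above): compute_QALY_x boils down to a
# discounted sum of quality-adjusted person-years,
#   QALY_x = sum_i QoL_i * 0.5 * (S[i] + S[i+1]) * (1 + r)**(x - i),
# with the SMR either held constant or converging linearly to 1. The toy QoL weights and
# survival curve below are assumptions, not data used by the model.
import numpy as np

def discounted_qaly(qol, surv, x, r=0.03):
    # qol[i]: quality-of-life weight at age i; surv[i]: survival probability at age i
    return sum(qol[i] * 0.5 * (surv[i] + surv[i + 1]) * (1 + r) ** (x - i)
               for i in range(x, len(surv) - 1))

qol = np.full(5, 0.85)                      # assumed constant QoL weight
surv = np.array([1.0, 0.9, 0.8, 0.7, 0.6])  # assumed survival function S_x
print(discounted_qaly(qol, surv, x=0))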
# coding: utf-8
# In[1]:
# This is the work for the Udacity Self-Driving Car ND, Term 1, Project 3 (behavioral cloning).
import os, sys, random
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pandas as pd
# In[2]:
# Print iterations progress
def print_progress(iteration, total):
"""
Call in a loop to create terminal progress bar
Parameters
----------
iteration :
Current iteration (Int)
total :
Total iterations (Int)
"""
str_format = "{0:.0f}"
percents = str_format.format(100 * (iteration / float(total)))
filled_length = int(round(100 * iteration / float(total)))
bar = '█' * filled_length + '-' * (100 - filled_length)
sys.stdout.write('\r |%s| %s%%' % (bar, percents)),
if iteration == total:
sys.stdout.write('\n')
sys.stdout.flush()
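# Minimal usage sketch for the progress bar above (illustrative only, not part of the
# original notebook; it assumes print_progress defined above is in scope).
for demo_step in range(1, 11):
    print_progress(demo_step, 10)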
# ### Dataset
# In[3]:
#Load sample data
Folder_path = "./dataset/behavioral-cloning/sample_data/"
Img_path = Folder_path + "IMG/"
df = pd.read_csv(Folder_path + "driving_log.csv")
df['left'] = df['left'].str.replace(' IMG/','./dataset/behavioral-cloning/sample_data/IMG/')
df['right'] = df['right'].str.replace(' IMG/','./dataset/behavioral-cloning/sample_data/IMG/')
df['center'] = df['center'].str.replace('IMG/','./dataset/behavioral-cloning/sample_data/IMG/')
# In[4]:
Folder_path = "./dataset/behavioral-cloning/train_data_new/"
Img_path = Folder_path + "IMG/"
df = | pd.read_csv(Folder_path + "driving_log.csv", names=['center', 'left', 'right', 'steering', 'gas', 'brake', 'speed']) | pandas.read_csv |
import itertools
import operator
from functools import reduce
import numpy as np
import pandas as pd
import regex as re
import toolz
from pandas.core.groupby import SeriesGroupBy
import ibis.expr.operations as ops
import ibis.util
from ibis.backends.pandas.core import integer_types, scalar_types
from ibis.backends.pandas.dispatch import execute_node
@execute_node.register(ops.StringLength, pd.Series)
def execute_string_length_series(op, data, **kwargs):
return data.str.len().astype('int32')
@execute_node.register(ops.Substring, pd.Series, integer_types, integer_types)
def execute_substring_int_int(op, data, start, length, **kwargs):
return data.str[start : start + length]
@execute_node.register(ops.Substring, pd.Series, pd.Series, integer_types)
def execute_substring_series_int(op, data, start, length, **kwargs):
return execute_substring_series_series(
op, data, start, pd.Series(np.repeat(length, len(start))), **kwargs
)
@execute_node.register(ops.Substring, pd.Series, integer_types, pd.Series)
def execute_string_substring_int_series(op, data, start, length, **kwargs):
return execute_substring_series_series(
op, data, pd.Series(np.repeat(start, len(length))), length, **kwargs
)
@execute_node.register(ops.Substring, pd.Series, pd.Series, pd.Series)
def execute_substring_series_series(op, data, start, length, **kwargs):
end = start + length
def iterate(value, start_iter=start.values.flat, end_iter=end.values.flat):
begin = next(start_iter)
end = next(end_iter)
if (begin is not None and pd.isnull(begin)) or (
end is not None and | pd.isnull(end) | pandas.isnull |
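# Standalone sketch (separate from the ibis source above): the integer start/length case
# of Substring reduces to plain pandas string slicing, data.str[start:start + length].
import pandas as pd

s = pd.Series(["ibis", "pandas", "backend"])
start, length = 1, 3
print(s.str[start:start + length])  # -> bis, and, ack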
import functools
from threading import Thread
from contextlib import contextmanager
import signal
from scipy.stats._continuous_distns import _distn_names
import scipy
import importlib
from hydroDL.master import basins
from hydroDL.app import waterQuality
from hydroDL import kPath, utils
from hydroDL.model import trainTS
from hydroDL.data import gageII, usgs
from hydroDL.post import axplot, figplot
import torch
import os
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import time
import scipy
wqData = waterQuality.DataModelWQ('rbWN5')
siteNoLst = wqData.siteNoLst
dirSel = os.path.join(kPath.dirData, 'USGS', 'inventory', 'siteSel')
with open(os.path.join(dirSel, 'dictRB_Y30N5.json')) as f:
dictSite = json.load(f)
dirWRTDS = os.path.join(kPath.dirWQ, 'modelStat', 'WRTDS-W', 'All')
dirOut = os.path.join(dirWRTDS, 'output')
dirPar = os.path.join(dirWRTDS, 'params')
# read a temp file
saveFile = os.path.join(dirOut, siteNoLst[0])
dfP = pd.read_csv(saveFile, index_col=None).set_index('date')
t = dfP.index
nt = len(dfP.index)
nc = len(usgs.newC)
ns = len(siteNoLst)
matR = np.ndarray([ns, nt, nc])
matC = np.ndarray([ns, nt, nc])
# calculate residual
t0 = time.time()
for kk, siteNo in enumerate(siteNoLst):
print('{}/{} {:.2f}'.format(
kk, len(siteNoLst), time.time()-t0))
saveFile = os.path.join(dirOut, siteNo)
dfP = pd.read_csv(saveFile, index_col=None).set_index('date')
dfP.index = pd.to_datetime(dfP.index)
dfC = waterQuality.readSiteTS(siteNo, varLst=usgs.newC, freq='W')
matR[kk, :, :] = dfP.values-dfC.values
matC[kk, :, :] = dfC.values
def timeout(timeout):
def deco(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
res = [Exception('function [%s] timeout [%s seconds] exceeded!' % (
func.__name__, timeout))]
def newFunc():
try:
res[0] = func(*args, **kwargs)
except Exception as e:
res[0] = e
t = Thread(target=newFunc)
t.daemon = True
try:
t.start()
t.join(timeout)
except Exception as je:
print('error starting thread')
raise je
ret = res[0]
if isinstance(ret, BaseException):
raise ret
return ret
return wrapper
return deco
codeLst2 = ['00095', '00400', '00405', '00600', '00605',
'00618', '00660', '00665', '00681', '00915',
'00925', '00930', '00935', '00940', '00945',
'00950', '00955', '70303', '71846', '80154']
distLst = _distn_names
# distLst=['laplace']
dfP = | pd.DataFrame(index=distLst, columns=codeLst2) | pandas.DataFrame |
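# Usage sketch for the thread-based `timeout` decorator defined above (assumed to be in
# scope); the sleepy function below is a toy stand-in for a scipy distribution fit that
# may hang on some data.
import time

@timeout(2)
def toy_fit():
    time.sleep(1)
    return "fitted"

print(toy_fit())  # finishes inside the 2-second budget, so the result is returned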
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2020, empress development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
import empress.taxonomy_utils as tax_utils
def assert_taxcols_ok(taxcols, exp_num_levels=7):
"""Checks that a given taxcols list (returned by
taxonomy_utils.split_taxonomy()) looks how we expect it to.
exp_num_levels is the number of levels in the taxonomy -- it
defaults to 7, which is the case for the moving pictures dataset
(kingdom, phylum, class, order, family, genus, species).
"""
expcols = ["Level {}".format(i) for i in range(1, exp_num_levels + 1)]
# I guess ideally we'd use self.assertEqual() from within a unittest class,
# but just using the vanilla assert here makes it easy to use this function
# from multiple unittest classes
assert taxcols == expcols
class TestTaxonomyUtils(unittest.TestCase):
def setUp(self):
self.feature_metadata = pd.DataFrame(
{
"Taxonomy": [
(
"k__Bacteria; p__Bacteroidetes; c__Bacteroidia; "
"o__Bacteroidales; f__Bacteroidaceae; g__Bacteroides; "
"s__"
),
(
"k__Bacteria; p__Proteobacteria; "
"c__Gammaproteobacteria; o__Pasteurellales; "
"f__Pasteurellaceae; g__; s__"
),
# add a variable number of whitespace characters to check
# these are all successfully removed
(
"k__Bacteria;p__Bacteroidetes ; c__Bacteroidia; "
"o__Bacteroidales; f__Bacteroidaceae; g__Bacteroides; "
"s__uniformis"
),
"k__Bacteria; p__Firmicutes; c__Bacilli"
],
"Confidence": [0.95, 0.8, 0, 1]
},
index=["f1", "f2", "f3", "f4"]
)
def _check_basic_case_worked(self, split_fm, taxcols):
"""Checks that a given DataFrame (and list of split-up taxonomy columns)
matches the expected output from running split_taxonomy() on
self.feature_metadata.
"""
# Let's verify that split_fm looks how we expect it to look.
# ...First, by checking the columns -- should indicate that the
# correct number of taxonomic levels were identified
self.assertCountEqual(split_fm.columns, [
"Level 1", "Level 2", "Level 3", "Level 4", "Level 5", "Level 6",
"Level 7", "Confidence"
])
# ...Next, check the index -- no features should've been dropped (that
# isn't even a thing that split_taxonomy() does, but let's be safe :P)
self.assertCountEqual(split_fm.index, ["f1", "f2", "f3", "f4"])
# While we're at it, let's check that taxcols looks good
assert_taxcols_ok(taxcols)
# Finally, let's check each row individually. This is kinda inelegant.
assert_series_equal(
split_fm.loc["f1"],
pd.Series({
"Level 1": "k__Bacteria",
"Level 2": "p__Bacteroidetes",
"Level 3": "c__Bacteroidia",
"Level 4": "o__Bacteroidales",
"Level 5": "f__Bacteroidaceae",
"Level 6": "g__Bacteroides",
"Level 7": "s__",
"Confidence": 0.95
}, name="f1")
)
assert_series_equal(
split_fm.loc["f2"],
pd.Series({
"Level 1": "k__Bacteria",
"Level 2": "p__Proteobacteria",
"Level 3": "c__Gammaproteobacteria",
"Level 4": "o__Pasteurellales",
"Level 5": "f__Pasteurellaceae",
"Level 6": "g__",
"Level 7": "s__",
"Confidence": 0.8
}, name="f2")
)
assert_series_equal(
split_fm.loc["f3"],
pd.Series({
"Level 1": "k__Bacteria",
"Level 2": "p__Bacteroidetes",
"Level 3": "c__Bacteroidia",
"Level 4": "o__Bacteroidales",
"Level 5": "f__Bacteroidaceae",
"Level 6": "g__Bacteroides",
"Level 7": "s__uniformis",
"Confidence": 0
}, name="f3")
)
assert_series_equal(
split_fm.loc["f4"],
pd.Series({
"Level 1": "k__Bacteria",
"Level 2": "p__Firmicutes",
"Level 3": "c__Bacilli",
"Level 4": "Unspecified",
"Level 5": "Unspecified",
"Level 6": "Unspecified",
"Level 7": "Unspecified",
"Confidence": 1
}, name="f4")
)
def test_split_taxonomy_no_tax_column(self):
fm2 = self.feature_metadata.copy()
fm2.columns = ["asdf", "ghjkl"]
fm3, taxcols = tax_utils.split_taxonomy(fm2)
| assert_frame_equal(fm2, fm3) | pandas.testing.assert_frame_equal |
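# Standalone sketch (not the actual empress implementation): semicolon-separated taxonomy
# strings can be split into "Level N" columns with pandas, trimming whitespace and padding
# missing ranks with "Unspecified", which is the behavior the tests above assert.
import pandas as pd

tax = pd.Series(
    ["k__Bacteria; p__Firmicutes; c__Bacilli",
     "k__Bacteria; p__Bacteroidetes; c__Bacteroidia; o__Bacteroidales; "
     "f__Bacteroidaceae; g__Bacteroides; s__"],
    index=["f4", "f1"],
)
levels = tax.str.split(";", expand=True).apply(lambda col: col.str.strip())
levels = levels.fillna("Unspecified")
levels.columns = ["Level {}".format(i + 1) for i in range(levels.shape[1])]
print(levels)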
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt, rcParams
"""
Utility functions for understanding and visualizing the data
"""
class DataAnalyzer():
def __init__(self, text_key):
self.text_key = text_key
self.fig_num = -1
"""
Show all plots
"""
def show_plots(self):
plt.show()
return
"""
Plot histogram of classes given a y vector
"""
def class_hist(self, y, label_indices, label_names, print_hist=False, show_now=False):
counts = np.bincount(y)
counts = [counts[i] for i in range(len(counts)) if i in label_indices]
x = np.arange(len(counts))
ymax = max(counts)*1.1
self.fig_num += 1
plt.figure(self.fig_num, figsize=(12, 10))
plt.bar(x, counts, align='center', width=0.5)
plt.ylim(0, ymax)
plt.xticks(x, label_names, rotation=45, ha='right')
rcParams.update({'figure.autolayout': True, 'font.size': 20})
if print_hist:
print(counts)
if show_now:
plt.show()
return
"""
Plot scores of classes in a bar chart
"""
def class_scores(self, scores, label_names, show_now=False):
if type(scores) is tuple:
prec, rec = scores
scores = np.array([2/(1/prec[i] + 1/rec[i])\
for i in range(len(prec))])
x = np.arange(len(label_names))
ymax = max(scores)*1.1
self.fig_num += 1
plt.figure(self.fig_num, figsize=(12,10))
plt.bar(x, scores, align='center', width=0.5)
plt.ylim(0, ymax)
plt.xticks(x, label_names, rotation=45, ha='right')
rcParams.update({'figure.autolayout': True, 'font.size': 20})
if show_now:
plt.show()
return
"""
Returns mean confidence of each class
"""
def class_confidence(self, y, conf_scores):
df = | pd.DataFrame({'y': y, 'cs': conf_scores}) | pandas.DataFrame |
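# Standalone sketch (toy data): the per-class mean confidence computed above is a plain
# groupby-mean over (label, confidence-score) pairs.
import pandas as pd

df = pd.DataFrame({"y": [0, 0, 1, 1, 1], "cs": [0.9, 0.7, 0.6, 0.8, 1.0]})
print(df.groupby("y")["cs"].mean())  # class 0 -> 0.80, class 1 -> 0.80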
# coding: utf-8
# In[37]:
import pandas as pd
from sklearn import preprocessing
import numpy as np
import os
import h5py
import json
import h5py
# In[17]:
distance_data_path = "data.csv"
hnsw_result_path = "/home/lab4/code/HNSW/KNN-Evaluate/hnsw_result1111.h5py"
test_file_path = "test_image_feature.csv"
train_file_path = "vect_itemid130k.h5py"
# ### Loading hnsw data
# In[18]:
f = h5py.File(hnsw_result_path)
hnsw_ids_result = np.array(f["itemID"])
# ### Loading testing data and sorting test_ids to match hnsw_ids_result
#
# In[19]:
df = pd.read_csv(test_file_path, sep="\t", converters={1: json.loads}).reset_index()
df.columns = ["item_id", "vect"]
test_vects = np.array(df.vect.tolist())
test_ids = np.array(df.item_id.tolist())
test_ids = sorted(test_ids)
# ### Load the train_id list used for fit()
# In[43]:
h5 = h5py.File(train_file_path)
train_item_fit = np.array(h5["item"])
# In[44]:
len(train_item_fit)
# ### Loading KNN distance reference
# In[103]:
df_distance_data = | pd.read_csv(distance_data_path, iterator=True, chunksize=500000) | pandas.read_csv |
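# Standalone sketch: reading the (large) distance CSV in chunks keeps memory bounded;
# each chunk is an ordinary DataFrame. The file path below is only an assumption.
import pandas as pd

total_rows = 0
for chunk in pd.read_csv("data.csv", chunksize=500000):
    total_rows += len(chunk)
print(total_rows)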
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/7/8 22:08
Desc: Jin10 Data Center - economic indicators - United States
https://datacenter.jin10.com/economic
"""
import json
import time
import pandas as pd
import demjson
import requests
from akshare.economic.cons import (
JS_USA_NON_FARM_URL,
JS_USA_UNEMPLOYMENT_RATE_URL,
JS_USA_EIA_CRUDE_URL,
JS_USA_INITIAL_JOBLESS_URL,
JS_USA_CORE_PCE_PRICE_URL,
JS_USA_CPI_MONTHLY_URL,
JS_USA_LMCI_URL,
JS_USA_ADP_NONFARM_URL,
JS_USA_GDP_MONTHLY_URL,
)
# Eastmoney - United States - pending home sales (MoM)
def macro_usa_phs():
"""
US pending home sales (MoM)
http://data.eastmoney.com/cjsj/foreign_0_5.html
:return: US pending home sales (MoM)
:rtype: pandas.DataFrame
"""
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
'type': 'GJZB',
'sty': 'HKZB',
'js': '({data:[(x)],pages:(pc)})',
'p': '1',
'ps': '2000',
'mkt': '0',
'stat': '5',
'pageNo': '1',
'pageNum': '1',
'_': '1625474966006'
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(',') for item in data_json['data']])
temp_df.columns = [
'时间',
'前值',
'现值',
'发布日期',
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
temp_df['现值'] = pd.to_numeric(temp_df['现值'])
return temp_df
# Jin10 Data Center - economic indicators - US - economic conditions - US GDP
def macro_usa_gdp_monthly():
"""
US gross domestic product (GDP) report, data available from 20080228 to the present
https://datacenter.jin10.com/reportType/dc_usa_gdp
:return: pandas.Series
2008-02-28 0.6
2008-03-27 0.6
2008-04-30 0.9
2008-06-26 1
2008-07-31 1.9
...
2019-06-27 3.1
2019-07-26 2.1
2019-08-29 2
2019-09-26 2
2019-10-30 0
"""
t = time.time()
res = requests.get(
JS_USA_GDP_MONTHLY_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国国内生产总值(GDP)"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "53",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "gdp"
temp_df = temp_df.astype("float")
return temp_df
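# Illustrative sketch (toy data, no live request): every report function in this module
# follows the same pattern - merge the archived series with the incremental API values,
# drop duplicate dates, and sort by date. An equivalent pandas idiom is shown below.
import pandas as pd

archived = pd.Series([0.6, 0.9], index=pd.to_datetime(["2008-02-28", "2008-04-30"]))
incremental = pd.Series([0.9, 2.1], index=pd.to_datetime(["2008-04-30", "2019-07-26"]))
merged = pd.concat([archived, incremental]).sort_index()
merged = merged[~merged.index.duplicated(keep="first")]
print(merged)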
# Jin10 Data Center - economic indicators - US - price levels - US CPI monthly rate report
def macro_usa_cpi_monthly():
"""
US CPI monthly rate report, data available from 19700101 to the present
https://datacenter.jin10.com/reportType/dc_usa_cpi
https://cdn.jin10.com/dc/reports/dc_usa_cpi_all.js?v=1578741110
:return: US CPI monthly rate report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_CPI_MONTHLY_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国居民消费价格指数(CPI)(月环比)"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "9",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "cpi_monthly"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - economic indicators - US - price levels - US core CPI monthly rate report
def macro_usa_core_cpi_monthly():
"""
US core CPI monthly rate report, data available from 19700101 to the present
https://datacenter.jin10.com/reportType/dc_usa_core_cpi
https://cdn.jin10.com/dc/reports/dc_usa_core_cpi_all.js?v=1578740570
:return: US core CPI monthly rate report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_core_cpi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国核心CPI月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "6",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_core_cpi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - economic indicators - US - price levels - US personal spending monthly rate report
def macro_usa_personal_spending():
"""
US personal spending monthly rate report, data available from 19700101 to the present
https://datacenter.jin10.com/reportType/dc_usa_personal_spending
https://cdn.jin10.com/dc/reports/dc_usa_personal_spending_all.js?v=1578741327
:return: US personal spending monthly rate report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_personal_spending_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国个人支出月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "35",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_personal_spending"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - economic indicators - US - price levels - US retail sales monthly rate report
def macro_usa_retail_sales():
"""
US retail sales monthly rate report, data available from 19920301 to the present
https://datacenter.jin10.com/reportType/dc_usa_retail_sales
https://cdn.jin10.com/dc/reports/dc_usa_retail_sales_all.js?v=1578741528
:return: US retail sales monthly rate report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_retail_sales_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国零售销售月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "39",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_retail_sales"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - economic indicators - US - price levels - US import price index report
def macro_usa_import_price():
"""
US import price index report, data available from 19890201 to the present
https://datacenter.jin10.com/reportType/dc_usa_import_price
https://cdn.jin10.com/dc/reports/dc_usa_import_price_all.js?v=1578741716
:return: US import price index report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_import_price_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国进口物价指数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "18",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_import_price"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - economic indicators - US - price levels - US export price index report
def macro_usa_export_price():
"""
US export price index report, data available from 19890201 to the present
https://datacenter.jin10.com/reportType/dc_usa_export_price
https://cdn.jin10.com/dc/reports/dc_usa_export_price_all.js?v=1578741832
:return: US export price index report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_export_price_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国出口价格指数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "79",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_export_price"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - economic indicators - US - labor market - LMCI
def macro_usa_lmci():
"""
Federal Reserve labor market conditions index (LMCI) report, data available from 20141006 to the present
https://datacenter.jin10.com/reportType/dc_usa_lmci
https://cdn.jin10.com/dc/reports/dc_usa_lmci_all.js?v=1578742043
:return: Federal Reserve labor market conditions index report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_LMCI_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美联储劳动力市场状况指数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "93",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "lmci"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - economic indicators - US - labor market - unemployment rate - US unemployment rate report
def macro_usa_unemployment_rate():
"""
US unemployment rate report, data available from 19700101 to the present
https://datacenter.jin10.com/reportType/dc_usa_unemployment_rate
https://cdn.jin10.com/dc/reports/dc_usa_unemployment_rate_all.js?v=1578821511
:return: US unemployment rate report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_UNEMPLOYMENT_RATE_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国失业率"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "47",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "unemployment_rate"
return temp_df
# Jin10 Data Center - economic indicators - US - labor market - unemployment rate - US Challenger job cuts report
def macro_usa_job_cuts():
"""
US Challenger job cuts report, data available from 19940201 to the present
https://datacenter.jin10.com/reportType/dc_usa_job_cuts
https://cdn.jin10.com/dc/reports/dc_usa_job_cuts_all.js?v=1578742262
:return: US Challenger job cuts report - current value (10k persons)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_job_cuts_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国挑战者企业裁员人数报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万人)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "78",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "usa_job_cuts"
return temp_df
# Jin10 Data Center - economic indicators - US - labor market - employment - US nonfarm payrolls report
def macro_usa_non_farm():
"""
US nonfarm payrolls report, data available from 19700102 to the present
https://datacenter.jin10.com/reportType/dc_nonfarm_payrolls
https://cdn.jin10.com/dc/reports/dc_nonfarm_payrolls_all.js?v=1578742490
:return: US nonfarm payrolls report - current value (10k persons)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_NON_FARM_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国非农就业人数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万人)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "33",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "non_farm"
return temp_df
# Jin10 Data Center - economic indicators - US - labor market - employment - US ADP employment report
def macro_usa_adp_employment():
"""
US ADP employment report, data available from 20010601 to the present
https://datacenter.jin10.com/reportType/dc_adp_nonfarm_employment
https://cdn.jin10.com/dc/reports/dc_adp_nonfarm_employment_all.js?v=1578742564
:return: US ADP employment report - current value (10k persons)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_ADP_NONFARM_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国ADP就业人数(万人)"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万人)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "1",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "adp"
return temp_df
# Jin10 Data Center - economic indicators - US - labor market - consumer income and spending - US core PCE price index annual rate report
def macro_usa_core_pce_price():
"""
US core PCE price index annual rate report, data available from 19700101 to the present
https://datacenter.jin10.com/reportType/dc_usa_core_pce_price
https://cdn.jin10.com/dc/reports/dc_usa_core_pce_price_all.js?v=1578742641
:return: US core PCE price index annual rate report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_CORE_PCE_PRICE_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国核心PCE物价指数年率"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "80",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "core_pce_price"
return temp_df
# Jin10 Data Center - economic indicators - US - labor market - consumer income and spending - US real personal consumption expenditures QoQ (preliminary) report
def macro_usa_real_consumer_spending():
"""
US real personal consumption expenditures QoQ (preliminary) report, data available from 20131107 to the present
https://datacenter.jin10.com/reportType/dc_usa_real_consumer_spending
https://cdn.jin10.com/dc/reports/dc_usa_real_consumer_spending_all.js?v=1578742802
:return: US real personal consumption expenditures QoQ (preliminary) report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_real_consumer_spending_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国实际个人消费支出季率初值报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "81",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "usa_real_consumer_spending"
return temp_df
# Jin10 Data Center - economic indicators - US - trade conditions - US trade balance report
def macro_usa_trade_balance():
"""
US trade balance report, data available from 19700101 to the present
https://datacenter.jin10.com/reportType/dc_usa_trade_balance
https://cdn.jin10.com/dc/reports/dc_usa_trade_balance_all.js?v=1578742911
:return: US trade balance report - current value (100 million USD)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_trade_balance_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国贸易帐报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(亿美元)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "42",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "usa_trade_balance"
return temp_df
# Jin10 Data Center - economic indicators - US - trade conditions - US current account report
def macro_usa_current_account():
"""
US current account report, data available from 20080317 to the present
https://datacenter.jin10.com/reportType/dc_usa_current_account
https://cdn.jin10.com/dc/reports/dc_usa_current_account_all.js?v=1578743012
:return: US current account report - current value (100 million USD)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_current_account_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国经常账报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(亿美元)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "12",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "usa_current_account"
return temp_df
# Jin10 Data Center - economic indicators - US - industry indicators - manufacturing - Baker Hughes rig count report
def macro_usa_rig_count():
"""
Baker Hughes rig count report, data available from 20080317 to the present
https://datacenter.jin10.com/reportType/dc_rig_count_summary
https://cdn.jin10.com/dc/reports/dc_rig_count_summary_all.js?v=1578743203
:return: Baker Hughes rig count report - current week
:rtype: pandas.Series
"""
t = time.time()
params = {
"_": t
}
res = requests.get("https://cdn.jin10.com/data_center/reports/baker.json", params=params)
temp_df = pd.DataFrame(res.json().get("values")).T
big_df = pd.DataFrame()
big_df["钻井总数_钻井数"] = temp_df["钻井总数"].apply(lambda x: x[0])
big_df["钻井总数_变化"] = temp_df["钻井总数"].apply(lambda x: x[1])
big_df["美国石油钻井_钻井数"] = temp_df["美国石油钻井"].apply(lambda x: x[0])
big_df["美国石油钻井_变化"] = temp_df["美国石油钻井"].apply(lambda x: x[1])
big_df["混合钻井_钻井数"] = temp_df["混合钻井"].apply(lambda x: x[0])
big_df["混合钻井_变化"] = temp_df["混合钻井"].apply(lambda x: x[1])
big_df["美国天然气钻井_钻井数"] = temp_df["美国天然气钻井"].apply(lambda x: x[0])
big_df["美国天然气钻井_变化"] = temp_df["美国天然气钻井"].apply(lambda x: x[1])
big_df = big_df.astype("float")
return big_df
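# Illustrative sketch (toy data): the rig-count parser above expands a column whose cells
# are [count, weekly change] pairs into two numeric columns per category.
import pandas as pd

raw = pd.DataFrame({"total": [[790, -3], [793, 5]]}, index=["2020-01-03", "2020-01-10"])
out = pd.DataFrame(index=raw.index)
out["total_count"] = raw["total"].apply(lambda pair: pair[0])
out["total_change"] = raw["total"].apply(lambda pair: pair[1])
print(out.astype("float"))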
# Jin10 Data Center - economic indicators - US - industry indicators - manufacturing - US personal spending monthly rate report
# Jin10 Data Center - economic indicators - US - industry indicators - manufacturing - US producer price index (PPI) report
def macro_usa_ppi():
"""
US producer price index (PPI) report, data available from 20080226 to the present
https://datacenter.jin10.com/reportType/dc_usa_ppi
https://cdn.jin10.com/dc/reports/dc_usa_ppi_all.js?v=1578743628
:return: US producer price index (PPI) report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_ppi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国生产者物价指数(PPI)报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "37",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_ppi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - economic indicators - US - industry indicators - manufacturing - US core producer price index (PPI) report
def macro_usa_core_ppi():
"""
US core producer price index (PPI) report, data available from 20080318 to the present
https://datacenter.jin10.com/reportType/dc_usa_core_ppi
https://cdn.jin10.com/dc/reports/dc_usa_core_ppi_all.js?v=1578743709
:return: US core producer price index (PPI) report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_core_ppi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国核心生产者物价指数(PPI)报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "7",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_core_ppi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - economic indicators - US - industry indicators - manufacturing - US API crude oil inventory report
def macro_usa_api_crude_stock():
"""
US API crude oil inventory report, data available from 20120328 to the present
https://datacenter.jin10.com/reportType/dc_usa_api_crude_stock
https://cdn.jin10.com/dc/reports/dc_usa_api_crude_stock_all.js?v=1578743859
:return: US API crude oil inventory report - current value (10k barrels)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_api_crude_stock_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国API原油库存报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万桶)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "69",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_api_crude_stock"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry - Manufacturing - U.S. Markit Manufacturing PMI Flash Report
def macro_usa_pmi():
    """
    U.S. Markit manufacturing PMI flash report, data available from 20120601 to the present
    https://datacenter.jin10.com/reportType/dc_usa_pmi
    https://cdn.jin10.com/dc/reports/dc_usa_pmi_all.js?v=1578743969
    :return: U.S. Markit manufacturing PMI flash report - current value
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_pmi_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国Markit制造业PMI报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "74",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_pmi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry - Manufacturing - U.S. ISM Manufacturing PMI Report
def macro_usa_ism_pmi():
    """
    U.S. ISM manufacturing PMI report, data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_ism_pmi
    https://cdn.jin10.com/dc/reports/dc_usa_ism_pmi_all.js?v=1578744071
    :return: U.S. ISM manufacturing PMI report - current value
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_ism_pmi_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国ISM制造业PMI报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "28",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_ism_pmi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry - Industrial - U.S. Industrial Production MoM Report
def macro_usa_industrial_production():
    """
    U.S. industrial production MoM report, data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_industrial_production
    https://cdn.jin10.com/dc/reports/dc_usa_industrial_production_all.js?v=1578744188
    :return: U.S. industrial production MoM report - current value (%)
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_industrial_production_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国工业产出月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "20",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_industrial_production"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry - Industrial - U.S. Durable Goods Orders MoM Report
def macro_usa_durable_goods_orders():
    """
    U.S. durable goods orders MoM report, data available from 20080227 to the present
    https://datacenter.jin10.com/reportType/dc_usa_durable_goods_orders
    https://cdn.jin10.com/dc/reports/dc_usa_durable_goods_orders_all.js?v=1578744295
    :return: U.S. durable goods orders MoM report - current value (%)
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_durable_goods_orders_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国耐用品订单月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "13",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_durable_goods_orders"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry - Industrial - U.S. Factory Orders MoM Report
def macro_usa_factory_orders():
    """
    U.S. factory orders MoM report, data available from 19920401 to the present
    https://datacenter.jin10.com/reportType/dc_usa_factory_orders
    https://cdn.jin10.com/dc/reports/dc_usa_factory_orders_all.js?v=1578744385
    :return: U.S. factory orders MoM report - current value (%)
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_factory_orders_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国工厂订单月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "16",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_factory_orders"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry - Services - U.S. Markit Services PMI Flash Report
def macro_usa_services_pmi():
    """
    U.S. Markit services PMI flash report, data available from 20120701 to the present
    https://datacenter.jin10.com/reportType/dc_usa_services_pmi
    https://cdn.jin10.com/dc/reports/dc_usa_services_pmi_all.js?v=1578744503
    :return: U.S. Markit services PMI flash report - current value
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_services_pmi_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国Markit服务业PMI初值报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "89",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_services_pmi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry - Services - U.S. Business Inventories MoM Report
def macro_usa_business_inventories():
    """
    U.S. business inventories MoM report, data available from 19920301 to the present
    https://datacenter.jin10.com/reportType/dc_usa_business_inventories
    https://cdn.jin10.com/dc/reports/dc_usa_business_inventories_all.js?v=1578744618
    :return: U.S. business inventories MoM report - current value (%)
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_business_inventories_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国商业库存月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "4",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_business_inventories"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry - Services - U.S. ISM Non-Manufacturing PMI Report
def macro_usa_ism_non_pmi():
    """
    U.S. ISM non-manufacturing PMI report, data available from 19970801 to the present
    https://datacenter.jin10.com/reportType/dc_usa_ism_non_pmi
    https://cdn.jin10.com/dc/reports/dc_usa_ism_non_pmi_all.js?v=1578744693
    :return: U.S. ISM non-manufacturing PMI report - current value
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_ism_non_pmi_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国ISM非制造业PMI报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "29",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_ism_non_pmi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry - Real Estate - U.S. NAHB Housing Market Index Report
def macro_usa_nahb_house_market_index():
    """
    U.S. NAHB housing market index report, data available from 19850201 to the present
    https://datacenter.jin10.com/reportType/dc_usa_nahb_house_market_index
    https://cdn.jin10.com/dc/reports/dc_usa_nahb_house_market_index_all.js?v=1578744817
    :return: U.S. NAHB housing market index report - current value
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_nahb_house_market_index_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国NAHB房产市场指数报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "31",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_nahb_house_market_index"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry - Real Estate - U.S. Annualized Housing Starts Report
def macro_usa_house_starts():
    """
    U.S. annualized housing starts report, data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_house_starts
    https://cdn.jin10.com/dc/reports/dc_usa_house_starts_all.js?v=1578747388
    :return: U.S. annualized housing starts report - current value (10k units)
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_house_starts_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国新屋开工总数年化报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万户)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "17",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_house_starts"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry - Real Estate - U.S. Annualized New Home Sales Report
def macro_usa_new_home_sales():
    """
    U.S. annualized new home sales report, data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_new_home_sales
    https://cdn.jin10.com/dc/reports/dc_usa_new_home_sales_all.js?v=1578747501
    :return: U.S. annualized new home sales report - current value (10k units)
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_new_home_sales_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国新屋销售总数年化报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万户)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "32",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_new_home_sales"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry - Real Estate - U.S. Building Permits Report
def macro_usa_building_permits():
    """
    U.S. building permits report, data available from 20080220 to the present
    https://datacenter.jin10.com/reportType/dc_usa_building_permits
    https://cdn.jin10.com/dc/reports/dc_usa_building_permits_all.js?v=1578747599
    :return: U.S. building permits report - current value (10k units)
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_building_permits_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国营建许可总数报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万户)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "3",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_building_permits"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry - Real Estate - U.S. Annualized Existing Home Sales Report
def macro_usa_exist_home_sales():
    """
    U.S. annualized existing home sales report, data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_exist_home_sales
    https://cdn.jin10.com/dc/reports/dc_usa_exist_home_sales_all.js?v=1578747703
    :return: U.S. annualized existing home sales report - current value (10k units)
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_exist_home_sales_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国成屋销售总数年化报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万户)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "15",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_exist_home_sales"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry - Real Estate - U.S. FHFA House Price Index MoM Report
def macro_usa_house_price_index():
    """
    U.S. FHFA house price index MoM report, data available from 19910301 to the present
    https://datacenter.jin10.com/reportType/dc_usa_house_price_index
    https://cdn.jin10.com/dc/reports/dc_usa_house_price_index_all.js?v=1578747781
    :return: U.S. FHFA house price index MoM report - current value (%)
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_house_price_index_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国FHFA房价指数月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "51",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_house_price_index"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry - Real Estate - U.S. S&P/CS 20-City House Price Index YoY Report
def macro_usa_spcs20():
    """
    U.S. S&P/CS 20-city house price index YoY report, data available from 20010201 to the present
    https://datacenter.jin10.com/reportType/dc_usa_spcs20
    https://cdn.jin10.com/dc/reports/dc_usa_spcs20_all.js?v=1578747873
    :return: U.S. S&P/CS 20-city house price index YoY report - current value (%)
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_spcs20_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国S&P/CS20座大城市房价指数年率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "52",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
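    # unlike the other report helpers, this one keeps the last duplicate for each date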
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_spcs20"
temp_df = temp_df.astype(float)
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry - Real Estate - U.S. Pending Home Sales Index MoM Report
def macro_usa_pending_home_sales():
    """
    U.S. pending home sales index MoM report, data available from 20010301 to the present
    https://datacenter.jin10.com/reportType/dc_usa_pending_home_sales
    https://cdn.jin10.com/dc/reports/dc_usa_pending_home_sales_all.js?v=1578747959
    :return: U.S. pending home sales index MoM report - current value (%)
    :rtype: pandas.Series
    """
    t = time.time()
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_usa_pending_home_sales_all.js?v={str(int(round(t * 1000)))}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国成屋签约销售指数月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "34",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
    temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
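    # (assumed) the remaining steps mirror the shared pattern used by every report helper above
    temp_se = temp_se.iloc[:, 1]
    temp_df = temp_df.append(temp_se)
    temp_df.dropna(inplace=True)
    temp_df.sort_index(inplace=True)
    temp_df = temp_df.reset_index()
    temp_df.drop_duplicates(subset="index", inplace=True)
    temp_df.set_index("index", inplace=True)
    temp_df = temp_df.squeeze()
    temp_df.index.name = None
    temp_df.name = "usa_pending_home_sales"
    temp_df = temp_df.astype("float")
    return temp_df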
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
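        # labels are positional codes into the level arrays, so the fixture index is
        # [(foo, one), (foo, two), (bar, one), (baz, two), (qux, one), (qux, two)]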
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
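        # label codes are stored in the smallest integer dtype that can hold the level size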
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be (at least, shallow copied)
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
# Labels doesn't matter which way copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
# Names doesn't matter which way copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = | MultiIndex.from_arrays(arrays, names=self.index.names) | pandas.MultiIndex.from_arrays |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 29 13:15:30 2021
@author: haoli
"""
import sys
sys.path.append("../")
import const_common as constA
# import downloadUpdateData as mydownPy
import Const_LME_A as constLME_a
# import requests
from os import makedirs
import os.path as myPath
# import os
import pandas as pd
# from urllib.request import urlopen, Request
# import pyautogui, time
#Set up a 2.5 second pause after each PyAutoGUI call:
# pyautogui.PAUSE = 2.5
# from selenium import webdriver
from datetime import datetime
# import ntpath
from pathlib import Path
import pyodbc
import numpy as np
# from urllib.request import urlopen, Request
#----------------------------------
errorFileTargetDir = '../'
_dateSearchBy_ClassName_nonFerrous_gold_silver = "delayed-date.left"
_dateSearchKeyword = "Data valid for"
_waitTime_loadWebsite_LME = 3
_columnName_goldSilver = ['Date', 'Volume', 'Open Interest']
import key as pconst
_server = pconst.RYAN_SQL['server']
_database = pconst.RYAN_SQL['database']
_username = pconst.RYAN_SQL['username']
_password = pconst.RYAN_SQL['password']
_sqlTable_LME_baseMetal_stock = 'LME_baseMetal_stock'
_sqlTable_LME_baseMetal_price = 'LME_baseMetal_price'
_sqlTable_LME_precious_price = 'LME_precious_price'
_sqlTable_LME_precious_VolOpenInterest = 'LME_precious_VolOpenInterest'
_sqlTable_LME_TraderReport_CA = 'LME_weeklyTraderReport_CA'
_sqlTable_LME_TraderReport_AL = 'LME_weeklyTraderReport_AL'
_sqlTable_LME_TraderReport_gold = 'LME_weeklyTraderReport_Gold'
_sqlTable_LME_TraderReport_silver = 'LME_weeklyTraderReport_Silver'
_sqlTable_LME_daily_Volume = 'LME_Daily_Volume'
_sqlTable_LME_daily_OpenInterest_option = 'LME_Daily_OpenInterest_Option_E'
_sqlTable_LME_daily_OpenInterest_future = 'LME_Daily_OpenInterest_Future_E'
_REPORT_DATE_col = 'REPORT_DATE'
_FORWARD_DATE_col = 'FORWARD_DATE'
_FORWARD_MONTH_col = 'FORWARD_MONTH'
# mydownPy.logError("my test message")
#logError(errorFileTargetDir, msg)
# mydownPy.logError(errorFileTargetDir, "my test message")
#------- ---------
def makeTodayDataDir(newDir):
# if not myPath.lexists(newDir): #lexists
if not myPath.exists(newDir):
makedirs(newDir)
def convertDDMM_date(df):
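    """Normalise the first column of `df` from 'YYYYMMDD' strings to 'YYYY-MM-DD' strings in place and return it."""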
for i in range(len(df)):
d = df.iloc[i][0]
d2 = d.strip()
yy = datetime.strptime(d2, '%Y%m%d').strftime('%Y-%m-%d')
df.iat[i, 0] = yy
return df
#--------------------------------
def sql_openInterest_daily_Option(df, dbName):
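    """Insert one day of LME option open-interest rows from `df` into SQL table `dbName`,
    deleting any rows already stored for that report date first."""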
# --- SQL ----
df = df.replace({np.NAN: None})
df[df.columns[2]] = df[df.columns[2]].str.slice(0,17)
cnxn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};SERVER='+_server+';DATABASE='+_database+';UID='+_username+';PWD='+_password)
cursor = cnxn.cursor()
#delete today data before enter
dateStr = df.iloc[1,0]
# productStr = row[1]
# descriptionStr = row[2]
query = """DELETE FROM %s where Date = '%s';""" % (dbName, dateStr)
cursor.execute(query)
for index, row in df.iterrows():
params = tuple(row)
query = """INSERT INTO %s VALUES (?,?,?,?, ?,?,?,?);""" %(dbName)
cursor.execute(query, params)
cnxn.commit()
cursor.close()
cnxn.close()
def sql_openInterest_daily_Future(df, dbName):
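    """Insert LME future open-interest rows from `df` into SQL table `dbName`, replacing any
    existing row with the same date / underlying / contract type / forward date."""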
df = df.replace({np.NAN: None})
df[df.columns[2]] = df[df.columns[2]].str.slice(0,17)
cnxn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};SERVER='+_server+';DATABASE='+_database+';UID='+_username+';PWD='+_password)
cursor = cnxn.cursor()
for index, row in df.iterrows():
#delete today data before enter
#Date UNDERLYING ContractType ForwardDate OpenInterest Turnover
#--------disable this when load large files------------------------
query = """DELETE FROM %s where Date = '%s' and [Underlying] = '%s' and [ContractType] = '%s' and [ForwardDate] ='%s';""" \
% (dbName, row[0], row[1], row[2],row[3])
cursor.execute(query)
#---------------------------------------
params = tuple(row)
query = """INSERT INTO %s VALUES (?,?,?,?, ?,? );""" %(dbName)
cursor.execute(query, params)
cnxn.commit()
cursor.close()
cnxn.close()
def extract_OpenInterestData_Preciou_sql(fileFullPath, dbNameFuture):
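    """Read a precious-metals open-interest CSV, drop empty rows/columns, parse the integer
    date columns, and aggregate forward positions by month for loading into the future table."""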
# df.astype(str) # df = pd.read_csv(fileFullPath, dtype=str)
df = pd.read_csv(fileFullPath)
df1 = df.dropna(how='all')
df_data = df1.dropna(axis = 1, how='all')
# df2 = df1.dropna(axis = 1, how='all')
    #-- convert integer to datetime format
df_data[_REPORT_DATE_col] = (pd.to_datetime(df_data[_REPORT_DATE_col], format = '%Y%m%d'))
df_data[_FORWARD_DATE_col] = (pd.to_datetime(df_data[_FORWARD_DATE_col], format = '%Y%m%d'))
    #-------- aggregate the data, bucketing forward dates into month-end groups
    # NOTE: the .sum() below is an assumed aggregation -- the original statement was truncated here
    df_future = df_data.groupby([_REPORT_DATE_col, 'UNDERLYING', 'CONTRACT_TYPE',
                                 pd.Grouper(key=_FORWARD_DATE_col, freq='M')]).sum().reset_index()
"""
This module provides an abstract base class for a callback and a default
implementation.
If you want to store the data in a way that is different from the
functionality provided by the default callback, you can write your own
extension of callback. For example, you can easily implement a callback
that stores the data in e.g. a NoSQL file.
The only method to implement is the __call__ magic method. To use logging of
progress, always call super.
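For example, the minimal structure of a custom callback (an illustrative sketch only,
using nothing beyond the abstract API defined below) looks like::

    class PrintingCallback(AbstractCallback):

        def __call__(self, experiment, outcomes):
            super().__call__(experiment, outcomes)  # keep the progress logging
            print(experiment, outcomes)

        def get_results(self):
            return None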
"""
import abc
import csv
import os
import shutil
import numpy as np
import pandas as pd
from .parameters import (CategoricalParameter, IntegerParameter,
BooleanParameter)
from ..util import ema_exceptions, get_module_logger
#
# Created on 22 Jan 2013
#
# .. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
#
__all__ = ['AbstractCallback',
'DefaultCallback',
'FileBasedCallback']
_logger = get_module_logger(__name__)
class AbstractCallback(object):
"""
Abstract base class from which different call back classes can be derived.
Callback is responsible for storing the results of the runs.
Parameters
----------
uncs : list
a list of the parameters over which the experiments
are being run.
outcomes : list
a list of outcomes
nr_experiments : int
the total number of experiments to be executed
reporting_interval : int, optional
the interval at which to provide progress
information via logging.
reporting_frequency: int, optional
the total number of progress logs
Attributes
----------
i : int
a counter that keeps track of how many experiments have been
saved
reporting_interval : int,
the interval between progress logs
"""
__metaclass__ = abc.ABCMeta
i = 0
def __init__(self, uncertainties, outcomes, levers,
nr_experiments, reporting_interval=None,
reporting_frequency=10):
if reporting_interval is None:
reporting_interval = max(
1, int(round(nr_experiments / reporting_frequency)))
self.reporting_interval = reporting_interval
@abc.abstractmethod
def __call__(self, experiment, outcomes):
"""
Method responsible for storing results. The implementation in this
class only keeps track of how many runs have been completed and
logging this. Any extension of AbstractCallback needs to implement
this method. If one want to use the logging provided here, call it via
super.
Parameters
----------
experiment: Experiment instance
outcomes: dict
the outcomes dict
"""
#
# TODO:: https://github.com/alexanderkuk/log-progress
# can we detect whether we are running within Jupyter?
# yes:
# https://stackoverflow.com/questions/15411967/how-can-i-check-if-code-is-executed-in-the-ipython-notebook
self.i += 1
_logger.debug(str(self.i) + " cases completed")
if self.i % self.reporting_interval == 0:
_logger.info(str(self.i) + " cases completed")
@abc.abstractmethod
def get_results(self):
"""
method for retrieving the results. Called after all experiments
have been completed. Any extension of AbstractCallback needs to
implement this method.
"""
class DefaultCallback(AbstractCallback):
"""
default callback system
callback can be used in perform_experiments as a means for
specifying the way in which the results should be handled. If no
callback is specified, this default implementation is used. This
one can be overwritten or replaced with a callback of your own
design. For example if you prefer to store the result in a database
or write them to a text file
"""
i = 0
cases = None
results = {}
shape_error_msg = "can only save up to 2d arrays, this array is {}d"
constraint_error_msg = ('can only save 1d arrays for constraint, '
'this array is {}d')
def __init__(self, uncs, levers, outcomes, nr_experiments,
reporting_interval=100, reporting_frequency=10):
"""
Parameters
----------
uncs : list
a list of the parameters over which the experiments
are being run.
outcomes : list
a list of outcomes
nr_experiments : int
the total number of experiments to be executed
reporting_interval : int, optional
the interval between progress logs
reporting_frequency: int, optional
the total number of progress logs
"""
super(DefaultCallback, self).__init__(uncs, levers, outcomes,
nr_experiments,
reporting_interval,
reporting_frequency)
self.i = 0
self.cases = None
self.results = {}
self.outcomes = [outcome.name for outcome in outcomes]
# determine data types of parameters
columns = []
dtypes = []
self.parameters = []
for parameter in uncs + levers:
name = parameter.name
self.parameters.append(name)
dataType = 'float'
if isinstance(parameter, CategoricalParameter):
dataType = 'object'
elif isinstance(parameter, BooleanParameter):
dataType = 'bool'
elif isinstance(parameter, IntegerParameter):
dataType = 'int'
columns.append(name)
dtypes.append(dataType)
for name in ['scenario', 'policy', 'model']:
columns.append(name)
dtypes.append('object')
df = pd.DataFrame(index=np.arange(nr_experiments))
for name, dtype in zip(columns, dtypes):
df[name] = | pd.Series(dtype=dtype) | pandas.Series |
"""
model training. To be executed directly within this directory. Input data for the model must be specified via `path`.
Among the other parameters, only do_train needs to be adjusted if the model should be retrained.
"""
from sklearn import ensemble as en
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.svm import SVR
import lightgbm as lgb
import numpy as np
import pandas as pd
import os
import joblib as jl
#import matplotlib.pyplot as plt
from sentinel import timer
#plt.rcParams["figure.figsize"] = (30,12)
#plt.rcParams["font.size"] = 20
# global params as I'm too lazy to build a CLI
path = 'data/vegetation_index_features_aggregated_all.parquet' # 'data/features_three_months_full.parquet' # 'data/features_three_months_improved.parquet' # 'data/vegetation_index_features_aggregated.parquet' # "data/df_empty_dummy.csv" #
np.random.seed(42)
w_dir = '.' # '/home/dario/_py/tree-cover' #
model_name = "model_sentinel_logtrans_stratified_mae_allveg_lgbm_depth8_5000.joblib" # "model_sentinel_logtrans_stratified_huber_3months_2000_60leaves.joblib" #
do_train = False
do_transform = True # logarithmic transform of y
do_stratify = True # only take approximately equal numbers of samples per tree-cover level into account
use_lgbm = True # faster than sklearn
target= 'tree_cover' # 'land_use_category' #
do_gridsearch = False
do_scale_X = False # use a MinMaxScaler to bring the data into a range between -1 and 1 -> no need.
do_weight = False # assign a weight to each feature s.t. those occurring less frequently will have higher weights
method = 'boost' # 'svr' #
kernel = 'rbf' # for svr
# params for lgb:
objective= 'huber' # 'mean_absolute_error'
# cols to be dropped from the training data. Aridity Zone can be kept.
bastin_cols = ['longitude','latitude','dryland_assessment_region','land_use_category','tree_cover'] # 'Aridity_zone'
def load_data(path, cols=None):
if path.endswith('.csv'):
df = | pd.read_csv(path, sep=",") | pandas.read_csv |
from __future__ import division, unicode_literals, print_function
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
import warnings
warnings.filterwarnings('ignore')
import gc
import spacy
import plac
import time
import ujson as json
import numpy as np
import pandas as pd
import en_core_web_md
from tqdm import tqdm
from pathlib import Path
from keras.utils.np_utils import to_categorical
from keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.metrics import log_loss
try:
import cPickle as pickle
except ImportError:
import pickle
from spacy_hook import get_embeddings, get_word_ids
from spacy_hook import create_similarity_pipeline
from keras_decomposable_attention import build_model
def attention_foldrun(X, X2, y, name, Xte = None, Xte2 = None, start_fold = 0):
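    """Train the decomposable-attention model with 10-fold stratified CV, running two folds
    starting at `start_fold`, saving out-of-fold train (and optional test) predictions to
    OOF_preds/, and printing the mean validation log-loss."""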
skf = StratifiedKFold(n_splits = 10, random_state = 111, shuffle = True)
if isinstance(X, pd.core.frame.DataFrame):
X = X.values
if isinstance(y, pd.core.frame.DataFrame):
y = y.is_duplicate.values
if isinstance(y, pd.core.frame.Series):
y = y.values
print('Running Decomposable Attention model with parameters:', settings)
i = 1
losses = []
train_splits = []
val_splits = []
for tr_index, val_index in skf.split(X, y):
train_splits.append(tr_index)
val_splits.append(val_index)
for i in range(start_fold, start_fold + 2):
X_trq1, X_valq1 = X[train_splits[i]], X[val_splits[i]]
X_trq2, X_valq2 = X2[train_splits[i]], X2[val_splits[i]]
y_tr, y_val = y[train_splits[i]], y[val_splits[i]]
y_tr = to_categorical(y_tr)
y_val = to_categorical(y_val)
t = time.time()
print('Start training on fold: {}'.format(i))
callbacks = [ModelCheckpoint('checks/decomposable_{}_10SKF_fold{}.h5'.format(i, name),
monitor='val_loss',
verbose = 0, save_best_only = True),
EarlyStopping(monitor='val_loss', patience = 4, verbose = 1)]
model = build_model(get_embeddings(nlp.vocab), shape, settings)
model.fit([X_trq1, X_trq2], y_tr, validation_data=([X_valq1, X_valq2], y_val),
nb_epoch=settings['nr_epoch'], batch_size=settings['batch_size'], callbacks = callbacks)
val_pred = model.predict([X_valq1, X_valq2], batch_size = 64)
score = log_loss(y_val, val_pred)
losses.append(score)
print('Predicting training set.')
val_pred = pd.DataFrame(val_pred, index = val_splits[i])
val_pred.columns = ['attention_feat1', 'attention_feat2']
val_pred.to_pickle('OOF_preds/train_attentionpreds_fold{}.pkl'.format(i))
print(val_pred.head())
if Xte is not None:
print('Predicting test set.')
test_preds = model.predict([Xte, Xte2], batch_size = 64)
test_preds = pd.DataFrame(test_preds)
test_preds.columns = ['attention_feat1', 'attention_feat2']
test_preds.to_pickle('OOF_preds/test_attentionpreds_fold{}.pkl'.format(i))
del test_preds
gc.collect()
print('Final score for fold {} :'.format(i), score, '\n',
'Time it took to train and predict on fold:', time.time() - t, '\n')
del X_trq1, X_valq1, X_trq2, X_valq2, y_tr, y_val, val_pred
gc.collect()
i += 1
print('Mean logloss for model in 10-folds SKF:', np.array(losses).mean(axis = 0))
return
# In[ ]:
qsrc = '/media/w/1c392724-ecf3-4615-8f3c-79368ec36380/DS Projects/Kaggle/Quora/data/features/lemmatized_fullclean/'
qsrc2 = '/media/w/1c392724-ecf3-4615-8f3c-79368ec36380/DS Projects/Kaggle/Quora/data/features/NER/'
feats_src = '/media/w/1c392724-ecf3-4615-8f3c-79368ec36380/DS Projects/Kaggle/Quora/data/features/uncleaned/'
xgb_feats = | pd.read_csv(feats_src + '/the_1owl/owl_train.csv') | pandas.read_csv |
import os
import math
import numbers
import time
import numpy as np
import scipy as sp
from scipy.sparse import csr_matrix
import pandas as pd
import torch
from torch.nn import functional as F
from abc import ABC, abstractmethod
from bisect import bisect
from collections import Counter
from collections import deque
try:
import torchsso
TORCH_SSO_FOUND = True
except:
TORCH_SSO_FOUND = False
from src.naqs.network.nade import MaxBatchSizeExceededError
import src.naqs.network.torch_utils as torch_utils
import src.utils.complex as cplx
from src.utils.sparse_math import sparse_dense_mv
from src.utils.system import mk_dir
from src.optimizer.hamiltonian import PauliHamiltonian
from src.optimizer.utils import LogKey, KFACOptimizer
def to_sparse_vector(data, idxs, size=None):
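    """Build a sparse (size x 1) CSR column vector with `data` placed at the row positions given by `idxs`."""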
if size is None:
size = len(idxs)
vec = csr_matrix((data, (idxs, [0]*len(idxs))), shape=(size,1))
return vec
def median_(val, freq):
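    """Weighted median of `val`, where `freq` gives the (integer) weight attached to each value."""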
ord = np.argsort(val)
cdf = np.cumsum(freq[ord])
return val[ord][np.searchsorted(cdf, cdf[-1] // 2)]
class OptimizerBase(ABC):
'''
Base class for optimizing the energy a wavefunction anzatz.
'''
def __init__(self,
wavefunction,
qubit_hamiltonian,
pre_compute_H=True,
n_electrons=None,
n_alpha_electrons=None,
n_beta_electrons=None,
n_fixed_electrons=None,
n_excitations_max=None,
reweight_samples_by_psi=False,
normalise_psi=False,
normalize_grads=False,
grad_clip_factor=3,
grad_clip_memory_length=50,
optimizer=torch.optim.Adam,
optimizer_args={'lr': 1e-3},
scheduler=None,
scheduler_args=None,
save_loc='./',
pauli_hamiltonian_fname=None,
overwrite_pauli_hamiltonian=False,
pauli_hamiltonian_dtype=np.float32,
verbose=False):
self.wavefunction = wavefunction
self.hilbert = self.wavefunction.hilbert
self.qubit_hamiltonian = qubit_hamiltonian
self.device = "cpu"
self.reweight_samples_by_psi = reweight_samples_by_psi
self.normalise_psi = normalise_psi
self.n_electrons = n_electrons
self.n_alpha_electrons = n_alpha_electrons
self.n_beta_electrons = n_beta_electrons
self.n_fixed_electrons = n_fixed_electrons
self.n_excitations_max = n_excitations_max
self.subspace_args = {"N_up": self.n_electrons,
"N_alpha": self.n_alpha_electrons,
"N_beta": self.n_beta_electrons,
"N_occ": self.n_fixed_electrons,
"N_exc_max": self.n_excitations_max}
self.optimizer_callable = optimizer
self.optimizer_args = optimizer_args
self.scheduler_callable = scheduler
self.scheduler_args = scheduler_args
self.grad_clip_factor = grad_clip_factor
self.grad_clip_memory_length = grad_clip_memory_length
self.normalize_grads = normalize_grads
self.save_loc = save_loc
self.verbose = verbose
self.pauli_hamiltonian_fname = pauli_hamiltonian_fname
self.overwrite_pauli_hamiltonian = overwrite_pauli_hamiltonian
restricted_idxs = self.hilbert.get_subspace(ret_states=False,
ret_idxs=True,
**self.subspace_args)
self.pauli_hamiltonian = PauliHamiltonian.get(self.hilbert,
qubit_hamiltonian,
hamiltonian_fname=self.pauli_hamiltonian_fname,
restricted_idxs=restricted_idxs,
verbose=self.verbose,
n_excitations_max=self.n_excitations_max,
dtype=pauli_hamiltonian_dtype)
if pre_compute_H and not self.pauli_hamiltonian.is_frozen():
print("Pre-computing Hamiltonian.")
# for idxs in torch.split(restricted_idxs, 1000000):
# self.pauli_hamiltonian.update_H(idxs, check_unseen=False, assume_unique=True)
self.pauli_hamiltonian.update_H(restricted_idxs, check_unseen=False, assume_unique=True)
self.pauli_hamiltonian.freeze_H()
if self.overwrite_pauli_hamiltonian:
self.pauli_hamiltonian.save_H(self.pauli_hamiltonian_fname)
self.sampled_idxs = Counter()
self.reset_log()
self.reset_optimizer()
def reset_log(self):
'''Reset the logging tools.
Resets the log to a dictionary of empty lists. Resets the total number of steps and run time.
'''
self.log = {LogKey.E: [], LogKey.E_LOC: [], LogKey.E_LOC_VAR: [], LogKey.N_UNIQUE_SAMP: [], LogKey.TIME: []}
self.last_samples = []
self.n_steps = 0
self.n_epochs = 0
self.run_time = 0
def reset_optimizer(self, cond_idx=None):
'''Reset the optimization tools.
Reset the optimizer and the tracked list of gradient norms used for gradient clipping.
'''
print("Resetting optimizer", end="...")
if TORCH_SSO_FOUND:
opt_list = [KFACOptimizer, torchsso.optim.SecondOrderOptimizer]
else:
opt_list = [KFACOptimizer]
if self.optimizer_callable in opt_list:
self.optimizer = self.optimizer_callable(self.wavefunction.model, **self.optimizer_args)
else:
if type(self.optimizer_args) is not dict:
args = []
for idx, args_i in enumerate(self.optimizer_args):
args_i['params'] = self.wavefunction.parameters(idx)
args.append(args_i)
self.optimizer = self.optimizer_callable(args)
else:
# self.optimizer = self.optimizer_callable(self.wavefunction.parameters(), **self.optimizer_args)
print(f"subnetwork {cond_idx}", end="...")
self.optimizer = self.optimizer_callable(self.wavefunction.conditional_parameters(cond_idx), **self.optimizer_args)
print("done.")
if self.scheduler_callable is not None:
print("Resetting scheduler", end="...")
self.scheduler = self.scheduler_callable(self.optimizer, **self.scheduler_args)
print("done.")
else:
self.scheduler = None
self.__grad_norms = [deque([], self.grad_clip_memory_length) for _ in range(len(self.optimizer.param_groups))]
@torch.no_grad()
def calculate_energy(self, normalise_psi=None):
'''Calculate the 'true' energy of the current wavefunction using the entire physically valid
Hilbert space.
Note that this might be very slow/intractable for large systems.
normalise_psi : Whether the distribution over the physically valid Hilbert space provided
            by the wavefunction should be renormalised (it may have total probability < 1).
'''
states, states_idx = self.hilbert.get_subspace(ret_states=True,
ret_idxs=True,
use_restricted_idxs=False,
**self.subspace_args)
self.pauli_hamiltonian.update_H(states_idx, check_unseen=True, assume_unique=True)
# Here, we are computing all physically valid couplings, so we might as well
# freeze the Hamiltonian once its been updated, as no new couplings we add
# will be relevant.
self.pauli_hamiltonian.freeze_H()
psi = self.wavefunction.psi(states, ret_complex=True)
        if normalise_psi is None:
            # fall back to the instance-level setting when not specified explicitly
            normalise_psi = self.normalise_psi
        if normalise_psi:
psi /= np.sum(np.abs(psi) ** 2) ** 0.5
energy = psi.conj().dot( sparse_dense_mv(self.pauli_hamiltonian.get_restricted_H(), psi) )
return energy.real
@torch.no_grad()
def calculate_local_energy(self, states_idx, psi=None, set_unsampled_states_to_zero=True, ret_complex=False):
'''Calculate the local energy for each state.
The local energy is given by:
            E_loc(|s>) = ( 1/Psi(|s>) ) * sum_{s'} Psi(|s'>) <s|H|s'>
        (or E_loc(|s>) = ( 1/Psi*(|s>) ) * sum_{s'} Psi*(|s'>) <s'|H|s> for the complex conjugate.)
We have a choice to make: namely if we pass a subset of states {|s>}, which couple
to a larger subset of states {|s'>}, do we compute psi for all |s'> even if they are
        not in the original sample, or do we treat un-sampled amplitudes as zero?
states_idx : The states for which we want to calculate the local energies.
psi : The complex amplitudes of states_idx (if None these will be computed on-demand).
set_unsampled_states_to_zero : Whether to assume all states not in states_idx have zero
            amplitude. If False, un-sampled but still coupled
states will be computed.
        ret_complex : Return as a complex numpy array (True) or a two-channel complex torch tensor (False, the default).
'''
if psi is None:
psi = self.wavefunction.psi(self.hilbert.idx2state(states_idx, use_restricted_idxs=False), ret_complex=True)
else:
psi = psi.detach()
if cplx.is_complex(psi):
psi = cplx.torch_to_numpy(psi)
self.pauli_hamiltonian.update_H(states_idx, check_unseen=True, assume_unique=True)
if set_unsampled_states_to_zero:
local_energy = (sparse_dense_mv(self.pauli_hamiltonian.get_H(states_idx), psi) / psi).conj()
else:
raise NotImplementedError()
# Note we are not using cython functions here as they are not optimised yer.
# coupled_state_idxs = self.pauli_hamiltonian.get_coupled_state_idxs(states_idx, ret_unqiue=True)
# coupled_state_idxs = np.sort(coupled_state_idxs)
# coupled_psi = self.wavefunction.psi(self.hilbert.idx2state(coupled_state_idxs), ret_complex=True)
#
# coupled_psi_sparse = to_sparse_vector(coupled_psi, coupled_state_idxs, H.shape[0])
# local_energy = np.squeeze(H.dot(coupled_psi_sparse)[states_idx].toarray()) / psi
if not ret_complex:
local_energy = cplx.np_to_torch(local_energy)
return local_energy
@abstractmethod
def solve_H(self, states_idx=None):
raise NotImplementedError()
@abstractmethod
def pre_train(self):
raise NotImplementedError()
def _SGD_step(self, states, states_idx,
log_psi=None, sample_weights=None, log_psi_eval=None, regularisation_loss=None,
n_samps=None, e_loc_clip_factor=None):
        '''Take a step of gradient descent for the sampled states.
(0. Downstream sparse calculations are faster if states/states_idxs are sorted by ascending
state_idx, so sort first if needed.)
1. Compute log amplitudes of states (with network gradients): log_Psi(|s>).
        2. Compute the local energies for each state (no network gradients required): E_loc(|s>).
3. Compute the expectation << O >> = 2*<< log_Psi(|s>) E_loc(|s>) >>_{|s>~|Psi(|s>)|^2}.
4. Backpropagate << O >> --> << (dlog_Psi(|s>) / dtheta) E_loc(|s>) >>, i.e. the variational gradients.
5. Take step of gradient descent.
        6. From the local energies of each state, calculate the overall energy estimate and the variance of the local energies.
states : The sampled states.
states_idx : The sampled state_idxs.
log_psi : The log amplitudes of the states.
'''
# if not assume_sorted:
# sort_args = np.argsort(states_idx)
# states = states[sort_args]
# states_idx = states_idx[sort_args]
# log_psi = log_psi[sort_args]
if self.verbose:
print("Entering _SGD_step(...)")
t = time.time()
self.sampled_idxs.update(self.hilbert.to_idx_array(states_idx).squeeze())
# 1. Compute log amplitudes of states (with network gradients): log_Psi(|s>).
if log_psi is None:
log_psi = self.wavefunction.log_psi(states)
if self.verbose:
print(f"log_psi : {time.time()-t:.4f}s")
t = time.time()
        # 2. Compute the local energies for each state (no network gradients required): E_loc(|s>).
e_loc = self.calculate_local_energy(states_idx.squeeze(), psi=cplx.exp(log_psi.detach()))
if self.verbose:
print(f"e_loc : {time.time()-t:.4f}s")
t = time.time()
# 3. Compute the expectation << O >> = 2*<< log_Psi(|s>) E_loc(|s>) >>_{|s>~|Psi(|s>)|^2}.
if sample_weights is None:
if self.reweight_samples_by_psi:
sample_weights = log_psi.detach()[..., 0].exp().pow(2)
else:
raise NotImplementedError("Re-weighting by the number of samples is not yet implemented.")
if self.normalise_psi:
sample_weights /= sample_weights.sum()
if sample_weights.dim() < 2:
sample_weights = sample_weights.unsqueeze(-1)
e_loc_corr = e_loc - (sample_weights * e_loc).sum(axis=0).detach()
exp_op = 2 * cplx.real(sample_weights * cplx.scalar_mult(log_psi, e_loc_corr)).sum(axis=0)
# exp_op -= 2 * cplx.real(cplx.scalar_mult(
# (sample_weights * e_loc_corr).sum(axis=0),
# (sample_weights * log_psi).sum(axis=0)))
if self.verbose:
print(f"<<grad>> : {time.time()-t:.4f}s")
t = time.time()
# 4. Backpropagate << O >> --> << (dlog_Psi(|s>) / dtheta) E_loc(|s>) >>, i.e. the variational gradients.
self.optimizer.zero_grad()
if self.normalize_grads:
exp_op = exp_op / (exp_op.detach()).abs()
if regularisation_loss is not None:
# print(exp_op)
# print(regularisation_loss)
exp_op = exp_op + regularisation_loss
exp_op.backward()
del exp_op # <-- served it's purpose, so free up the memory.
self._clip_grads()
if self.verbose:
print(f"backprop: {time.time()-t:.4f}s")
t = time.time()
# 5. Take step of gradient descent.
self.optimizer.step()
self.optimizer.zero_grad()
if self.scheduler is not None:
self.scheduler.step()
if self.verbose:
print(f"step : {time.time()-t:.4f}s")
t = time.time()
if self.verbose:
print(f"<<E>>, var(<<E>>) : {time.time() - t:.4f}s")
with torch.no_grad():
            # 6. From the local energies of each state, calculate the overall energy estimate and the variance of the local energies.
if log_psi_eval is not None:
e_loc = self.calculate_local_energy(states_idx.squeeze(), psi=cplx.exp(log_psi_eval.detach()))
sample_weights /= sample_weights.sum()
local_energy = cplx.real(sample_weights * e_loc).sum()
local_energy_variance = ((cplx.real(e_loc) - local_energy).pow(2) * sample_weights.squeeze()).sum()
return local_energy.item(), local_energy_variance.item()
@abstractmethod
def run(self):
raise NotImplementedError()
def _clip_grads(self):
if self.grad_clip_factor is not None:
for grad_norms, group in zip(self.__grad_norms, self.optimizer.param_groups):
max_norm = self.grad_clip_factor * np.mean(grad_norms).item() if len(grad_norms) > 0 else 1e3
# Will be fixed to work with grads on different devices in 1.5.1:
# norm = torch.nn.utils.clip_grad_norm_(group['params'], max_norm, norm_type=2)
# Until then, use my custom clipper.
norm = torch_utils.clip_grad_norm_(group['params'], max_norm, norm_type=2)
try:
norm = norm.item()
except:
pass
grad_norms.append(min(max_norm, norm))
def __format_checkpoint_fname(self, fname):
'''Formats a checkpoint file location as an absolute path with a '.pth' file extension.
If a relative path is passed, this returns the absolute path relative to self.save_loc.
'''
if os.path.splitext(fname)[-1] != '.pth':
fname += '.pth'
if not os.path.isabs(fname):
fname = os.path.join(self.save_loc, fname)
return fname
def save(self, fname="energy_optimizer", quiet=False):
'''Save the current optimizer and all information required to load and restart optimisation.
The energy optimisation needs to save the following attributes:
- The network optimizer.
- The log.
- The number of steps taken / number of epochs / total running time.
Additionally, the wavefunction itself must be saved, but this is handled internally in the
object.
'''
fname = self.__format_checkpoint_fname(fname)
if not quiet:
print(f"Saving checkpoint {fname}.", end="...")
dir = os.path.dirname(fname)
if dir != '':
mk_dir(dir, quiet)
wavefunction_fname = os.path.splitext(fname)[0] + '_naqs'
wavefunction_fname = self.wavefunction.save(wavefunction_fname, quiet)
checkpoint = {
'optimizer:state_dict': self.optimizer.state_dict(),
'run_time': self.run_time,
'n_steps': self.n_steps,
'n_epochs': self.n_epochs,
'log': self.log,
'sampled_idxs': self.sampled_idxs,
'wavefunction:fname':wavefunction_fname,
'hamiltonian_fname':self.pauli_hamiltonian_fname
}
torch.save(checkpoint, fname)
if not quiet:
print("done.")
if self.overwrite_pauli_hamiltonian:
self.pauli_hamiltonian.save(self.pauli_hamiltonian_fname)
def load(self, fname="energy_optimizer", quiet=False):
'''Load a saved optimizer checkpoint.
The energy optimisation needs to load the following attributes:
- The network optimizer.
- The log.
- The number of steps taken / number of epochs / total running time.
Additionally, the wavefunction itself must be loaded, but this is handled internally in the
object. Here, we will try to load the wavefunction from the specified file path, but will
not raise an exception if it is not found. Instead we raise a warning and assume the user
will locate and load the wavefunction manually.
'''
fname = self.__format_checkpoint_fname(fname)
if not quiet:
print("Loading checkpoint {}.".format(fname), end="...")
try:
checkpoint = torch.load(fname, map_location=self.device)
except:
checkpoint = torch.jit.load(fname, map_location=self.device)
try:
self.wavefunction.load(checkpoint['wavefunction:fname'])
except:
print(f"\twavefunction not found (expected at {checkpoint['wavefunction:fname']})")
try:
self.optimizer.load_state_dict(checkpoint['optimizer:state_dict'])
except:
print("\tOptimizer could not be loaded.")
self.log = checkpoint['log']
self.n_steps = checkpoint['n_steps']
self.n_epochs = checkpoint['n_epochs']
self.run_time = checkpoint['run_time']
self.sampled_idxs = checkpoint['sampled_idxs']
if not quiet:
print("done.")
def save_psi(self, fname, subspace_args={}, quiet=False):
'''Save the wavefunction amplitudes to file.'''
fname = os.path.join(self.save_loc, fname)
if not quiet:
print("Saving psi to {}.".format(fname), end="...")
dir = os.path.dirname(fname)
if dir != '':
mk_dir(dir, quiet)
if subspace_args == {}:
subspace_args = {
"N_up": self.n_electrons,
"N_alpha": self.n_alpha_electrons,
"N_beta": self.n_beta_electrons,
"N_occ": self.n_fixed_electrons}
self.wavefunction.save_psi(fname, subspace_args)
print("done.")
def save_log(self, fname="log", quiet=False):
'''Save the optimizer's log to file.
'''
fname = os.path.join(self.save_loc, fname)
dir = os.path.dirname(fname)
if dir != '':
mk_dir(dir, quiet)
path, ext = os.path.splitext(fname)
if ext != ".pkl":
fname = path + ".pkl"
ITERS = "Iteration"
df = None
for key, value in self.log.items():
df_key = pd.DataFrame(value, columns=[ITERS, key])
if df is not None:
df = | pd.merge(df, df_key, how="outer", on=ITERS) | pandas.merge |
# -*- coding: utf-8 -*-
import os
import click
import logging
import pandas as pd
import glob
from dotenv import find_dotenv, load_dotenv
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
def main(input_filepath, output_filepath):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
logger.info('Combining data set from raw data')
files = glob.glob(os.path.join(input_filepath, '**'))
parking_violations = [f for f in files if 'parking_violations' in f]
### Combine all csvs into one data frame
logger.info('starting merge')
list_ = []
for file_ in parking_violations[:]:
df = pd.read_csv(file_,index_col=None, header=0)
filename = file_[len(input_filepath):]
df['filename'] = filename
list_.append(df)
frame = | pd.concat(list_) | pandas.concat |
"""
Plotting of behavioral metrics during the full task (biased blocks) per lab
<NAME>
6 May 2020
"""
import seaborn as sns
import numpy as np
from os.path import join
import matplotlib.pyplot as plt
from scipy import stats
import scikit_posthocs as sp
from paper_behavior_functions import (figpath, seaborn_style, group_colors, institution_map,
load_csv, FIGURE_WIDTH, FIGURE_HEIGHT, QUERY, fit_psychfunc,
dj2pandas)
import pandas as pd
from statsmodels.stats.multitest import multipletests
# Initialize
seaborn_style()
figpath = figpath()
pal = group_colors()
institution_map, col_names = institution_map()
col_names = col_names[:-1]
# %% Process data
if QUERY is True:
# query sessions
from paper_behavior_functions import query_sessions_around_criterion
from ibl_pipeline import reference, subject, behavior
use_sessions, _ = query_sessions_around_criterion(criterion='biased',
days_from_criterion=[-1, 3])
use_sessions = use_sessions & 'task_protocol LIKE "%biased%"' # only get biased sessions
b = (use_sessions * subject.Subject * subject.SubjectLab * reference.Lab
* behavior.TrialSet.Trial)
b2 = b.proj('institution_short', 'subject_nickname', 'task_protocol', 'session_uuid',
'trial_stim_contrast_left', 'trial_stim_contrast_right', 'trial_response_choice',
'task_protocol', 'trial_stim_prob_left', 'trial_feedback_type',
'trial_response_time', 'trial_stim_on_time')
bdat = b2.fetch(order_by='institution_short, subject_nickname, session_start_time, trial_id',
format='frame').reset_index()
behav = dj2pandas(bdat)
behav['institution_code'] = behav.institution_short.map(institution_map)
else:
behav = load_csv('Fig4.csv')
biased_fits = pd.DataFrame()
for i, nickname in enumerate(behav['subject_nickname'].unique()):
if np.mod(i+1, 10) == 0:
print('Processing data of subject %d of %d' % (i+1,
len(behav['subject_nickname'].unique())))
# Get lab and subject uuid
lab = behav.loc[behav['subject_nickname'] == nickname, 'institution_code'].unique()[0]
uuid = behav.loc[behav['subject_nickname'] == nickname, 'subject_uuid'].unique()[0]
# Fit psychometric curve
left_fit = fit_psychfunc(behav[(behav['subject_nickname'] == nickname)
& (behav['probabilityLeft'] == 80)])
right_fit = fit_psychfunc(behav[(behav['subject_nickname'] == nickname)
& (behav['probabilityLeft'] == 20)])
neutral_fit = fit_psychfunc(behav[(behav['subject_nickname'] == nickname)
& (behav['probabilityLeft'] == 50)])
perf_easy = (behav.loc[behav['subject_nickname'] == nickname, 'correct_easy'].mean()) * 100
fits = pd.DataFrame(data={'perf_easy': perf_easy,
'threshold_l': left_fit['threshold'],
'threshold_r': right_fit['threshold'],
'threshold_n': neutral_fit['threshold'],
'bias_l': left_fit['bias'],
'bias_r': right_fit['bias'],
'bias_n': neutral_fit['bias'],
'nickname': nickname, 'lab': lab, 'subject_uuid': uuid})
biased_fits = biased_fits.append(fits, sort=False)
# %% Statistics
stats_tests = | pd.DataFrame(columns=['variable', 'test_type', 'p_value']) | pandas.DataFrame |
from itertools import product
import numpy as np
from numpy.linalg import lstsq
from numpy.testing import assert_allclose
import pandas as pd
import pytest
from linearmodels.panel.data import PanelData
from linearmodels.panel.model import FamaMacBeth
from linearmodels.shared.exceptions import (
InferenceUnavailableWarning,
MissingValueWarning,
)
from linearmodels.tests.panel._utility import (
access_attributes,
assert_frame_similar,
datatypes,
generate_data,
)
pytestmark = pytest.mark.filterwarnings(
"ignore::linearmodels.shared.exceptions.MissingValueWarning"
)
missing = [0.0, 0.20]
has_const = [True, False]
perms = list(product(missing, datatypes, has_const))
ids = ["-".join(str(param) for param in perm) for perm in perms]
@pytest.fixture(params=perms, ids=ids)
def data(request):
missing, datatype, const = request.param
return generate_data(
missing, datatype, const=const, other_effects=1, ntk=(25, 200, 5)
)
def test_fama_macbeth(data):
res = FamaMacBeth(data.y, data.x).fit(debiased=True)
y = PanelData(data.y)
x = PanelData(data.x)
missing = y.isnull | x.isnull
y.drop(missing)
x.drop(missing)
y = y.dataframe
x = x.dataframe
times = y.index.levels[1]
params = []
for t in times:
_y = y.xs(t, level=1)
_x = x.xs(t, level=1)
if _x.shape[0] < _x.shape[1]:
continue
_x = _x.loc[_y.index]
params.append(lstsq(_x.values, _y.values, rcond=None)[0])
params = np.array(params).squeeze()
all_params = params
params = params.mean(0)
assert_allclose(params.squeeze(), res.params)
assert_allclose(all_params, res.all_params.dropna(how="all"))
e_params = all_params - params[None, :]
ntime = e_params.shape[0]
cov = e_params.T @ e_params / ntime / (ntime - 1)
assert_allclose(cov, np.asarray(res.cov))
access_attributes(res)
def test_unknown_cov_type(data):
with pytest.raises(ValueError):
FamaMacBeth(data.y, data.x).fit(cov_type="unknown")
@pytest.mark.smoke
def test_fama_macbeth_kernel_smoke(data):
FamaMacBeth(data.y, data.x).fit(cov_type="kernel")
FamaMacBeth(data.y, data.x).fit(cov_type="kernel", kernel="bartlett")
FamaMacBeth(data.y, data.x).fit(cov_type="kernel", kernel="newey-west")
FamaMacBeth(data.y, data.x).fit(cov_type="kernel", kernel="parzen")
FamaMacBeth(data.y, data.x).fit(cov_type="kernel", kernel="qs")
FamaMacBeth(data.y, data.x).fit(cov_type="kernel", bandwidth=3)
res = FamaMacBeth(data.y, data.x).fit(cov_type="kernel", kernel="andrews")
access_attributes(res)
def test_fitted_effects_residuals(data):
mod = FamaMacBeth(data.y, data.x)
res = mod.fit()
expected = mod.exog.values2d @ res.params.values
expected = pd.DataFrame(expected, index=mod.exog.index, columns=["fitted_values"])
assert_allclose(res.fitted_values, expected)
assert_frame_similar(res.fitted_values, expected)
expected.iloc[:, 0] = mod.dependent.values2d - expected.values
expected.columns = ["idiosyncratic"]
assert_allclose(res.idiosyncratic, expected)
assert_frame_similar(res.idiosyncratic, expected)
expected.iloc[:, 0] = np.nan
expected.columns = ["estimated_effects"]
assert_allclose(res.estimated_effects, expected)
assert_frame_similar(res.estimated_effects, expected)
@pytest.mark.filterwarnings(
"always::linearmodels.shared.exceptions.MissingValueWarning"
)
def test_block_size_warnings():
y = np.arange(12.0)[:, None]
x = np.ones((12, 3))
x[:, 1] = np.arange(12.0)
x[:, 2] = np.arange(12.0) ** 2
idx = pd.MultiIndex.from_product(
[["a", "b", "c"], pd.date_range("2000-1-1", periods=4)]
)
y = pd.DataFrame(y, index=idx, columns=["y"])
x = pd.DataFrame(x, index=idx, columns=["x1", "x2", "x3"])
with pytest.warns(MissingValueWarning):
FamaMacBeth(y.iloc[:11], x.iloc[:11])
with pytest.warns(InferenceUnavailableWarning):
FamaMacBeth(y.iloc[::4], x.iloc[::4])
def test_block_size_error():
y = np.arange(12.0)[:, None]
x = np.ones((12, 2))
x[1::4, 1] = 2
x[2::4, 1] = 3
idx = pd.MultiIndex.from_product(
[["a", "b", "c"], pd.date_range("2000-1-1", periods=4)]
)
y = | pd.DataFrame(y, index=idx, columns=["y"]) | pandas.DataFrame |
from datetime import datetime
from decimal import Decimal
from io import StringIO
import numpy as np
import pytest
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, read_csv
import pandas._testing as tm
from pandas.core.base import SpecificationError
import pandas.core.common as com
def test_repr():
# GH18203
result = repr(pd.Grouper(key="A", level="B"))
expected = "Grouper(key='A', level='B', axis=0, sort=False)"
assert result == expected
@pytest.mark.parametrize("dtype", ["int64", "int32", "float64", "float32"])
def test_basic(dtype):
data = Series(np.arange(9) // 3, index=np.arange(9), dtype=dtype)
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
for k, v in grouped:
assert len(v) == 3
agged = grouped.aggregate(np.mean)
assert agged[1] == 1
tm.assert_series_equal(agged, grouped.agg(np.mean)) # shorthand
tm.assert_series_equal(agged, grouped.mean())
tm.assert_series_equal(grouped.agg(np.sum), grouped.sum())
expected = grouped.apply(lambda x: x * x.sum())
transformed = grouped.transform(lambda x: x * x.sum())
assert transformed[7] == 12
tm.assert_series_equal(transformed, expected)
value_grouped = data.groupby(data)
tm.assert_series_equal(
value_grouped.aggregate(np.mean), agged, check_index_type=False
)
# complex agg
agged = grouped.aggregate([np.mean, np.std])
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped.aggregate({"one": np.mean, "two": np.std})
group_constants = {0: 10, 1: 20, 2: 30}
agged = grouped.agg(lambda x: group_constants[x.name] + x.mean())
assert agged[1] == 21
# corner cases
msg = "Must produce aggregated value"
# exception raised is type Exception
with pytest.raises(Exception, match=msg):
grouped.aggregate(lambda x: x * 2)
def test_groupby_nonobject_dtype(mframe, df_mixed_floats):
key = mframe.index.codes[0]
grouped = mframe.groupby(key)
result = grouped.sum()
expected = mframe.groupby(key.astype("O")).sum()
tm.assert_frame_equal(result, expected)
# GH 3911, mixed frame non-conversion
df = df_mixed_floats.copy()
df["value"] = range(len(df))
def max_value(group):
return group.loc[group["value"].idxmax()]
applied = df.groupby("A").apply(max_value)
result = applied.dtypes
expected = Series(
[np.dtype("object")] * 2 + [np.dtype("float64")] * 2 + [np.dtype("int64")],
index=["A", "B", "C", "D", "value"],
)
tm.assert_series_equal(result, expected)
def test_groupby_return_type():
# GH2893, return a reduced type
df1 = DataFrame(
[
{"val1": 1, "val2": 20},
{"val1": 1, "val2": 19},
{"val1": 2, "val2": 27},
{"val1": 2, "val2": 12},
]
)
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
with tm.assert_produces_warning(FutureWarning):
result = df1.groupby("val1", squeeze=True).apply(func)
assert isinstance(result, Series)
df2 = DataFrame(
[
{"val1": 1, "val2": 20},
{"val1": 1, "val2": 19},
{"val1": 1, "val2": 27},
{"val1": 1, "val2": 12},
]
)
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
with tm.assert_produces_warning(FutureWarning):
result = df2.groupby("val1", squeeze=True).apply(func)
assert isinstance(result, Series)
# GH3596, return a consistent type (regression in 0.11 from 0.10.1)
df = DataFrame([[1, 1], [1, 1]], columns=["X", "Y"])
with tm.assert_produces_warning(FutureWarning):
result = df.groupby("X", squeeze=False).count()
assert isinstance(result, DataFrame)
def test_inconsistent_return_type():
# GH5592
# inconsistent return type
df = DataFrame(
dict(
A=["Tiger", "Tiger", "Tiger", "Lamb", "Lamb", "Pony", "Pony"],
B=Series(np.arange(7), dtype="int64"),
C=date_range("20130101", periods=7),
)
)
def f(grp):
return grp.iloc[0]
expected = df.groupby("A").first()[["B"]]
result = df.groupby("A").apply(f)[["B"]]
tm.assert_frame_equal(result, expected)
def f(grp):
if grp.name == "Tiger":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["B"]]
e = expected.copy()
e.loc["Tiger"] = np.nan
tm.assert_frame_equal(result, e)
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["B"]]
e = expected.copy()
e.loc["Pony"] = np.nan
tm.assert_frame_equal(result, e)
# 5592 revisited, with datetimes
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["C"]]
e = df.groupby("A").first()[["C"]]
e.loc["Pony"] = pd.NaT
tm.assert_frame_equal(result, e)
# scalar outputs
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0].loc["C"]
result = df.groupby("A").apply(f)
e = df.groupby("A").first()["C"].copy()
e.loc["Pony"] = np.nan
e.name = None
tm.assert_series_equal(result, e)
def test_pass_args_kwargs(ts, tsframe):
def f(x, q=None, axis=0):
return np.percentile(x, q, axis=axis)
g = lambda x: np.percentile(x, 80, axis=0)
# Series
ts_grouped = ts.groupby(lambda x: x.month)
agg_result = ts_grouped.agg(np.percentile, 80, axis=0)
apply_result = ts_grouped.apply(np.percentile, 80, axis=0)
trans_result = ts_grouped.transform(np.percentile, 80, axis=0)
agg_expected = ts_grouped.quantile(0.8)
trans_expected = ts_grouped.transform(g)
tm.assert_series_equal(apply_result, agg_expected)
tm.assert_series_equal(agg_result, agg_expected)
tm.assert_series_equal(trans_result, trans_expected)
agg_result = ts_grouped.agg(f, q=80)
apply_result = ts_grouped.apply(f, q=80)
trans_result = ts_grouped.transform(f, q=80)
tm.assert_series_equal(agg_result, agg_expected)
tm.assert_series_equal(apply_result, agg_expected)
tm.assert_series_equal(trans_result, trans_expected)
# DataFrame
df_grouped = tsframe.groupby(lambda x: x.month)
agg_result = df_grouped.agg(np.percentile, 80, axis=0)
apply_result = df_grouped.apply(DataFrame.quantile, 0.8)
expected = df_grouped.quantile(0.8)
tm.assert_frame_equal(apply_result, expected, check_names=False)
tm.assert_frame_equal(agg_result, expected)
agg_result = df_grouped.agg(f, q=80)
apply_result = df_grouped.apply(DataFrame.quantile, q=0.8)
tm.assert_frame_equal(agg_result, expected)
tm.assert_frame_equal(apply_result, expected, check_names=False)
def test_len():
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])
assert len(grouped) == len(df)
grouped = df.groupby([lambda x: x.year, lambda x: x.month])
expected = len({(x.year, x.month) for x in df.index})
assert len(grouped) == expected
# issue 11016
df = pd.DataFrame(dict(a=[np.nan] * 3, b=[1, 2, 3]))
assert len(df.groupby(("a"))) == 0
assert len(df.groupby(("b"))) == 3
assert len(df.groupby(["a", "b"])) == 3
def test_basic_regression():
# regression
result = Series([1.0 * x for x in list(range(1, 10)) * 10])
data = np.random.random(1100) * 10.0
groupings = Series(data)
grouped = result.groupby(groupings)
grouped.mean()
@pytest.mark.parametrize(
"dtype", ["float64", "float32", "int64", "int32", "int16", "int8"]
)
def test_with_na_groups(dtype):
index = Index(np.arange(10))
values = Series(np.ones(10), index, dtype=dtype)
labels = Series(
[np.nan, "foo", "bar", "bar", np.nan, np.nan, "bar", "bar", np.nan, "foo"],
index=index,
)
# this SHOULD be an int
grouped = values.groupby(labels)
agged = grouped.agg(len)
expected = Series([4, 2], index=["bar", "foo"])
tm.assert_series_equal(agged, expected, check_dtype=False)
# assert issubclass(agged.dtype.type, np.integer)
# explicitly return a float from my function
def f(x):
return float(len(x))
agged = grouped.agg(f)
expected = Series([4, 2], index=["bar", "foo"])
tm.assert_series_equal(agged, expected, check_dtype=False)
assert issubclass(agged.dtype.type, np.dtype(dtype).type)
def test_indices_concatenation_order():
# GH 2808
def f1(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
multiindex = MultiIndex(levels=[[]] * 2, codes=[[]] * 2, names=["b", "c"])
res = DataFrame(columns=["a"], index=multiindex)
return res
else:
y = y.set_index(["b", "c"])
return y
def f2(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
return DataFrame()
else:
y = y.set_index(["b", "c"])
return y
def f3(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
multiindex = MultiIndex(
levels=[[]] * 2, codes=[[]] * 2, names=["foo", "bar"]
)
res = DataFrame(columns=["a", "b"], index=multiindex)
return res
else:
return y
df = DataFrame({"a": [1, 2, 2, 2], "b": range(4), "c": range(5, 9)})
df2 = DataFrame({"a": [3, 2, 2, 2], "b": range(4), "c": range(5, 9)})
# correct result
result1 = df.groupby("a").apply(f1)
result2 = df2.groupby("a").apply(f1)
tm.assert_frame_equal(result1, result2)
# should fail (not the same number of levels)
msg = "Cannot concat indices that do not have the same number of levels"
with pytest.raises(AssertionError, match=msg):
df.groupby("a").apply(f2)
with pytest.raises(AssertionError, match=msg):
df2.groupby("a").apply(f2)
# should fail (incorrect shape)
with pytest.raises(AssertionError, match=msg):
df.groupby("a").apply(f3)
with pytest.raises(AssertionError, match=msg):
df2.groupby("a").apply(f3)
def test_attr_wrapper(ts):
grouped = ts.groupby(lambda x: x.weekday())
result = grouped.std()
expected = grouped.agg(lambda x: np.std(x, ddof=1))
tm.assert_series_equal(result, expected)
# this is pretty cool
result = grouped.describe()
expected = {name: gp.describe() for name, gp in grouped}
expected = DataFrame(expected).T
tm.assert_frame_equal(result, expected)
# get attribute
result = grouped.dtype
expected = grouped.agg(lambda x: x.dtype)
# make sure raises error
msg = "'SeriesGroupBy' object has no attribute 'foo'"
with pytest.raises(AttributeError, match=msg):
getattr(grouped, "foo")
def test_frame_groupby(tsframe):
grouped = tsframe.groupby(lambda x: x.weekday())
# aggregate
aggregated = grouped.aggregate(np.mean)
assert len(aggregated) == 5
assert len(aggregated.columns) == 4
# by string
tscopy = tsframe.copy()
tscopy["weekday"] = [x.weekday() for x in tscopy.index]
stragged = tscopy.groupby("weekday").aggregate(np.mean)
tm.assert_frame_equal(stragged, aggregated, check_names=False)
# transform
grouped = tsframe.head(30).groupby(lambda x: x.weekday())
transformed = grouped.transform(lambda x: x - x.mean())
assert len(transformed) == 30
assert len(transformed.columns) == 4
# transform propagate
transformed = grouped.transform(lambda x: x.mean())
for name, group in grouped:
mean = group.mean()
for idx in group.index:
tm.assert_series_equal(transformed.xs(idx), mean, check_names=False)
# iterate
for weekday, group in grouped:
assert group.index[0].weekday() == weekday
# groups / group_indices
groups = grouped.groups
indices = grouped.indices
for k, v in groups.items():
samething = tsframe.index.take(indices[k])
assert (samething == v).all()
def test_frame_groupby_columns(tsframe):
mapping = {"A": 0, "B": 0, "C": 1, "D": 1}
grouped = tsframe.groupby(mapping, axis=1)
# aggregate
aggregated = grouped.aggregate(np.mean)
assert len(aggregated) == len(tsframe)
assert len(aggregated.columns) == 2
# transform
tf = lambda x: x - x.mean()
groupedT = tsframe.T.groupby(mapping, axis=0)
tm.assert_frame_equal(groupedT.transform(tf).T, grouped.transform(tf))
# iterate
for k, v in grouped:
assert len(v.columns) == 2
def test_frame_set_name_single(df):
grouped = df.groupby("A")
result = grouped.mean()
assert result.index.name == "A"
result = df.groupby("A", as_index=False).mean()
assert result.index.name != "A"
result = grouped.agg(np.mean)
assert result.index.name == "A"
result = grouped.agg({"C": np.mean, "D": np.std})
assert result.index.name == "A"
result = grouped["C"].mean()
assert result.index.name == "A"
result = grouped["C"].agg(np.mean)
assert result.index.name == "A"
result = grouped["C"].agg([np.mean, np.std])
assert result.index.name == "A"
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped["C"].agg({"foo": np.mean, "bar": np.std})
def test_multi_func(df):
col1 = df["A"]
col2 = df["B"]
grouped = df.groupby([col1.get, col2.get])
agged = grouped.mean()
expected = df.groupby(["A", "B"]).mean()
# TODO groupby get drops names
tm.assert_frame_equal(
agged.loc[:, ["C", "D"]], expected.loc[:, ["C", "D"]], check_names=False
)
# some "groups" with no data
df = DataFrame(
{
"v1": np.random.randn(6),
"v2": np.random.randn(6),
"k1": np.array(["b", "b", "b", "a", "a", "a"]),
"k2": np.array(["1", "1", "1", "2", "2", "2"]),
},
index=["one", "two", "three", "four", "five", "six"],
)
# only verify that it works for now
grouped = df.groupby(["k1", "k2"])
grouped.agg(np.sum)
def test_multi_key_multiple_functions(df):
grouped = df.groupby(["A", "B"])["C"]
agged = grouped.agg([np.mean, np.std])
expected = DataFrame({"mean": grouped.agg(np.mean), "std": grouped.agg(np.std)})
tm.assert_frame_equal(agged, expected)
def test_frame_multi_key_function_list():
data = DataFrame(
{
"A": [
"foo",
"foo",
"foo",
"foo",
"bar",
"bar",
"bar",
"bar",
"foo",
"foo",
"foo",
],
"B": [
"one",
"one",
"one",
"two",
"one",
"one",
"one",
"two",
"two",
"two",
"one",
],
"C": [
"dull",
"dull",
"shiny",
"dull",
"dull",
"shiny",
"shiny",
"dull",
"shiny",
"shiny",
"shiny",
],
"D": np.random.randn(11),
"E": np.random.randn(11),
"F": np.random.randn(11),
}
)
grouped = data.groupby(["A", "B"])
funcs = [np.mean, np.std]
agged = grouped.agg(funcs)
expected = pd.concat(
[grouped["D"].agg(funcs), grouped["E"].agg(funcs), grouped["F"].agg(funcs)],
keys=["D", "E", "F"],
axis=1,
)
assert isinstance(agged.index, MultiIndex)
assert isinstance(expected.index, MultiIndex)
tm.assert_frame_equal(agged, expected)
@pytest.mark.parametrize("op", [lambda x: x.sum(), lambda x: x.mean()])
def test_groupby_multiple_columns(df, op):
data = df
grouped = data.groupby(["A", "B"])
result1 = op(grouped)
keys = []
values = []
for n1, gp1 in data.groupby("A"):
for n2, gp2 in gp1.groupby("B"):
keys.append((n1, n2))
values.append(op(gp2.loc[:, ["C", "D"]]))
mi = MultiIndex.from_tuples(keys, names=["A", "B"])
expected = pd.concat(values, axis=1).T
expected.index = mi
# a little bit crude
for col in ["C", "D"]:
result_col = op(grouped[col])
pivoted = result1[col]
exp = expected[col]
tm.assert_series_equal(result_col, exp)
tm.assert_series_equal(pivoted, exp)
# test single series works the same
result = data["C"].groupby([data["A"], data["B"]]).mean()
expected = data.groupby(["A", "B"]).mean()["C"]
tm.assert_series_equal(result, expected)
def test_as_index_select_column():
# GH 5764
df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"])
result = df.groupby("A", as_index=False)["B"].get_group(1)
expected = pd.Series([2, 4], name="B")
tm.assert_series_equal(result, expected)
result = df.groupby("A", as_index=False)["B"].apply(lambda x: x.cumsum())
expected = pd.Series(
[2, 6, 6], name="B", index=pd.MultiIndex.from_tuples([(0, 0), (0, 1), (1, 2)])
)
tm.assert_series_equal(result, expected)
def test_groupby_as_index_select_column_sum_empty_df():
# GH 35246
df = DataFrame(columns=["A", "B", "C"])
left = df.groupby(by="A", as_index=False)["B"].sum()
assert type(left) is DataFrame
assert left.to_dict() == {"A": {}, "B": {}}
def test_groupby_as_index_agg(df):
grouped = df.groupby("A", as_index=False)
# single-key
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
result2 = grouped.agg({"C": np.mean, "D": np.sum})
expected2 = grouped.mean()
expected2["D"] = grouped.sum()["D"]
tm.assert_frame_equal(result2, expected2)
grouped = df.groupby("A", as_index=True)
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped["C"].agg({"Q": np.sum})
# multi-key
grouped = df.groupby(["A", "B"], as_index=False)
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
result2 = grouped.agg({"C": np.mean, "D": np.sum})
expected2 = grouped.mean()
expected2["D"] = grouped.sum()["D"]
tm.assert_frame_equal(result2, expected2)
expected3 = grouped["C"].sum()
expected3 = DataFrame(expected3).rename(columns={"C": "Q"})
result3 = grouped["C"].agg({"Q": np.sum})
tm.assert_frame_equal(result3, expected3)
# GH7115 & GH8112 & GH8582
df = DataFrame(np.random.randint(0, 100, (50, 3)), columns=["jim", "joe", "jolie"])
ts = Series(np.random.randint(5, 10, 50), name="jim")
gr = df.groupby(ts)
gr.nth(0) # invokes set_selection_from_grouper internally
tm.assert_frame_equal(gr.apply(sum), df.groupby(ts).apply(sum))
for attr in ["mean", "max", "count", "idxmax", "cumsum", "all"]:
gr = df.groupby(ts, as_index=False)
left = getattr(gr, attr)()
gr = df.groupby(ts.values, as_index=True)
right = getattr(gr, attr)().reset_index(drop=True)
tm.assert_frame_equal(left, right)
def test_ops_not_as_index(reduction_func):
# GH 10355, 21090
# Using as_index=False should not modify grouped column
if reduction_func in ("corrwith",):
pytest.skip("Test not applicable")
if reduction_func in ("nth", "ngroup",):
pytest.skip("Skip until behavior is determined (GH #5755)")
df = DataFrame(np.random.randint(0, 5, size=(100, 2)), columns=["a", "b"])
expected = getattr(df.groupby("a"), reduction_func)()
if reduction_func == "size":
expected = expected.rename("size")
expected = expected.reset_index()
g = df.groupby("a", as_index=False)
result = getattr(g, reduction_func)()
tm.assert_frame_equal(result, expected)
result = g.agg(reduction_func)
tm.assert_frame_equal(result, expected)
result = getattr(g["b"], reduction_func)()
tm.assert_frame_equal(result, expected)
result = g["b"].agg(reduction_func)
tm.assert_frame_equal(result, expected)
def test_as_index_series_return_frame(df):
grouped = df.groupby("A", as_index=False)
grouped2 = df.groupby(["A", "B"], as_index=False)
result = grouped["C"].agg(np.sum)
expected = grouped.agg(np.sum).loc[:, ["A", "C"]]
assert isinstance(result, DataFrame)
tm.assert_frame_equal(result, expected)
result2 = grouped2["C"].agg(np.sum)
expected2 = grouped2.agg(np.sum).loc[:, ["A", "B", "C"]]
assert isinstance(result2, DataFrame)
tm.assert_frame_equal(result2, expected2)
result = grouped["C"].sum()
expected = grouped.sum().loc[:, ["A", "C"]]
assert isinstance(result, DataFrame)
tm.assert_frame_equal(result, expected)
result2 = grouped2["C"].sum()
expected2 = grouped2.sum().loc[:, ["A", "B", "C"]]
assert isinstance(result2, DataFrame)
tm.assert_frame_equal(result2, expected2)
def test_as_index_series_column_slice_raises(df):
# GH15072
grouped = df.groupby("A", as_index=False)
msg = r"Column\(s\) C already selected"
with pytest.raises(IndexError, match=msg):
grouped["C"].__getitem__("D")
def test_groupby_as_index_cython(df):
data = df
# single-key
grouped = data.groupby("A", as_index=False)
result = grouped.mean()
expected = data.groupby(["A"]).mean()
expected.insert(0, "A", expected.index)
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
# multi-key
grouped = data.groupby(["A", "B"], as_index=False)
result = grouped.mean()
expected = data.groupby(["A", "B"]).mean()
arrays = list(zip(*expected.index.values))
expected.insert(0, "A", arrays[0])
expected.insert(1, "B", arrays[1])
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
def test_groupby_as_index_series_scalar(df):
grouped = df.groupby(["A", "B"], as_index=False)
# GH #421
result = grouped["C"].agg(len)
expected = grouped.agg(len).loc[:, ["A", "B", "C"]]
tm.assert_frame_equal(result, expected)
def test_groupby_as_index_corner(df, ts):
msg = "as_index=False only valid with DataFrame"
with pytest.raises(TypeError, match=msg):
ts.groupby(lambda x: x.weekday(), as_index=False)
msg = "as_index=False only valid for axis=0"
with pytest.raises(ValueError, match=msg):
df.groupby(lambda x: x.lower(), as_index=False, axis=1)
def test_groupby_multiple_key(df):
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])
agged = grouped.sum()
tm.assert_almost_equal(df.values, agged.values)
grouped = df.T.groupby(
[lambda x: x.year, lambda x: x.month, lambda x: x.day], axis=1
)
agged = grouped.agg(lambda x: x.sum())
tm.assert_index_equal(agged.index, df.columns)
tm.assert_almost_equal(df.T.values, agged.values)
agged = grouped.agg(lambda x: x.sum())
tm.assert_almost_equal(df.T.values, agged.values)
def test_groupby_multi_corner(df):
# test that having an all-NA column doesn't mess you up
df = df.copy()
df["bad"] = np.nan
agged = df.groupby(["A", "B"]).mean()
expected = df.groupby(["A", "B"]).mean()
expected["bad"] = np.nan
tm.assert_frame_equal(agged, expected)
def test_omit_nuisance(df):
grouped = df.groupby("A")
result = grouped.mean()
expected = df.loc[:, ["A", "C", "D"]].groupby("A").mean()
tm.assert_frame_equal(result, expected)
agged = grouped.agg(np.mean)
exp = grouped.mean()
tm.assert_frame_equal(agged, exp)
df = df.loc[:, ["A", "C", "D"]]
df["E"] = datetime.now()
grouped = df.groupby("A")
result = grouped.agg(np.sum)
expected = grouped.sum()
tm.assert_frame_equal(result, expected)
# won't work with axis = 1
grouped = df.groupby({"A": 0, "C": 0, "D": 1, "E": 1}, axis=1)
msg = "reduction operation 'sum' not allowed for this dtype"
with pytest.raises(TypeError, match=msg):
grouped.agg(lambda x: x.sum(0, numeric_only=False))
def test_omit_nuisance_python_multiple(three_group):
grouped = three_group.groupby(["A", "B"])
agged = grouped.agg(np.mean)
exp = grouped.mean()
tm.assert_frame_equal(agged, exp)
def test_empty_groups_corner(mframe):
# handle empty groups
df = DataFrame(
{
"k1": np.array(["b", "b", "b", "a", "a", "a"]),
"k2": np.array(["1", "1", "1", "2", "2", "2"]),
"k3": ["foo", "bar"] * 3,
"v1": np.random.randn(6),
"v2": np.random.randn(6),
}
)
grouped = df.groupby(["k1", "k2"])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
grouped = mframe[3:5].groupby(level=0)
agged = grouped.apply(lambda x: x.mean())
agged_A = grouped["A"].apply(np.mean)
tm.assert_series_equal(agged["A"], agged_A)
assert agged.index.name == "first"
def test_nonsense_func():
df = DataFrame([0])
msg = r"unsupported operand type\(s\) for \+: 'int' and 'str'"
with pytest.raises(TypeError, match=msg):
df.groupby(lambda x: x + "foo")
def test_wrap_aggregated_output_multindex(mframe):
df = mframe.T
df["baz", "two"] = "peekaboo"
keys = [np.array([0, 0, 1]), np.array([0, 0, 1])]
agged = df.groupby(keys).agg(np.mean)
assert isinstance(agged.columns, MultiIndex)
def aggfun(ser):
if ser.name == ("foo", "one"):
raise TypeError
else:
return ser.sum()
agged2 = df.groupby(keys).aggregate(aggfun)
assert len(agged2.columns) + 1 == len(df.columns)
def test_groupby_level_apply(mframe):
result = mframe.groupby(level=0).count()
assert result.index.name == "first"
result = mframe.groupby(level=1).count()
assert result.index.name == "second"
result = mframe["A"].groupby(level=0).count()
assert result.index.name == "first"
def test_groupby_level_mapper(mframe):
deleveled = mframe.reset_index()
mapper0 = {"foo": 0, "bar": 0, "baz": 1, "qux": 1}
mapper1 = {"one": 0, "two": 0, "three": 1}
result0 = mframe.groupby(mapper0, level=0).sum()
result1 = mframe.groupby(mapper1, level=1).sum()
mapped_level0 = np.array([mapper0.get(x) for x in deleveled["first"]])
mapped_level1 = np.array([mapper1.get(x) for x in deleveled["second"]])
expected0 = mframe.groupby(mapped_level0).sum()
expected1 = mframe.groupby(mapped_level1).sum()
expected0.index.name, expected1.index.name = "first", "second"
tm.assert_frame_equal(result0, expected0)
tm.assert_frame_equal(result1, expected1)
def test_groupby_level_nonmulti():
# GH 1313, GH 13901
s = Series([1, 2, 3, 10, 4, 5, 20, 6], Index([1, 2, 3, 1, 4, 5, 2, 6], name="foo"))
expected = Series([11, 22, 3, 4, 5, 6], Index(range(1, 7), name="foo"))
result = s.groupby(level=0).sum()
tm.assert_series_equal(result, expected)
result = s.groupby(level=[0]).sum()
tm.assert_series_equal(result, expected)
result = s.groupby(level=-1).sum()
tm.assert_series_equal(result, expected)
result = s.groupby(level=[-1]).sum()
tm.assert_series_equal(result, expected)
msg = "level > 0 or level < -1 only valid with MultiIndex"
with pytest.raises(ValueError, match=msg):
s.groupby(level=1)
with pytest.raises(ValueError, match=msg):
s.groupby(level=-2)
msg = "No group keys passed!"
with pytest.raises(ValueError, match=msg):
s.groupby(level=[])
msg = "multiple levels only valid with MultiIndex"
with pytest.raises(ValueError, match=msg):
s.groupby(level=[0, 0])
with pytest.raises(ValueError, match=msg):
s.groupby(level=[0, 1])
msg = "level > 0 or level < -1 only valid with MultiIndex"
with pytest.raises(ValueError, match=msg):
s.groupby(level=[1])
def test_groupby_complex():
# GH 12902
a = Series(data=np.arange(4) * (1 + 2j), index=[0, 0, 1, 1])
expected = Series((1 + 2j, 5 + 10j))
result = a.groupby(level=0).sum()
tm.assert_series_equal(result, expected)
result = a.sum(level=0)
tm.assert_series_equal(result, expected)
def test_groupby_series_indexed_differently():
s1 = Series(
[5.0, -9.0, 4.0, 100.0, -5.0, 55.0, 6.7],
index=Index(["a", "b", "c", "d", "e", "f", "g"]),
)
s2 = Series(
[1.0, 1.0, 4.0, 5.0, 5.0, 7.0], index=Index(["a", "b", "d", "f", "g", "h"])
)
grouped = s1.groupby(s2)
agged = grouped.mean()
exp = s1.groupby(s2.reindex(s1.index).get).mean()
tm.assert_series_equal(agged, exp)
def test_groupby_with_hier_columns():
tuples = list(
zip(
*[
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
)
)
index = MultiIndex.from_tuples(tuples)
columns = MultiIndex.from_tuples(
[("A", "cat"), ("B", "dog"), ("B", "cat"), ("A", "dog")]
)
df = DataFrame(np.random.randn(8, 4), index=index, columns=columns)
result = df.groupby(level=0).mean()
tm.assert_index_equal(result.columns, columns)
result = df.groupby(level=0, axis=1).mean()
tm.assert_index_equal(result.index, df.index)
result = df.groupby(level=0).agg(np.mean)
tm.assert_index_equal(result.columns, columns)
result = df.groupby(level=0).apply(lambda x: x.mean())
tm.assert_index_equal(result.columns, columns)
result = df.groupby(level=0, axis=1).agg(lambda x: x.mean(1))
tm.assert_index_equal(result.columns, Index(["A", "B"]))
tm.assert_index_equal(result.index, df.index)
# add a nuisance column
sorted_columns, _ = columns.sortlevel(0)
df["A", "foo"] = "bar"
result = df.groupby(level=0).mean()
tm.assert_index_equal(result.columns, df.columns[:-1])
def test_grouping_ndarray(df):
grouped = df.groupby(df["A"].values)
result = grouped.sum()
expected = df.groupby("A").sum()
tm.assert_frame_equal(
result, expected, check_names=False
) # Note: no names when grouping by value
def test_groupby_wrong_multi_labels():
data = """index,foo,bar,baz,spam,data
0,foo1,bar1,baz1,spam2,20
1,foo1,bar2,baz1,spam3,30
2,foo2,bar2,baz1,spam2,40
3,foo1,bar1,baz2,spam1,50
4,foo3,bar1,baz2,spam1,60"""
data = read_csv(StringIO(data), index_col=0)
grouped = data.groupby(["foo", "bar", "baz", "spam"])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
def test_groupby_series_with_name(df):
result = df.groupby(df["A"]).mean()
result2 = df.groupby(df["A"], as_index=False).mean()
assert result.index.name == "A"
assert "A" in result2
result = df.groupby([df["A"], df["B"]]).mean()
result2 = df.groupby([df["A"], df["B"]], as_index=False).mean()
assert result.index.names == ("A", "B")
assert "A" in result2
assert "B" in result2
def test_seriesgroupby_name_attr(df):
# GH 6265
result = df.groupby("A")["C"]
assert result.count().name == "C"
assert result.mean().name == "C"
testFunc = lambda x: np.sum(x) * 2
assert result.agg(testFunc).name == "C"
def test_consistency_name():
# GH 12363
df = DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
"B": ["one", "one", "two", "two", "two", "two", "one", "two"],
"C": np.random.randn(8) + 1.0,
"D": np.arange(8),
}
)
expected = df.groupby(["A"]).B.count()
result = df.B.groupby(df.A).count()
tm.assert_series_equal(result, expected)
def test_groupby_name_propagation(df):
# GH 6124
def summarize(df, name=None):
return Series({"count": 1, "mean": 2, "omissions": 3}, name=name)
def summarize_random_name(df):
# Provide a different name for each Series. In this case, groupby
# should not attempt to propagate the Series name since they are
# inconsistent.
return Series({"count": 1, "mean": 2, "omissions": 3}, name=df.iloc[0]["A"])
metrics = df.groupby("A").apply(summarize)
assert metrics.columns.name is None
metrics = df.groupby("A").apply(summarize, "metrics")
assert metrics.columns.name == "metrics"
metrics = df.groupby("A").apply(summarize_random_name)
assert metrics.columns.name is None
def test_groupby_nonstring_columns():
df = DataFrame([np.arange(10) for x in range(10)])
grouped = df.groupby(0)
result = grouped.mean()
expected = df.groupby(df[0]).mean()
tm.assert_frame_equal(result, expected)
def test_groupby_mixed_type_columns():
# GH 13432, unorderable types in py3
df = DataFrame([[0, 1, 2]], columns=["A", "B", 0])
expected = DataFrame([[1, 2]], columns=["B", 0], index=Index([0], name="A"))
result = df.groupby("A").first()
tm.assert_frame_equal(result, expected)
result = df.groupby("A").sum()
tm.assert_frame_equal(result, expected)
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:Mean of:RuntimeWarning")
def test_cython_grouper_series_bug_noncontig():
arr = np.empty((100, 100))
arr.fill(np.nan)
obj = Series(arr[:, 0])
inds = np.tile(range(10), 10)
result = obj.groupby(inds).agg(Series.median)
assert result.isna().all()
def test_series_grouper_noncontig_index():
index = Index(tm.rands_array(10, 100))
values = Series(np.random.randn(50), index=index[::2])
labels = np.random.randint(0, 5, 50)
# it works!
grouped = values.groupby(labels)
# accessing the index elements causes segfault
f = lambda x: len(set(map(id, x.index)))
grouped.agg(f)
def test_convert_objects_leave_decimal_alone():
s = Series(range(5))
labels = np.array(["a", "b", "c", "d", "e"], dtype="O")
def convert_fast(x):
return Decimal(str(x.mean()))
def convert_force_pure(x):
# base will be length 0
assert len(x.values.base) > 0
return Decimal(str(x.mean()))
grouped = s.groupby(labels)
result = grouped.agg(convert_fast)
assert result.dtype == np.object_
assert isinstance(result[0], Decimal)
result = grouped.agg(convert_force_pure)
assert result.dtype == np.object_
assert isinstance(result[0], Decimal)
def test_groupby_dtype_inference_empty():
# GH 6733
df = DataFrame({"x": [], "range": np.arange(0, dtype="int64")})
assert df["x"].dtype == np.float64
result = df.groupby("x").first()
exp_index = Index([], name="x", dtype=np.float64)
expected = DataFrame({"range": Series([], index=exp_index, dtype="int64")})
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_groupby_list_infer_array_like(df):
result = df.groupby(list(df["A"])).mean()
expected = df.groupby(df["A"]).mean()
tm.assert_frame_equal(result, expected, check_names=False)
with pytest.raises(KeyError, match=r"^'foo'$"):
df.groupby(list(df["A"][:-1]))
# pathological case of ambiguity
df = DataFrame({"foo": [0, 1], "bar": [3, 4], "val": np.random.randn(2)})
result = df.groupby(["foo", "bar"]).mean()
expected = df.groupby([df["foo"], df["bar"]]).mean()[["val"]]
def test_groupby_keys_same_size_as_index():
# GH 11185
freq = "s"
index = pd.date_range(
start=pd.Timestamp("2015-09-29T11:34:44-0700"), periods=2, freq=freq
)
df = pd.DataFrame([["A", 10], ["B", 15]], columns=["metric", "values"], index=index)
result = df.groupby([pd.Grouper(level=0, freq=freq), "metric"]).mean()
expected = df.set_index([df.index, "metric"])
tm.assert_frame_equal(result, expected)
def test_groupby_one_row():
# GH 11741
msg = r"^'Z'$"
df1 = pd.DataFrame(np.random.randn(1, 4), columns=list("ABCD"))
with pytest.raises(KeyError, match=msg):
df1.groupby("Z")
df2 = pd.DataFrame(np.random.randn(2, 4), columns=list("ABCD"))
with pytest.raises(KeyError, match=msg):
df2.groupby("Z")
def test_groupby_nat_exclude():
# GH 6992
df = pd.DataFrame(
{
"values": np.random.randn(8),
"dt": [
np.nan,
pd.Timestamp("2013-01-01"),
np.nan,
pd.Timestamp("2013-02-01"),
np.nan,
pd.Timestamp("2013-02-01"),
np.nan,
pd.Timestamp("2013-01-01"),
],
"str": [np.nan, "a", np.nan, "a", np.nan, "a", np.nan, "b"],
}
)
grouped = df.groupby("dt")
expected = [pd.Index([1, 7]), pd.Index([3, 5])]
keys = sorted(grouped.groups.keys())
assert len(keys) == 2
for k, e in zip(keys, expected):
# grouped.groups keys are np.datetime64 with system tz
# not to be affected by tz, only compare values
tm.assert_index_equal(grouped.groups[k], e)
# confirm obj is not filtered
tm.assert_frame_equal(grouped.grouper.groupings[0].obj, df)
assert grouped.ngroups == 2
expected = {
Timestamp("2013-01-01 00:00:00"): np.array([1, 7], dtype=np.intp),
Timestamp("2013-02-01 00:00:00"): np.array([3, 5], dtype=np.intp),
}
for k in grouped.indices:
tm.assert_numpy_array_equal(grouped.indices[k], expected[k])
tm.assert_frame_equal(grouped.get_group(Timestamp("2013-01-01")), df.iloc[[1, 7]])
tm.assert_frame_equal(grouped.get_group(Timestamp("2013-02-01")), df.iloc[[3, 5]])
with pytest.raises(KeyError, match=r"^NaT$"):
grouped.get_group(pd.NaT)
nan_df = DataFrame(
{"nan": [np.nan, np.nan, np.nan], "nat": [pd.NaT, pd.NaT, pd.NaT]}
)
assert nan_df["nan"].dtype == "float64"
assert nan_df["nat"].dtype == "datetime64[ns]"
for key in ["nan", "nat"]:
grouped = nan_df.groupby(key)
assert grouped.groups == {}
assert grouped.ngroups == 0
assert grouped.indices == {}
with pytest.raises(KeyError, match=r"^nan$"):
grouped.get_group(np.nan)
with pytest.raises(KeyError, match=r"^NaT$"):
grouped.get_group(pd.NaT)
def test_groupby_2d_malformed():
d = DataFrame(index=range(2))
d["group"] = ["g1", "g2"]
d["zeros"] = [0, 0]
d["ones"] = [1, 1]
d["label"] = ["l1", "l2"]
tmp = d.groupby(["group"]).mean()
res_values = np.array([[0, 1], [0, 1]], dtype=np.int64)
tm.assert_index_equal(tmp.columns, Index(["zeros", "ones"]))
tm.assert_numpy_array_equal(tmp.values, res_values)
def test_int32_overflow():
B = np.concatenate((np.arange(10000), np.arange(10000), np.arange(5000)))
A = np.arange(25000)
df = DataFrame({"A": A, "B": B, "C": A, "D": B, "E": np.random.randn(25000)})
left = df.groupby(["A", "B", "C", "D"]).sum()
right = df.groupby(["D", "C", "B", "A"]).sum()
assert len(left) == len(right)
def test_groupby_sort_multi():
df = DataFrame(
{
"a": ["foo", "bar", "baz"],
"b": [3, 2, 1],
"c": [0, 1, 2],
"d": np.random.randn(3),
}
)
tups = [tuple(row) for row in df[["a", "b", "c"]].values]
tups = com.asarray_tuplesafe(tups)
result = df.groupby(["a", "b", "c"], sort=True).sum()
tm.assert_numpy_array_equal(result.index.values, tups[[1, 2, 0]])
tups = [tuple(row) for row in df[["c", "a", "b"]].values]
tups = com.asarray_tuplesafe(tups)
result = df.groupby(["c", "a", "b"], sort=True).sum()
tm.assert_numpy_array_equal(result.index.values, tups)
tups = [tuple(x) for x in df[["b", "c", "a"]].values]
tups = com.asarray_tuplesafe(tups)
result = df.groupby(["b", "c", "a"], sort=True).sum()
tm.assert_numpy_array_equal(result.index.values, tups[[2, 1, 0]])
df = DataFrame(
{"a": [0, 1, 2, 0, 1, 2], "b": [0, 0, 0, 1, 1, 1], "d": np.random.randn(6)}
)
grouped = df.groupby(["a", "b"])["d"]
result = grouped.sum()
def _check_groupby(df, result, keys, field, f=lambda x: x.sum()):
tups = [tuple(row) for row in df[keys].values]
tups = com.asarray_tuplesafe(tups)
expected = f(df.groupby(tups)[field])
for k, v in expected.items():
assert result[k] == v
_check_groupby(df, result, ["a", "b"], "d")
def test_dont_clobber_name_column():
df = DataFrame(
{"key": ["a", "a", "a", "b", "b", "b"], "name": ["foo", "bar", "baz"] * 2}
)
result = df.groupby("key").apply(lambda x: x)
tm.assert_frame_equal(result, df)
def test_skip_group_keys():
tsf = tm.makeTimeDataFrame()
grouped = tsf.groupby(lambda x: x.month, group_keys=False)
result = grouped.apply(lambda x: x.sort_values(by="A")[:3])
pieces = [group.sort_values(by="A")[:3] for key, group in grouped]
expected = pd.concat(pieces)
tm.assert_frame_equal(result, expected)
grouped = tsf["A"].groupby(lambda x: x.month, group_keys=False)
result = grouped.apply(lambda x: x.sort_values()[:3])
pieces = [group.sort_values()[:3] for key, group in grouped]
expected = pd.concat(pieces)
tm.assert_series_equal(result, expected)
def test_no_nonsense_name(float_frame):
# GH #995
s = float_frame["C"].copy()
s.name = None
result = s.groupby(float_frame["A"]).agg(np.sum)
assert result.name is None
def test_multifunc_sum_bug():
# GH #1065
x = DataFrame(np.arange(9).reshape(3, 3))
x["test"] = 0
x["fl"] = [1.3, 1.5, 1.6]
grouped = x.groupby("test")
result = grouped.agg({"fl": "sum", 2: "size"})
assert result["fl"].dtype == np.float64
def test_handle_dict_return_value(df):
def f(group):
return {"max": group.max(), "min": group.min()}
def g(group):
return Series({"max": group.max(), "min": group.min()})
result = df.groupby("A")["C"].apply(f)
expected = df.groupby("A")["C"].apply(g)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("grouper", ["A", ["A", "B"]])
def test_set_group_name(df, grouper):
def f(group):
assert group.name is not None
return group
def freduce(group):
assert group.name is not None
return group.sum()
def foo(x):
return freduce(x)
grouped = df.groupby(grouper)
# make sure all these work
grouped.apply(f)
grouped.aggregate(freduce)
grouped.aggregate({"C": freduce, "D": freduce})
grouped.transform(f)
grouped["C"].apply(f)
grouped["C"].aggregate(freduce)
grouped["C"].aggregate([freduce, foo])
grouped["C"].transform(f)
def test_group_name_available_in_inference_pass():
# gh-15062
df = pd.DataFrame({"a": [0, 0, 1, 1, 2, 2], "b": np.arange(6)})
names = []
def f(group):
names.append(group.name)
return group.copy()
df.groupby("a", sort=False, group_keys=False).apply(f)
expected_names = [0, 1, 2]
assert names == expected_names
def test_no_dummy_key_names(df):
# see gh-1291
result = df.groupby(df["A"].values).sum()
assert result.index.name is None
result = df.groupby([df["A"].values, df["B"].values]).sum()
assert result.index.names == (None, None)
def test_groupby_sort_multiindex_series():
# series multiindex groupby sort argument was not being passed through
# _compress_group_index
# GH 9444
index = MultiIndex(
levels=[[1, 2], [1, 2]],
codes=[[0, 0, 0, 0, 1, 1], [1, 1, 0, 0, 0, 0]],
names=["a", "b"],
)
mseries = Series([0, 1, 2, 3, 4, 5], index=index)
index = MultiIndex(
levels=[[1, 2], [1, 2]], codes=[[0, 0, 1], [1, 0, 0]], names=["a", "b"]
)
mseries_result = Series([0, 2, 4], index=index)
result = mseries.groupby(level=["a", "b"], sort=False).first()
tm.assert_series_equal(result, mseries_result)
result = mseries.groupby(level=["a", "b"], sort=True).first()
tm.assert_series_equal(result, mseries_result.sort_index())
def test_groupby_reindex_inside_function():
periods = 1000
ind = date_range(start="2012/1/1", freq="5min", periods=periods)
df = DataFrame({"high": np.arange(periods), "low": np.arange(periods)}, index=ind)
def agg_before(hour, func, fix=False):
"""
Run an aggregate func on the subset of data.
"""
def _func(data):
d = data.loc[data.index.map(lambda x: x.hour < 11)].dropna()
if fix:
data[data.index[0]]
if len(d) == 0:
return None
return func(d)
return _func
def afunc(data):
d = data.select(lambda x: x.hour < 11).dropna()
return np.max(d)
grouped = df.groupby(lambda x: datetime(x.year, x.month, x.day))
closure_bad = grouped.agg({"high": agg_before(11, np.max)})
closure_good = grouped.agg({"high": agg_before(11, np.max, True)})
tm.assert_frame_equal(closure_bad, closure_good)
def test_groupby_multiindex_missing_pair():
# GH9049
df = DataFrame(
{
"group1": ["a", "a", "a", "b"],
"group2": ["c", "c", "d", "c"],
"value": [1, 1, 1, 5],
}
)
df = df.set_index(["group1", "group2"])
df_grouped = df.groupby(level=["group1", "group2"], sort=True)
res = df_grouped.agg("sum")
idx = MultiIndex.from_tuples(
[("a", "c"), ("a", "d"), ("b", "c")], names=["group1", "group2"]
)
exp = DataFrame([[2], [1], [5]], index=idx, columns=["value"])
tm.assert_frame_equal(res, exp)
def test_groupby_multiindex_not_lexsorted():
# GH 11640
# define the lexsorted version
lexsorted_mi = MultiIndex.from_tuples(
[("a", ""), ("b1", "c1"), ("b2", "c2")], names=["b", "c"]
)
lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)
assert lexsorted_df.columns.is_lexsorted()
# define the non-lexsorted version
not_lexsorted_df = DataFrame(
columns=["a", "b", "c", "d"], data=[[1, "b1", "c1", 3], [1, "b2", "c2", 4]]
)
not_lexsorted_df = not_lexsorted_df.pivot_table(
index="a", columns=["b", "c"], values="d"
)
not_lexsorted_df = not_lexsorted_df.reset_index()
assert not not_lexsorted_df.columns.is_lexsorted()
# compare the results
tm.assert_frame_equal(lexsorted_df, not_lexsorted_df)
expected = lexsorted_df.groupby("a").mean()
with | tm.assert_produces_warning(PerformanceWarning) | pandas._testing.assert_produces_warning |
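# --- Illustrative sketch (not part of the pandas test file above) ---
# Several of the preceding tests contrast groupby(as_index=True) with as_index=False.
# The difference, in a minimal self-contained form:
import pandas as pd
demo = pd.DataFrame({"A": ["x", "x", "y"], "B": [1, 2, 3]})
with_index = demo.groupby("A").sum()            # "A" becomes the result's index
flat = demo.groupby("A", as_index=False).sum()  # "A" stays a regular column
assert list(flat.columns) == ["A", "B"]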
# @Time : 2020/6/28
# @Author : <NAME>
# @Email : <EMAIL>
# UPDATE:
# @Time : 2020/10/28 2020/10/13, 2020/11/10
# @Author : <NAME>, <NAME>, <NAME>
# @Email : <EMAIL>, <EMAIL>, <EMAIL>
"""
recbole.data.dataset
##########################
"""
import copy
import pickle
import os
from collections import Counter
from logging import getLogger
import numpy as np
import pandas as pd
import torch
import torch.nn.utils.rnn as rnn_utils
from scipy.sparse import coo_matrix
from recbole.data.interaction import Interaction
from recbole.data.utils import dlapi
from recbole.utils import FeatureSource, FeatureType, get_local_time
from recbole.utils.utils import set_color
class Dataset(object):
""":class:`Dataset` stores the original dataset in memory.
It provides many useful functions for data preprocessing, such as k-core data filtering and missing value
imputation. Features are stored as :class:`pandas.DataFrame` inside :class:`~recbole.data.dataset.dataset.Dataset`.
General and Context-aware Models can use this class.
By calling method :meth:`~recbole.data.dataset.dataset.Dataset.build()`, it will process the dataset into
DataLoaders, according to :class:`~recbole.config.eval_setting.EvalSetting`.
Args:
config (Config): Global configuration object.
Attributes:
dataset_name (str): Name of this dataset.
dataset_path (str): Local file path of this dataset.
field2type (dict): Dict mapping feature name (str) to its type (:class:`~recbole.utils.enum_type.FeatureType`).
field2source (dict): Dict mapping feature name (str) to its source
(:class:`~recbole.utils.enum_type.FeatureSource`).
In particular, if a feature is loaded from Arg ``additional_feat_suffix``, its source has type str,
which is the suffix of its local file (also the suffix written in Arg ``additional_feat_suffix``).
field2id_token (dict): Dict mapping feature name (str) to a :class:`np.ndarray`, which stores the original token
of this feature. For example, if ``test`` is a token-like feature, ``token_a`` is remapped to 1, ``token_b``
is remapped to 2. Then ``field2id_token['test'] = ['[PAD]', 'token_a', 'token_b']``. (Note that 0 is
always PADDING for token-like features.)
field2token_id (dict): Dict mapping feature name (str) to a dict, which stores the token remap table
of this feature. For example, if ``test`` is a token-like feature, ``token_a`` is remapped to 1, ``token_b``
is remapped to 2. Then ``field2token_id['test'] = {'[PAD]': 0, 'token_a': 1, 'token_b': 2}``.
(Note that 0 is always PADDING for token-like features.)
field2seqlen (dict): Dict mapping feature name (str) to its sequence length (int).
For sequence features, their length can be either set in config,
or set to the max sequence length of this feature.
For token and float features, their length is 1.
uid_field (str or None): The same as ``config['USER_ID_FIELD']``.
iid_field (str or None): The same as ``config['ITEM_ID_FIELD']``.
label_field (str or None): The same as ``config['LABEL_FIELD']``.
time_field (str or None): The same as ``config['TIME_FIELD']``.
inter_feat (:class:`Interaction`): Internal data structure stores the interaction features.
It's loaded from file ``.inter``.
user_feat (:class:`Interaction` or None): Internal data structure stores the user features.
It's loaded from file ``.user`` if existed.
item_feat (:class:`Interaction` or None): Internal data structure stores the item features.
It's loaded from file ``.item`` if existed.
feat_name_list (list): A list containing all the feature names (:class:`str`), including additional features.
"""
def __init__(self, config):
self.config = config
self.dataset_name = config['dataset']
self.logger = getLogger()
self._dataloader_apis = {'field2type', 'field2source', 'field2id_token'}
self._dataloader_apis.update(dlapi.dataloader_apis)
self._from_scratch()
def _from_scratch(self):
"""Load dataset from scratch.
Initialize attributes first, then load data from atomic files, and finally pre-process the dataset.
"""
self.logger.debug(set_color(f'Loading {self.__class__} from scratch.', 'green'))
self._get_preset()
self._get_field_from_config()
self._load_data(self.dataset_name, self.dataset_path)
self._data_processing()
def _get_preset(self):
"""Initialization useful inside attributes.
"""
self.dataset_path = self.config['data_path']
self.field2type = {}
self.field2source = {}
self.field2id_token = {}
self.field2token_id = {}
self.field2seqlen = self.config['seq_len'] or {}
self._preloaded_weight = {}
self.benchmark_filename_list = self.config['benchmark_filename']
def _get_field_from_config(self):
"""Initialization common field names.
"""
self.uid_field = self.config['USER_ID_FIELD']
self.iid_field = self.config['ITEM_ID_FIELD']
self.label_field = self.config['LABEL_FIELD']
self.time_field = self.config['TIME_FIELD']
if (self.uid_field is None) ^ (self.iid_field is None):
raise ValueError(
'USER_ID_FIELD and ITEM_ID_FIELD need to be set at the same time or not set at the same time.'
)
self.logger.debug(set_color('uid_field', 'blue') + f': {self.uid_field}')
self.logger.debug(set_color('iid_field', 'blue') + f': {self.iid_field}')
def _data_processing(self):
"""Data preprocessing, including:
- Data filtering
- Remap ID
- Missing value imputation
- Normalization
- Preloading weights initialization
"""
self.feat_name_list = self._build_feat_name_list()
if self.benchmark_filename_list is None:
self._data_filtering()
self._remap_ID_all()
self._user_item_feat_preparation()
self._fill_nan()
self._set_label_by_threshold()
self._normalize()
self._preload_weight_matrix()
def _data_filtering(self):
"""Data filtering
- Filter missing user_id or item_id
- Remove duplicated user-item interaction
- Value-based data filtering
- Remove interaction by user or item
- K-core data filtering
Note:
After filtering, feats(``DataFrame``) has non-continuous index,
thus :meth:`~recbole.data.dataset.dataset.Dataset._reset_index` will reset the index of feats.
"""
self._filter_nan_user_or_item()
self._remove_duplication()
self._filter_by_field_value()
self._filter_inter_by_user_or_item()
self._filter_by_inter_num()
self._reset_index()
def _build_feat_name_list(self):
"""Feat list building.
Any feat loaded by Dataset can be found in ``feat_name_list``
Returns:
built feature name list.
Note:
Subclasses can inherit this method to add new feat.
"""
feat_name_list = [
feat_name for feat_name in ['inter_feat', 'user_feat', 'item_feat']
if getattr(self, feat_name, None) is not None
]
if self.config['additional_feat_suffix'] is not None:
for suf in self.config['additional_feat_suffix']:
if getattr(self, f'{suf}_feat', None) is not None:
feat_name_list.append(f'{suf}_feat')
return feat_name_list
def _load_data(self, token, dataset_path):
"""Load features.
Firstly load interaction features, then user/item features optionally,
finally load additional features if ``config['additional_feat_suffix']`` is set.
Args:
token (str): dataset name.
dataset_path (str): path of dataset dir.
"""
self._load_inter_feat(token, dataset_path)
self.user_feat = self._load_user_or_item_feat(token, dataset_path, FeatureSource.USER, 'uid_field')
self.item_feat = self._load_user_or_item_feat(token, dataset_path, FeatureSource.ITEM, 'iid_field')
self._load_additional_feat(token, dataset_path)
def _load_inter_feat(self, token, dataset_path):
"""Load interaction features.
If ``config['benchmark_filename']`` is not set, load interaction features from ``.inter``.
Otherwise, load interaction features from a file list, named ``dataset_name.xxx.inter``,
where ``xxx`` is from ``config['benchmark_filename']``.
After loading, ``self.file_size_list`` stores the length of each interaction file.
Args:
token (str): dataset name.
dataset_path (str): path of dataset dir.
"""
if self.benchmark_filename_list is None:
inter_feat_path = os.path.join(dataset_path, f'{token}.inter')
if not os.path.isfile(inter_feat_path):
raise ValueError(f'File {inter_feat_path} not exist.')
inter_feat = self._load_feat(inter_feat_path, FeatureSource.INTERACTION)
self.logger.debug(f'Interaction feature loaded successfully from [{inter_feat_path}].')
self.inter_feat = inter_feat
else:
sub_inter_lens = []
sub_inter_feats = []
for filename in self.benchmark_filename_list:
file_path = os.path.join(dataset_path, f'{token}.{filename}.inter')
if os.path.isfile(file_path):
temp = self._load_feat(file_path, FeatureSource.INTERACTION)
sub_inter_feats.append(temp)
sub_inter_lens.append(len(temp))
else:
raise ValueError(f'File {file_path} not exist.')
inter_feat = pd.concat(sub_inter_feats)
self.inter_feat, self.file_size_list = inter_feat, sub_inter_lens
def _load_user_or_item_feat(self, token, dataset_path, source, field_name):
"""Load user/item features.
Args:
token (str): dataset name.
dataset_path (str): path of dataset dir.
source (FeatureSource): source of user/item feature.
field_name (str): ``uid_field`` or ``iid_field``
Returns:
pandas.DataFrame: Loaded feature
Note:
``user_id`` and ``item_id`` have sources :obj:`~recbole.utils.enum_type.FeatureSource.USER_ID` and
:obj:`~recbole.utils.enum_type.FeatureSource.ITEM_ID`
"""
feat_path = os.path.join(dataset_path, f'{token}.{source.value}')
if os.path.isfile(feat_path):
feat = self._load_feat(feat_path, source)
self.logger.debug(f'[{source.value}] feature loaded successfully from [{feat_path}].')
else:
feat = None
self.logger.debug(f'[{feat_path}] not found, [{source.value}] features are not loaded.')
field = getattr(self, field_name, None)
if feat is not None and field is None:
raise ValueError(f'{field_name} must be set if {source.value}_feat exists.')
if feat is not None and field not in feat:
raise ValueError(f'{field_name} must be loaded if {source.value}_feat is loaded.')
if field in self.field2source:
self.field2source[field] = FeatureSource(source.value + '_id')
return feat
def _load_additional_feat(self, token, dataset_path):
"""Load additional features.
For those additional features, e.g. pretrained entity embedding, user can set them
as ``config['additional_feat_suffix']``, then they will be loaded and stored in
:attr:`feat_name_list`. See :doc:`../user_guide/data/data_args` for details.
Args:
token (str): dataset name.
dataset_path (str): path of dataset dir.
"""
if self.config['additional_feat_suffix'] is None:
return
for suf in self.config['additional_feat_suffix']:
if hasattr(self, f'{suf}_feat'):
raise ValueError(f'{suf}_feat already exist.')
feat_path = os.path.join(dataset_path, f'{token}.{suf}')
if os.path.isfile(feat_path):
feat = self._load_feat(feat_path, suf)
else:
raise ValueError(f'Additional feature file [{feat_path}] not found.')
setattr(self, f'{suf}_feat', feat)
def _get_load_and_unload_col(self, source):
"""Parsing ``config['load_col']`` and ``config['unload_col']`` according to source.
See :doc:`../user_guide/data/data_args` for detail arg setting.
Args:
source (FeatureSource): source of input file.
Returns:
tuple: tuple of parsed ``load_col`` and ``unload_col``, see :doc:`../user_guide/data/data_args` for details.
"""
if isinstance(source, FeatureSource):
source = source.value
if self.config['load_col'] is None:
load_col = None
elif source not in self.config['load_col']:
load_col = set()
elif self.config['load_col'][source] == '*':
load_col = None
else:
load_col = set(self.config['load_col'][source])
if self.config['unload_col'] is not None and source in self.config['unload_col']:
unload_col = set(self.config['unload_col'][source])
else:
unload_col = None
if load_col and unload_col:
raise ValueError(f'load_col [{load_col}] and unload_col [{unload_col}] can not be set the same time.')
self.logger.debug(set_color(f'[{source}]: ', 'pink'))
self.logger.debug(set_color('\t load_col', 'blue') + f': [{load_col}]')
self.logger.debug(set_color('\t unload_col', 'blue') + f': [{unload_col}]')
return load_col, unload_col
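# Illustrative example (hypothetical config values, not from the RecBole docs): with
#     load_col:   {inter: [user_id, item_id, rating]}
#     unload_col: ~
# this method returns ({'user_id', 'item_id', 'rating'}, None) for source 'inter' and
# (set(), None) for any source that is not listed under load_col.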
def _load_feat(self, filepath, source):
"""Load features according to source into :class:`pandas.DataFrame`.
Set features' properties, e.g. type, source and length.
Args:
filepath (str): path of input file.
source (FeatureSource or str): source of input file.
Returns:
pandas.DataFrame: Loaded feature
Note:
For sequence features, ``seqlen`` will be loaded, but data in DataFrame will not be cut off.
Their length is limited only after calling :meth:`~_dict_to_interaction` or
:meth:`~_dataframe_to_interaction`
"""
self.logger.debug(set_color(f'Loading feature from [{filepath}] (source: [{source}]).', 'green'))
load_col, unload_col = self._get_load_and_unload_col(source)
if load_col == set():
return None
field_separator = self.config['field_separator']
columns = []
usecols = []
dtype = {}
with open(filepath, 'r') as f:
head = f.readline()[:-1]
for field_type in head.split(field_separator):
field, ftype = field_type.split(':')
try:
ftype = FeatureType(ftype)
except ValueError:
raise ValueError(f'Type {ftype} from field {field} is not supported.')
if load_col is not None and field not in load_col:
continue
if unload_col is not None and field in unload_col:
continue
if isinstance(source, FeatureSource) or source != 'link':
self.field2source[field] = source
self.field2type[field] = ftype
if not ftype.value.endswith('seq'):
self.field2seqlen[field] = 1
columns.append(field)
usecols.append(field_type)
dtype[field_type] = np.float64 if ftype == FeatureType.FLOAT else str
if len(columns) == 0:
self.logger.warning(f'No columns have been loaded from [{source}]')
return None
df = | pd.read_csv(filepath, delimiter=self.config['field_separator'], usecols=usecols, dtype=dtype) | pandas.read_csv |
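# Minimal standalone sketch (not part of the RecBole source; assumes a tab-separated
# atomic file whose header is written as ``name:type`` pairs, as parsed in _load_feat above):
header = "user_id:token\titem_id:token\trating:float\ttimestamp:float"
fields = {}
for field_type in header.split("\t"):
    field, ftype = field_type.split(":")
    fields[field] = ftype
assert fields["rating"] == "float"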
"""
Assignment 2
Part 2 - Classification
"""
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.model_selection import validation_curve
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from adspy_shared_utilities import plot_decision_tree
from adspy_shared_utilities import plot_feature_importances
import matplotlib.pyplot as plt
mush_df = pd.read_csv('data/mushrooms.csv')
mush_df2 = pd.get_dummies(mush_df)
X_mush = mush_df2.iloc[:,2:]
y_mush = mush_df2.iloc[:,1]
# use the variables X_train2, y_train2 for Question 5
X_train2, X_test2, y_train2, y_test2 = train_test_split(X_mush, y_mush, random_state=0)
# For performance reasons in Questions 6 and 7, we will create a smaller version of the
# entire mushroom dataset for use in those questions. For simplicity we'll just re-use
# the 25% test split created above as the representative subset.
#
# Use the variables X_subset, y_subset for Questions 6 and 7.
X_subset = X_test2
y_subset = y_test2
def answer_five():
clf = DecisionTreeClassifier(random_state=0).fit(X_train2, y_train2)
#show_dectree_info(clf)
# List of labeled feature importance
fi = | pd.DataFrame(data=clf.feature_importances_, index=X_train2.columns, columns=['feature importance']) | pandas.DataFrame |
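# Sketch (not part of the assignment solution above; feature names are made up): once the
# importance frame is built, a typical next step is to sort it and keep the top features.
import pandas as pd
fi_demo = pd.DataFrame({"feature importance": [0.1, 0.6, 0.3]},
                       index=["odor_a", "odor_n", "gill_size_b"])
top5 = fi_demo.sort_values("feature importance", ascending=False).head(5)
print(top5.index.tolist())  # ['odor_n', 'gill_size_b', 'odor_a']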
from src.BandC.Parser import Parser
import arff
import pandas as pd
from pandas.core.frame import DataFrame
class Arff(Parser):
"""
An Arff Parser that can automatically detect the correct format.
"""
def parse_file(self):
column_names = [attribute[0] for attribute in self.attributes]
return pd.DataFrame.from_records(self.data, columns=column_names)
def parse_content(self):
column_names = [attribute[0] for attribute in self.attributes]
return | pd.DataFrame.from_records(self.data, columns=column_names) | pandas.DataFrame.from_records |
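# Sketch (not part of the parser above): given ARFF-style attribute/data pairs, both
# parse methods reduce to the same DataFrame construction.
import pandas as pd
attributes = [("sepal_length", "NUMERIC"), ("species", ["setosa", "versicolor"])]
data = [(5.1, "setosa"), (6.0, "versicolor")]
df = pd.DataFrame.from_records(data, columns=[a[0] for a in attributes])
assert list(df.columns) == ["sepal_length", "species"]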
#%% Loading irish data
import pandas as pd
data1 = pd.read_fwf('bible.txt', header=None)
data2 = pd.read_fwf('blogs.txt', header=None)
data3 = pd.read_fwf('legal.txt', header=None)
data4 = pd.read_fwf('news.txt', header=None)
data5 = | pd.read_fwf('wiki.txt', header=None) | pandas.read_fwf |
from models import NominalACM
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# ===== Custom Functions =====
def get_excess_returns(df):
excess_returns = | pd.DataFrame(index=df.index, columns=df.columns) | pandas.DataFrame |
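# Sketch of the allocate-then-fill pattern used above (illustrative only; the actual
# excess-return formula used by NominalACM is not shown in this snippet).
import pandas as pd
src = pd.DataFrame({"a": [1.0, 2.0, 4.0], "b": [3.0, 4.0, 6.0]})
out = pd.DataFrame(index=src.index, columns=src.columns)
for col in src.columns:
    out[col] = src[col].diff()  # placeholder transformation; fills the pre-allocated frame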
'''
This set of functions creates, loads, encodes, and saves DataFrames
of each sequence.
Pos: H(6), K(8), R(14)
Neg: D(2), E(3)
'''
import numpy as np
import pandas as pd
from sklearn import preprocessing
import plot_functions
def ordinal_decode(seq):
'ordinal to amino acid sequence'
AAlist=np.array(list("ACDEFGHIKLMNPQRSTVWY"))
enc_OH=preprocessing.OrdinalEncoder().fit(AAlist.reshape(-1,1))
AAlist=enc_OH.inverse_transform(seq.reshape(-1, 1)).flatten()
AA_sequence_list=''.join(AAlist)
return AA_sequence_list
def get_charge(seq):
'return # pos and # neg AA in seq'
seq=np.array(seq)
n_pos=sum(np.where((seq==6)|(seq==8)|(seq==14),1,0))
n_neg=sum(np.where((seq==2)|(seq==3),1,0))
return n_pos, n_neg
def make_datasets(n_samples=1000,seq_len=10):
'Pos: 0-19 stay, 20-29: H, 30-39:K, 40-49:R'
    pos_data=np.random.randint(low=0,high=50,size=[n_samples,seq_len])  # high is exclusive, so 50 covers the documented 0-49 range
pos_list=[[]]*len(pos_data)
for i, seq in enumerate(pos_data):
seq_adj=np.where(((seq>=20)&(seq<30)),6,seq)
seq_adj=np.where(((seq_adj>=30)&(seq_adj<40)),8,seq_adj)
seq_adj=np.where(((seq_adj>=40)&(seq_adj<50)),14,seq_adj)
AA_seq=ordinal_decode(seq_adj)
n_pos,n_neg=get_charge(seq_adj)
pos_list[i]=[list(seq_adj),AA_seq,n_pos,n_neg]
pos_df=pd.DataFrame(pos_list)
pos_df.columns=['Ordinal','AA','N_Pos','N_Neg']
pos_df['Class']='Positive'
'Neg: 0-19 stay, 20-29:D, 30-39:E'
    neg_data=np.random.randint(low=0,high=40,size=[n_samples,seq_len])  # high is exclusive, so 40 covers the documented 0-39 range
neg_list=[[]]*len(neg_data)
for i, seq in enumerate(neg_data):
seq_adj=np.where(((seq>=20)&(seq<30)),2,seq)
seq_adj=np.where(((seq_adj>=30)&(seq_adj<40)),3,seq_adj)
AA_seq=ordinal_decode(seq_adj)
n_pos,n_neg=get_charge(seq_adj)
neg_list[i]=[list(seq_adj),AA_seq,n_pos,n_neg]
neg_df= | pd.DataFrame(neg_list) | pandas.DataFrame |
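# Round-trip sketch for the ordinal encoding used above (illustrative; mirrors
# ordinal_decode): amino-acid letters <-> integer codes via OrdinalEncoder.
import numpy as np
from sklearn import preprocessing
AAlist = np.array(list("ACDEFGHIKLMNPQRSTVWY"))
enc = preprocessing.OrdinalEncoder().fit(AAlist.reshape(-1, 1))
codes = enc.transform(np.array(list("HKRDE")).reshape(-1, 1)).flatten()
assert list(codes.astype(int)) == [6, 8, 14, 2, 3]  # matches the charge codes used in get_charge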
import math
import gc
import os
import pickle
from math import pi
import numpy as np
import pandas as pd
import pytorch_lightning as pl
from omegaconf import DictConfig
from scipy.fft import fft
from scipy.signal import blackman
from scipy.signal import hilbert
from sklearn.model_selection import GroupKFold
from sklearn.preprocessing import RobustScaler
from torch.utils.data import DataLoader
from src.utils.technical_utils import load_obj
class VentilatorDataModule(pl.LightningDataModule):
def __init__(self, cfg: DictConfig):
super().__init__()
self.cfg = cfg
def prepare_data(self):
pass
def make_features(self, data):
if "pressure" not in data.columns:
data['pressure'] = 0
data['RC_sum'] = data['R'] + data['C']
data['RC_div'] = data['R'] / data['C']
data['R'] = data['R'].astype(str)
data['C'] = data['C'].astype(str)
data['RC'] = data['R'] + data['C']
data = pd.get_dummies(data)
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
# data['time_step_lag'] = data.groupby('breath_id')['time_step'].shift(1)
data['time_step_lag'] = data.groupby('breath_id')['time_step'].shift(2)
data['u_in_lag'] = data.groupby('breath_id')['u_in'].shift(1)
data['u_out_lag'] = data.groupby('u_out')['u_in'].shift(1)
return data.fillna(0)
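# Illustrative note (not part of the original module): the lag features above are
# per-breath shifts, e.g. for a single breath_id with u_in = [0, 5, 7],
#     data.groupby('breath_id')['u_in'].shift(1) -> [NaN, 0, 5]
# and the trailing fillna(0) turns the leading NaN into 0.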
def make_features1(self, data):
data['area'] = data['time_step'] * data['u_in']
data['area'] = data.groupby('breath_id')['area'].cumsum()
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['u_in_lag1'] = data.groupby('breath_id')['u_in'].shift(1)
data['u_out_lag1'] = data.groupby('breath_id')['u_out'].shift(1)
data['u_in_lag_back1'] = data.groupby('breath_id')['u_in'].shift(-1)
data['u_out_lag_back1'] = data.groupby('breath_id')['u_out'].shift(-1)
data['u_in_lag2'] = data.groupby('breath_id')['u_in'].shift(2)
data['u_out_lag2'] = data.groupby('breath_id')['u_out'].shift(2)
data['u_in_lag_back2'] = data.groupby('breath_id')['u_in'].shift(-2)
data['u_out_lag_back2'] = data.groupby('breath_id')['u_out'].shift(-2)
data['u_in_lag3'] = data.groupby('breath_id')['u_in'].shift(3)
data['u_out_lag3'] = data.groupby('breath_id')['u_out'].shift(3)
data['u_in_lag_back3'] = data.groupby('breath_id')['u_in'].shift(-3)
data['u_out_lag_back3'] = data.groupby('breath_id')['u_out'].shift(-3)
data['u_in_lag4'] = data.groupby('breath_id')['u_in'].shift(4)
data['u_out_lag4'] = data.groupby('breath_id')['u_out'].shift(4)
data['u_in_lag_back4'] = data.groupby('breath_id')['u_in'].shift(-4)
data['u_out_lag_back4'] = data.groupby('breath_id')['u_out'].shift(-4)
data = data.fillna(0)
data['breath_id__u_in__max'] = data.groupby(['breath_id'])['u_in'].transform('max')
data['breath_id__u_out__max'] = data.groupby(['breath_id'])['u_out'].transform('max')
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['u_in_diff3'] = data['u_in'] - data['u_in_lag3']
data['u_out_diff3'] = data['u_out'] - data['u_out_lag3']
data['u_in_diff4'] = data['u_in'] - data['u_in_lag4']
data['u_out_diff4'] = data['u_out'] - data['u_out_lag4']
data['cross'] = data['u_in'] * data['u_out']
data['cross2'] = data['time_step'] * data['u_out']
data['R'] = data['R'].astype(str)
data['C'] = data['C'].astype(str)
data['R__C'] = data["R"].astype(str) + '__' + data["C"].astype(str)
data = pd.get_dummies(data)
data.drop(['id', 'breath_id'], axis=1, inplace=True)
if 'pressure' in data.columns:
data.drop('pressure', axis=1, inplace=True)
return data
def make_features2(self, data):
data['area'] = data['time_step'] * data['u_in']
data['area'] = data.groupby('breath_id')['area'].cumsum()
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['u_in_lag1'] = data.groupby('breath_id')['u_in'].shift(1)
data['u_out_lag1'] = data.groupby('breath_id')['u_out'].shift(1)
data['u_in_lag_back1'] = data.groupby('breath_id')['u_in'].shift(-1)
data['u_out_lag_back1'] = data.groupby('breath_id')['u_out'].shift(-1)
data['u_in_lag2'] = data.groupby('breath_id')['u_in'].shift(2)
data['u_out_lag2'] = data.groupby('breath_id')['u_out'].shift(2)
data['u_in_lag_back2'] = data.groupby('breath_id')['u_in'].shift(-2)
data['u_out_lag_back2'] = data.groupby('breath_id')['u_out'].shift(-2)
data['u_in_lag3'] = data.groupby('breath_id')['u_in'].shift(3)
data['u_out_lag3'] = data.groupby('breath_id')['u_out'].shift(3)
data['u_in_lag_back3'] = data.groupby('breath_id')['u_in'].shift(-3)
data['u_out_lag_back3'] = data.groupby('breath_id')['u_out'].shift(-3)
data['u_in_lag4'] = data.groupby('breath_id')['u_in'].shift(4)
data['u_out_lag4'] = data.groupby('breath_id')['u_out'].shift(4)
data['u_in_lag_back4'] = data.groupby('breath_id')['u_in'].shift(-4)
data['u_out_lag_back4'] = data.groupby('breath_id')['u_out'].shift(-4)
data = data.fillna(0)
data['breath_id__u_in__max'] = data.groupby(['breath_id'])['u_in'].transform('max')
data['breath_id__u_out__max'] = data.groupby(['breath_id'])['u_out'].transform('max')
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['u_in_diff3'] = data['u_in'] - data['u_in_lag3']
data['u_out_diff3'] = data['u_out'] - data['u_out_lag3']
data['u_in_diff4'] = data['u_in'] - data['u_in_lag4']
data['u_out_diff4'] = data['u_out'] - data['u_out_lag4']
data['cross'] = data['u_in'] * data['u_out']
data['cross2'] = data['time_step'] * data['u_out']
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_cummean'] = data['u_in_cumsum'] / data['count']
data['breath_id_lag'] = data['breath_id'].shift(1).fillna(0)
data['breath_id_lag2'] = data['breath_id'].shift(2).fillna(0)
data['breath_id_lagsame'] = np.select([data['breath_id_lag'] == data['breath_id']], [1], 0)
data['breath_id_lag2same'] = np.select([data['breath_id_lag2'] == data['breath_id']], [1], 0)
data['breath_id__u_in_lag'] = data['u_in'].shift(1).fillna(0)
data['breath_id__u_in_lag'] = data['breath_id__u_in_lag'] * data['breath_id_lagsame']
data['breath_id__u_in_lag2'] = data['u_in'].shift(2).fillna(0)
data['breath_id__u_in_lag2'] = data['breath_id__u_in_lag2'] * data['breath_id_lag2same']
data['R'] = data['R'].astype(str)
data['C'] = data['C'].astype(str)
data['R__C'] = data["R"].astype(str) + '__' + data["C"].astype(str)
data = pd.get_dummies(data)
data.drop(['id', 'breath_id', 'one', 'count', 'breath_id_lag', 'breath_id_lag2', 'breath_id_lagsame',
'breath_id_lag2same'], axis=1, inplace=True)
if 'pressure' in data.columns:
data.drop('pressure', axis=1, inplace=True)
return data
def make_features3(self, data):
data['area'] = data['time_step'] * data['u_in']
data['area'] = data.groupby('breath_id')['area'].cumsum()
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['u_in_lag1'] = data.groupby('breath_id')['u_in'].shift(1)
data['u_out_lag1'] = data.groupby('breath_id')['u_out'].shift(1)
data['u_in_lag_back1'] = data.groupby('breath_id')['u_in'].shift(-1)
data['u_out_lag_back1'] = data.groupby('breath_id')['u_out'].shift(-1)
data['u_in_lag2'] = data.groupby('breath_id')['u_in'].shift(2)
data['u_out_lag2'] = data.groupby('breath_id')['u_out'].shift(2)
data['u_in_lag_back2'] = data.groupby('breath_id')['u_in'].shift(-2)
data['u_out_lag_back2'] = data.groupby('breath_id')['u_out'].shift(-2)
data['u_in_lag3'] = data.groupby('breath_id')['u_in'].shift(3)
data['u_out_lag3'] = data.groupby('breath_id')['u_out'].shift(3)
data['u_in_lag_back3'] = data.groupby('breath_id')['u_in'].shift(-3)
data['u_out_lag_back3'] = data.groupby('breath_id')['u_out'].shift(-3)
data['u_in_lag4'] = data.groupby('breath_id')['u_in'].shift(4)
data['u_out_lag4'] = data.groupby('breath_id')['u_out'].shift(4)
data['u_in_lag_back4'] = data.groupby('breath_id')['u_in'].shift(-4)
data['u_out_lag_back4'] = data.groupby('breath_id')['u_out'].shift(-4)
data = data.fillna(0)
data['breath_id__u_in__max'] = data.groupby(['breath_id'])['u_in'].transform('max')
data['breath_id__u_out__max'] = data.groupby(['breath_id'])['u_out'].transform('max')
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['u_in_diff3'] = data['u_in'] - data['u_in_lag3']
data['u_out_diff3'] = data['u_out'] - data['u_out_lag3']
data['u_in_diff4'] = data['u_in'] - data['u_in_lag4']
data['u_out_diff4'] = data['u_out'] - data['u_out_lag4']
data['cross'] = data['u_in'] * data['u_out']
data['cross2'] = data['time_step'] * data['u_out']
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_cummean'] = data['u_in_cumsum'] / data['count']
data['breath_id_lag'] = data['breath_id'].shift(1).fillna(0)
data['breath_id_lag2'] = data['breath_id'].shift(2).fillna(0)
data['breath_id_lagsame'] = np.select([data['breath_id_lag'] == data['breath_id']], [1], 0)
data['breath_id_lag2same'] = np.select([data['breath_id_lag2'] == data['breath_id']], [1], 0)
data['breath_id__u_in_lag'] = data['u_in'].shift(1).fillna(0)
data['breath_id__u_in_lag'] = data['breath_id__u_in_lag'] * data['breath_id_lagsame']
data['breath_id__u_in_lag2'] = data['u_in'].shift(2).fillna(0)
data['breath_id__u_in_lag2'] = data['breath_id__u_in_lag2'] * data['breath_id_lag2same']
data['R_sum_c'] = (data['R'] + data['C']).astype(str)
data['R_mult_c'] = (data['R'] * data['C']).astype(str)
data['R'] = data['R'].astype(str)
data['C'] = data['C'].astype(str)
data['R__C'] = data["R"].astype(str) + '__' + data["C"].astype(str)
data = pd.get_dummies(data)
data[["15_in_sum", "15_in_min", "15_in_max", "15_in_mean"]] = (data \
.groupby('breath_id')['u_in'] \
.rolling(window=15, min_periods=1) \
.agg({"15_in_sum": "sum",
"15_in_min": "min",
"15_in_max": "max",
"15_in_mean": "mean"
# "15_in_std":"std"
}) \
.reset_index(level=0, drop=True))
data['u_in_lagback_diff1'] = data['u_in'] - data['u_in_lag_back1']
data['u_out_lagback_diff1'] = data['u_out'] - data['u_out_lag_back1']
data['u_in_lagback_diff2'] = data['u_in'] - data['u_in_lag_back2']
data['u_out_lagback_diff2'] = data['u_out'] - data['u_out_lag_back2']
data['ewm_u_in_mean'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).mean().reset_index(level=0, drop=True)
data['ewm_u_in_std'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).std().reset_index(level=0, drop=True)
data['ewm_u_in_corr'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).corr().reset_index(level=0, drop=True)
data['rolling_10_mean'] = data.groupby('breath_id')['u_in'].rolling(window=10, min_periods=1).mean().reset_index(
level=0, drop=True)
data['rolling_10_max'] = data.groupby('breath_id')['u_in'].rolling(window=10, min_periods=1).max().reset_index(level=0,
drop=True)
data['rolling_10_std'] = data.groupby('breath_id')['u_in'].rolling(window=10, min_periods=1).std().reset_index(level=0,
drop=True)
data['expand_mean'] = data.groupby('breath_id')['u_in'].expanding(2).mean().reset_index(level=0, drop=True)
data['expand_max'] = data.groupby('breath_id')['u_in'].expanding(2).max().reset_index(level=0, drop=True)
data['expand_std'] = data.groupby('breath_id')['u_in'].expanding(2).std().reset_index(level=0, drop=True)
data["u_in_rolling_mean2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).mean()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_mean4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).mean()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_mean10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).mean()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_max2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).max()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_max4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).max()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_max10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).max()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_min2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).min()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_min4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).min()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_min10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).min()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_std2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).std()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_std4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).std()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_std10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).std()[
"u_in"].reset_index(drop=True)
g = data.groupby('breath_id')['u_in']
data['ewm_u_in_mean'] = g.ewm(halflife=10).mean() \
.reset_index(level=0, drop=True)
data['ewm_u_in_std'] = g.ewm(halflife=10).std() \
.reset_index(level=0, drop=True)
data['ewm_u_in_corr'] = g.ewm(halflife=10).corr() \
.reset_index(level=0, drop=True)
data['rolling_10_mean'] = g.rolling(window=10, min_periods=1).mean() \
.reset_index(level=0, drop=True)
data['rolling_10_max'] = g.rolling(window=10, min_periods=1).max() \
.reset_index(level=0, drop=True)
data['rolling_10_std'] = g.rolling(window=10, min_periods=1).std() \
.reset_index(level=0, drop=True)
data['expand_mean'] = g.expanding(2).mean() \
.reset_index(level=0, drop=True)
data['expand_max'] = g.expanding(2).max() \
.reset_index(level=0, drop=True)
data['expand_std'] = g.expanding(2).std() \
.reset_index(level=0, drop=True)
data['u_in_lag_back10'] = data.groupby('breath_id')['u_in'].shift(-10)
data['u_out_lag_back10'] = data.groupby('breath_id')['u_out'].shift(-10)
data['time_step_diff'] = data.groupby('breath_id')['time_step'].diff().fillna(0)
### rolling window ts feats
data['ewm_u_in_mean'] = data.groupby('breath_id')['u_in'].ewm(halflife=9).mean().reset_index(level=0,
drop=True)
data['ewm_u_in_std'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).std().reset_index(level=0,
drop=True) ## could add covar?
data['ewm_u_in_corr'] = data.groupby('breath_id')['u_in'].ewm(halflife=15).corr().reset_index(level=0,
drop=True) # self umin corr
# data['ewm_u_in_corr'] = data.groupby('breath_id')['u_in'].ewm(halflife=6).corr(data.groupby('breath_id')["u_out"]).reset_index(level=0,drop=True) # corr with u_out # error
## rolling window of 15 periods
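# NOTE: the last target column is named "15_out_std" although the aggregation computes
# "15_in_std" (rolling std of u_in); the column name does not reflect its contents.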
data[["15_in_sum", "15_in_min", "15_in_max", "15_in_mean", "15_out_std"]] = data.groupby('breath_id')[
'u_in'].rolling(window=15, min_periods=1).agg(
{"15_in_sum": "sum", "15_in_min": "min", "15_in_max": "max", "15_in_mean": "mean",
"15_in_std": "std"}).reset_index(level=0, drop=True)
# data[["45_in_sum","45_in_min","45_in_max","45_in_mean","45_out_std"]] = data.groupby('breath_id')['u_in'].rolling(window=45,min_periods=1).agg({"45_in_sum":"sum","45_in_min":"min","45_in_max":"max","45_in_mean":"mean","45_in_std":"std"}).reset_index(level=0,drop=True)
data[["45_in_sum", "45_in_min", "45_in_max", "45_in_mean", "45_out_std"]] = data.groupby('breath_id')[
'u_in'].rolling(window=45, min_periods=1).agg(
{"45_in_sum": "sum", "45_in_min": "min", "45_in_max": "max", "45_in_mean": "mean",
"45_in_std": "std"}).reset_index(level=0, drop=True)
data[["15_out_mean"]] = data.groupby('breath_id')['u_out'].rolling(window=15, min_periods=1).agg(
{"15_out_mean": "mean"}).reset_index(level=0, drop=True)
return data.fillna(0)
def make_features32(self, data):
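"""Lag/diff/rolling feature set: per-breath lags 1-4 in both directions, u_in/u_out
differences, cumulative, rolling, EWM and expanding statistics of u_in, plus one-hot
encoded R, C, R__C, R_sum_c and R_mult_c. Returns the frame with NaNs filled by 0."""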
data['area'] = data['time_step'] * data['u_in']
data['area'] = data.groupby('breath_id')['area'].cumsum()
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['u_in_lag1'] = data.groupby('breath_id')['u_in'].shift(1)
data['u_out_lag1'] = data.groupby('breath_id')['u_out'].shift(1)
data['u_in_lag_back1'] = data.groupby('breath_id')['u_in'].shift(-1)
data['u_out_lag_back1'] = data.groupby('breath_id')['u_out'].shift(-1)
data['u_in_lag2'] = data.groupby('breath_id')['u_in'].shift(2)
data['u_out_lag2'] = data.groupby('breath_id')['u_out'].shift(2)
data['u_in_lag_back2'] = data.groupby('breath_id')['u_in'].shift(-2)
data['u_out_lag_back2'] = data.groupby('breath_id')['u_out'].shift(-2)
data['u_in_lag3'] = data.groupby('breath_id')['u_in'].shift(3)
data['u_out_lag3'] = data.groupby('breath_id')['u_out'].shift(3)
data['u_in_lag_back3'] = data.groupby('breath_id')['u_in'].shift(-3)
data['u_out_lag_back3'] = data.groupby('breath_id')['u_out'].shift(-3)
data['u_in_lag4'] = data.groupby('breath_id')['u_in'].shift(4)
data['u_out_lag4'] = data.groupby('breath_id')['u_out'].shift(4)
data['u_in_lag_back4'] = data.groupby('breath_id')['u_in'].shift(-4)
data['u_out_lag_back4'] = data.groupby('breath_id')['u_out'].shift(-4)
data = data.fillna(0)
data['breath_id__u_in__max'] = data.groupby(['breath_id'])['u_in'].transform('max')
data['breath_id__u_out__max'] = data.groupby(['breath_id'])['u_out'].transform('max')
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['u_in_diff3'] = data['u_in'] - data['u_in_lag3']
data['u_out_diff3'] = data['u_out'] - data['u_out_lag3']
data['u_in_diff4'] = data['u_in'] - data['u_in_lag4']
data['u_out_diff4'] = data['u_out'] - data['u_out_lag4']
data['cross'] = data['u_in'] * data['u_out']
data['cross2'] = data['time_step'] * data['u_out']
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_cummean'] = data['u_in_cumsum'] / data['count']
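# breath_id_lagsame / breath_id_lag2same are 1 only when the row shifted by 1 / 2 positions
# belongs to the same breath; multiplying the shifted u_in by them zeroes out lag values
# that would otherwise leak across breath boundaries.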
data['breath_id_lag'] = data['breath_id'].shift(1).fillna(0)
data['breath_id_lag2'] = data['breath_id'].shift(2).fillna(0)
data['breath_id_lagsame'] = np.select([data['breath_id_lag'] == data['breath_id']], [1], 0)
data['breath_id_lag2same'] = np.select([data['breath_id_lag2'] == data['breath_id']], [1], 0)
data['breath_id__u_in_lag'] = data['u_in'].shift(1).fillna(0)
data['breath_id__u_in_lag'] = data['breath_id__u_in_lag'] * data['breath_id_lagsame']
data['breath_id__u_in_lag2'] = data['u_in'].shift(2).fillna(0)
data['breath_id__u_in_lag2'] = data['breath_id__u_in_lag2'] * data['breath_id_lag2same']
data['R_sum_c'] = (data['R'] + data['C']).astype(str)
data['R_mult_c'] = (data['R'] * data['C']).astype(str)
data['R'] = data['R'].astype(str)
data['C'] = data['C'].astype(str)
data['R__C'] = data["R"].astype(str) + '__' + data["C"].astype(str)
data = pd.get_dummies(data)
data[["15_in_sum", "15_in_min", "15_in_max", "15_in_mean"]] = (data \
.groupby('breath_id')['u_in'] \
.rolling(window=15, min_periods=1) \
.agg({"15_in_sum": "sum",
"15_in_min": "min",
"15_in_max": "max",
"15_in_mean": "mean"
# "15_in_std":"std"
}) \
.reset_index(level=0, drop=True))
data['u_in_lagback_diff1'] = data['u_in'] - data['u_in_lag_back1']
data['u_out_lagback_diff1'] = data['u_out'] - data['u_out_lag_back1']
data['u_in_lagback_diff2'] = data['u_in'] - data['u_in_lag_back2']
data['u_out_lagback_diff2'] = data['u_out'] - data['u_out_lag_back2']
data['ewm_u_in_mean'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).mean().reset_index(level=0,
drop=True)
data['ewm_u_in_std'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).std().reset_index(level=0, drop=True)
data['ewm_u_in_corr'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).corr().reset_index(level=0,
drop=True)
data['rolling_10_mean'] = data.groupby('breath_id')['u_in'].rolling(window=10,
min_periods=1).mean().reset_index(
level=0, drop=True)
data['rolling_10_max'] = data.groupby('breath_id')['u_in'].rolling(window=10, min_periods=1).max().reset_index(
level=0,
drop=True)
data['rolling_10_std'] = data.groupby('breath_id')['u_in'].rolling(window=10, min_periods=1).std().reset_index(
level=0,
drop=True)
data['expand_mean'] = data.groupby('breath_id')['u_in'].expanding(2).mean().reset_index(level=0, drop=True)
data['expand_max'] = data.groupby('breath_id')['u_in'].expanding(2).max().reset_index(level=0, drop=True)
data['expand_std'] = data.groupby('breath_id')['u_in'].expanding(2).std().reset_index(level=0, drop=True)
data["u_in_rolling_mean2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).mean()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_mean4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).mean()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_mean10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).mean()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_max2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).max()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_max4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).max()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_max10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).max()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_min2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).min()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_min4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).min()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_min10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).min()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_std2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).std()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_std4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).std()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_std10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).std()[
"u_in"].reset_index(drop=True)
g = data.groupby('breath_id')['u_in']
data['ewm_u_in_mean'] = g.ewm(halflife=10).mean() \
.reset_index(level=0, drop=True)
data['ewm_u_in_std'] = g.ewm(halflife=10).std() \
.reset_index(level=0, drop=True)
data['ewm_u_in_corr'] = g.ewm(halflife=10).corr() \
.reset_index(level=0, drop=True)
data['rolling_10_mean'] = g.rolling(window=10, min_periods=1).mean() \
.reset_index(level=0, drop=True)
data['rolling_10_max'] = g.rolling(window=10, min_periods=1).max() \
.reset_index(level=0, drop=True)
data['rolling_10_std'] = g.rolling(window=10, min_periods=1).std() \
.reset_index(level=0, drop=True)
data['expand_mean'] = g.expanding(2).mean() \
.reset_index(level=0, drop=True)
data['expand_max'] = g.expanding(2).max() \
.reset_index(level=0, drop=True)
data['expand_std'] = g.expanding(2).std() \
.reset_index(level=0, drop=True)
data['u_in_lag_back10'] = data.groupby('breath_id')['u_in'].shift(-10)
data['u_out_lag_back10'] = data.groupby('breath_id')['u_out'].shift(-10)
data['time_step_diff'] = data.groupby('breath_id')['time_step'].diff().fillna(0)
### rolling window ts feats
data['ewm_u_in_mean'] = data.groupby('breath_id')['u_in'].ewm(halflife=9).mean().reset_index(level=0,
drop=True)
data['ewm_u_in_std'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).std().reset_index(level=0,
drop=True) ## could add covar?
data['ewm_u_in_corr'] = data.groupby('breath_id')['u_in'].ewm(halflife=15).corr().reset_index(level=0,
drop=True) # self umin corr
# data['ewm_u_in_corr'] = data.groupby('breath_id')['u_in'].ewm(halflife=6).corr(data.groupby('breath_id')["u_out"]).reset_index(level=0,drop=True) # corr with u_out # error
## rolling window of 15 periods
data[["15_in_sum", "15_in_min", "15_in_max", "15_in_mean", "15_out_std"]] = data.groupby('breath_id')[
'u_in'].rolling(window=15, min_periods=1).agg(
{"15_in_sum": "sum", "15_in_min": "min", "15_in_max": "max", "15_in_mean": "mean",
"15_in_std": "std"}).reset_index(level=0, drop=True)
# data[["45_in_sum","45_in_min","45_in_max","45_in_mean","45_out_std"]] = data.groupby('breath_id')['u_in'].rolling(window=45,min_periods=1).agg({"45_in_sum":"sum","45_in_min":"min","45_in_max":"max","45_in_mean":"mean","45_in_std":"std"}).reset_index(level=0,drop=True)
data[["45_in_sum", "45_in_min", "45_in_max", "45_in_mean", "45_out_std"]] = data.groupby('breath_id')[
'u_in'].rolling(window=45, min_periods=1).agg(
{"45_in_sum": "sum", "45_in_min": "min", "45_in_max": "max", "45_in_mean": "mean",
"45_in_std": "std"}).reset_index(level=0, drop=True)
data[["15_out_mean"]] = data.groupby('breath_id')['u_out'].rolling(window=15, min_periods=1).agg(
{"15_out_mean": "mean"}).reset_index(level=0, drop=True)
return data.fillna(0)
def make_features4(self, data):
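"""Lag/diff features with one-hot R/C/R__C plus per-breath aggregates: true area
(time_delta * u_in cumsum), u_in mean/std, inspiratory-phase (u_in_half) statistics,
first/last point values, main_mode flag, u_out-rise timing and a sign-inversion count.
Drops the bookkeeping columns and 'pressure' before returning."""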
data['area'] = data['time_step'] * data['u_in']
data['area'] = data.groupby('breath_id')['area'].cumsum()
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['u_in_lag1'] = data.groupby('breath_id')['u_in'].shift(1)
data['u_out_lag1'] = data.groupby('breath_id')['u_out'].shift(1)
data['u_in_lag_back1'] = data.groupby('breath_id')['u_in'].shift(-1)
data['u_out_lag_back1'] = data.groupby('breath_id')['u_out'].shift(-1)
data['u_in_lag2'] = data.groupby('breath_id')['u_in'].shift(2)
data['u_out_lag2'] = data.groupby('breath_id')['u_out'].shift(2)
data['u_in_lag_back2'] = data.groupby('breath_id')['u_in'].shift(-2)
data['u_out_lag_back2'] = data.groupby('breath_id')['u_out'].shift(-2)
data['u_in_lag3'] = data.groupby('breath_id')['u_in'].shift(3)
data['u_out_lag3'] = data.groupby('breath_id')['u_out'].shift(3)
data['u_in_lag_back3'] = data.groupby('breath_id')['u_in'].shift(-3)
data['u_out_lag_back3'] = data.groupby('breath_id')['u_out'].shift(-3)
data['u_in_lag4'] = data.groupby('breath_id')['u_in'].shift(4)
data['u_out_lag4'] = data.groupby('breath_id')['u_out'].shift(4)
data['u_in_lag_back4'] = data.groupby('breath_id')['u_in'].shift(-4)
data['u_out_lag_back4'] = data.groupby('breath_id')['u_out'].shift(-4)
data = data.fillna(0)
data['breath_id__u_in__max'] = data.groupby(['breath_id'])['u_in'].transform('max')
data['breath_id__u_out__max'] = data.groupby(['breath_id'])['u_out'].transform('max')
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['u_in_diff3'] = data['u_in'] - data['u_in_lag3']
data['u_out_diff3'] = data['u_out'] - data['u_out_lag3']
data['u_in_diff4'] = data['u_in'] - data['u_in_lag4']
data['u_out_diff4'] = data['u_out'] - data['u_out_lag4']
data['cross'] = data['u_in'] * data['u_out']
data['cross2'] = data['time_step'] * data['u_out']
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_cummean'] = data['u_in_cumsum'] / data['count']
data['breath_id_lag'] = data['breath_id'].shift(1).fillna(0)
data['breath_id_lag2'] = data['breath_id'].shift(2).fillna(0)
data['breath_id_lagsame'] = np.select([data['breath_id_lag'] == data['breath_id']], [1], 0)
data['breath_id_lag2same'] = np.select([data['breath_id_lag2'] == data['breath_id']], [1], 0)
data['breath_id__u_in_lag'] = data['u_in'].shift(1).fillna(0)
data['breath_id__u_in_lag'] = data['breath_id__u_in_lag'] * data['breath_id_lagsame']
data['breath_id__u_in_lag2'] = data['u_in'].shift(2).fillna(0)
data['breath_id__u_in_lag2'] = data['breath_id__u_in_lag2'] * data['breath_id_lag2same']
data['R'] = data['R'].astype(str)
data['C'] = data['C'].astype(str)
data['R__C'] = data["R"].astype(str) + '__' + data["C"].astype(str)
data = pd.get_dummies(data)
data['time_delta'] = data['time_step'].diff()
data['time_delta'].fillna(0, inplace=True)
data['time_delta'].mask(data['time_delta'] < 0, 0, inplace=True)
data['tmp'] = data['time_delta'] * data['u_in']
data['area_true'] = data.groupby('breath_id')['tmp'].cumsum()
# u_in_max_dict = data.groupby('breath_id')['u_in'].max().to_dict()
# data['u_in_max'] = data['breath_id'].map(u_in_max_dict)
# u_in_min_dict = data.groupby('breath_id')['u_in'].min().to_dict()
# data['u_in_min'] = data['breath_id'].map(u_in_min_dict)
u_in_mean_dict = data.groupby('breath_id')['u_in'].mean().to_dict()
data['u_in_mean'] = data['breath_id'].map(u_in_mean_dict)
del u_in_mean_dict
u_in_std_dict = data.groupby('breath_id')['u_in'].std().to_dict()
data['u_in_std'] = data['breath_id'].map(u_in_std_dict)
del u_in_std_dict
# u_in_half: u_in during the inspiratory phase (from time 0 until u_out rises to 1, roughly the first 1.0 s of each breath)
data['tmp'] = data['u_out'] * (-1) + 1 # inversion of u_out
data['u_in_half'] = data['tmp'] * data['u_in']
# u_in_half: max, min, mean, std
u_in_half_max_dict = data.groupby('breath_id')['u_in_half'].max().to_dict()
data['u_in_half_max'] = data['breath_id'].map(u_in_half_max_dict)
del u_in_half_max_dict
u_in_half_min_dict = data.groupby('breath_id')['u_in_half'].min().to_dict()
data['u_in_half_min'] = data['breath_id'].map(u_in_half_min_dict)
del u_in_half_min_dict
u_in_half_mean_dict = data.groupby('breath_id')['u_in_half'].mean().to_dict()
data['u_in_half_mean'] = data['breath_id'].map(u_in_half_mean_dict)
del u_in_half_mean_dict
u_in_half_std_dict = data.groupby('breath_id')['u_in_half'].std().to_dict()
data['u_in_half_std'] = data['breath_id'].map(u_in_half_std_dict)
del u_in_half_std_dict
gc.collect()
# Each breath has exactly 80 rows, so rows 0, 80, 160, ... are the first point of each breath_id
first_data = data.loc[0::80, :]
# All entries are the last point of each breath_id
last_data = data.loc[79::80, :]
# Main-mode flag: breaths whose last u_in lies between 4.8 and 5.1
main_data = last_data[(last_data['u_in'] > 4.8) & (last_data['u_in'] < 5.1)]
main_mode_dict = dict(zip(main_data['breath_id'], [1] * len(main_data)))
data['main_mode'] = data['breath_id'].map(main_mode_dict)
data['main_mode'].fillna(0, inplace=True)
del main_data
del main_mode_dict
# u_in: first point, last point
u_in_first_dict = dict(zip(first_data['breath_id'], first_data['u_in']))
data['u_in_first'] = data['breath_id'].map(u_in_first_dict)
del u_in_first_dict
u_in_last_dict = dict(zip(first_data['breath_id'], last_data['u_in']))
data['u_in_last'] = data['breath_id'].map(u_in_last_dict)
del u_in_last_dict
# time(sec) of end point
time_end_dict = dict(zip(last_data['breath_id'], last_data['time_step']))
data['time_end'] = data['breath_id'].map(time_end_dict)
del time_end_dict
del last_data
# u_out1_timing flag and DataFrame: speed up
# Build uout1_data (fast version): the rows where u_out rises from 0 to 1
data['u_out_diff'] = data['u_out'].diff()
data['u_out_diff'].fillna(0, inplace=True)
data['u_out_diff'].replace(-1, 0, inplace=True)
uout1_data = data[data['u_out_diff'] == 1]
gc.collect()
# main_uout1 = uout1_data[uout1_data['main_mode']==1]
# nomain_uout1 = uout1_data[uout1_data['main_mode']==1]
# area_uout1: maps each breath to its first u_in value (note: not the cumulative area at the u_out transition)
uout1_area_dict = dict(zip(first_data['breath_id'], first_data['u_in']))
data['area_uout1'] = data['breath_id'].map(uout1_area_dict)
del uout1_area_dict
# time(sec) when u_out becomes 1
uout1_dict = dict(zip(uout1_data['breath_id'], uout1_data['time_step']))
data['time_uout1'] = data['breath_id'].map(uout1_dict)
del uout1_dict
# u_in when u_out becomes 1
u_in_uout1_dict = dict(zip(uout1_data['breath_id'], uout1_data['u_in']))
data['u_in_uout1'] = data['breath_id'].map(u_in_uout1_dict)
del u_in_uout1_dict
# Dict used to put a 0 at the first row of each 80-row breath cycle
first_0_dict = dict(zip(first_data['id'], [0] * len(uout1_data)))
del first_data
del uout1_data
gc.collect()
# Compute u_in_diff with a plain diff() instead of groupby (faster)
data['u_in_diff'] = data['u_in'].diff()
data['tmp'] = data['id'].map(first_0_dict)  # put 0 at the first row of each 80-row cycle
data.iloc[0::80, data.columns.get_loc('u_in_diff')] = data.iloc[0::80, data.columns.get_loc('tmp')]
# Create u_in vibration
data['diff_sign'] = np.sign(data['u_in_diff'])
data['sign_diff'] = data['diff_sign'].diff()
data['tmp'] = data['id'].map(first_0_dict) # put 0, the 80row cycle
data.iloc[0::80, data.columns.get_loc('sign_diff')] = data.iloc[0::80, data.columns.get_loc('tmp')]
del first_0_dict
# Count sign inversions per breath: take the absolute value of sign_diff and sum it
data['sign_diff'] = abs(data['sign_diff'])
sign_diff_dict = data.groupby('breath_id')['sign_diff'].sum().to_dict()
data['diff_vib'] = data['breath_id'].map(sign_diff_dict)
data.drop(['id', 'breath_id', 'one', 'count', 'breath_id_lag', 'breath_id_lag2', 'breath_id_lagsame',
'breath_id_lag2same'], axis=1, inplace=True)
if 'pressure' in data.columns:
data.drop('pressure', axis=1, inplace=True)
return data.fillna(0)
def make_features5(self, data):
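"""Roughly the union of make_features32 and make_features4: lag/diff features, one-hot
R/C combinations, rolling/EWM/expanding statistics, and the per-breath aggregates
(u_in_half, first/last point, main_mode flag, u_out-rise timing, sign-inversion count)."""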
data['area'] = data['time_step'] * data['u_in']
data['area'] = data.groupby('breath_id')['area'].cumsum()
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['u_in_lag1'] = data.groupby('breath_id')['u_in'].shift(1)
data['u_out_lag1'] = data.groupby('breath_id')['u_out'].shift(1)
data['u_in_lag_back1'] = data.groupby('breath_id')['u_in'].shift(-1)
data['u_out_lag_back1'] = data.groupby('breath_id')['u_out'].shift(-1)
data['u_in_lag2'] = data.groupby('breath_id')['u_in'].shift(2)
data['u_out_lag2'] = data.groupby('breath_id')['u_out'].shift(2)
data['u_in_lag_back2'] = data.groupby('breath_id')['u_in'].shift(-2)
data['u_out_lag_back2'] = data.groupby('breath_id')['u_out'].shift(-2)
data['u_in_lag3'] = data.groupby('breath_id')['u_in'].shift(3)
data['u_out_lag3'] = data.groupby('breath_id')['u_out'].shift(3)
data['u_in_lag_back3'] = data.groupby('breath_id')['u_in'].shift(-3)
data['u_out_lag_back3'] = data.groupby('breath_id')['u_out'].shift(-3)
data['u_in_lag4'] = data.groupby('breath_id')['u_in'].shift(4)
data['u_out_lag4'] = data.groupby('breath_id')['u_out'].shift(4)
data['u_in_lag_back4'] = data.groupby('breath_id')['u_in'].shift(-4)
data['u_out_lag_back4'] = data.groupby('breath_id')['u_out'].shift(-4)
data = data.fillna(0)
data['breath_id__u_in__max'] = data.groupby(['breath_id'])['u_in'].transform('max')
data['breath_id__u_out__max'] = data.groupby(['breath_id'])['u_out'].transform('max')
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['u_in_diff3'] = data['u_in'] - data['u_in_lag3']
data['u_out_diff3'] = data['u_out'] - data['u_out_lag3']
data['u_in_diff4'] = data['u_in'] - data['u_in_lag4']
data['u_out_diff4'] = data['u_out'] - data['u_out_lag4']
data['cross'] = data['u_in'] * data['u_out']
data['cross2'] = data['time_step'] * data['u_out']
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_cummean'] = data['u_in_cumsum'] / data['count']
data['breath_id_lag'] = data['breath_id'].shift(1).fillna(0)
data['breath_id_lag2'] = data['breath_id'].shift(2).fillna(0)
data['breath_id_lagsame'] = np.select([data['breath_id_lag'] == data['breath_id']], [1], 0)
data['breath_id_lag2same'] = np.select([data['breath_id_lag2'] == data['breath_id']], [1], 0)
data['breath_id__u_in_lag'] = data['u_in'].shift(1).fillna(0)
data['breath_id__u_in_lag'] = data['breath_id__u_in_lag'] * data['breath_id_lagsame']
data['breath_id__u_in_lag2'] = data['u_in'].shift(2).fillna(0)
data['breath_id__u_in_lag2'] = data['breath_id__u_in_lag2'] * data['breath_id_lag2same']
data['R_sum_c'] = (data['R'] + data['C']).astype(str)
data['R_mult_c'] = (data['R'] * data['C']).astype(str)
data['R'] = data['R'].astype(str)
data['C'] = data['C'].astype(str)
data['R__C'] = data["R"].astype(str) + '__' + data["C"].astype(str)
data = pd.get_dummies(data)
data[["15_in_sum", "15_in_min", "15_in_max", "15_in_mean"]] = (data \
.groupby('breath_id')['u_in'] \
.rolling(window=15, min_periods=1) \
.agg({"15_in_sum": "sum",
"15_in_min": "min",
"15_in_max": "max",
"15_in_mean": "mean"
# "15_in_std":"std"
}) \
.reset_index(level=0, drop=True))
data['u_in_lagback_diff1'] = data['u_in'] - data['u_in_lag_back1']
data['u_out_lagback_diff1'] = data['u_out'] - data['u_out_lag_back1']
data['u_in_lagback_diff2'] = data['u_in'] - data['u_in_lag_back2']
data['u_out_lagback_diff2'] = data['u_out'] - data['u_out_lag_back2']
data['ewm_u_in_mean'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).mean().reset_index(level=0,
drop=True)
data['ewm_u_in_std'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).std().reset_index(level=0, drop=True)
data['ewm_u_in_corr'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).corr().reset_index(level=0,
drop=True)
data['rolling_10_mean'] = data.groupby('breath_id')['u_in'].rolling(window=10,
min_periods=1).mean().reset_index(
level=0, drop=True)
data['rolling_10_max'] = data.groupby('breath_id')['u_in'].rolling(window=10, min_periods=1).max().reset_index(
level=0,
drop=True)
data['rolling_10_std'] = data.groupby('breath_id')['u_in'].rolling(window=10, min_periods=1).std().reset_index(
level=0,
drop=True)
data['expand_mean'] = data.groupby('breath_id')['u_in'].expanding(2).mean().reset_index(level=0, drop=True)
data['expand_max'] = data.groupby('breath_id')['u_in'].expanding(2).max().reset_index(level=0, drop=True)
data['expand_std'] = data.groupby('breath_id')['u_in'].expanding(2).std().reset_index(level=0, drop=True)
data["u_in_rolling_mean2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).mean()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_mean4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).mean()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_mean10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).mean()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_max2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).max()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_max4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).max()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_max10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).max()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_min2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).min()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_min4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).min()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_min10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).min()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_std2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).std()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_std4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).std()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_std10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).std()[
"u_in"].reset_index(drop=True)
g = data.groupby('breath_id')['u_in']
data['ewm_u_in_mean'] = g.ewm(halflife=10).mean() \
.reset_index(level=0, drop=True)
data['ewm_u_in_std'] = g.ewm(halflife=10).std() \
.reset_index(level=0, drop=True)
data['ewm_u_in_corr'] = g.ewm(halflife=10).corr() \
.reset_index(level=0, drop=True)
data['rolling_10_mean'] = g.rolling(window=10, min_periods=1).mean() \
.reset_index(level=0, drop=True)
data['rolling_10_max'] = g.rolling(window=10, min_periods=1).max() \
.reset_index(level=0, drop=True)
data['rolling_10_std'] = g.rolling(window=10, min_periods=1).std() \
.reset_index(level=0, drop=True)
data['expand_mean'] = g.expanding(2).mean() \
.reset_index(level=0, drop=True)
data['expand_max'] = g.expanding(2).max() \
.reset_index(level=0, drop=True)
data['expand_std'] = g.expanding(2).std() \
.reset_index(level=0, drop=True)
data['u_in_lag_back10'] = data.groupby('breath_id')['u_in'].shift(-10)
data['u_out_lag_back10'] = data.groupby('breath_id')['u_out'].shift(-10)
data['time_step_diff'] = data.groupby('breath_id')['time_step'].diff().fillna(0)
### rolling window ts feats
data['ewm_u_in_mean'] = data.groupby('breath_id')['u_in'].ewm(halflife=9).mean().reset_index(level=0,
drop=True)
data['ewm_u_in_std'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).std().reset_index(level=0,
drop=True) ## could add covar?
data['ewm_u_in_corr'] = data.groupby('breath_id')['u_in'].ewm(halflife=15).corr().reset_index(level=0,
drop=True) # self umin corr
# data['ewm_u_in_corr'] = data.groupby('breath_id')['u_in'].ewm(halflife=6).corr(data.groupby('breath_id')["u_out"]).reset_index(level=0,drop=True) # corr with u_out # error
## rolling window of 15 periods
data[["15_in_sum", "15_in_min", "15_in_max", "15_in_mean", "15_out_std"]] = data.groupby('breath_id')[
'u_in'].rolling(window=15, min_periods=1).agg(
{"15_in_sum": "sum", "15_in_min": "min", "15_in_max": "max", "15_in_mean": "mean",
"15_in_std": "std"}).reset_index(level=0, drop=True)
# data[["45_in_sum","45_in_min","45_in_max","45_in_mean","45_out_std"]] = data.groupby('breath_id')['u_in'].rolling(window=45,min_periods=1).agg({"45_in_sum":"sum","45_in_min":"min","45_in_max":"max","45_in_mean":"mean","45_in_std":"std"}).reset_index(level=0,drop=True)
data[["45_in_sum", "45_in_min", "45_in_max", "45_in_mean", "45_out_std"]] = data.groupby('breath_id')[
'u_in'].rolling(window=45, min_periods=1).agg(
{"45_in_sum": "sum", "45_in_min": "min", "45_in_max": "max", "45_in_mean": "mean",
"45_in_std": "std"}).reset_index(level=0, drop=True)
data[["15_out_mean"]] = data.groupby('breath_id')['u_out'].rolling(window=15, min_periods=1).agg(
{"15_out_mean": "mean"}).reset_index(level=0, drop=True)
data['time_delta'] = data['time_step'].diff()
data['time_delta'].fillna(0, inplace=True)
data['time_delta'].mask(data['time_delta'] < 0, 0, inplace=True)
data['tmp'] = data['time_delta'] * data['u_in']
data['area_true'] = data.groupby('breath_id')['tmp'].cumsum()
# u_in_max_dict = data.groupby('breath_id')['u_in'].max().to_dict()
# data['u_in_max'] = data['breath_id'].map(u_in_max_dict)
# u_in_min_dict = data.groupby('breath_id')['u_in'].min().to_dict()
# data['u_in_min'] = data['breath_id'].map(u_in_min_dict)
u_in_mean_dict = data.groupby('breath_id')['u_in'].mean().to_dict()
data['u_in_mean'] = data['breath_id'].map(u_in_mean_dict)
del u_in_mean_dict
u_in_std_dict = data.groupby('breath_id')['u_in'].std().to_dict()
data['u_in_std'] = data['breath_id'].map(u_in_std_dict)
del u_in_std_dict
# u_in_half: u_in during the inspiratory phase (from time 0 until u_out rises to 1, roughly the first 1.0 s of each breath)
data['tmp'] = data['u_out'] * (-1) + 1 # inversion of u_out
data['u_in_half'] = data['tmp'] * data['u_in']
# u_in_half: max, min, mean, std
u_in_half_max_dict = data.groupby('breath_id')['u_in_half'].max().to_dict()
data['u_in_half_max'] = data['breath_id'].map(u_in_half_max_dict)
del u_in_half_max_dict
u_in_half_min_dict = data.groupby('breath_id')['u_in_half'].min().to_dict()
data['u_in_half_min'] = data['breath_id'].map(u_in_half_min_dict)
del u_in_half_min_dict
u_in_half_mean_dict = data.groupby('breath_id')['u_in_half'].mean().to_dict()
data['u_in_half_mean'] = data['breath_id'].map(u_in_half_mean_dict)
del u_in_half_mean_dict
u_in_half_std_dict = data.groupby('breath_id')['u_in_half'].std().to_dict()
data['u_in_half_std'] = data['breath_id'].map(u_in_half_std_dict)
del u_in_half_std_dict
gc.collect()
# Each breath has exactly 80 rows, so rows 0, 80, 160, ... are the first point of each breath_id
first_data = data.loc[0::80, :]
# All entries are the last point of each breath_id
last_data = data.loc[79::80, :]
# Main-mode flag: breaths whose last u_in lies between 4.8 and 5.1
main_data = last_data[(last_data['u_in'] > 4.8) & (last_data['u_in'] < 5.1)]
main_mode_dict = dict(zip(main_data['breath_id'], [1] * len(main_data)))
data['main_mode'] = data['breath_id'].map(main_mode_dict)
data['main_mode'].fillna(0, inplace=True)
del main_data
del main_mode_dict
# u_in: first point, last point
u_in_first_dict = dict(zip(first_data['breath_id'], first_data['u_in']))
data['u_in_first'] = data['breath_id'].map(u_in_first_dict)
del u_in_first_dict
u_in_last_dict = dict(zip(first_data['breath_id'], last_data['u_in']))
data['u_in_last'] = data['breath_id'].map(u_in_last_dict)
del u_in_last_dict
# time(sec) of end point
time_end_dict = dict(zip(last_data['breath_id'], last_data['time_step']))
data['time_end'] = data['breath_id'].map(time_end_dict)
del time_end_dict
del last_data
# u_out1_timing flag and DataFrame: speed up
# Build uout1_data (fast version): the rows where u_out rises from 0 to 1
data['u_out_diff'] = data['u_out'].diff()
data['u_out_diff'].fillna(0, inplace=True)
data['u_out_diff'].replace(-1, 0, inplace=True)
uout1_data = data[data['u_out_diff'] == 1]
gc.collect()
# main_uout1 = uout1_data[uout1_data['main_mode']==1]
# nomain_uout1 = uout1_data[uout1_data['main_mode']==1]
# area_uout1: maps each breath to its first u_in value (note: not the cumulative area at the u_out transition)
uout1_area_dict = dict(zip(first_data['breath_id'], first_data['u_in']))
data['area_uout1'] = data['breath_id'].map(uout1_area_dict)
del uout1_area_dict
# time(sec) when u_out becomes 1
uout1_dict = dict(zip(uout1_data['breath_id'], uout1_data['time_step']))
data['time_uout1'] = data['breath_id'].map(uout1_dict)
del uout1_dict
# u_in when u_out becomes 1
u_in_uout1_dict = dict(zip(uout1_data['breath_id'], uout1_data['u_in']))
data['u_in_uout1'] = data['breath_id'].map(u_in_uout1_dict)
del u_in_uout1_dict
# Dict used to put a 0 at the first row of each 80-row breath cycle
first_0_dict = dict(zip(first_data['id'], [0] * len(uout1_data)))
del first_data
del uout1_data
gc.collect()
# Compute u_in_diff with a plain diff() instead of groupby (faster)
data['u_in_diff'] = data['u_in'].diff()
data['tmp'] = data['id'].map(first_0_dict)  # put 0 at the first row of each 80-row cycle
data.iloc[0::80, data.columns.get_loc('u_in_diff')] = data.iloc[0::80, data.columns.get_loc('tmp')]
# Create u_in vibration
data['diff_sign'] = np.sign(data['u_in_diff'])
data['sign_diff'] = data['diff_sign'].diff()
data['tmp'] = data['id'].map(first_0_dict) # put 0, the 80row cycle
data.iloc[0::80, data.columns.get_loc('sign_diff')] = data.iloc[0::80, data.columns.get_loc('tmp')]
del first_0_dict
# Count sign inversions per breath: take the absolute value of sign_diff and sum it
data['sign_diff'] = abs(data['sign_diff'])
sign_diff_dict = data.groupby('breath_id')['sign_diff'].sum().to_dict()
data['diff_vib'] = data['breath_id'].map(sign_diff_dict)
data.drop(['id', 'breath_id', 'one', 'count', 'breath_id_lag', 'breath_id_lag2', 'breath_id_lagsame',
'breath_id_lag2same'], axis=1, inplace=True)
if 'pressure' in data.columns:
data.drop('pressure', axis=1, inplace=True)
return data.fillna(0)
def make_features6(self, data):
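"""Compact feature set driven by cfg.datamodule.use_lag: cumulative/cross features,
boundary-masked u_in/u_out lags, breath_time, and categorical encodings of R and C
(R_cate, C_cate, RC_sum, RC_dot). Returns only the ALL_FEATURES columns (continuous +
lag features plus any bookkeeping / target columns present). Expects the raw columns
breath_id, time_step, u_in, u_out, R and C."""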
# CATE_FEATURES = ['R_cate', 'C_cate', 'RC_dot', 'RC_sum']
CONT_FEATURES = ['u_in', 'u_out', 'time_step'] + ['u_in_cumsum', 'u_in_cummean', 'area', 'cross', 'cross2'] + [
'R_cate', 'C_cate']
LAG_FEATURES = ['breath_time']
LAG_FEATURES += [f'u_in_lag_{i}' for i in range(1, self.cfg.datamodule.use_lag + 1)]
# LAG_FEATURES += [f'u_in_lag_{i}_back' for i in range(1, USE_LAG+1)]
LAG_FEATURES += [f'u_in_time{i}' for i in range(1, self.cfg.datamodule.use_lag + 1)]
# LAG_FEATURES += [f'u_in_time{i}_back' for i in range(1, USE_LAG+1)]
LAG_FEATURES += [f'u_out_lag_{i}' for i in range(1, self.cfg.datamodule.use_lag + 1)]
# LAG_FEATURES += [f'u_out_lag_{i}_back' for i in range(1, USE_LAG+1)]
# ALL_FEATURES = CATE_FEATURES + CONT_FEATURES + LAG_FEATURES
ALL_FEATURES = CONT_FEATURES + LAG_FEATURES
for col in ['id', 'breath_id', 'one', 'count', 'breath_id_lag', 'breath_id_lag2', 'breath_id_lagsame',
'breath_id_lag2same', 'pressure', 'fold']:
if col in data.columns:
ALL_FEATURES.append(col)
data['time_delta'] = data.groupby('breath_id')['time_step'].diff().fillna(0)
data['delta'] = data['time_delta'] * data['u_in']
data['area'] = data.groupby('breath_id')['delta'].cumsum()
data['cross'] = data['u_in'] * data['u_out']
data['cross2'] = data['time_step'] * data['u_out']
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_cummean'] = data['u_in_cumsum'] / data['count']
data = data.drop(['count', 'one'], axis=1)
for lag in range(1, self.cfg.datamodule.use_lag + 1):
data[f'breath_id_lag{lag}'] = data['breath_id'].shift(lag).fillna(0)
data[f'breath_id_lag{lag}same'] = np.select([data[f'breath_id_lag{lag}'] == data['breath_id']], [1], 0)
# u_in
data[f'u_in_lag_{lag}'] = data['u_in'].shift(lag).fillna(0) * data[f'breath_id_lag{lag}same']
# data[f'u_in_lag_{lag}_back'] = data['u_in'].shift(-lag).fillna(0) * data[f'breath_id_lag{lag}same']
data[f'u_in_time{lag}'] = data['u_in'] - data[f'u_in_lag_{lag}']
# data[f'u_in_time{lag}_back'] = data['u_in'] - data[f'u_in_lag_{lag}_back']
data[f'u_out_lag_{lag}'] = data['u_out'].shift(lag).fillna(0) * data[f'breath_id_lag{lag}same']
# data[f'u_out_lag_{lag}_back'] = data['u_out'].shift(-lag).fillna(0) * data[f'breath_id_lag{lag}same']
# breath_time
data['time_step_lag'] = data['time_step'].shift(1).fillna(0) * data[f'breath_id_lag{lag}same']
data['breath_time'] = data['time_step'] - data['time_step_lag']
drop_columns = ['time_step_lag']
drop_columns += [f'breath_id_lag{i}' for i in range(1, self.cfg.datamodule.use_lag + 1)]
drop_columns += [f'breath_id_lag{i}same' for i in range(1, self.cfg.datamodule.use_lag + 1)]
data = data.drop(drop_columns, axis=1)
# fill na by zero
data = data.fillna(0)
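# Encode the ventilator settings: R (resistance) and C (compliance) are mapped to small
# categorical codes, and RC_sum / RC_dot enumerate the distinct values of R + C and R * C.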
c_dic = {10: 0, 20: 1, 50: 2}
r_dic = {5: 0, 20: 1, 50: 2}
rc_sum_dic = {v: i for i, v in enumerate([15, 25, 30, 40, 55, 60, 70, 100])}
rc_dot_dic = {v: i for i, v in enumerate([50, 100, 200, 250, 400, 500, 2500, 1000])}
data['C_cate'] = data['C'].map(c_dic)
data['R_cate'] = data['R'].map(r_dic)
data['RC_sum'] = (data['R'] + data['C']).map(rc_sum_dic)
data['RC_dot'] = (data['R'] * data['C']).map(rc_dot_dic)
norm_features = CONT_FEATURES + LAG_FEATURES
for col in ['id', 'breath_id', 'one', 'count', 'breath_id_lag', 'breath_id_lag2', 'breath_id_lagsame',
'breath_id_lag2same', 'pressure', 'fold']:
if col in data.columns:
norm_features.append(col)
if 'fold' in data.columns:
norm_features.append('fold')
norm_features = list(set(norm_features))
ALL_FEATURES = list(set(ALL_FEATURES))
print('data.columns', data.columns)
print('ALL_FEATURES', ALL_FEATURES)
# assert norm_features == ALL_FEATURES, 'something went wrong'
return data[ALL_FEATURES].fillna(0)
def make_features62(self, data):
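"""Same construction as make_features6, plus pd.get_dummies and the pre-computed
clusterIDeu_* / clusterIDdtw_* one-hot columns appended to the returned frame."""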
# CATE_FEATURES = ['R_cate', 'C_cate', 'RC_dot', 'RC_sum']
CONT_FEATURES = ['u_in', 'u_out', 'time_step'] + ['u_in_cumsum', 'u_in_cummean', 'area', 'cross', 'cross2'] + [
'R_cate', 'C_cate']
LAG_FEATURES = ['breath_time']
LAG_FEATURES += [f'u_in_lag_{i}' for i in range(1, self.cfg.datamodule.use_lag + 1)]
# LAG_FEATURES += [f'u_in_lag_{i}_back' for i in range(1, USE_LAG+1)]
LAG_FEATURES += [f'u_in_time{i}' for i in range(1, self.cfg.datamodule.use_lag + 1)]
# LAG_FEATURES += [f'u_in_time{i}_back' for i in range(1, USE_LAG+1)]
LAG_FEATURES += [f'u_out_lag_{i}' for i in range(1, self.cfg.datamodule.use_lag + 1)]
# LAG_FEATURES += [f'u_out_lag_{i}_back' for i in range(1, USE_LAG+1)]
# ALL_FEATURES = CATE_FEATURES + CONT_FEATURES + LAG_FEATURES
ALL_FEATURES = CONT_FEATURES + LAG_FEATURES
for col in ['id', 'breath_id', 'one', 'count', 'breath_id_lag', 'breath_id_lag2', 'breath_id_lagsame',
'breath_id_lag2same', 'pressure', 'fold']:
if col in data.columns:
ALL_FEATURES.append(col)
data['time_delta'] = data.groupby('breath_id')['time_step'].diff().fillna(0)
data['delta'] = data['time_delta'] * data['u_in']
data['area'] = data.groupby('breath_id')['delta'].cumsum()
data['cross'] = data['u_in'] * data['u_out']
data['cross2'] = data['time_step'] * data['u_out']
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_cummean'] = data['u_in_cumsum'] / data['count']
data = data.drop(['count', 'one'], axis=1)
for lag in range(1, self.cfg.datamodule.use_lag + 1):
data[f'breath_id_lag{lag}'] = data['breath_id'].shift(lag).fillna(0)
data[f'breath_id_lag{lag}same'] = np.select([data[f'breath_id_lag{lag}'] == data['breath_id']], [1], 0)
# u_in
data[f'u_in_lag_{lag}'] = data['u_in'].shift(lag).fillna(0) * data[f'breath_id_lag{lag}same']
# data[f'u_in_lag_{lag}_back'] = data['u_in'].shift(-lag).fillna(0) * data[f'breath_id_lag{lag}same']
data[f'u_in_time{lag}'] = data['u_in'] - data[f'u_in_lag_{lag}']
# data[f'u_in_time{lag}_back'] = data['u_in'] - data[f'u_in_lag_{lag}_back']
data[f'u_out_lag_{lag}'] = data['u_out'].shift(lag).fillna(0) * data[f'breath_id_lag{lag}same']
# data[f'u_out_lag_{lag}_back'] = data['u_out'].shift(-lag).fillna(0) * data[f'breath_id_lag{lag}same']
# breath_time
data['time_step_lag'] = data['time_step'].shift(1).fillna(0) * data[f'breath_id_lag{lag}same']
data['breath_time'] = data['time_step'] - data['time_step_lag']
drop_columns = ['time_step_lag']
drop_columns += [f'breath_id_lag{i}' for i in range(1, self.cfg.datamodule.use_lag + 1)]
drop_columns += [f'breath_id_lag{i}same' for i in range(1, self.cfg.datamodule.use_lag + 1)]
data = data.drop(drop_columns, axis=1)
# fill na by zero
data = data.fillna(0)
c_dic = {10: 0, 20: 1, 50: 2}
r_dic = {5: 0, 20: 1, 50: 2}
rc_sum_dic = {v: i for i, v in enumerate([15, 25, 30, 40, 55, 60, 70, 100])}
rc_dot_dic = {v: i for i, v in enumerate([50, 100, 200, 250, 400, 500, 2500, 1000])}
data['C_cate'] = data['C'].map(c_dic)
data['R_cate'] = data['R'].map(r_dic)
data['RC_sum'] = (data['R'] + data['C']).map(rc_sum_dic)
data['RC_dot'] = (data['R'] * data['C']).map(rc_dot_dic)
data = pd.get_dummies(data)
norm_features = CONT_FEATURES + LAG_FEATURES
for col in ['id', 'breath_id', 'one', 'count', 'breath_id_lag', 'breath_id_lag2', 'breath_id_lagsame',
'breath_id_lag2same', 'pressure', 'fold']:
if col in data.columns:
norm_features.append(col)
if 'fold' in data.columns:
norm_features.append('fold')
norm_features = list(set(norm_features))
ALL_FEATURES = list(set(ALL_FEATURES))
print('data.columns', data.columns)
print('ALL_FEATURES', ALL_FEATURES)
# assert norm_features == ALL_FEATURES, 'something went wrong'
return data[ALL_FEATURES + ['clusterIDeu_0.0', 'clusterIDeu_1.0',
'clusterIDeu_2.0', 'clusterIDeu_3.0', 'clusterIDeu_4.0',
'clusterIDeu_5.0', 'clusterIDeu_6.0', 'clusterIDeu_7.0',
'clusterIDeu_8.0', 'clusterIDeu_9.0', 'clusterIDdtw_0.0',
'clusterIDdtw_1.0', 'clusterIDdtw_2.0', 'clusterIDdtw_3.0',
'clusterIDdtw_4.0', 'clusterIDdtw_5.0', 'clusterIDdtw_6.0',
'clusterIDdtw_7.0', 'clusterIDdtw_8.0', 'clusterIDdtw_9.0']].fillna(0)
def make_features7(self, data):
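"""Lag/diff features with one-hot R/C combinations, backward lags up to 10, rolling
statistics of u_out (window 15) and of u_in over several window sizes, EWM statistics
for several halflives, and expanding statistics. Drops the bookkeeping columns and
'pressure' before returning."""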
data['area'] = data['time_step'] * data['u_in']
data['area'] = data.groupby('breath_id')['area'].cumsum()
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['u_in_lag1'] = data.groupby('breath_id')['u_in'].shift(1)
data['u_out_lag1'] = data.groupby('breath_id')['u_out'].shift(1)
data['u_in_lag_back1'] = data.groupby('breath_id')['u_in'].shift(-1)
data['u_out_lag_back1'] = data.groupby('breath_id')['u_out'].shift(-1)
data['u_in_lag2'] = data.groupby('breath_id')['u_in'].shift(2)
data['u_out_lag2'] = data.groupby('breath_id')['u_out'].shift(2)
data['u_in_lag_back2'] = data.groupby('breath_id')['u_in'].shift(-2)
data['u_out_lag_back2'] = data.groupby('breath_id')['u_out'].shift(-2)
data['u_in_lag3'] = data.groupby('breath_id')['u_in'].shift(3)
data['u_out_lag3'] = data.groupby('breath_id')['u_out'].shift(3)
data['u_in_lag_back3'] = data.groupby('breath_id')['u_in'].shift(-3)
data['u_out_lag_back3'] = data.groupby('breath_id')['u_out'].shift(-3)
data['u_in_lag4'] = data.groupby('breath_id')['u_in'].shift(4)
data['u_out_lag4'] = data.groupby('breath_id')['u_out'].shift(4)
data['u_in_lag_back4'] = data.groupby('breath_id')['u_in'].shift(-4)
data['u_out_lag_back4'] = data.groupby('breath_id')['u_out'].shift(-4)
data = data.fillna(0)
data['breath_id__u_in__max'] = data.groupby(['breath_id'])['u_in'].transform('max')
data['breath_id__u_out__max'] = data.groupby(['breath_id'])['u_out'].transform('max')
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['u_in_diff3'] = data['u_in'] - data['u_in_lag3']
data['u_out_diff3'] = data['u_out'] - data['u_out_lag3']
data['u_in_diff4'] = data['u_in'] - data['u_in_lag4']
data['u_out_diff4'] = data['u_out'] - data['u_out_lag4']
data['cross'] = data['u_in'] * data['u_out']
data['cross2'] = data['time_step'] * data['u_out']
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_cummean'] = data['u_in_cumsum'] / data['count']
data['breath_id_lag'] = data['breath_id'].shift(1).fillna(0)
data['breath_id_lag2'] = data['breath_id'].shift(2).fillna(0)
data['breath_id_lagsame'] = np.select([data['breath_id_lag'] == data['breath_id']], [1], 0)
data['breath_id_lag2same'] = np.select([data['breath_id_lag2'] == data['breath_id']], [1], 0)
data['breath_id__u_in_lag'] = data['u_in'].shift(1).fillna(0)
data['breath_id__u_in_lag'] = data['breath_id__u_in_lag'] * data['breath_id_lagsame']
data['breath_id__u_in_lag2'] = data['u_in'].shift(2).fillna(0)
data['breath_id__u_in_lag2'] = data['breath_id__u_in_lag2'] * data['breath_id_lag2same']
data['R_sum_c'] = (data['R'] + data['C']).astype(str)
data['R_mult_c'] = (data['R'] * data['C']).astype(str)
data['R'] = data['R'].astype(str)
data['C'] = data['C'].astype(str)
data['R__C'] = data["R"].astype(str) + '__' + data["C"].astype(str)
data = pd.get_dummies(data)
data['u_in_lagback_diff1'] = data['u_in'] - data['u_in_lag_back1']
data['u_out_lagback_diff1'] = data['u_out'] - data['u_out_lag_back1']
data['u_in_lagback_diff2'] = data['u_in'] - data['u_in_lag_back2']
data['u_out_lagback_diff2'] = data['u_out'] - data['u_out_lag_back2']
data['u_in_lagback_diff3'] = data['u_in'] - data['u_in_lag_back3']
data['u_out_lagback_diff3'] = data['u_out'] - data['u_out_lag_back3']
data['u_in_lagback_diff4'] = data['u_in'] - data['u_in_lag_back4']
data['u_out_lagback_diff4'] = data['u_out'] - data['u_out_lag_back4']
######
data['u_in_lag_back10'] = data.groupby('breath_id')['u_in'].shift(-10)
data['u_out_lag_back10'] = data.groupby('breath_id')['u_out'].shift(-10)
data['u_in_lagback_diff10'] = data['u_in'] - data['u_in_lag_back10']
data['u_out_lagback_diff10'] = data['u_out'] - data['u_out_lag_back10']
data['time_step_diff'] = data['time_step'] - data.groupby('breath_id')['time_step'].shift().fillna(0)
data[["15_out_sum", "15_out_min", "15_out_max", "15_out_mean", "15_out_std"]] = (data \
.groupby('breath_id')['u_out'] \
.rolling(window=15, min_periods=1) \
.agg({"15_out_sum": "sum",
"15_out_min": "min",
"15_out_max": "max",
"15_out_mean": "mean",
"15_out_std": "std"
}).reset_index(level=0, drop=True))
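# Rolling statistics of u_in for several window sizes, then EWM statistics of u_in for
# several halflives; the column names carry the window size / halflife.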
for window in [2, 4, 5, 10, 15, 20, 30, 45]:
data[[f"{window}_in_sum", f"{window}_in_min", f"{window}_in_max",
f"{window}_in_mean", f"{window}_in_std"]] = (data.groupby('breath_id')['u_in'].rolling(window=window,
min_periods=1) \
.agg({f"{window}_in_sum": "sum",
f"{window}_in_min": "min",
f"{window}_in_max": "max",
f"{window}_in_mean": "mean",
f"{window}_in_std": "std"
}).reset_index(level=0,
drop=True))
for halflife in [5, 9, 10, 15, 20]:
data[f'ewm_u_in_mean_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).mean().reset_index(level=0,
drop=True)
data[f'ewm_u_in_std_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).std().reset_index(level=0, drop=True)
data[f'ewm_u_in_corr_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).corr().reset_index(level=0,
drop=True)
data['expand_mean'] = data.groupby('breath_id')['u_in'].expanding(2).mean().reset_index(level=0, drop=True)
data['expand_max'] = data.groupby('breath_id')['u_in'].expanding(2).max().reset_index(level=0, drop=True)
data['expand_std'] = data.groupby('breath_id')['u_in'].expanding(2).std().reset_index(level=0, drop=True)
data.drop(['id', 'breath_id', 'one', 'count', 'breath_id_lag', 'breath_id_lag2', 'breath_id_lagsame',
'breath_id_lag2same'], axis=1, inplace=True)
if 'pressure' in data.columns:
data.drop('pressure', axis=1, inplace=True)
return data.fillna(0)
def make_features8(self, data):
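"""Like make_features7 but adds the categorical codes C_cate, R_cate, RC_sum and RC_dot
alongside the one-hot R/C features, and keeps the id/breath_id bookkeeping columns."""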
data['area'] = data['time_step'] * data['u_in']
data['area'] = data.groupby('breath_id')['area'].cumsum()
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['u_in_lag1'] = data.groupby('breath_id')['u_in'].shift(1)
data['u_out_lag1'] = data.groupby('breath_id')['u_out'].shift(1)
data['u_in_lag_back1'] = data.groupby('breath_id')['u_in'].shift(-1)
data['u_out_lag_back1'] = data.groupby('breath_id')['u_out'].shift(-1)
data['u_in_lag2'] = data.groupby('breath_id')['u_in'].shift(2)
data['u_out_lag2'] = data.groupby('breath_id')['u_out'].shift(2)
data['u_in_lag_back2'] = data.groupby('breath_id')['u_in'].shift(-2)
data['u_out_lag_back2'] = data.groupby('breath_id')['u_out'].shift(-2)
data['u_in_lag3'] = data.groupby('breath_id')['u_in'].shift(3)
data['u_out_lag3'] = data.groupby('breath_id')['u_out'].shift(3)
data['u_in_lag_back3'] = data.groupby('breath_id')['u_in'].shift(-3)
data['u_out_lag_back3'] = data.groupby('breath_id')['u_out'].shift(-3)
data['u_in_lag4'] = data.groupby('breath_id')['u_in'].shift(4)
data['u_out_lag4'] = data.groupby('breath_id')['u_out'].shift(4)
data['u_in_lag_back4'] = data.groupby('breath_id')['u_in'].shift(-4)
data['u_out_lag_back4'] = data.groupby('breath_id')['u_out'].shift(-4)
data = data.fillna(0)
data['breath_id__u_in__max'] = data.groupby(['breath_id'])['u_in'].transform('max')
data['breath_id__u_out__max'] = data.groupby(['breath_id'])['u_out'].transform('max')
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['u_in_diff3'] = data['u_in'] - data['u_in_lag3']
data['u_out_diff3'] = data['u_out'] - data['u_out_lag3']
data['u_in_diff4'] = data['u_in'] - data['u_in_lag4']
data['u_out_diff4'] = data['u_out'] - data['u_out_lag4']
data['cross'] = data['u_in'] * data['u_out']
data['cross2'] = data['time_step'] * data['u_out']
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_cummean'] = data['u_in_cumsum'] / data['count']
data['breath_id_lag'] = data['breath_id'].shift(1).fillna(0)
data['breath_id_lag2'] = data['breath_id'].shift(2).fillna(0)
data['breath_id_lagsame'] = np.select([data['breath_id_lag'] == data['breath_id']], [1], 0)
data['breath_id_lag2same'] = np.select([data['breath_id_lag2'] == data['breath_id']], [1], 0)
data['breath_id__u_in_lag'] = data['u_in'].shift(1).fillna(0)
data['breath_id__u_in_lag'] = data['breath_id__u_in_lag'] * data['breath_id_lagsame']
data['breath_id__u_in_lag2'] = data['u_in'].shift(2).fillna(0)
data['breath_id__u_in_lag2'] = data['breath_id__u_in_lag2'] * data['breath_id_lag2same']
c_dic = {10: 0, 20: 1, 50: 2}
r_dic = {5: 0, 20: 1, 50: 2}
rc_sum_dic = {v: i for i, v in enumerate([15, 25, 30, 40, 55, 60, 70, 100])}
rc_dot_dic = {v: i for i, v in enumerate([50, 100, 200, 250, 400, 500, 2500, 1000])}
data['C_cate'] = data['C'].map(c_dic)
data['R_cate'] = data['R'].map(r_dic)
data['RC_sum'] = (data['R'] + data['C']).map(rc_sum_dic)
data['RC_dot'] = (data['R'] * data['C']).map(rc_dot_dic)
data['R_sum_c'] = (data['R'] + data['C']).astype(str)
data['R_mult_c'] = (data['R'] * data['C']).astype(str)
data['R'] = data['R'].astype(str)
data['C'] = data['C'].astype(str)
data['R__C'] = data["R"].astype(str) + '__' + data["C"].astype(str)
data = pd.get_dummies(data)
data['u_in_lagback_diff1'] = data['u_in'] - data['u_in_lag_back1']
data['u_out_lagback_diff1'] = data['u_out'] - data['u_out_lag_back1']
data['u_in_lagback_diff2'] = data['u_in'] - data['u_in_lag_back2']
data['u_out_lagback_diff2'] = data['u_out'] - data['u_out_lag_back2']
data['u_in_lagback_diff3'] = data['u_in'] - data['u_in_lag_back3']
data['u_out_lagback_diff3'] = data['u_out'] - data['u_out_lag_back3']
data['u_in_lagback_diff4'] = data['u_in'] - data['u_in_lag_back4']
data['u_out_lagback_diff4'] = data['u_out'] - data['u_out_lag_back4']
######
data['u_in_lag_back10'] = data.groupby('breath_id')['u_in'].shift(-10)
data['u_out_lag_back10'] = data.groupby('breath_id')['u_out'].shift(-10)
data['u_in_lagback_diff10'] = data['u_in'] - data['u_in_lag_back10']
data['u_out_lagback_diff10'] = data['u_out'] - data['u_out_lag_back10']
data['time_step_diff'] = data['time_step'] - data.groupby('breath_id')['time_step'].shift().fillna(0)
data[["15_out_sum", "15_out_min", "15_out_max", "15_out_mean", "15_out_std"]] = (data \
.groupby('breath_id')['u_out'] \
.rolling(window=15, min_periods=1) \
.agg({"15_out_sum": "sum",
"15_out_min": "min",
"15_out_max": "max",
"15_out_mean": "mean",
"15_out_std": "std"
}).reset_index(level=0, drop=True))
for window in [2, 4, 5, 10, 15, 20, 30, 45]:
data[[f"{window}_in_sum", f"{window}_in_min", f"{window}_in_max",
f"{window}_in_mean", f"{window}_in_std"]] = (data.groupby('breath_id')['u_in'].rolling(window=window,
min_periods=1) \
.agg({f"{window}_in_sum": "sum",
f"{window}_in_min": "min",
f"{window}_in_max": "max",
f"{window}_in_mean": "mean",
f"{window}_in_std": "std"
}).reset_index(level=0,
drop=True))
for halflife in [5, 9, 10, 15, 20]:
data[f'ewm_u_in_mean_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).mean().reset_index(level=0,
drop=True)
data[f'ewm_u_in_std_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).std().reset_index(level=0, drop=True)
data[f'ewm_u_in_corr_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).corr().reset_index(level=0,
drop=True)
data['expand_mean'] = data.groupby('breath_id')['u_in'].expanding(2).mean().reset_index(level=0, drop=True)
data['expand_max'] = data.groupby('breath_id')['u_in'].expanding(2).max().reset_index(level=0, drop=True)
data['expand_std'] = data.groupby('breath_id')['u_in'].expanding(2).std().reset_index(level=0, drop=True)
return data.fillna(0)
def make_features82(self, data):
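"""Variant of make_features8 built from the same lag/diff, categorical, rolling, EWM
and expanding features."""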
data['area'] = data['time_step'] * data['u_in']
data['area'] = data.groupby('breath_id')['area'].cumsum()
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['u_in_lag1'] = data.groupby('breath_id')['u_in'].shift(1)
data['u_out_lag1'] = data.groupby('breath_id')['u_out'].shift(1)
data['u_in_lag_back1'] = data.groupby('breath_id')['u_in'].shift(-1)
data['u_out_lag_back1'] = data.groupby('breath_id')['u_out'].shift(-1)
data['u_in_lag2'] = data.groupby('breath_id')['u_in'].shift(2)
data['u_out_lag2'] = data.groupby('breath_id')['u_out'].shift(2)
data['u_in_lag_back2'] = data.groupby('breath_id')['u_in'].shift(-2)
data['u_out_lag_back2'] = data.groupby('breath_id')['u_out'].shift(-2)
data['u_in_lag3'] = data.groupby('breath_id')['u_in'].shift(3)
data['u_out_lag3'] = data.groupby('breath_id')['u_out'].shift(3)
data['u_in_lag_back3'] = data.groupby('breath_id')['u_in'].shift(-3)
data['u_out_lag_back3'] = data.groupby('breath_id')['u_out'].shift(-3)
data['u_in_lag4'] = data.groupby('breath_id')['u_in'].shift(4)
data['u_out_lag4'] = data.groupby('breath_id')['u_out'].shift(4)
data['u_in_lag_back4'] = data.groupby('breath_id')['u_in'].shift(-4)
data['u_out_lag_back4'] = data.groupby('breath_id')['u_out'].shift(-4)
data = data.fillna(0)
data['breath_id__u_in__max'] = data.groupby(['breath_id'])['u_in'].transform('max')
data['breath_id__u_out__max'] = data.groupby(['breath_id'])['u_out'].transform('max')
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['u_in_diff3'] = data['u_in'] - data['u_in_lag3']
data['u_out_diff3'] = data['u_out'] - data['u_out_lag3']
data['u_in_diff4'] = data['u_in'] - data['u_in_lag4']
data['u_out_diff4'] = data['u_out'] - data['u_out_lag4']
data['cross'] = data['u_in'] * data['u_out']
data['cross2'] = data['time_step'] * data['u_out']
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_cummean'] = data['u_in_cumsum'] / data['count']
data['breath_id_lag'] = data['breath_id'].shift(1).fillna(0)
data['breath_id_lag2'] = data['breath_id'].shift(2).fillna(0)
data['breath_id_lagsame'] = np.select([data['breath_id_lag'] == data['breath_id']], [1], 0)
data['breath_id_lag2same'] = np.select([data['breath_id_lag2'] == data['breath_id']], [1], 0)
data['breath_id__u_in_lag'] = data['u_in'].shift(1).fillna(0)
data['breath_id__u_in_lag'] = data['breath_id__u_in_lag'] * data['breath_id_lagsame']
data['breath_id__u_in_lag2'] = data['u_in'].shift(2).fillna(0)
data['breath_id__u_in_lag2'] = data['breath_id__u_in_lag2'] * data['breath_id_lag2same']
c_dic = {10: 0, 20: 1, 50: 2}
r_dic = {5: 0, 20: 1, 50: 2}
rc_sum_dic = {v: i for i, v in enumerate([15, 25, 30, 40, 55, 60, 70, 100])}
rc_dot_dic = {v: i for i, v in enumerate([50, 100, 200, 250, 400, 500, 2500, 1000])}
data['C_cate'] = data['C'].map(c_dic)
data['R_cate'] = data['R'].map(r_dic)
data['RC_sum'] = (data['R'] + data['C']).map(rc_sum_dic)
data['RC_dot'] = (data['R'] * data['C']).map(rc_dot_dic)
data['R_sum_c'] = (data['R'] + data['C']).astype(str)
data['R_mult_c'] = (data['R'] * data['C']).astype(str)
data['R'] = data['R'].astype(str)
data['C'] = data['C'].astype(str)
data['R__C'] = data["R"].astype(str) + '__' + data["C"].astype(str)
data = pd.get_dummies(data)
data['u_in_lagback_diff1'] = data['u_in'] - data['u_in_lag_back1']
data['u_out_lagback_diff1'] = data['u_out'] - data['u_out_lag_back1']
data['u_in_lagback_diff2'] = data['u_in'] - data['u_in_lag_back2']
data['u_out_lagback_diff2'] = data['u_out'] - data['u_out_lag_back2']
data['u_in_lagback_diff3'] = data['u_in'] - data['u_in_lag_back3']
data['u_out_lagback_diff3'] = data['u_out'] - data['u_out_lag_back3']
data['u_in_lagback_diff4'] = data['u_in'] - data['u_in_lag_back4']
data['u_out_lagback_diff4'] = data['u_out'] - data['u_out_lag_back4']
######
data['u_in_lag_back10'] = data.groupby('breath_id')['u_in'].shift(-10)
data['u_out_lag_back10'] = data.groupby('breath_id')['u_out'].shift(-10)
data['u_in_lagback_diff10'] = data['u_in'] - data['u_in_lag_back10']
data['u_out_lagback_diff10'] = data['u_out'] - data['u_out_lag_back10']
data['time_step_diff'] = data['time_step'] - data.groupby('breath_id')['time_step'].shift().fillna(0)
data[["15_out_sum", "15_out_min", "15_out_max", "15_out_mean", "15_out_std"]] = (data \
.groupby('breath_id')['u_out'] \
.rolling(window=15, min_periods=1) \
.agg({"15_out_sum": "sum",
"15_out_min": "min",
"15_out_max": "max",
"15_out_mean": "mean",
"15_out_std": "std"
}).reset_index(level=0, drop=True))
for window in [2, 4, 5, 10, 15, 20, 30, 45]:
data[[f"{window}_in_sum", f"{window}_in_min", f"{window}_in_max",
f"{window}_in_mean", f"{window}_in_std"]] = (data.groupby('breath_id')['u_in'].rolling(window=window,
min_periods=1) \
.agg({f"{window}_in_sum": "sum",
f"{window}_in_min": "min",
f"{window}_in_max": "max",
f"{window}_in_mean": "mean",
f"{window}_in_std": "std"
}).reset_index(level=0,
drop=True))
for halflife in [5, 9, 10, 15, 20]:
data[f'ewm_u_in_mean_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).mean().reset_index(level=0,
drop=True)
data[f'ewm_u_in_std_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).std().reset_index(level=0, drop=True)
data[f'ewm_u_in_corr_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).corr().reset_index(level=0,
drop=True)
data['expand_mean'] = data.groupby('breath_id')['u_in'].expanding(2).mean().reset_index(level=0, drop=True)
data['expand_max'] = data.groupby('breath_id')['u_in'].expanding(2).max().reset_index(level=0, drop=True)
data['expand_std'] = data.groupby('breath_id')['u_in'].expanding(2).std().reset_index(level=0, drop=True)
return data.fillna(0)
def make_features9(self, data):
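        # Lag/lead and R/C encoding features plus a broad set of per-breath rolling,
        # expanding and EWM statistics of u_in (and a few of u_out).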
data['area'] = data['time_step'] * data['u_in']
data['area'] = data.groupby('breath_id')['area'].cumsum()
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['u_in_lag1'] = data.groupby('breath_id')['u_in'].shift(1)
data['u_out_lag1'] = data.groupby('breath_id')['u_out'].shift(1)
data['u_in_lag_back1'] = data.groupby('breath_id')['u_in'].shift(-1)
data['u_out_lag_back1'] = data.groupby('breath_id')['u_out'].shift(-1)
data['u_in_lag2'] = data.groupby('breath_id')['u_in'].shift(2)
data['u_out_lag2'] = data.groupby('breath_id')['u_out'].shift(2)
data['u_in_lag_back2'] = data.groupby('breath_id')['u_in'].shift(-2)
data['u_out_lag_back2'] = data.groupby('breath_id')['u_out'].shift(-2)
data['u_in_lag3'] = data.groupby('breath_id')['u_in'].shift(3)
data['u_out_lag3'] = data.groupby('breath_id')['u_out'].shift(3)
data['u_in_lag_back3'] = data.groupby('breath_id')['u_in'].shift(-3)
data['u_out_lag_back3'] = data.groupby('breath_id')['u_out'].shift(-3)
data['u_in_lag4'] = data.groupby('breath_id')['u_in'].shift(4)
data['u_out_lag4'] = data.groupby('breath_id')['u_out'].shift(4)
data['u_in_lag_back4'] = data.groupby('breath_id')['u_in'].shift(-4)
data['u_out_lag_back4'] = data.groupby('breath_id')['u_out'].shift(-4)
data = data.fillna(0)
data['breath_id__u_in__max'] = data.groupby(['breath_id'])['u_in'].transform('max')
data['breath_id__u_out__max'] = data.groupby(['breath_id'])['u_out'].transform('max')
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['u_in_diff3'] = data['u_in'] - data['u_in_lag3']
data['u_out_diff3'] = data['u_out'] - data['u_out_lag3']
data['u_in_diff4'] = data['u_in'] - data['u_in_lag4']
data['u_out_diff4'] = data['u_out'] - data['u_out_lag4']
data['cross'] = data['u_in'] * data['u_out']
data['cross2'] = data['time_step'] * data['u_out']
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_cummean'] = data['u_in_cumsum'] / data['count']
data['breath_id_lag'] = data['breath_id'].shift(1).fillna(0)
data['breath_id_lag2'] = data['breath_id'].shift(2).fillna(0)
data['breath_id_lagsame'] = np.select([data['breath_id_lag'] == data['breath_id']], [1], 0)
data['breath_id_lag2same'] = np.select([data['breath_id_lag2'] == data['breath_id']], [1], 0)
data['breath_id__u_in_lag'] = data['u_in'].shift(1).fillna(0)
data['breath_id__u_in_lag'] = data['breath_id__u_in_lag'] * data['breath_id_lagsame']
data['breath_id__u_in_lag2'] = data['u_in'].shift(2).fillna(0)
data['breath_id__u_in_lag2'] = data['breath_id__u_in_lag2'] * data['breath_id_lag2same']
c_dic = {10: 0, 20: 1, 50: 2}
r_dic = {5: 0, 20: 1, 50: 2}
rc_sum_dic = {v: i for i, v in enumerate([15, 25, 30, 40, 55, 60, 70, 100])}
rc_dot_dic = {v: i for i, v in enumerate([50, 100, 200, 250, 400, 500, 2500, 1000])}
data['C_cate'] = data['C'].map(c_dic)
data['R_cate'] = data['R'].map(r_dic)
data['RC_sum'] = (data['R'] + data['C']).map(rc_sum_dic)
data['RC_dot'] = (data['R'] * data['C']).map(rc_dot_dic)
data['R_sum_c'] = (data['R'] + data['C']).astype(str)
data['R_mult_c'] = (data['R'] * data['C']).astype(str)
data['R'] = data['R'].astype(str)
data['C'] = data['C'].astype(str)
data['R__C'] = data["R"].astype(str) + '__' + data["C"].astype(str)
data = pd.get_dummies(data)
data[["15_in_sum", "15_in_min", "15_in_max", "15_in_mean"]] = (data \
.groupby('breath_id')['u_in'] \
.rolling(window=15, min_periods=1) \
.agg({"15_in_sum": "sum",
"15_in_min": "min",
"15_in_max": "max",
"15_in_mean": "mean"
# "15_in_std":"std"
}) \
.reset_index(level=0, drop=True))
data['u_in_lagback_diff1'] = data['u_in'] - data['u_in_lag_back1']
data['u_out_lagback_diff1'] = data['u_out'] - data['u_out_lag_back1']
data['u_in_lagback_diff2'] = data['u_in'] - data['u_in_lag_back2']
data['u_out_lagback_diff2'] = data['u_out'] - data['u_out_lag_back2']
data['ewm_u_in_mean'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).mean().reset_index(level=0,
drop=True)
data['ewm_u_in_std'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).std().reset_index(level=0, drop=True)
data['ewm_u_in_corr'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).corr().reset_index(level=0,
drop=True)
data['rolling_10_mean'] = data.groupby('breath_id')['u_in'].rolling(window=10,
min_periods=1).mean().reset_index(
level=0, drop=True)
data['rolling_10_max'] = data.groupby('breath_id')['u_in'].rolling(window=10, min_periods=1).max().reset_index(
level=0,
drop=True)
data['rolling_10_std'] = data.groupby('breath_id')['u_in'].rolling(window=10, min_periods=1).std().reset_index(
level=0,
drop=True)
data['expand_mean'] = data.groupby('breath_id')['u_in'].expanding(2).mean().reset_index(level=0, drop=True)
data['expand_max'] = data.groupby('breath_id')['u_in'].expanding(2).max().reset_index(level=0, drop=True)
data['expand_std'] = data.groupby('breath_id')['u_in'].expanding(2).std().reset_index(level=0, drop=True)
data["u_in_rolling_mean2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).mean()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_mean4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).mean()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_mean10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).mean()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_max2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).max()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_max4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).max()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_max10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).max()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_min2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).min()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_min4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).min()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_min10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).min()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_std2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).std()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_std4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).std()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_std10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).std()[
"u_in"].reset_index(drop=True)
g = data.groupby('breath_id')['u_in']
data['ewm_u_in_mean'] = g.ewm(halflife=10).mean() \
.reset_index(level=0, drop=True)
data['ewm_u_in_std'] = g.ewm(halflife=10).std() \
.reset_index(level=0, drop=True)
data['ewm_u_in_corr'] = g.ewm(halflife=10).corr() \
.reset_index(level=0, drop=True)
data['rolling_10_mean'] = g.rolling(window=10, min_periods=1).mean() \
.reset_index(level=0, drop=True)
data['rolling_10_max'] = g.rolling(window=10, min_periods=1).max() \
.reset_index(level=0, drop=True)
data['rolling_10_std'] = g.rolling(window=10, min_periods=1).std() \
.reset_index(level=0, drop=True)
data['expand_mean'] = g.expanding(2).mean() \
.reset_index(level=0, drop=True)
data['expand_max'] = g.expanding(2).max() \
.reset_index(level=0, drop=True)
data['expand_std'] = g.expanding(2).std() \
.reset_index(level=0, drop=True)
data['u_in_lag_back10'] = data.groupby('breath_id')['u_in'].shift(-10)
data['u_out_lag_back10'] = data.groupby('breath_id')['u_out'].shift(-10)
data['time_step_diff'] = data.groupby('breath_id')['time_step'].diff().fillna(0)
### rolling window ts feats
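        # ewm_u_in_* are overwritten here once more with different half-lives (mean: 9, std: 10, corr: 15).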
data['ewm_u_in_mean'] = data.groupby('breath_id')['u_in'].ewm(halflife=9).mean().reset_index(level=0,
drop=True)
data['ewm_u_in_std'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).std().reset_index(level=0,
drop=True) ## could add covar?
data['ewm_u_in_corr'] = data.groupby('breath_id')['u_in'].ewm(halflife=15).corr().reset_index(level=0,
drop=True) # self umin corr
# data['ewm_u_in_corr'] = data.groupby('breath_id')['u_in'].ewm(halflife=6).corr(data.groupby('breath_id')["u_out"]).reset_index(level=0,drop=True) # corr with u_out # error
## rolling window of 15 periods
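        # NB: both aggregations below are computed over u_in, so the '*_out_std' names
        # in the target column lists do not hold u_out statistics.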
data[["15_in_sum", "15_in_min", "15_in_max", "15_in_mean", "15_out_std"]] = data.groupby('breath_id')[
'u_in'].rolling(window=15, min_periods=1).agg(
{"15_in_sum": "sum", "15_in_min": "min", "15_in_max": "max", "15_in_mean": "mean",
"15_in_std": "std"}).reset_index(level=0, drop=True)
# data[["45_in_sum","45_in_min","45_in_max","45_in_mean","45_out_std"]] = data.groupby('breath_id')['u_in'].rolling(window=45,min_periods=1).agg({"45_in_sum":"sum","45_in_min":"min","45_in_max":"max","45_in_mean":"mean","45_in_std":"std"}).reset_index(level=0,drop=True)
data[["45_in_sum", "45_in_min", "45_in_max", "45_in_mean", "45_out_std"]] = data.groupby('breath_id')[
'u_in'].rolling(window=45, min_periods=1).agg(
{"45_in_sum": "sum", "45_in_min": "min", "45_in_max": "max", "45_in_mean": "mean",
"45_in_std": "std"}).reset_index(level=0, drop=True)
data[["15_out_mean"]] = data.groupby('breath_id')['u_out'].rolling(window=15, min_periods=1).agg(
{"15_out_mean": "mean"}).reset_index(level=0, drop=True)
return data.fillna(0)
def make_features10(self, data):
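        # Symmetric lags/leads from -10 to +10 with their diffs, R/C encodings, and
        # rolling/EWM statistics of u_in and u_out over many window sizes and half-lives.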
data['area'] = data['time_step'] * data['u_in']
data['area'] = data.groupby('breath_id')['area'].cumsum()
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
lags = [-10, -9, -8, -7, -6, -5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
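        # Negative lags are leads (shift(-k)); the resulting columns are named e.g. u_in_lag-1 / u_in_diff-1.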
for lag in lags:
data[f'u_in_lag{lag}'] = data.groupby('breath_id')['u_in'].shift(lag)
data[f'u_out_lag{lag}'] = data.groupby('breath_id')['u_out'].shift(lag)
data[f'u_in_diff{lag}'] = data['u_in'] - data[f'u_in_lag{lag}']
data[f'u_out_diff{lag}'] = data['u_out'] - data[f'u_out_lag{lag}']
data = data.fillna(0)
data['breath_id__u_in__max'] = data.groupby(['breath_id'])['u_in'].transform('max')
data['breath_id__u_out__max'] = data.groupby(['breath_id'])['u_out'].transform('max')
data['breath_id__u_in__diffmax'] = data['breath_id__u_in__max'] - data['u_in']
        data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']  # difference from the per-breath mean of u_in
data['cross'] = data['u_in'] * data['u_out']
data['cross2'] = data['time_step'] * data['u_out']
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_cummean'] = data['u_in_cumsum'] / data['count']
data['breath_id_lag'] = data['breath_id'].shift(1).fillna(0)
data['breath_id_lag2'] = data['breath_id'].shift(2).fillna(0)
data['breath_id_lagsame'] = np.select([data['breath_id_lag'] == data['breath_id']], [1], 0)
data['breath_id_lag2same'] = np.select([data['breath_id_lag2'] == data['breath_id']], [1], 0)
data['breath_id__u_in_lag'] = data['u_in'].shift(1).fillna(0)
data['breath_id__u_in_lag'] = data['breath_id__u_in_lag'] * data['breath_id_lagsame']
data['breath_id__u_in_lag2'] = data['u_in'].shift(2).fillna(0)
data['breath_id__u_in_lag2'] = data['breath_id__u_in_lag2'] * data['breath_id_lag2same']
c_dic = {10: 0, 20: 1, 50: 2}
r_dic = {5: 0, 20: 1, 50: 2}
rc_sum_dic = {v: i for i, v in enumerate([15, 25, 30, 40, 55, 60, 70, 100])}
rc_dot_dic = {v: i for i, v in enumerate([50, 100, 200, 250, 400, 500, 2500, 1000])}
data['C_cate'] = data['C'].map(c_dic)
data['R_cate'] = data['R'].map(r_dic)
data['RC_sum'] = (data['R'] + data['C']).map(rc_sum_dic)
data['RC_dot'] = (data['R'] * data['C']).map(rc_dot_dic)
data['R_sum_c'] = (data['R'] + data['C']).astype(str)
data['R_mult_c'] = (data['R'] * data['C']).astype(str)
data['R'] = data['R'].astype(str)
data['C'] = data['C'].astype(str)
data['R__C'] = data["R"].astype(str) + '__' + data["C"].astype(str)
data = pd.get_dummies(data)
data['time_step_diff'] = data['time_step'] - data.groupby('breath_id')['time_step'].shift().fillna(0)
# data[["15_out_sum", "15_out_min", "15_out_max", "15_out_mean", "15_out_std"]] = (data \
# .groupby('breath_id')['u_out'] \
# .rolling(window=15, min_periods=1) \
# .agg({"15_out_sum": "sum",
# "15_out_min": "min",
# "15_out_max": "max",
# "15_out_mean": "mean",
# "15_out_std": "std"
# }).reset_index(level=0, drop=True))
for window in [2, 3, 4, 5, 7, 10, 15, 20, 30, 40, 45]:
data[[f"{window}_in_sum", f"{window}_in_min", f"{window}_in_max",
f"{window}_in_mean", f"{window}_in_std"]] = (data.groupby('breath_id')['u_in'].rolling(window=window,
min_periods=1) \
.agg({f"{window}_in_sum": "sum",
f"{window}_in_min": "min",
f"{window}_in_max": "max",
f"{window}_in_mean": "mean",
f"{window}_in_std": "std"
}).reset_index(level=0,
drop=True))
data[[f"{window}_out_sum", f"{window}_out_min", f"{window}_out_max",
f"{window}_out_mean", f"{window}_out_std"]] = (data.groupby('breath_id')['u_out'].rolling(window=window,
min_periods=1) \
.agg({f"{window}_out_sum": "sum",
f"{window}_out_min": "min",
f"{window}_out_max": "max",
f"{window}_out_mean": "mean",
f"{window}_out_std": "std"
}).reset_index(level=0,
drop=True))
for halflife in [2, 3, 4, 5, 7, 10, 15, 20, 30, 40, 45]:
data[f'ewm_u_in_mean_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).mean().reset_index(level=0,
drop=True)
data[f'ewm_u_in_std_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).std().reset_index(level=0, drop=True)
data[f'ewm_u_in_corr_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).corr().reset_index(level=0,
drop=True)
data['expand_mean'] = data.groupby('breath_id')['u_in'].expanding(2).mean().reset_index(level=0, drop=True)
data['expand_max'] = data.groupby('breath_id')['u_in'].expanding(2).max().reset_index(level=0, drop=True)
data['expand_std'] = data.groupby('breath_id')['u_in'].expanding(2).std().reset_index(level=0, drop=True)
# data.drop(['id', 'breath_id', 'one', 'count', 'breath_id_lag', 'breath_id_lag2', 'breath_id_lagsame',
# 'breath_id_lag2same'], axis=1, inplace=True)
#
# if 'pressure' in data.columns:
# data.drop('pressure', axis=1, inplace=True)
return data.fillna(0)
def make_features11(self, data):
"""
        Feature set from make_features8 plus public-notebook signal-processing features
        (windowed FFT magnitude, Hilbert envelope/phase of u_in).
Args:
data:
Returns:
"""
w = blackman(80 + 1)
data['Ruin'] = data['R'].astype(float) * data['u_in'].astype(float)
data['Cuin'] = data['C'].astype(float) * data['u_in'].astype(float)
ffta = lambda x: np.abs(fft(np.append(x.values, x.values[0]))[:80])
ffta.__name__ = 'ffta'
fftw = lambda x: np.abs(fft(np.append(x.values, x.values[0]) * w)[:80])
fftw.__name__ = 'fftw'
data['fft_u_in'] = data.groupby('breath_id')['u_in'].transform(ffta)
data['fft_u_in_w'] = data.groupby('breath_id')['u_in'].transform(fftw)
data['analytical'] = data.groupby('breath_id')['u_in'].transform(hilbert)
data['envelope'] = np.abs(data['analytical'])
data['phase'] = np.angle(data['analytical'])
data['unwrapped_phase'] = data.groupby('breath_id')['phase'].transform(np.unwrap)
data['phase_shift1'] = data.groupby('breath_id')['unwrapped_phase'].shift(1).astype(np.float32)
data['IF'] = data['unwrapped_phase'] - data['phase_shift1'].astype(np.float32)
data['area'] = data['time_step'] * data['u_in']
data['area'] = data.groupby('breath_id')['area'].cumsum()
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['u_in_lag1'] = data.groupby('breath_id')['u_in'].shift(1)
data['u_out_lag1'] = data.groupby('breath_id')['u_out'].shift(1)
data['u_in_lag_back1'] = data.groupby('breath_id')['u_in'].shift(-1)
data['u_out_lag_back1'] = data.groupby('breath_id')['u_out'].shift(-1)
data['u_in_lag2'] = data.groupby('breath_id')['u_in'].shift(2)
data['u_out_lag2'] = data.groupby('breath_id')['u_out'].shift(2)
data['u_in_lag_back2'] = data.groupby('breath_id')['u_in'].shift(-2)
data['u_out_lag_back2'] = data.groupby('breath_id')['u_out'].shift(-2)
data['u_in_lag3'] = data.groupby('breath_id')['u_in'].shift(3)
data['u_out_lag3'] = data.groupby('breath_id')['u_out'].shift(3)
data['u_in_lag_back3'] = data.groupby('breath_id')['u_in'].shift(-3)
data['u_out_lag_back3'] = data.groupby('breath_id')['u_out'].shift(-3)
data['u_in_lag4'] = data.groupby('breath_id')['u_in'].shift(4)
data['u_out_lag4'] = data.groupby('breath_id')['u_out'].shift(4)
data['u_in_lag_back4'] = data.groupby('breath_id')['u_in'].shift(-4)
data['u_out_lag_back4'] = data.groupby('breath_id')['u_out'].shift(-4)
data = data.fillna(0)
data['breath_id__u_in__max'] = data.groupby(['breath_id'])['u_in'].transform('max')
data['breath_id__u_out__max'] = data.groupby(['breath_id'])['u_out'].transform('max')
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['u_in_diff3'] = data['u_in'] - data['u_in_lag3']
data['u_out_diff3'] = data['u_out'] - data['u_out_lag3']
data['u_in_diff4'] = data['u_in'] - data['u_in_lag4']
data['u_out_diff4'] = data['u_out'] - data['u_out_lag4']
data['cross'] = data['u_in'] * data['u_out']
data['cross2'] = data['time_step'] * data['u_out']
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_cummean'] = data['u_in_cumsum'] / data['count']
data['breath_id_lag'] = data['breath_id'].shift(1).fillna(0)
data['breath_id_lag2'] = data['breath_id'].shift(2).fillna(0)
data['breath_id_lagsame'] = np.select([data['breath_id_lag'] == data['breath_id']], [1], 0)
data['breath_id_lag2same'] = np.select([data['breath_id_lag2'] == data['breath_id']], [1], 0)
data['breath_id__u_in_lag'] = data['u_in'].shift(1).fillna(0)
data['breath_id__u_in_lag'] = data['breath_id__u_in_lag'] * data['breath_id_lagsame']
data['breath_id__u_in_lag2'] = data['u_in'].shift(2).fillna(0)
data['breath_id__u_in_lag2'] = data['breath_id__u_in_lag2'] * data['breath_id_lag2same']
c_dic = {10: 0, 20: 1, 50: 2}
r_dic = {5: 0, 20: 1, 50: 2}
rc_sum_dic = {v: i for i, v in enumerate([15, 25, 30, 40, 55, 60, 70, 100])}
rc_dot_dic = {v: i for i, v in enumerate([50, 100, 200, 250, 400, 500, 2500, 1000])}
data['C_cate'] = data['C'].map(c_dic)
data['R_cate'] = data['R'].map(r_dic)
data['RC_sum'] = (data['R'] + data['C']).map(rc_sum_dic)
data['RC_dot'] = (data['R'] * data['C']).map(rc_dot_dic)
data['R_sum_c'] = (data['R'] + data['C']).astype(str)
data['R_mult_c'] = (data['R'] * data['C']).astype(str)
data['R'] = data['R'].astype(str)
data['C'] = data['C'].astype(str)
data['R__C'] = data["R"].astype(str) + '__' + data["C"].astype(str)
data = pd.get_dummies(data)
data['u_in_lagback_diff1'] = data['u_in'] - data['u_in_lag_back1']
data['u_out_lagback_diff1'] = data['u_out'] - data['u_out_lag_back1']
data['u_in_lagback_diff2'] = data['u_in'] - data['u_in_lag_back2']
data['u_out_lagback_diff2'] = data['u_out'] - data['u_out_lag_back2']
data['u_in_lagback_diff3'] = data['u_in'] - data['u_in_lag_back3']
data['u_out_lagback_diff3'] = data['u_out'] - data['u_out_lag_back3']
data['u_in_lagback_diff4'] = data['u_in'] - data['u_in_lag_back4']
data['u_out_lagback_diff4'] = data['u_out'] - data['u_out_lag_back4']
######
data['u_in_lag_back10'] = data.groupby('breath_id')['u_in'].shift(-10)
data['u_out_lag_back10'] = data.groupby('breath_id')['u_out'].shift(-10)
data['u_in_lagback_diff10'] = data['u_in'] - data['u_in_lag_back10']
data['u_out_lagback_diff10'] = data['u_out'] - data['u_out_lag_back10']
data['time_step_diff'] = data['time_step'] - data.groupby('breath_id')['time_step'].shift().fillna(0)
data[["15_out_sum", "15_out_min", "15_out_max", "15_out_mean", "15_out_std"]] = (data \
.groupby('breath_id')['u_out'] \
.rolling(window=15, min_periods=1) \
.agg({"15_out_sum": "sum",
"15_out_min": "min",
"15_out_max": "max",
"15_out_mean": "mean",
"15_out_std": "std"
}).reset_index(level=0, drop=True))
for window in [2, 4, 5, 10, 15, 20, 30, 45]:
data[[f"{window}_in_sum", f"{window}_in_min", f"{window}_in_max",
f"{window}_in_mean", f"{window}_in_std"]] = (data.groupby('breath_id')['u_in'].rolling(window=window,
min_periods=1) \
.agg({f"{window}_in_sum": "sum",
f"{window}_in_min": "min",
f"{window}_in_max": "max",
f"{window}_in_mean": "mean",
f"{window}_in_std": "std"
}).reset_index(level=0,
drop=True))
for halflife in [5, 9, 10, 15, 20]:
data[f'ewm_u_in_mean_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).mean().reset_index(level=0,
drop=True)
data[f'ewm_u_in_std_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).std().reset_index(level=0, drop=True)
data[f'ewm_u_in_corr_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).corr().reset_index(level=0,
drop=True)
data['expand_mean'] = data.groupby('breath_id')['u_in'].expanding(2).mean().reset_index(level=0, drop=True)
data['expand_max'] = data.groupby('breath_id')['u_in'].expanding(2).max().reset_index(level=0, drop=True)
data['expand_std'] = data.groupby('breath_id')['u_in'].expanding(2).std().reset_index(level=0, drop=True)
data.drop(['id', 'breath_id', 'one', 'count', 'breath_id_lag', 'breath_id_lag2', 'breath_id_lagsame',
'breath_id_lag2same', 'analytical'], axis=1, inplace=True)
if 'pressure' in data.columns:
data.drop('pressure', axis=1, inplace=True)
return data.fillna(0)
def make_features12(self, data):
"""
        Feature set from make_features8 plus teammate features: per-(R, C, time_step)
        std/median of pressure computed on the first 35 timesteps of each breath.
Args:
data:
Returns:
"""
if "pressure" not in data.columns:
data['pressure'] = 0
# data = data.merge(
# data.groupby(['R', 'C', 'time_step']).pressure.std().reset_index().rename(columns={'pressure': 'p_std'}),
# on=['R', 'C', 'time_step'], how='left')
# data = data.merge(data.groupby(['R', 'C', 'time_step']).pressure.median().reset_index().rename(
# columns={'pressure': 'p_mean'}), on=['R', 'C', 'time_step'], how='left')
# data.sort_values(by='id', inplace=True)
# data.reset_index(drop=True, inplace=True)
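        # Reduced frame built from the first 35 timesteps of each breath; column index 4
        # (assumed to be time_step) is overwritten with the integer step index 0..34 before
        # computing the per-(R, C, time_step) pressure statistics.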
tmp = data.to_numpy().reshape(-1, 80, data.shape[-1])
tmp = tmp[:, :35, :]
tmp[:, :, 4] = np.tile(np.arange(35).astype(int), (tmp.shape[0], 1))
tmp = tmp.reshape(-1, data.shape[-1])
data_small = pd.DataFrame(tmp, columns=data.columns)
data = data.merge(
data_small.groupby(['R', 'C', 'time_step']).pressure.std().reset_index().rename(columns={'pressure': 'p_envelope_std'}),
on=['R', 'C', 'time_step'], how='left')
data = data.merge(data_small.groupby(['R', 'C', 'time_step']).pressure.median().reset_index().rename(
columns={'pressure': 'p_envelope_mean'}), on=['R', 'C', 'time_step'], how='left')
data.sort_values(by='id', inplace=True)
data.reset_index(drop=True, inplace=True)
del tmp, data_small
gc.collect()
data['area'] = data['time_step'] * data['u_in']
data['area'] = data.groupby('breath_id')['area'].cumsum()
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['u_in_lag1'] = data.groupby('breath_id')['u_in'].shift(1)
data['u_out_lag1'] = data.groupby('breath_id')['u_out'].shift(1)
data['u_in_lag_back1'] = data.groupby('breath_id')['u_in'].shift(-1)
data['u_out_lag_back1'] = data.groupby('breath_id')['u_out'].shift(-1)
data['u_in_lag2'] = data.groupby('breath_id')['u_in'].shift(2)
data['u_out_lag2'] = data.groupby('breath_id')['u_out'].shift(2)
data['u_in_lag_back2'] = data.groupby('breath_id')['u_in'].shift(-2)
data['u_out_lag_back2'] = data.groupby('breath_id')['u_out'].shift(-2)
data['u_in_lag3'] = data.groupby('breath_id')['u_in'].shift(3)
data['u_out_lag3'] = data.groupby('breath_id')['u_out'].shift(3)
data['u_in_lag_back3'] = data.groupby('breath_id')['u_in'].shift(-3)
data['u_out_lag_back3'] = data.groupby('breath_id')['u_out'].shift(-3)
data['u_in_lag4'] = data.groupby('breath_id')['u_in'].shift(4)
data['u_out_lag4'] = data.groupby('breath_id')['u_out'].shift(4)
data['u_in_lag_back4'] = data.groupby('breath_id')['u_in'].shift(-4)
data['u_out_lag_back4'] = data.groupby('breath_id')['u_out'].shift(-4)
data = data.fillna(0)
data['breath_id__u_in__max'] = data.groupby(['breath_id'])['u_in'].transform('max')
data['breath_id__u_out__max'] = data.groupby(['breath_id'])['u_out'].transform('max')
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['u_in_diff3'] = data['u_in'] - data['u_in_lag3']
data['u_out_diff3'] = data['u_out'] - data['u_out_lag3']
data['u_in_diff4'] = data['u_in'] - data['u_in_lag4']
data['u_out_diff4'] = data['u_out'] - data['u_out_lag4']
data['cross'] = data['u_in'] * data['u_out']
data['cross2'] = data['time_step'] * data['u_out']
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_cummean'] = data['u_in_cumsum'] / data['count']
data['breath_id_lag'] = data['breath_id'].shift(1).fillna(0)
data['breath_id_lag2'] = data['breath_id'].shift(2).fillna(0)
data['breath_id_lagsame'] = np.select([data['breath_id_lag'] == data['breath_id']], [1], 0)
data['breath_id_lag2same'] = np.select([data['breath_id_lag2'] == data['breath_id']], [1], 0)
data['breath_id__u_in_lag'] = data['u_in'].shift(1).fillna(0)
data['breath_id__u_in_lag'] = data['breath_id__u_in_lag'] * data['breath_id_lagsame']
data['breath_id__u_in_lag2'] = data['u_in'].shift(2).fillna(0)
data['breath_id__u_in_lag2'] = data['breath_id__u_in_lag2'] * data['breath_id_lag2same']
c_dic = {10: 0, 20: 1, 50: 2}
r_dic = {5: 0, 20: 1, 50: 2}
rc_sum_dic = {v: i for i, v in enumerate([15, 25, 30, 40, 55, 60, 70, 100])}
rc_dot_dic = {v: i for i, v in enumerate([50, 100, 200, 250, 400, 500, 2500, 1000])}
data['C_cate'] = data['C'].map(c_dic)
data['R_cate'] = data['R'].map(r_dic)
data['RC_sum'] = (data['R'] + data['C']).map(rc_sum_dic)
data['RC_dot'] = (data['R'] * data['C']).map(rc_dot_dic)
data['R_sum_c'] = (data['R'] + data['C']).astype(str)
data['R_mult_c'] = (data['R'] * data['C']).astype(str)
data['R'] = data['R'].astype(str)
data['C'] = data['C'].astype(str)
data['R__C'] = data["R"].astype(str) + '__' + data["C"].astype(str)
data = pd.get_dummies(data)
data['u_in_lagback_diff1'] = data['u_in'] - data['u_in_lag_back1']
data['u_out_lagback_diff1'] = data['u_out'] - data['u_out_lag_back1']
data['u_in_lagback_diff2'] = data['u_in'] - data['u_in_lag_back2']
data['u_out_lagback_diff2'] = data['u_out'] - data['u_out_lag_back2']
data['u_in_lagback_diff3'] = data['u_in'] - data['u_in_lag_back3']
data['u_out_lagback_diff3'] = data['u_out'] - data['u_out_lag_back3']
data['u_in_lagback_diff4'] = data['u_in'] - data['u_in_lag_back4']
data['u_out_lagback_diff4'] = data['u_out'] - data['u_out_lag_back4']
######
data['u_in_lag_back10'] = data.groupby('breath_id')['u_in'].shift(-10)
data['u_out_lag_back10'] = data.groupby('breath_id')['u_out'].shift(-10)
data['u_in_lagback_diff10'] = data['u_in'] - data['u_in_lag_back10']
data['u_out_lagback_diff10'] = data['u_out'] - data['u_out_lag_back10']
data['time_step_diff'] = data['time_step'] - data.groupby('breath_id')['time_step'].shift().fillna(0)
data[["15_out_sum", "15_out_min", "15_out_max", "15_out_mean", "15_out_std"]] = (data \
.groupby('breath_id')['u_out'] \
.rolling(window=15, min_periods=1) \
.agg({"15_out_sum": "sum",
"15_out_min": "min",
"15_out_max": "max",
"15_out_mean": "mean",
"15_out_std": "std"
}).reset_index(level=0, drop=True))
for window in [2, 4, 5, 10, 15, 20, 30, 45]:
data[[f"{window}_in_sum", f"{window}_in_min", f"{window}_in_max",
f"{window}_in_mean", f"{window}_in_std"]] = (data.groupby('breath_id')['u_in'].rolling(window=window,
min_periods=1) \
.agg({f"{window}_in_sum": "sum",
f"{window}_in_min": "min",
f"{window}_in_max": "max",
f"{window}_in_mean": "mean",
f"{window}_in_std": "std"
}).reset_index(level=0,
drop=True))
for halflife in [5, 9, 10, 15, 20]:
data[f'ewm_u_in_mean_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).mean().reset_index(level=0,
drop=True)
data[f'ewm_u_in_std_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).std().reset_index(level=0, drop=True)
data[f'ewm_u_in_corr_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).corr().reset_index(level=0,
drop=True)
data['expand_mean'] = data.groupby('breath_id')['u_in'].expanding(2).mean().reset_index(level=0, drop=True)
data['expand_max'] = data.groupby('breath_id')['u_in'].expanding(2).max().reset_index(level=0, drop=True)
data['expand_std'] = data.groupby('breath_id')['u_in'].expanding(2).std().reset_index(level=0, drop=True)
data.drop(['id', 'breath_id', 'one', 'count', 'breath_id_lag', 'breath_id_lag2', 'breath_id_lagsame',
'breath_id_lag2same'], axis=1, inplace=True)
if 'pressure' in data.columns:
data.drop('pressure', axis=1, inplace=True)
return data.fillna(0)
def make_features13(self, data):
"""
        Huge feature set based on make_features3, make_features8 and make_features10;
        at the end the frame is restricted to a pre-selected list of best columns.
Args:
data:
Returns:
"""
data['area'] = data['time_step'] * data['u_in']
data['area'] = data.groupby('breath_id')['area'].cumsum()
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['u_in_lag1'] = data.groupby('breath_id')['u_in'].shift(1)
data['u_out_lag1'] = data.groupby('breath_id')['u_out'].shift(1)
data['u_in_lag_back1'] = data.groupby('breath_id')['u_in'].shift(-1)
data['u_out_lag_back1'] = data.groupby('breath_id')['u_out'].shift(-1)
data['u_in_lag2'] = data.groupby('breath_id')['u_in'].shift(2)
data['u_out_lag2'] = data.groupby('breath_id')['u_out'].shift(2)
data['u_in_lag_back2'] = data.groupby('breath_id')['u_in'].shift(-2)
data['u_out_lag_back2'] = data.groupby('breath_id')['u_out'].shift(-2)
data['u_in_lag3'] = data.groupby('breath_id')['u_in'].shift(3)
data['u_out_lag3'] = data.groupby('breath_id')['u_out'].shift(3)
data['u_in_lag_back3'] = data.groupby('breath_id')['u_in'].shift(-3)
data['u_out_lag_back3'] = data.groupby('breath_id')['u_out'].shift(-3)
data['u_in_lag4'] = data.groupby('breath_id')['u_in'].shift(4)
data['u_out_lag4'] = data.groupby('breath_id')['u_out'].shift(4)
data['u_in_lag_back4'] = data.groupby('breath_id')['u_in'].shift(-4)
data['u_out_lag_back4'] = data.groupby('breath_id')['u_out'].shift(-4)
data = data.fillna(0)
data['breath_id__u_in__max'] = data.groupby(['breath_id'])['u_in'].transform('max')
data['breath_id__u_out__max'] = data.groupby(['breath_id'])['u_out'].transform('max')
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['u_in_diff3'] = data['u_in'] - data['u_in_lag3']
data['u_out_diff3'] = data['u_out'] - data['u_out_lag3']
data['u_in_diff4'] = data['u_in'] - data['u_in_lag4']
data['u_out_diff4'] = data['u_out'] - data['u_out_lag4']
data['cross'] = data['u_in'] * data['u_out']
data['cross2'] = data['time_step'] * data['u_out']
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_cummean'] = data['u_in_cumsum'] / data['count']
data['breath_id_lag'] = data['breath_id'].shift(1).fillna(0)
data['breath_id_lag2'] = data['breath_id'].shift(2).fillna(0)
data['breath_id_lagsame'] = np.select([data['breath_id_lag'] == data['breath_id']], [1], 0)
data['breath_id_lag2same'] = np.select([data['breath_id_lag2'] == data['breath_id']], [1], 0)
data['breath_id__u_in_lag'] = data['u_in'].shift(1).fillna(0)
data['breath_id__u_in_lag'] = data['breath_id__u_in_lag'] * data['breath_id_lagsame']
data['breath_id__u_in_lag2'] = data['u_in'].shift(2).fillna(0)
data['breath_id__u_in_lag2'] = data['breath_id__u_in_lag2'] * data['breath_id_lag2same']
data['R_sum_c'] = (data['R'] + data['C']).astype(str)
data['R_mult_c'] = (data['R'] * data['C']).astype(str)
data['R'] = data['R'].astype(str)
data['C'] = data['C'].astype(str)
data['R__C'] = data["R"].astype(str) + '__' + data["C"].astype(str)
data = pd.get_dummies(data)
data[["15_in_sum", "15_in_min", "15_in_max", "15_in_mean"]] = (data \
.groupby('breath_id')['u_in'] \
.rolling(window=15, min_periods=1) \
.agg({"15_in_sum": "sum",
"15_in_min": "min",
"15_in_max": "max",
"15_in_mean": "mean"
# "15_in_std":"std"
}) \
.reset_index(level=0, drop=True))
data['u_in_lagback_diff1'] = data['u_in'] - data['u_in_lag_back1']
data['u_out_lagback_diff1'] = data['u_out'] - data['u_out_lag_back1']
data['u_in_lagback_diff2'] = data['u_in'] - data['u_in_lag_back2']
data['u_out_lagback_diff2'] = data['u_out'] - data['u_out_lag_back2']
data['ewm_u_in_mean'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).mean().reset_index(level=0,
drop=True)
data['ewm_u_in_std'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).std().reset_index(level=0, drop=True)
data['ewm_u_in_corr'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).corr().reset_index(level=0,
drop=True)
data['rolling_10_mean'] = data.groupby('breath_id')['u_in'].rolling(window=10,
min_periods=1).mean().reset_index(
level=0, drop=True)
data['rolling_10_max'] = data.groupby('breath_id')['u_in'].rolling(window=10, min_periods=1).max().reset_index(
level=0,
drop=True)
data['rolling_10_std'] = data.groupby('breath_id')['u_in'].rolling(window=10, min_periods=1).std().reset_index(
level=0,
drop=True)
data['expand_mean'] = data.groupby('breath_id')['u_in'].expanding(2).mean().reset_index(level=0, drop=True)
data['expand_max'] = data.groupby('breath_id')['u_in'].expanding(2).max().reset_index(level=0, drop=True)
data['expand_std'] = data.groupby('breath_id')['u_in'].expanding(2).std().reset_index(level=0, drop=True)
data["u_in_rolling_mean2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).mean()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_mean4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).mean()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_mean10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).mean()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_max2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).max()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_max4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).max()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_max10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).max()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_min2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).min()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_min4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).min()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_min10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).min()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_std2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).std()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_std4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).std()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_std10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).std()[
"u_in"].reset_index(drop=True)
g = data.groupby('breath_id')['u_in']
data['ewm_u_in_mean'] = g.ewm(halflife=10).mean() \
.reset_index(level=0, drop=True)
data['ewm_u_in_std'] = g.ewm(halflife=10).std() \
.reset_index(level=0, drop=True)
data['ewm_u_in_corr'] = g.ewm(halflife=10).corr() \
.reset_index(level=0, drop=True)
data['rolling_10_mean'] = g.rolling(window=10, min_periods=1).mean() \
.reset_index(level=0, drop=True)
data['rolling_10_max'] = g.rolling(window=10, min_periods=1).max() \
.reset_index(level=0, drop=True)
data['rolling_10_std'] = g.rolling(window=10, min_periods=1).std() \
.reset_index(level=0, drop=True)
data['expand_mean'] = g.expanding(2).mean() \
.reset_index(level=0, drop=True)
data['expand_max'] = g.expanding(2).max() \
.reset_index(level=0, drop=True)
data['expand_std'] = g.expanding(2).std() \
.reset_index(level=0, drop=True)
data['u_in_lag_back10'] = data.groupby('breath_id')['u_in'].shift(-10)
data['u_out_lag_back10'] = data.groupby('breath_id')['u_out'].shift(-10)
data['time_step_diff'] = data.groupby('breath_id')['time_step'].diff().fillna(0)
### rolling window ts feats
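        # ewm_u_in_* are overwritten here once more with different half-lives (mean: 9, std: 10, corr: 15).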
data['ewm_u_in_mean'] = data.groupby('breath_id')['u_in'].ewm(halflife=9).mean().reset_index(level=0,
drop=True)
data['ewm_u_in_std'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).std().reset_index(level=0,
drop=True) ## could add covar?
data['ewm_u_in_corr'] = data.groupby('breath_id')['u_in'].ewm(halflife=15).corr().reset_index(level=0,
drop=True) # self umin corr
# data['ewm_u_in_corr'] = data.groupby('breath_id')['u_in'].ewm(halflife=6).corr(data.groupby('breath_id')["u_out"]).reset_index(level=0,drop=True) # corr with u_out # error
## rolling window of 15 periods
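        # NB: both aggregations below are computed over u_in, so the '*_out_std' names
        # in the target column lists do not hold u_out statistics.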
data[["15_in_sum", "15_in_min", "15_in_max", "15_in_mean", "15_out_std"]] = data.groupby('breath_id')[
'u_in'].rolling(window=15, min_periods=1).agg(
{"15_in_sum": "sum", "15_in_min": "min", "15_in_max": "max", "15_in_mean": "mean",
"15_in_std": "std"}).reset_index(level=0, drop=True)
# data[["45_in_sum","45_in_min","45_in_max","45_in_mean","45_out_std"]] = data.groupby('breath_id')['u_in'].rolling(window=45,min_periods=1).agg({"45_in_sum":"sum","45_in_min":"min","45_in_max":"max","45_in_mean":"mean","45_in_std":"std"}).reset_index(level=0,drop=True)
data[["45_in_sum", "45_in_min", "45_in_max", "45_in_mean", "45_out_std"]] = data.groupby('breath_id')[
'u_in'].rolling(window=45, min_periods=1).agg(
{"45_in_sum": "sum", "45_in_min": "min", "45_in_max": "max", "45_in_mean": "mean",
"45_in_std": "std"}).reset_index(level=0, drop=True)
data[["15_out_mean"]] = data.groupby('breath_id')['u_out'].rolling(window=15, min_periods=1).agg(
{"15_out_mean": "mean"}).reset_index(level=0, drop=True)
data['u_in_lagback_diff1'] = data['u_in'] - data['u_in_lag_back1']
data['u_out_lagback_diff1'] = data['u_out'] - data['u_out_lag_back1']
data['u_in_lagback_diff2'] = data['u_in'] - data['u_in_lag_back2']
data['u_out_lagback_diff2'] = data['u_out'] - data['u_out_lag_back2']
data['u_in_lagback_diff3'] = data['u_in'] - data['u_in_lag_back3']
data['u_out_lagback_diff3'] = data['u_out'] - data['u_out_lag_back3']
data['u_in_lagback_diff4'] = data['u_in'] - data['u_in_lag_back4']
data['u_out_lagback_diff4'] = data['u_out'] - data['u_out_lag_back4']
######
data['u_in_lag_back10'] = data.groupby('breath_id')['u_in'].shift(-10)
data['u_out_lag_back10'] = data.groupby('breath_id')['u_out'].shift(-10)
data['u_in_lagback_diff10'] = data['u_in'] - data['u_in_lag_back10']
data['u_out_lagback_diff10'] = data['u_out'] - data['u_out_lag_back10']
data['time_step_diff'] = data['time_step'] - data.groupby('breath_id')['time_step'].shift().fillna(0)
data[["15_out_sum", "15_out_min", "15_out_max", "15_out_mean", "15_out_std"]] = (data \
.groupby('breath_id')['u_out'] \
.rolling(window=15, min_periods=1) \
.agg({"15_out_sum": "sum",
"15_out_min": "min",
"15_out_max": "max",
"15_out_mean": "mean",
"15_out_std": "std"
}).reset_index(level=0, drop=True))
for window in [2, 4, 5, 10, 15, 20, 30, 45]:
data[[f"{window}_in_sum", f"{window}_in_min", f"{window}_in_max",
f"{window}_in_mean", f"{window}_in_std"]] = (data.groupby('breath_id')['u_in'].rolling(window=window,
min_periods=1) \
.agg({f"{window}_in_sum": "sum",
f"{window}_in_min": "min",
f"{window}_in_max": "max",
f"{window}_in_mean": "mean",
f"{window}_in_std": "std"
}).reset_index(level=0,
drop=True))
for halflife in [5, 9, 10, 15, 20]:
data[f'ewm_u_in_mean_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).mean().reset_index(level=0,
drop=True)
data[f'ewm_u_in_std_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).std().reset_index(level=0, drop=True)
data[f'ewm_u_in_corr_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).corr().reset_index(level=0,
drop=True)
data['expand_mean'] = data.groupby('breath_id')['u_in'].expanding(2).mean().reset_index(level=0, drop=True)
data['expand_max'] = data.groupby('breath_id')['u_in'].expanding(2).max().reset_index(level=0, drop=True)
data['expand_std'] = data.groupby('breath_id')['u_in'].expanding(2).std().reset_index(level=0, drop=True)
data['time_step_diff'] = data['time_step'] - data.groupby('breath_id')['time_step'].shift().fillna(0)
# data[["15_out_sum", "15_out_min", "15_out_max", "15_out_mean", "15_out_std"]] = (data \
# .groupby('breath_id')['u_out'] \
# .rolling(window=15, min_periods=1) \
# .agg({"15_out_sum": "sum",
# "15_out_min": "min",
# "15_out_max": "max",
# "15_out_mean": "mean",
# "15_out_std": "std"
# }).reset_index(level=0, drop=True))
for window in [2, 3, 4, 5, 7, 10, 15, 20, 30, 40, 45]:
data[[f"{window}_in_sum", f"{window}_in_min", f"{window}_in_max",
f"{window}_in_mean", f"{window}_in_std"]] = (data.groupby('breath_id')['u_in'].rolling(window=window,
min_periods=1) \
.agg({f"{window}_in_sum": "sum",
f"{window}_in_min": "min",
f"{window}_in_max": "max",
f"{window}_in_mean": "mean",
f"{window}_in_std": "std"
}).reset_index(level=0,
drop=True))
data[[f"{window}_out_sum", f"{window}_out_min", f"{window}_out_max",
f"{window}_out_mean", f"{window}_out_std"]] = (
data.groupby('breath_id')['u_out'].rolling(window=window,
min_periods=1) \
.agg({f"{window}_out_sum": "sum",
f"{window}_out_min": "min",
f"{window}_out_max": "max",
f"{window}_out_mean": "mean",
f"{window}_out_std": "std"
}).reset_index(level=0,
drop=True))
for halflife in [2, 3, 4, 5, 7, 10, 15, 20, 30, 40, 45]:
data[f'ewm_u_in_mean_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(
halflife=halflife).mean().reset_index(level=0,
drop=True)
data[f'ewm_u_in_std_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(
halflife=halflife).std().reset_index(level=0, drop=True)
data[f'ewm_u_in_corr_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(
halflife=halflife).corr().reset_index(level=0,
drop=True)
data['expand_mean'] = data.groupby('breath_id')['u_in'].expanding(2).mean().reset_index(level=0, drop=True)
data['expand_max'] = data.groupby('breath_id')['u_in'].expanding(2).max().reset_index(level=0, drop=True)
data['expand_std'] = data.groupby('breath_id')['u_in'].expanding(2).std().reset_index(level=0, drop=True)
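        # Pre-selected column list; it contains duplicate names, so the data[best_cols]
        # selection below yields repeated columns.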
best_cols = ['10_in_max',
'10_in_mean',
'10_in_mean',
'10_in_min',
'10_in_min',
'10_in_std',
'10_in_std',
'10_in_sum',
'10_in_sum',
'10_out_std',
'15_in_max',
'15_in_max',
'15_in_mean',
'15_in_mean',
'15_in_mean',
'15_in_min',
'15_in_min',
'15_in_min',
'15_in_std',
'15_in_sum',
'15_in_sum',
'15_in_sum',
'15_out_max',
'15_out_mean',
'15_out_std',
'15_out_std',
'15_out_std',
'20_in_max',
'20_in_mean',
'20_in_mean',
'20_in_min',
'20_in_min',
'20_in_std',
'20_in_sum',
'20_in_sum',
'20_out_std',
'2_in_max',
'2_in_max',
'2_in_mean',
'2_in_mean',
'2_in_min',
'2_in_min',
'2_in_std',
'2_in_std',
'2_in_sum',
'2_in_sum',
'30_in_max',
'30_in_mean',
'30_in_mean',
'30_in_min',
'30_in_min',
'30_in_std',
'30_in_sum',
'30_in_sum',
'3_in_max',
'3_in_mean',
'3_in_min',
'3_in_std',
'3_in_sum',
'3_out_std',
'40_in_mean',
'40_in_min',
'40_in_sum',
'45_in_max',
'45_in_max',
'45_in_mean',
'45_in_mean',
'45_in_mean',
'45_in_min',
'45_in_min',
'45_in_min',
'45_in_std',
'45_in_sum',
'45_in_sum',
'45_in_sum',
'45_out_std',
'4_in_max',
'4_in_max',
'4_in_mean',
'4_in_mean',
'4_in_min',
'4_in_min',
'4_in_std',
'4_in_std',
'4_in_sum',
'4_in_sum',
'4_out_std',
'5_in_max',
'5_in_max',
'5_in_mean',
'5_in_mean',
'5_in_min',
'5_in_min',
'5_in_std',
'5_in_std',
'5_in_sum',
'5_in_sum',
'7_in_max',
'7_in_mean',
'7_in_min',
'7_in_std',
'7_in_sum',
'7_out_std',
'C_10',
'C_10',
'C_10',
'C_20',
'C_20',
'C_50',
'C_50',
'C_50',
'C_cate',
'C_cate',
'RC_dot',
'RC_dot',
'RC_sum',
'RC_sum',
'R_20',
'R_20',
'R_5',
'R_5',
'R_50',
'R_50',
'R__C_20__10',
'R__C_20__10',
'R__C_20__20',
'R__C_20__20',
'R__C_20__50',
'R__C_20__50',
'R__C_20__50',
'R__C_50__10',
'R__C_50__10',
'R__C_50__10',
'R__C_50__20',
'R__C_50__20',
'R__C_50__50',
'R__C_50__50',
'R__C_5__10',
'R__C_5__10',
'R__C_5__20',
'R__C_5__20',
'R__C_5__50',
'R__C_5__50',
'R__C_5__50',
'R_cate',
'R_cate',
'R_mult_c_100',
'R_mult_c_100',
'R_mult_c_1000',
'R_mult_c_1000',
'R_mult_c_200',
'R_mult_c_200',
'R_mult_c_250',
'R_mult_c_250',
'R_mult_c_250',
'R_mult_c_2500',
'R_mult_c_2500',
'R_mult_c_400',
'R_mult_c_400',
'R_mult_c_50',
'R_mult_c_50',
'R_mult_c_500',
'R_mult_c_500',
'R_mult_c_500',
'R_sum_c_100',
'R_sum_c_100',
'R_sum_c_15',
'R_sum_c_15',
'R_sum_c_25',
'R_sum_c_25',
'R_sum_c_30',
'R_sum_c_30',
'R_sum_c_40',
'R_sum_c_40',
'R_sum_c_55',
'R_sum_c_55',
'R_sum_c_55',
'R_sum_c_60',
'R_sum_c_60',
'R_sum_c_60',
'R_sum_c_70',
'R_sum_c_70',
'area',
'area',
'area',
'breath_id__u_in__diffmax',
'breath_id__u_in__diffmax',
'breath_id__u_in__diffmean',
'breath_id__u_in__diffmean',
'breath_id__u_in__diffmean',
'breath_id__u_in__max',
'breath_id__u_in__max',
'breath_id__u_in_lag',
'breath_id__u_in_lag',
'breath_id__u_in_lag',
'breath_id__u_in_lag2',
'breath_id__u_in_lag2',
'breath_id__u_in_lag2',
'cross',
'cross',
'cross2',
'ewm_u_in_corr',
'ewm_u_in_corr_15',
'ewm_u_in_corr_20',
'ewm_u_in_corr_5',
'ewm_u_in_corr_9',
'ewm_u_in_mean',
'ewm_u_in_mean_10',
'ewm_u_in_mean_10',
'ewm_u_in_mean_15',
'ewm_u_in_mean_15',
'ewm_u_in_mean_2',
'ewm_u_in_mean_20',
'ewm_u_in_mean_20',
'ewm_u_in_mean_3',
'ewm_u_in_mean_30',
'ewm_u_in_mean_4',
'ewm_u_in_mean_40',
'ewm_u_in_mean_45',
'ewm_u_in_mean_5',
'ewm_u_in_mean_5',
'ewm_u_in_mean_7',
'ewm_u_in_mean_9',
'ewm_u_in_std',
'ewm_u_in_std_10',
'ewm_u_in_std_15',
'ewm_u_in_std_2',
'ewm_u_in_std_20',
'ewm_u_in_std_5',
'ewm_u_in_std_9',
'expand_max',
'expand_max',
'expand_mean',
'expand_mean',
'expand_mean',
'expand_std',
'expand_std',
'rolling_10_max',
'rolling_10_mean',
'rolling_10_std',
'time_step',
'time_step',
'time_step_diff',
'time_step_diff',
'time_step_diff',
'u_in',
'u_in',
'u_in',
'u_in_cummean',
'u_in_cummean',
'u_in_cummean',
'u_in_cumsum',
'u_in_cumsum',
'u_in_cumsum',
'u_in_diff-1',
'u_in_diff-10',
'u_in_diff-2',
'u_in_diff-3',
'u_in_diff-4',
'u_in_diff-5',
'u_in_diff-6',
'u_in_diff-7',
'u_in_diff-8',
'u_in_diff-9',
'u_in_diff1',
'u_in_diff1',
'u_in_diff1',
'u_in_diff10',
'u_in_diff2',
'u_in_diff2',
'u_in_diff2',
'u_in_diff3',
'u_in_diff3',
'u_in_diff3',
'u_in_diff4',
'u_in_diff4',
'u_in_diff4',
'u_in_diff5',
'u_in_diff6',
'u_in_diff7',
'u_in_diff8',
'u_in_diff9',
'u_in_lag-1',
'u_in_lag-10',
'u_in_lag-2',
'u_in_lag-3',
'u_in_lag-4',
'u_in_lag-5',
'u_in_lag-6',
'u_in_lag-7',
'u_in_lag-8',
'u_in_lag-9',
'u_in_lag1',
'u_in_lag1',
'u_in_lag1',
'u_in_lag10',
'u_in_lag2',
'u_in_lag2',
'u_in_lag2',
'u_in_lag3',
'u_in_lag3',
'u_in_lag3',
'u_in_lag4',
'u_in_lag4',
'u_in_lag4',
'u_in_lag5',
'u_in_lag6',
'u_in_lag7',
'u_in_lag8',
'u_in_lag9',
'u_in_lag_back1',
'u_in_lag_back1',
'u_in_lag_back10',
'u_in_lag_back10',
'u_in_lag_back2',
'u_in_lag_back2',
'u_in_lag_back3',
'u_in_lag_back3',
'u_in_lag_back4',
'u_in_lag_back4',
'u_in_lagback_diff1',
'u_in_lagback_diff1',
'u_in_lagback_diff10',
'u_in_lagback_diff2',
'u_in_lagback_diff2',
'u_in_lagback_diff3',
'u_in_lagback_diff4',
'u_in_rolling_max10',
'u_in_rolling_max2',
'u_in_rolling_max4',
'u_in_rolling_mean10',
'u_in_rolling_mean2',
'u_in_rolling_mean4',
'u_in_rolling_min10',
'u_in_rolling_min2',
'u_in_rolling_min4',
'u_in_rolling_std10',
'u_in_rolling_std2',
'u_in_rolling_std4',
'u_out',
'u_out',
'u_out_diff1',
'u_out_diff2',
'u_out_diff3',
'u_out_diff3',
'u_out_diff4',
'u_out_diff4',
'u_out_lag1',
'u_out_lag2',
'u_out_lag3',
'u_out_lag4',
'u_out_lag_back1',
'u_out_lag_back10',
'u_out_lag_back10',
'u_out_lag_back2',
'u_out_lag_back3',
'u_out_lag_back3',
'u_out_lag_back4',
'u_out_lag_back4',
'u_out_lagback_diff1',
'u_out_lagback_diff1',
'u_out_lagback_diff10',
'u_out_lagback_diff2',
'u_out_lagback_diff2',
'u_out_lagback_diff3',
'u_out_lagback_diff4'] + ['fold'] + ['id', 'breath_id', 'one', 'count', 'breath_id_lag', 'breath_id_lag2', 'breath_id_lagsame',
'breath_id_lag2same', 'pressure']
best_cols = [col for col in best_cols if col in data.columns]
data = data[best_cols]
return data.fillna(0)
def make_features14(self, data):
"""
        Feature set from make_features8 plus several more features
        (u_in_rate and area_gap based on per-row time-step widths).
Args:
data:
Returns:
"""
data['time_step_diff'] = data['time_step'] - data.groupby('breath_id')['time_step'].shift(1).fillna(0)
data['u_in_rate'] = (data['u_in'] - data.groupby('breath_id')['u_in'].shift(1).fillna(0)) / data['time_step_diff']
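        # NB: where time_step_diff is 0 (e.g. the first row of a breath) u_in_rate becomes
        # inf or NaN; the subsequent fillna(0) only replaces NaN, not inf.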
data['area_gap'] = data['u_in'] * data['time_step_diff']
data['area_gap'] = data.groupby('breath_id')['area_gap'].cumsum()
data['area'] = data['time_step'] * data['u_in']
data['area'] = data.groupby('breath_id')['area'].cumsum()
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['u_in_lag1'] = data.groupby('breath_id')['u_in'].shift(1)
data['u_out_lag1'] = data.groupby('breath_id')['u_out'].shift(1)
data['u_in_lag_back1'] = data.groupby('breath_id')['u_in'].shift(-1)
data['u_out_lag_back1'] = data.groupby('breath_id')['u_out'].shift(-1)
data['u_in_lag2'] = data.groupby('breath_id')['u_in'].shift(2)
data['u_out_lag2'] = data.groupby('breath_id')['u_out'].shift(2)
data['u_in_lag_back2'] = data.groupby('breath_id')['u_in'].shift(-2)
data['u_out_lag_back2'] = data.groupby('breath_id')['u_out'].shift(-2)
data['u_in_lag3'] = data.groupby('breath_id')['u_in'].shift(3)
data['u_out_lag3'] = data.groupby('breath_id')['u_out'].shift(3)
data['u_in_lag_back3'] = data.groupby('breath_id')['u_in'].shift(-3)
data['u_out_lag_back3'] = data.groupby('breath_id')['u_out'].shift(-3)
data['u_in_lag4'] = data.groupby('breath_id')['u_in'].shift(4)
data['u_out_lag4'] = data.groupby('breath_id')['u_out'].shift(4)
data['u_in_lag_back4'] = data.groupby('breath_id')['u_in'].shift(-4)
data['u_out_lag_back4'] = data.groupby('breath_id')['u_out'].shift(-4)
data = data.fillna(0)
data['breath_id__u_in__max'] = data.groupby(['breath_id'])['u_in'].transform('max')
data['breath_id__u_out__max'] = data.groupby(['breath_id'])['u_out'].transform('max')
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['u_in_diff3'] = data['u_in'] - data['u_in_lag3']
data['u_out_diff3'] = data['u_out'] - data['u_out_lag3']
data['u_in_diff4'] = data['u_in'] - data['u_in_lag4']
data['u_out_diff4'] = data['u_out'] - data['u_out_lag4']
data['cross'] = data['u_in'] * data['u_out']
data['cross2'] = data['time_step'] * data['u_out']
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_cummean'] = data['u_in_cumsum'] / data['count']
data['breath_id_lag'] = data['breath_id'].shift(1).fillna(0)
data['breath_id_lag2'] = data['breath_id'].shift(2).fillna(0)
data['breath_id_lagsame'] = np.select([data['breath_id_lag'] == data['breath_id']], [1], 0)
data['breath_id_lag2same'] = np.select([data['breath_id_lag2'] == data['breath_id']], [1], 0)
data['breath_id__u_in_lag'] = data['u_in'].shift(1).fillna(0)
data['breath_id__u_in_lag'] = data['breath_id__u_in_lag'] * data['breath_id_lagsame']
data['breath_id__u_in_lag2'] = data['u_in'].shift(2).fillna(0)
data['breath_id__u_in_lag2'] = data['breath_id__u_in_lag2'] * data['breath_id_lag2same']
c_dic = {10: 0, 20: 1, 50: 2}
r_dic = {5: 0, 20: 1, 50: 2}
rc_sum_dic = {v: i for i, v in enumerate([15, 25, 30, 40, 55, 60, 70, 100])}
rc_dot_dic = {v: i for i, v in enumerate([50, 100, 200, 250, 400, 500, 2500, 1000])}
data['C_cate'] = data['C'].map(c_dic)
data['R_cate'] = data['R'].map(r_dic)
data['RC_sum'] = (data['R'] + data['C']).map(rc_sum_dic)
data['RC_dot'] = (data['R'] * data['C']).map(rc_dot_dic)
data['R_sum_c'] = (data['R'] + data['C']).astype(str)
data['R_mult_c'] = (data['R'] * data['C']).astype(str)
data['R'] = data['R'].astype(str)
data['C'] = data['C'].astype(str)
data['R__C'] = data["R"].astype(str) + '__' + data["C"].astype(str)
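# pd.get_dummies one-hot encodes every remaining object (string) column,
# i.e. R, C, R__C, R_sum_c and R_mult_c created above.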
data = pd.get_dummies(data)
data['u_in_lagback_diff1'] = data['u_in'] - data['u_in_lag_back1']
data['u_out_lagback_diff1'] = data['u_out'] - data['u_out_lag_back1']
data['u_in_lagback_diff2'] = data['u_in'] - data['u_in_lag_back2']
data['u_out_lagback_diff2'] = data['u_out'] - data['u_out_lag_back2']
data['u_in_lagback_diff3'] = data['u_in'] - data['u_in_lag_back3']
data['u_out_lagback_diff3'] = data['u_out'] - data['u_out_lag_back3']
data['u_in_lagback_diff4'] = data['u_in'] - data['u_in_lag_back4']
data['u_out_lagback_diff4'] = data['u_out'] - data['u_out_lag_back4']
######
data['u_in_lag_back10'] = data.groupby('breath_id')['u_in'].shift(-10)
data['u_out_lag_back10'] = data.groupby('breath_id')['u_out'].shift(-10)
data['u_in_lagback_diff10'] = data['u_in'] - data['u_in_lag_back10']
data['u_out_lagback_diff10'] = data['u_out'] - data['u_out_lag_back10']
data['time_step_diff'] = data['time_step'] - data.groupby('breath_id')['time_step'].shift().fillna(0)
data[["15_out_sum", "15_out_min", "15_out_max", "15_out_mean", "15_out_std"]] = (data \
.groupby('breath_id')['u_out'] \
.rolling(window=15, min_periods=1) \
.agg({"15_out_sum": "sum",
"15_out_min": "min",
"15_out_max": "max",
"15_out_mean": "mean",
"15_out_std": "std"
}).reset_index(level=0, drop=True))
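# Rolling statistics of u_in over several window sizes, computed within each breath
# (min_periods=1, so sum/min/max/mean are defined from the first row; std stays NaN until two samples).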
for window in [2, 4, 5, 10, 15, 20, 30, 45]:
data[[f"{window}_in_sum", f"{window}_in_min", f"{window}_in_max",
f"{window}_in_mean", f"{window}_in_std"]] = (data.groupby('breath_id')['u_in'].rolling(window=window,
min_periods=1) \
.agg({f"{window}_in_sum": "sum",
f"{window}_in_min": "min",
f"{window}_in_max": "max",
f"{window}_in_mean": "mean",
f"{window}_in_std": "std"
}).reset_index(level=0,
drop=True))
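# Exponentially weighted mean/std/corr of u_in for several halflives, computed within each breath
# (corr with no second series is the EWM correlation of u_in with itself).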
for halflife in [5, 9, 10, 15, 20]:
data[f'ewm_u_in_mean_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).mean().reset_index(level=0,
drop=True)
data[f'ewm_u_in_std_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).std().reset_index(level=0, drop=True)
data[f'ewm_u_in_corr_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).corr().reset_index(level=0,
drop=True)
data['expand_mean'] = data.groupby('breath_id')['u_in'].expanding(2).mean().reset_index(level=0, drop=True)
data['expand_max'] = data.groupby('breath_id')['u_in'].expanding(2).max().reset_index(level=0, drop=True)
data['expand_std'] = data.groupby('breath_id')['u_in'].expanding(2).std().reset_index(level=0, drop=True)
return data.fillna(0).replace(np.inf, 0)
def make_features15(self, data):
"""
Feature set 8 plus RCT, a physics-inspired volume estimate and per-breath peak/last statistics.

Args:
    data: dataframe with breath_id, time_step, u_in, u_out, R, C columns

Returns:
    dataframe with engineered features, NaN filled with 0
"""
data['RCT'] = data['R'] * data['time_step'] / data['C']
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['area'] = data['time_step'] * data['u_in']
data['area'] = data.groupby('breath_id')['area'].cumsum()
data['time_step_cumsum'] = data.groupby(['breath_id'])['time_step'].cumsum()
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
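# Physics-inspired volume estimate: exponent/factor follow an RC exponential decay
# (time constant R*C), vf and vt are two volume-like terms and v is their sum.
# This is the author's heuristic approximation, not an exact lung model.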
data['exponent'] = (-1.0 * data['time_step']) / (data['R'] * data['C'])
data['factor'] = np.exp(data['exponent'])
data['vf'] = (data['u_in_cumsum'] * data['R']) / data['factor']
data['vt'] = 0
data.loc[data['time_step'] != 0, 'vt'] = data['area'] / (
data['C'] * (-1.0 * data['factor'] + 1.0))
data['v'] = data['vf'] + data['vt']
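# Distance-to-peak / distance-to-last features. Note the groupby aggregations below are not
# broadcast with transform(), so they rely on index alignment and can yield NaN/misaligned
# values; NaNs are filled with 0 by the later fillna(0).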
data['max_u_in_diff'] = data['u_in'] - data.groupby(['breath_id'])['u_in'].max()
data['max_u_in_diff_u_out_0'] = data['u_in'] - data.loc[data['u_out'] == 0].groupby(['breath_id'])['u_in'].max()
data['last_u_in_diff'] = data['u_in'] - data.groupby(['breath_id'])['u_in'].last()
data['last_u_in_diff_u_out_0'] = data['u_in'] - data.loc[data['u_out'] == 0].groupby(['breath_id'])['u_in'].last()
data['count_till_max_u_in'] = data['count'] - data.groupby(['breath_id'])['u_in'].idxmax()
data['count_till_max_u_in_out_0'] = data['count'] - data.loc[data['u_out'] == 0].groupby(['breath_id'])['u_in'].idxmax()
data['count_till_last_u_out_0'] = data['count'] - data.loc[data['u_out'] == 0].groupby(['breath_id'])['count'].last()
data['time_step_till_max_u_in'] = data['time_step'] - data.loc[data.groupby(['breath_id'])['u_in'].idxmax(), 'time_step']
data['time_step_till_max_u_in_out_0'] = data['time_step'] - data.loc[data.loc[data['u_out'] == 0].groupby(['breath_id'])['u_in'].idxmax(), 'time_step']
data['time_step_till_last_u_out_0'] = data['time_step'] - data.loc[data['u_out'] == 0].groupby(['breath_id'])['time_step'].last()
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['u_in_lag1'] = data.groupby('breath_id')['u_in'].shift(1)
data['u_out_lag1'] = data.groupby('breath_id')['u_out'].shift(1)
data['u_in_lag_back1'] = data.groupby('breath_id')['u_in'].shift(-1)
data['u_out_lag_back1'] = data.groupby('breath_id')['u_out'].shift(-1)
data['u_in_lag2'] = data.groupby('breath_id')['u_in'].shift(2)
data['u_out_lag2'] = data.groupby('breath_id')['u_out'].shift(2)
data['u_in_lag_back2'] = data.groupby('breath_id')['u_in'].shift(-2)
data['u_out_lag_back2'] = data.groupby('breath_id')['u_out'].shift(-2)
data['u_in_lag3'] = data.groupby('breath_id')['u_in'].shift(3)
data['u_out_lag3'] = data.groupby('breath_id')['u_out'].shift(3)
data['u_in_lag_back3'] = data.groupby('breath_id')['u_in'].shift(-3)
data['u_out_lag_back3'] = data.groupby('breath_id')['u_out'].shift(-3)
data['u_in_lag4'] = data.groupby('breath_id')['u_in'].shift(4)
data['u_out_lag4'] = data.groupby('breath_id')['u_out'].shift(4)
data['u_in_lag_back4'] = data.groupby('breath_id')['u_in'].shift(-4)
data['u_out_lag_back4'] = data.groupby('breath_id')['u_out'].shift(-4)
data = data.fillna(0)
data['breath_id__u_in__max'] = data.groupby(['breath_id'])['u_in'].transform('max')
data['breath_id__u_out__max'] = data.groupby(['breath_id'])['u_out'].transform('max')
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['u_in_diff3'] = data['u_in'] - data['u_in_lag3']
data['u_out_diff3'] = data['u_out'] - data['u_out_lag3']
data['u_in_diff4'] = data['u_in'] - data['u_in_lag4']
data['u_out_diff4'] = data['u_out'] - data['u_out_lag4']
data['cross'] = data['u_in'] * data['u_out']
data['cross2'] = data['time_step'] * data['u_out']
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_cummean'] = data['u_in_cumsum'] / data['count']
data['breath_id_lag'] = data['breath_id'].shift(1).fillna(0)
data['breath_id_lag2'] = data['breath_id'].shift(2).fillna(0)
data['breath_id_lagsame'] = np.select([data['breath_id_lag'] == data['breath_id']], [1], 0)
data['breath_id_lag2same'] = np.select([data['breath_id_lag2'] == data['breath_id']], [1], 0)
data['breath_id__u_in_lag'] = data['u_in'].shift(1).fillna(0)
data['breath_id__u_in_lag'] = data['breath_id__u_in_lag'] * data['breath_id_lagsame']
data['breath_id__u_in_lag2'] = data['u_in'].shift(2).fillna(0)
data['breath_id__u_in_lag2'] = data['breath_id__u_in_lag2'] * data['breath_id_lag2same']
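# Ordinal encodings of the lung attributes: C in {10, 20, 50}, R in {5, 20, 50},
# plus encodings of R + C and R * C over their possible combinations.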
c_dic = {10: 0, 20: 1, 50: 2}
r_dic = {5: 0, 20: 1, 50: 2}
rc_sum_dic = {v: i for i, v in enumerate([15, 25, 30, 40, 55, 60, 70, 100])}
rc_dot_dic = {v: i for i, v in enumerate([50, 100, 200, 250, 400, 500, 2500, 1000])}
data['C_cate'] = data['C'].map(c_dic)
data['R_cate'] = data['R'].map(r_dic)
data['RC_sum'] = (data['R'] + data['C']).map(rc_sum_dic)
data['RC_dot'] = (data['R'] * data['C']).map(rc_dot_dic)
data['R_sum_c'] = (data['R'] + data['C']).astype(str)
data['R_mult_c'] = (data['R'] * data['C']).astype(str)
data['R'] = data['R'].astype(str)
data['C'] = data['C'].astype(str)
data['R__C'] = data["R"].astype(str) + '__' + data["C"].astype(str)
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_RC_diff'] = data['u_in_diff1'] - data.groupby(['R__C', 'count'])['u_in_diff1'].transform('mean')
data['u_in_RC_diff_v2'] = data['u_in_diff1'] - data.groupby(['R__C'])['u_in_diff1'].transform('mean')
data = pd.get_dummies(data)
data['u_in_lagback_diff1'] = data['u_in'] - data['u_in_lag_back1']
data['u_out_lagback_diff1'] = data['u_out'] - data['u_out_lag_back1']
data['u_in_lagback_diff2'] = data['u_in'] - data['u_in_lag_back2']
data['u_out_lagback_diff2'] = data['u_out'] - data['u_out_lag_back2']
data['u_in_lagback_diff3'] = data['u_in'] - data['u_in_lag_back3']
data['u_out_lagback_diff3'] = data['u_out'] - data['u_out_lag_back3']
data['u_in_lagback_diff4'] = data['u_in'] - data['u_in_lag_back4']
data['u_out_lagback_diff4'] = data['u_out'] - data['u_out_lag_back4']
######
data['u_in_lag_back10'] = data.groupby('breath_id')['u_in'].shift(-10)
data['u_out_lag_back10'] = data.groupby('breath_id')['u_out'].shift(-10)
data['u_in_lagback_diff10'] = data['u_in'] - data['u_in_lag_back10']
data['u_out_lagback_diff10'] = data['u_out'] - data['u_out_lag_back10']
data['time_step_diff'] = data['time_step'] - data.groupby('breath_id')['time_step'].shift().fillna(0)
data[["15_out_sum", "15_out_min", "15_out_max", "15_out_mean", "15_out_std"]] = (data \
.groupby('breath_id')['u_out'] \
.rolling(window=15, min_periods=1) \
.agg({"15_out_sum": "sum",
"15_out_min": "min",
"15_out_max": "max",
"15_out_mean": "mean",
"15_out_std": "std"
}).reset_index(level=0, drop=True))
for window in [2, 4, 5, 10, 15, 20, 30, 45]:
data[[f"{window}_in_sum", f"{window}_in_min", f"{window}_in_max",
f"{window}_in_mean", f"{window}_in_std"]] = (data.groupby('breath_id')['u_in'].rolling(window=window,
min_periods=1) \
.agg({f"{window}_in_sum": "sum",
f"{window}_in_min": "min",
f"{window}_in_max": "max",
f"{window}_in_mean": "mean",
f"{window}_in_std": "std"
}).reset_index(level=0,
drop=True))
for halflife in [5, 9, 10, 15, 20]:
data[f'ewm_u_in_mean_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).mean().reset_index(level=0,
drop=True)
data[f'ewm_u_in_std_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).std().reset_index(level=0, drop=True)
data[f'ewm_u_in_corr_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).corr().reset_index(level=0,
drop=True)
data['expand_mean'] = data.groupby('breath_id')['u_in'].expanding(2).mean().reset_index(level=0, drop=True)
data['expand_max'] = data.groupby('breath_id')['u_in'].expanding(2).max().reset_index(level=0, drop=True)
data['expand_std'] = data.groupby('breath_id')['u_in'].expanding(2).std().reset_index(level=0, drop=True)
return data.fillna(0)
def make_features81(self, data):
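# Variant of feature set 8: u_in is first rescaled by 2 * exp(-0.1 * C / R)
# before the usual lag/rolling/EWM features are computed.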
data['u_in'] = 2 * data.u_in * (np.exp(-0.1 * data.C / data.R))
data['area'] = data['time_step'] * data['u_in']
data['area'] = data.groupby('breath_id')['area'].cumsum()
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['u_in_lag1'] = data.groupby('breath_id')['u_in'].shift(1)
data['u_out_lag1'] = data.groupby('breath_id')['u_out'].shift(1)
data['u_in_lag_back1'] = data.groupby('breath_id')['u_in'].shift(-1)
data['u_out_lag_back1'] = data.groupby('breath_id')['u_out'].shift(-1)
data['u_in_lag2'] = data.groupby('breath_id')['u_in'].shift(2)
data['u_out_lag2'] = data.groupby('breath_id')['u_out'].shift(2)
data['u_in_lag_back2'] = data.groupby('breath_id')['u_in'].shift(-2)
data['u_out_lag_back2'] = data.groupby('breath_id')['u_out'].shift(-2)
data['u_in_lag3'] = data.groupby('breath_id')['u_in'].shift(3)
data['u_out_lag3'] = data.groupby('breath_id')['u_out'].shift(3)
data['u_in_lag_back3'] = data.groupby('breath_id')['u_in'].shift(-3)
data['u_out_lag_back3'] = data.groupby('breath_id')['u_out'].shift(-3)
data['u_in_lag4'] = data.groupby('breath_id')['u_in'].shift(4)
data['u_out_lag4'] = data.groupby('breath_id')['u_out'].shift(4)
data['u_in_lag_back4'] = data.groupby('breath_id')['u_in'].shift(-4)
data['u_out_lag_back4'] = data.groupby('breath_id')['u_out'].shift(-4)
data = data.fillna(0)
data['breath_id__u_in__max'] = data.groupby(['breath_id'])['u_in'].transform('max')
data['breath_id__u_out__max'] = data.groupby(['breath_id'])['u_out'].transform('max')
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['u_in_diff3'] = data['u_in'] - data['u_in_lag3']
data['u_out_diff3'] = data['u_out'] - data['u_out_lag3']
data['u_in_diff4'] = data['u_in'] - data['u_in_lag4']
data['u_out_diff4'] = data['u_out'] - data['u_out_lag4']
data['cross'] = data['u_in'] * data['u_out']
data['cross2'] = data['time_step'] * data['u_out']
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_cummean'] = data['u_in_cumsum'] / data['count']
data['breath_id_lag'] = data['breath_id'].shift(1).fillna(0)
data['breath_id_lag2'] = data['breath_id'].shift(2).fillna(0)
data['breath_id_lagsame'] = np.select([data['breath_id_lag'] == data['breath_id']], [1], 0)
data['breath_id_lag2same'] = np.select([data['breath_id_lag2'] == data['breath_id']], [1], 0)
data['breath_id__u_in_lag'] = data['u_in'].shift(1).fillna(0)
data['breath_id__u_in_lag'] = data['breath_id__u_in_lag'] * data['breath_id_lagsame']
data['breath_id__u_in_lag2'] = data['u_in'].shift(2).fillna(0)
data['breath_id__u_in_lag2'] = data['breath_id__u_in_lag2'] * data['breath_id_lag2same']
c_dic = {10: 0, 20: 1, 50: 2}
r_dic = {5: 0, 20: 1, 50: 2}
rc_sum_dic = {v: i for i, v in enumerate([15, 25, 30, 40, 55, 60, 70, 100])}
rc_dot_dic = {v: i for i, v in enumerate([50, 100, 200, 250, 400, 500, 2500, 1000])}
data['C_cate'] = data['C'].map(c_dic)
data['R_cate'] = data['R'].map(r_dic)
data['RC_sum'] = (data['R'] + data['C']).map(rc_sum_dic)
data['RC_dot'] = (data['R'] * data['C']).map(rc_dot_dic)
data['R_sum_c'] = (data['R'] + data['C']).astype(str)
data['R_mult_c'] = (data['R'] * data['C']).astype(str)
data['R'] = data['R'].astype(str)
data['C'] = data['C'].astype(str)
data['R__C'] = data["R"].astype(str) + '__' + data["C"].astype(str)
data = pd.get_dummies(data)
data['u_in_lagback_diff1'] = data['u_in'] - data['u_in_lag_back1']
data['u_out_lagback_diff1'] = data['u_out'] - data['u_out_lag_back1']
data['u_in_lagback_diff2'] = data['u_in'] - data['u_in_lag_back2']
data['u_out_lagback_diff2'] = data['u_out'] - data['u_out_lag_back2']
data['u_in_lagback_diff3'] = data['u_in'] - data['u_in_lag_back3']
data['u_out_lagback_diff3'] = data['u_out'] - data['u_out_lag_back3']
data['u_in_lagback_diff4'] = data['u_in'] - data['u_in_lag_back4']
data['u_out_lagback_diff4'] = data['u_out'] - data['u_out_lag_back4']
######
data['u_in_lag_back10'] = data.groupby('breath_id')['u_in'].shift(-10)
data['u_out_lag_back10'] = data.groupby('breath_id')['u_out'].shift(-10)
data['u_in_lagback_diff10'] = data['u_in'] - data['u_in_lag_back10']
data['u_out_lagback_diff10'] = data['u_out'] - data['u_out_lag_back10']
data['time_step_diff'] = data['time_step'] - data.groupby('breath_id')['time_step'].shift().fillna(0)
data[["15_out_sum", "15_out_min", "15_out_max", "15_out_mean", "15_out_std"]] = (data \
.groupby('breath_id')['u_out'] \
.rolling(window=15, min_periods=1) \
.agg({"15_out_sum": "sum",
"15_out_min": "min",
"15_out_max": "max",
"15_out_mean": "mean",
"15_out_std": "std"
}).reset_index(level=0, drop=True))
for window in [2, 4, 5, 10, 15, 20, 30, 45]:
data[[f"{window}_in_sum", f"{window}_in_min", f"{window}_in_max",
f"{window}_in_mean", f"{window}_in_std"]] = (data.groupby('breath_id')['u_in'].rolling(window=window,
min_periods=1) \
.agg({f"{window}_in_sum": "sum",
f"{window}_in_min": "min",
f"{window}_in_max": "max",
f"{window}_in_mean": "mean",
f"{window}_in_std": "std"
}).reset_index(level=0,
drop=True))
for halflife in [5, 9, 10, 15, 20]:
data[f'ewm_u_in_mean_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).mean().reset_index(level=0,
drop=True)
data[f'ewm_u_in_std_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).std().reset_index(level=0, drop=True)
data[f'ewm_u_in_corr_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).corr().reset_index(level=0,
drop=True)
data['expand_mean'] = data.groupby('breath_id')['u_in'].expanding(2).mean().reset_index(level=0, drop=True)
data['expand_max'] = data.groupby('breath_id')['u_in'].expanding(2).max().reset_index(level=0, drop=True)
data['expand_std'] = data.groupby('breath_id')['u_in'].expanding(2).std().reset_index(level=0, drop=True)
return data.fillna(0)
def make_features61(self, data):
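# Variant of feature set 6: u_in is rescaled by 2 * exp(-0.1 * C / R), lag features are built for
# cfg.datamodule.use_lag lags, and only ALL_FEATURES (plus id/meta columns) are returned.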
data['u_in'] = 2 * data.u_in * (np.exp(-0.1 * data.C / data.R))
# CATE_FEATURES = ['R_cate', 'C_cate', 'RC_dot', 'RC_sum']
CONT_FEATURES = ['u_in', 'u_out', 'time_step'] + ['u_in_cumsum', 'u_in_cummean', 'area', 'cross', 'cross2'] + [
'R_cate', 'C_cate']
LAG_FEATURES = ['breath_time']
LAG_FEATURES += [f'u_in_lag_{i}' for i in range(1, self.cfg.datamodule.use_lag + 1)]
# LAG_FEATURES += [f'u_in_lag_{i}_back' for i in range(1, USE_LAG+1)]
LAG_FEATURES += [f'u_in_time{i}' for i in range(1, self.cfg.datamodule.use_lag + 1)]
# LAG_FEATURES += [f'u_in_time{i}_back' for i in range(1, USE_LAG+1)]
LAG_FEATURES += [f'u_out_lag_{i}' for i in range(1, self.cfg.datamodule.use_lag + 1)]
# LAG_FEATURES += [f'u_out_lag_{i}_back' for i in range(1, USE_LAG+1)]
# ALL_FEATURES = CATE_FEATURES + CONT_FEATURES + LAG_FEATURES
ALL_FEATURES = CONT_FEATURES + LAG_FEATURES
for col in ['id', 'breath_id', 'one', 'count', 'breath_id_lag', 'breath_id_lag2', 'breath_id_lagsame',
'breath_id_lag2same', 'pressure', 'fold']:
if col in data.columns:
ALL_FEATURES.append(col)
data['time_delta'] = data.groupby('breath_id')['time_step'].diff().fillna(0)
data['delta'] = data['time_delta'] * data['u_in']
data['area'] = data.groupby('breath_id')['delta'].cumsum()
data['cross'] = data['u_in'] * data['u_out']
data['cross2'] = data['time_step'] * data['u_out']
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_cummean'] = data['u_in_cumsum'] / data['count']
data = data.drop(['count', 'one'], axis=1)
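# Build lag features for each configured lag: breath_id_lag{lag}same marks rows that still belong
# to the same breath, so shifted u_in/u_out values leaking from the previous breath are zeroed out.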
for lag in range(1, self.cfg.datamodule.use_lag + 1):
data[f'breath_id_lag{lag}'] = data['breath_id'].shift(lag).fillna(0)
data[f'breath_id_lag{lag}same'] = np.select([data[f'breath_id_lag{lag}'] == data['breath_id']], [1], 0)
# u_in
data[f'u_in_lag_{lag}'] = data['u_in'].shift(lag).fillna(0) * data[f'breath_id_lag{lag}same']
# data[f'u_in_lag_{lag}_back'] = data['u_in'].shift(-lag).fillna(0) * data[f'breath_id_lag{lag}same']
data[f'u_in_time{lag}'] = data['u_in'] - data[f'u_in_lag_{lag}']
# data[f'u_in_time{lag}_back'] = data['u_in'] - data[f'u_in_lag_{lag}_back']
data[f'u_out_lag_{lag}'] = data['u_out'].shift(lag).fillna(0) * data[f'breath_id_lag{lag}same']
# data[f'u_out_lag_{lag}_back'] = data['u_out'].shift(-lag).fillna(0) * data[f'breath_id_lag{lag}same']
# breath_time
data['time_step_lag'] = data['time_step'].shift(1).fillna(0) * data[f'breath_id_lag{lag}same']
data['breath_time'] = data['time_step'] - data['time_step_lag']
drop_columns = ['time_step_lag']
drop_columns += [f'breath_id_lag{i}' for i in range(1, self.cfg.datamodule.use_lag + 1)]
drop_columns += [f'breath_id_lag{i}same' for i in range(1, self.cfg.datamodule.use_lag + 1)]
data = data.drop(drop_columns, axis=1)
# fill na by zero
data = data.fillna(0)
c_dic = {10: 0, 20: 1, 50: 2}
r_dic = {5: 0, 20: 1, 50: 2}
rc_sum_dic = {v: i for i, v in enumerate([15, 25, 30, 40, 55, 60, 70, 100])}
rc_dot_dic = {v: i for i, v in enumerate([50, 100, 200, 250, 400, 500, 2500, 1000])}
data['C_cate'] = data['C'].map(c_dic)
data['R_cate'] = data['R'].map(r_dic)
data['RC_sum'] = (data['R'] + data['C']).map(rc_sum_dic)
data['RC_dot'] = (data['R'] * data['C']).map(rc_dot_dic)
norm_features = CONT_FEATURES + LAG_FEATURES
for col in ['id', 'breath_id', 'one', 'count', 'breath_id_lag', 'breath_id_lag2', 'breath_id_lagsame',
'breath_id_lag2same', 'pressure', 'fold']:
if col in data.columns:
norm_features.append(col)
if 'fold' in data.columns:
norm_features.append('fold')
norm_features = list(set(norm_features))
ALL_FEATURES = list(set(ALL_FEATURES))
print('data.columns', data.columns)
print('ALL_FEATURES', ALL_FEATURES)
# assert norm_features == ALL_FEATURES, 'something went wrong'
return data[ALL_FEATURES].fillna(0)
def make_features31(self, data):
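# Variant of feature set 3: u_in is first rescaled by 2 * exp(-0.1 * C / R)
# before the lag/rolling/EWM features are computed.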
data['u_in'] = 2 * data.u_in * (np.exp(-0.1 * data.C / data.R))
data['area'] = data['time_step'] * data['u_in']
data['area'] = data.groupby('breath_id')['area'].cumsum()
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['u_in_lag1'] = data.groupby('breath_id')['u_in'].shift(1)
data['u_out_lag1'] = data.groupby('breath_id')['u_out'].shift(1)
data['u_in_lag_back1'] = data.groupby('breath_id')['u_in'].shift(-1)
data['u_out_lag_back1'] = data.groupby('breath_id')['u_out'].shift(-1)
data['u_in_lag2'] = data.groupby('breath_id')['u_in'].shift(2)
data['u_out_lag2'] = data.groupby('breath_id')['u_out'].shift(2)
data['u_in_lag_back2'] = data.groupby('breath_id')['u_in'].shift(-2)
data['u_out_lag_back2'] = data.groupby('breath_id')['u_out'].shift(-2)
data['u_in_lag3'] = data.groupby('breath_id')['u_in'].shift(3)
data['u_out_lag3'] = data.groupby('breath_id')['u_out'].shift(3)
data['u_in_lag_back3'] = data.groupby('breath_id')['u_in'].shift(-3)
data['u_out_lag_back3'] = data.groupby('breath_id')['u_out'].shift(-3)
data['u_in_lag4'] = data.groupby('breath_id')['u_in'].shift(4)
data['u_out_lag4'] = data.groupby('breath_id')['u_out'].shift(4)
data['u_in_lag_back4'] = data.groupby('breath_id')['u_in'].shift(-4)
data['u_out_lag_back4'] = data.groupby('breath_id')['u_out'].shift(-4)
data = data.fillna(0)
data['breath_id__u_in__max'] = data.groupby(['breath_id'])['u_in'].transform('max')
data['breath_id__u_out__max'] = data.groupby(['breath_id'])['u_out'].transform('max')
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['u_in_diff3'] = data['u_in'] - data['u_in_lag3']
data['u_out_diff3'] = data['u_out'] - data['u_out_lag3']
data['u_in_diff4'] = data['u_in'] - data['u_in_lag4']
data['u_out_diff4'] = data['u_out'] - data['u_out_lag4']
data['cross'] = data['u_in'] * data['u_out']
data['cross2'] = data['time_step'] * data['u_out']
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_cummean'] = data['u_in_cumsum'] / data['count']
data['breath_id_lag'] = data['breath_id'].shift(1).fillna(0)
data['breath_id_lag2'] = data['breath_id'].shift(2).fillna(0)
data['breath_id_lagsame'] = np.select([data['breath_id_lag'] == data['breath_id']], [1], 0)
data['breath_id_lag2same'] = np.select([data['breath_id_lag2'] == data['breath_id']], [1], 0)
data['breath_id__u_in_lag'] = data['u_in'].shift(1).fillna(0)
data['breath_id__u_in_lag'] = data['breath_id__u_in_lag'] * data['breath_id_lagsame']
data['breath_id__u_in_lag2'] = data['u_in'].shift(2).fillna(0)
data['breath_id__u_in_lag2'] = data['breath_id__u_in_lag2'] * data['breath_id_lag2same']
data['R_sum_c'] = (data['R'] + data['C']).astype(str)
data['R_mult_c'] = (data['R'] * data['C']).astype(str)
data['R'] = data['R'].astype(str)
data['C'] = data['C'].astype(str)
data['R__C'] = data["R"].astype(str) + '__' + data["C"].astype(str)
data = pd.get_dummies(data)
data[["15_in_sum", "15_in_min", "15_in_max", "15_in_mean"]] = (data \
.groupby('breath_id')['u_in'] \
.rolling(window=15, min_periods=1) \
.agg({"15_in_sum": "sum",
"15_in_min": "min",
"15_in_max": "max",
"15_in_mean": "mean"
# "15_in_std":"std"
}) \
.reset_index(level=0, drop=True))
data['u_in_lagback_diff1'] = data['u_in'] - data['u_in_lag_back1']
data['u_out_lagback_diff1'] = data['u_out'] - data['u_out_lag_back1']
data['u_in_lagback_diff2'] = data['u_in'] - data['u_in_lag_back2']
data['u_out_lagback_diff2'] = data['u_out'] - data['u_out_lag_back2']
data['ewm_u_in_mean'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).mean().reset_index(level=0,
drop=True)
data['ewm_u_in_std'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).std().reset_index(level=0, drop=True)
data['ewm_u_in_corr'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).corr().reset_index(level=0,
drop=True)
data['rolling_10_mean'] = data.groupby('breath_id')['u_in'].rolling(window=10,
min_periods=1).mean().reset_index(
level=0, drop=True)
data['rolling_10_max'] = data.groupby('breath_id')['u_in'].rolling(window=10, min_periods=1).max().reset_index(
level=0,
drop=True)
data['rolling_10_std'] = data.groupby('breath_id')['u_in'].rolling(window=10, min_periods=1).std().reset_index(
level=0,
drop=True)
data['expand_mean'] = data.groupby('breath_id')['u_in'].expanding(2).mean().reset_index(level=0, drop=True)
data['expand_max'] = data.groupby('breath_id')['u_in'].expanding(2).max().reset_index(level=0, drop=True)
data['expand_std'] = data.groupby('breath_id')['u_in'].expanding(2).std().reset_index(level=0, drop=True)
data["u_in_rolling_mean2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).mean()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_mean4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).mean()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_mean10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).mean()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_max2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).max()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_max4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).max()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_max10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).max()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_min2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).min()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_min4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).min()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_min10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).min()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_std2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).std()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_std4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).std()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_std10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).std()[
"u_in"].reset_index(drop=True)
g = data.groupby('breath_id')['u_in']
data['ewm_u_in_mean'] = g.ewm(halflife=10).mean() \
.reset_index(level=0, drop=True)
data['ewm_u_in_std'] = g.ewm(halflife=10).std() \
.reset_index(level=0, drop=True)
data['ewm_u_in_corr'] = g.ewm(halflife=10).corr() \
.reset_index(level=0, drop=True)
data['rolling_10_mean'] = g.rolling(window=10, min_periods=1).mean() \
.reset_index(level=0, drop=True)
data['rolling_10_max'] = g.rolling(window=10, min_periods=1).max() \
.reset_index(level=0, drop=True)
data['rolling_10_std'] = g.rolling(window=10, min_periods=1).std() \
.reset_index(level=0, drop=True)
data['expand_mean'] = g.expanding(2).mean() \
.reset_index(level=0, drop=True)
data['expand_max'] = g.expanding(2).max() \
.reset_index(level=0, drop=True)
data['expand_std'] = g.expanding(2).std() \
.reset_index(level=0, drop=True)
data['u_in_lag_back10'] = data.groupby('breath_id')['u_in'].shift(-10)
data['u_out_lag_back10'] = data.groupby('breath_id')['u_out'].shift(-10)
data['time_step_diff'] = data.groupby('breath_id')['time_step'].diff().fillna(0)
### rolling window ts feats
data['ewm_u_in_mean'] = data.groupby('breath_id')['u_in'].ewm(halflife=9).mean().reset_index(level=0,
drop=True)
data['ewm_u_in_std'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).std().reset_index(level=0,
drop=True) ## could add covar?
data['ewm_u_in_corr'] = data.groupby('breath_id')['u_in'].ewm(halflife=15).corr().reset_index(level=0,
drop=True) # self umin corr
# data['ewm_u_in_corr'] = data.groupby('breath_id')['u_in'].ewm(halflife=6).corr(data.groupby('breath_id')["u_out"]).reset_index(level=0,drop=True) # corr with u_out # error
## rolling window of 15 periods
data[["15_in_sum", "15_in_min", "15_in_max", "15_in_mean", "15_out_std"]] = data.groupby('breath_id')[
'u_in'].rolling(window=15, min_periods=1).agg(
{"15_in_sum": "sum", "15_in_min": "min", "15_in_max": "max", "15_in_mean": "mean",
"15_in_std": "std"}).reset_index(level=0, drop=True)
# data[["45_in_sum","45_in_min","45_in_max","45_in_mean","45_out_std"]] = data.groupby('breath_id')['u_in'].rolling(window=45,min_periods=1).agg({"45_in_sum":"sum","45_in_min":"min","45_in_max":"max","45_in_mean":"mean","45_in_std":"std"}).reset_index(level=0,drop=True)
data[["45_in_sum", "45_in_min", "45_in_max", "45_in_mean", "45_out_std"]] = data.groupby('breath_id')[
'u_in'].rolling(window=45, min_periods=1).agg(
{"45_in_sum": "sum", "45_in_min": "min", "45_in_max": "max", "45_in_mean": "mean",
"45_in_std": "std"}).reset_index(level=0, drop=True)
data[["15_out_mean"]] = data.groupby('breath_id')['u_out'].rolling(window=15, min_periods=1).agg(
{"15_out_mean": "mean"}).reset_index(level=0, drop=True)
return data.fillna(0)
def make_features16(self, data):
"""
Feature set 8 plus RCT, a physics-inspired volume estimate and derived radius/pressure terms (rt, pt).

Args:
    data: dataframe with breath_id, time_step, u_in, u_out, R, C columns

Returns:
    dataframe with engineered features, NaN and +/-inf replaced with 0
"""
data['RCT'] = data['R'] * data['time_step'] / data['C']
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['area'] = data['time_step'] * data['u_in']
data['area'] = data.groupby('breath_id')['area'].cumsum()
data['time_step_cumsum'] = data.groupby(['breath_id'])['time_step'].cumsum()
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['exponent'] = (-1.0 * data['time_step']) / (data['R'] * data['C'])
data['factor'] = np.exp(data['exponent'])
data['vf'] = (data['u_in_cumsum'] * data['R']) / data['factor']
data['vt'] = 0
data.loc[data['time_step'] != 0, 'vt'] = data['area'] / (
data['C'] * (-1.0 * data['factor'] + 1.0))
data['v'] = data['vf'] + data['vt']
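# rt is the radius of a sphere of volume v (r = (3V / (4 * pi)) ** (1/3)) and pt is a
# pressure-like term derived from it (author's heuristic). Note the bare `pi` here assumes
# `from math import pi`; the other feature functions use math.pi explicitly.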
data['rt'] = (3 * data['v'] / (4 * pi)) ** (1 / 3)
data['pt'] = ((1 - (data['rt'] / 30) ** 6) / (data['rt'] * (30 ** 2)))
data['max_u_in_diff'] = data['u_in'] - data.groupby(['breath_id'])['u_in'].max()
data['max_u_in_diff_u_out_0'] = data['u_in'] - data.loc[data['u_out'] == 0].groupby(['breath_id'])['u_in'].max()
data['last_u_in_diff'] = data['u_in'] - data.groupby(['breath_id'])['u_in'].last()
data['last_u_in_diff_u_out_0'] = data['u_in'] - data.loc[data['u_out'] == 0].groupby(['breath_id'])['u_in'].last()
data['count_till_max_u_in'] = data['count'] - data.groupby(['breath_id'])['u_in'].idxmax()
data['count_till_max_u_in_out_0'] = data['count'] - data.loc[data['u_out'] == 0].groupby(['breath_id'])['u_in'].idxmax()
data['count_till_last_u_out_0'] = data['count'] - data.loc[data['u_out'] == 0].groupby(['breath_id'])['count'].last()
data['time_step_till_max_u_in'] = data['time_step'] - data.loc[data.groupby(['breath_id'])['u_in'].idxmax(), 'time_step']
data['time_step_till_max_u_in_out_0'] = data['time_step'] - data.loc[data.loc[data['u_out'] == 0].groupby(['breath_id'])['u_in'].idxmax(), 'time_step']
data['time_step_till_last_u_out_0'] = data['time_step'] - data.loc[data['u_out'] == 0].groupby(['breath_id'])['time_step'].last()
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['u_in_lag1'] = data.groupby('breath_id')['u_in'].shift(1)
data['u_out_lag1'] = data.groupby('breath_id')['u_out'].shift(1)
data['u_in_lag_back1'] = data.groupby('breath_id')['u_in'].shift(-1)
data['u_out_lag_back1'] = data.groupby('breath_id')['u_out'].shift(-1)
data['u_in_lag2'] = data.groupby('breath_id')['u_in'].shift(2)
data['u_out_lag2'] = data.groupby('breath_id')['u_out'].shift(2)
data['u_in_lag_back2'] = data.groupby('breath_id')['u_in'].shift(-2)
data['u_out_lag_back2'] = data.groupby('breath_id')['u_out'].shift(-2)
data['u_in_lag3'] = data.groupby('breath_id')['u_in'].shift(3)
data['u_out_lag3'] = data.groupby('breath_id')['u_out'].shift(3)
data['u_in_lag_back3'] = data.groupby('breath_id')['u_in'].shift(-3)
data['u_out_lag_back3'] = data.groupby('breath_id')['u_out'].shift(-3)
data['u_in_lag4'] = data.groupby('breath_id')['u_in'].shift(4)
data['u_out_lag4'] = data.groupby('breath_id')['u_out'].shift(4)
data['u_in_lag_back4'] = data.groupby('breath_id')['u_in'].shift(-4)
data['u_out_lag_back4'] = data.groupby('breath_id')['u_out'].shift(-4)
data = data.fillna(0)
data['breath_id__u_in__max'] = data.groupby(['breath_id'])['u_in'].transform('max')
data['breath_id__u_out__max'] = data.groupby(['breath_id'])['u_out'].transform('max')
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['u_in_diff3'] = data['u_in'] - data['u_in_lag3']
data['u_out_diff3'] = data['u_out'] - data['u_out_lag3']
data['u_in_diff4'] = data['u_in'] - data['u_in_lag4']
data['u_out_diff4'] = data['u_out'] - data['u_out_lag4']
data['cross'] = data['u_in'] * data['u_out']
data['cross2'] = data['time_step'] * data['u_out']
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_cummean'] = data['u_in_cumsum'] / data['count']
data['breath_id_lag'] = data['breath_id'].shift(1).fillna(0)
data['breath_id_lag2'] = data['breath_id'].shift(2).fillna(0)
data['breath_id_lagsame'] = np.select([data['breath_id_lag'] == data['breath_id']], [1], 0)
data['breath_id_lag2same'] = np.select([data['breath_id_lag2'] == data['breath_id']], [1], 0)
data['breath_id__u_in_lag'] = data['u_in'].shift(1).fillna(0)
data['breath_id__u_in_lag'] = data['breath_id__u_in_lag'] * data['breath_id_lagsame']
data['breath_id__u_in_lag2'] = data['u_in'].shift(2).fillna(0)
data['breath_id__u_in_lag2'] = data['breath_id__u_in_lag2'] * data['breath_id_lag2same']
c_dic = {10: 0, 20: 1, 50: 2}
r_dic = {5: 0, 20: 1, 50: 2}
rc_sum_dic = {v: i for i, v in enumerate([15, 25, 30, 40, 55, 60, 70, 100])}
rc_dot_dic = {v: i for i, v in enumerate([50, 100, 200, 250, 400, 500, 2500, 1000])}
data['C_cate'] = data['C'].map(c_dic)
data['R_cate'] = data['R'].map(r_dic)
data['RC_sum'] = (data['R'] + data['C']).map(rc_sum_dic)
data['RC_dot'] = (data['R'] * data['C']).map(rc_dot_dic)
data['R_sum_c'] = (data['R'] + data['C']).astype(str)
data['R_mult_c'] = (data['R'] * data['C']).astype(str)
data['R'] = data['R'].astype(str)
data['C'] = data['C'].astype(str)
data['R__C'] = data["R"].astype(str) + '__' + data["C"].astype(str)
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_RC_diff'] = data['u_in_diff1'] - data.groupby(['R__C', 'count'])['u_in_diff1'].transform('mean')
data['u_in_RC_diff_v2'] = data['u_in_diff1'] - data.groupby(['R__C'])['u_in_diff1'].transform('mean')
data = pd.get_dummies(data)
data['u_in_lagback_diff1'] = data['u_in'] - data['u_in_lag_back1']
data['u_out_lagback_diff1'] = data['u_out'] - data['u_out_lag_back1']
data['u_in_lagback_diff2'] = data['u_in'] - data['u_in_lag_back2']
data['u_out_lagback_diff2'] = data['u_out'] - data['u_out_lag_back2']
data['u_in_lagback_diff3'] = data['u_in'] - data['u_in_lag_back3']
data['u_out_lagback_diff3'] = data['u_out'] - data['u_out_lag_back3']
data['u_in_lagback_diff4'] = data['u_in'] - data['u_in_lag_back4']
data['u_out_lagback_diff4'] = data['u_out'] - data['u_out_lag_back4']
######
data['u_in_lag_back10'] = data.groupby('breath_id')['u_in'].shift(-10)
data['u_out_lag_back10'] = data.groupby('breath_id')['u_out'].shift(-10)
data['u_in_lagback_diff10'] = data['u_in'] - data['u_in_lag_back10']
data['u_out_lagback_diff10'] = data['u_out'] - data['u_out_lag_back10']
data['time_step_diff'] = data['time_step'] - data.groupby('breath_id')['time_step'].shift().fillna(0)
data[["15_out_sum", "15_out_min", "15_out_max", "15_out_mean", "15_out_std"]] = (data \
.groupby('breath_id')['u_out'] \
.rolling(window=15, min_periods=1) \
.agg({"15_out_sum": "sum",
"15_out_min": "min",
"15_out_max": "max",
"15_out_mean": "mean",
"15_out_std": "std"
}).reset_index(level=0, drop=True))
for window in [2, 4, 5, 10, 15, 20, 30, 45]:
data[[f"{window}_in_sum", f"{window}_in_min", f"{window}_in_max",
f"{window}_in_mean", f"{window}_in_std"]] = (data.groupby('breath_id')['u_in'].rolling(window=window,
min_periods=1) \
.agg({f"{window}_in_sum": "sum",
f"{window}_in_min": "min",
f"{window}_in_max": "max",
f"{window}_in_mean": "mean",
f"{window}_in_std": "std"
}).reset_index(level=0,
drop=True))
for halflife in [5, 9, 10, 15, 20]:
data[f'ewm_u_in_mean_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).mean().reset_index(level=0,
drop=True)
data[f'ewm_u_in_std_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).std().reset_index(level=0, drop=True)
data[f'ewm_u_in_corr_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).corr().reset_index(level=0,
drop=True)
data['expand_mean'] = data.groupby('breath_id')['u_in'].expanding(2).mean().reset_index(level=0, drop=True)
data['expand_max'] = data.groupby('breath_id')['u_in'].expanding(2).max().reset_index(level=0, drop=True)
data['expand_std'] = data.groupby('breath_id')['u_in'].expanding(2).std().reset_index(level=0, drop=True)
return data.replace((np.inf, -np.inf, np.nan), 0)
def make_features17(self, data):
"""
Feature set 8 plus RCT and a physics-inspired pressure term (pt) computed with time_step scaled by 1000.

Args:
    data: dataframe with breath_id, time_step, u_in, u_out, R, C columns

Returns:
    dataframe with engineered features, NaN and +/-inf replaced with 0
"""
data['RCT'] = data['R'] * data['time_step'] / data['C']
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['area'] = data['time_step'] * data['u_in']
data['area'] = data.groupby('breath_id')['area'].cumsum()
data['time_step_cumsum'] = data.groupby(['breath_id'])['time_step'].cumsum()
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
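# Same exponential volume model as make_features16, but time_step is multiplied by 1000
# (presumably converting seconds to milliseconds) before dividing by R*C.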
data['exponent'] = (-1.0 * data['time_step'] * 1000) / (data['R'] * data['C'])
data['factor'] = np.exp(data['exponent'])
data['vf'] = (data['u_in_cumsum'] * data['R']) / data['factor']
data['vt'] = 0
data.loc[data['time_step'] != 0, 'vt'] = data['area'] / (
data['C'] * (-1.0 * data['factor'] + 1.0))
data['v'] = data['vf'] + data['vt']
data['v0'] = data.groupby(['breath_id'])['v'].transform('first')
data['pt'] = (1 - (data['v'] / data['v0']) ** 2) * (1 / (3 * (data['v'] / (4 * math.pi)) ** (1 / 3))) * (
3 * data['v0'] / (4 * math.pi)) ** (2 / 3)
data = data.drop(['exponent', 'factor', 'vf', 'vt', 'v', 'v0'], axis=1)
data['max_u_in_diff'] = data['u_in'] - data.groupby(['breath_id'])['u_in'].max()
data['max_u_in_diff_u_out_0'] = data['u_in'] - data.loc[data['u_out'] == 0].groupby(['breath_id'])['u_in'].max()
data['last_u_in_diff'] = data['u_in'] - data.groupby(['breath_id'])['u_in'].last()
data['last_u_in_diff_u_out_0'] = data['u_in'] - data.loc[data['u_out'] == 0].groupby(['breath_id'])['u_in'].last()
data['count_till_max_u_in'] = data['count'] - data.groupby(['breath_id'])['u_in'].idxmax()
data['count_till_max_u_in_out_0'] = data['count'] - data.loc[data['u_out'] == 0].groupby(['breath_id'])['u_in'].idxmax()
data['count_till_last_u_out_0'] = data['count'] - data.loc[data['u_out'] == 0].groupby(['breath_id'])['count'].last()
data['time_step_till_max_u_in'] = data['time_step'] - data.loc[data.groupby(['breath_id'])['u_in'].idxmax(), 'time_step']
data['time_step_till_max_u_in_out_0'] = data['time_step'] - data.loc[data.loc[data['u_out'] == 0].groupby(['breath_id'])['u_in'].idxmax(), 'time_step']
data['time_step_till_last_u_out_0'] = data['time_step'] - data.loc[data['u_out'] == 0].groupby(['breath_id'])['time_step'].last()
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['u_in_lag1'] = data.groupby('breath_id')['u_in'].shift(1)
data['u_out_lag1'] = data.groupby('breath_id')['u_out'].shift(1)
data['u_in_lag_back1'] = data.groupby('breath_id')['u_in'].shift(-1)
data['u_out_lag_back1'] = data.groupby('breath_id')['u_out'].shift(-1)
data['u_in_lag2'] = data.groupby('breath_id')['u_in'].shift(2)
data['u_out_lag2'] = data.groupby('breath_id')['u_out'].shift(2)
data['u_in_lag_back2'] = data.groupby('breath_id')['u_in'].shift(-2)
data['u_out_lag_back2'] = data.groupby('breath_id')['u_out'].shift(-2)
data['u_in_lag3'] = data.groupby('breath_id')['u_in'].shift(3)
data['u_out_lag3'] = data.groupby('breath_id')['u_out'].shift(3)
data['u_in_lag_back3'] = data.groupby('breath_id')['u_in'].shift(-3)
data['u_out_lag_back3'] = data.groupby('breath_id')['u_out'].shift(-3)
data['u_in_lag4'] = data.groupby('breath_id')['u_in'].shift(4)
data['u_out_lag4'] = data.groupby('breath_id')['u_out'].shift(4)
data['u_in_lag_back4'] = data.groupby('breath_id')['u_in'].shift(-4)
data['u_out_lag_back4'] = data.groupby('breath_id')['u_out'].shift(-4)
data = data.fillna(0)
data['breath_id__u_in__max'] = data.groupby(['breath_id'])['u_in'].transform('max')
data['breath_id__u_out__max'] = data.groupby(['breath_id'])['u_out'].transform('max')
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['u_in_diff3'] = data['u_in'] - data['u_in_lag3']
data['u_out_diff3'] = data['u_out'] - data['u_out_lag3']
data['u_in_diff4'] = data['u_in'] - data['u_in_lag4']
data['u_out_diff4'] = data['u_out'] - data['u_out_lag4']
data['cross'] = data['u_in'] * data['u_out']
data['cross2'] = data['time_step'] * data['u_out']
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_cummean'] = data['u_in_cumsum'] / data['count']
data['breath_id_lag'] = data['breath_id'].shift(1).fillna(0)
data['breath_id_lag2'] = data['breath_id'].shift(2).fillna(0)
data['breath_id_lagsame'] = np.select([data['breath_id_lag'] == data['breath_id']], [1], 0)
data['breath_id_lag2same'] = np.select([data['breath_id_lag2'] == data['breath_id']], [1], 0)
data['breath_id__u_in_lag'] = data['u_in'].shift(1).fillna(0)
data['breath_id__u_in_lag'] = data['breath_id__u_in_lag'] * data['breath_id_lagsame']
data['breath_id__u_in_lag2'] = data['u_in'].shift(2).fillna(0)
data['breath_id__u_in_lag2'] = data['breath_id__u_in_lag2'] * data['breath_id_lag2same']
c_dic = {10: 0, 20: 1, 50: 2}
r_dic = {5: 0, 20: 1, 50: 2}
rc_sum_dic = {v: i for i, v in enumerate([15, 25, 30, 40, 55, 60, 70, 100])}
rc_dot_dic = {v: i for i, v in enumerate([50, 100, 200, 250, 400, 500, 2500, 1000])}
data['C_cate'] = data['C'].map(c_dic)
data['R_cate'] = data['R'].map(r_dic)
data['RC_sum'] = (data['R'] + data['C']).map(rc_sum_dic)
data['RC_dot'] = (data['R'] * data['C']).map(rc_dot_dic)
data['R_sum_c'] = (data['R'] + data['C']).astype(str)
data['R_mult_c'] = (data['R'] * data['C']).astype(str)
data['R'] = data['R'].astype(str)
data['C'] = data['C'].astype(str)
data['R__C'] = data["R"].astype(str) + '__' + data["C"].astype(str)
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_RC_diff'] = data['u_in_diff1'] - data.groupby(['R__C', 'count'])['u_in_diff1'].transform('mean')
data['u_in_RC_diff_v2'] = data['u_in_diff1'] - data.groupby(['R__C'])['u_in_diff1'].transform('mean')
data = pd.get_dummies(data)
data['u_in_lagback_diff1'] = data['u_in'] - data['u_in_lag_back1']
data['u_out_lagback_diff1'] = data['u_out'] - data['u_out_lag_back1']
data['u_in_lagback_diff2'] = data['u_in'] - data['u_in_lag_back2']
data['u_out_lagback_diff2'] = data['u_out'] - data['u_out_lag_back2']
data['u_in_lagback_diff3'] = data['u_in'] - data['u_in_lag_back3']
data['u_out_lagback_diff3'] = data['u_out'] - data['u_out_lag_back3']
data['u_in_lagback_diff4'] = data['u_in'] - data['u_in_lag_back4']
data['u_out_lagback_diff4'] = data['u_out'] - data['u_out_lag_back4']
######
data['u_in_lag_back10'] = data.groupby('breath_id')['u_in'].shift(-10)
data['u_out_lag_back10'] = data.groupby('breath_id')['u_out'].shift(-10)
data['u_in_lagback_diff10'] = data['u_in'] - data['u_in_lag_back10']
data['u_out_lagback_diff10'] = data['u_out'] - data['u_out_lag_back10']
data['time_step_diff'] = data['time_step'] - data.groupby('breath_id')['time_step'].shift().fillna(0)
data[["15_out_sum", "15_out_min", "15_out_max", "15_out_mean", "15_out_std"]] = (data \
.groupby('breath_id')['u_out'] \
.rolling(window=15, min_periods=1) \
.agg({"15_out_sum": "sum",
"15_out_min": "min",
"15_out_max": "max",
"15_out_mean": "mean",
"15_out_std": "std"
}).reset_index(level=0, drop=True))
for window in [2, 4, 5, 10, 15, 20, 30, 45]:
data[[f"{window}_in_sum", f"{window}_in_min", f"{window}_in_max",
f"{window}_in_mean", f"{window}_in_std"]] = (data.groupby('breath_id')['u_in'].rolling(window=window,
min_periods=1) \
.agg({f"{window}_in_sum": "sum",
f"{window}_in_min": "min",
f"{window}_in_max": "max",
f"{window}_in_mean": "mean",
f"{window}_in_std": "std"
}).reset_index(level=0,
drop=True))
for halflife in [5, 9, 10, 15, 20]:
data[f'ewm_u_in_mean_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).mean().reset_index(level=0,
drop=True)
data[f'ewm_u_in_std_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).std().reset_index(level=0, drop=True)
data[f'ewm_u_in_corr_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).corr().reset_index(level=0,
drop=True)
data['expand_mean'] = data.groupby('breath_id')['u_in'].expanding(2).mean().reset_index(level=0, drop=True)
data['expand_max'] = data.groupby('breath_id')['u_in'].expanding(2).max().reset_index(level=0, drop=True)
data['expand_std'] = data.groupby('breath_id')['u_in'].expanding(2).std().reset_index(level=0, drop=True)
return data.fillna(0).replace((np.inf, -np.inf, np.nan), 0)
def make_features171(self, data):
"""
Same as make_features17 but without the RCT feature.

Args:
    data: dataframe with breath_id, time_step, u_in, u_out, R, C columns

Returns:
    dataframe with engineered features, NaN and +/-inf replaced with 0
"""
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['area'] = data['time_step'] * data['u_in']
data['area'] = data.groupby('breath_id')['area'].cumsum()
data['time_step_cumsum'] = data.groupby(['breath_id'])['time_step'].cumsum()
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['exponent'] = (-1.0 * data['time_step'] * 1000) / (data['R'] * data['C'])
data['factor'] = np.exp(data['exponent'])
data['vf'] = (data['u_in_cumsum'] * data['R']) / data['factor']
data['vt'] = 0
data.loc[data['time_step'] != 0, 'vt'] = data['area'] / (
data['C'] * (-1.0 * data['factor'] + 1.0))
data['v'] = data['vf'] + data['vt']
data['v0'] = data.groupby(['breath_id'])['v'].transform('first')
data['pt'] = (1 - (data['v'] / data['v0']) ** 2) * (1 / (3 * (data['v'] / (4 * math.pi)) ** (1 / 3))) * (
3 * data['v0'] / (4 * math.pi)) ** (2 / 3)
data = data.drop(['exponent', 'factor', 'vf', 'vt', 'v', 'v0'], axis=1)
data['max_u_in_diff'] = data['u_in'] - data.groupby(['breath_id'])['u_in'].max()
data['max_u_in_diff_u_out_0'] = data['u_in'] - data.loc[data['u_out'] == 0].groupby(['breath_id'])['u_in'].max()
data['last_u_in_diff'] = data['u_in'] - data.groupby(['breath_id'])['u_in'].last()
data['last_u_in_diff_u_out_0'] = data['u_in'] - data.loc[data['u_out'] == 0].groupby(['breath_id'])['u_in'].last()
data['count_till_max_u_in'] = data['count'] - data.groupby(['breath_id'])['u_in'].idxmax()
data['count_till_max_u_in_out_0'] = data['count'] - data.loc[data['u_out'] == 0].groupby(['breath_id'])['u_in'].idxmax()
data['count_till_last_u_out_0'] = data['count'] - data.loc[data['u_out'] == 0].groupby(['breath_id'])['count'].last()
data['time_step_till_max_u_in'] = data['time_step'] - data.loc[data.groupby(['breath_id'])['u_in'].idxmax(), 'time_step']
data['time_step_till_max_u_in_out_0'] = data['time_step'] - data.loc[data.loc[data['u_out'] == 0].groupby(['breath_id'])['u_in'].idxmax(), 'time_step']
data['time_step_till_last_u_out_0'] = data['time_step'] - data.loc[data['u_out'] == 0].groupby(['breath_id'])['time_step'].last()
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['u_in_lag1'] = data.groupby('breath_id')['u_in'].shift(1)
data['u_out_lag1'] = data.groupby('breath_id')['u_out'].shift(1)
data['u_in_lag_back1'] = data.groupby('breath_id')['u_in'].shift(-1)
data['u_out_lag_back1'] = data.groupby('breath_id')['u_out'].shift(-1)
data['u_in_lag2'] = data.groupby('breath_id')['u_in'].shift(2)
data['u_out_lag2'] = data.groupby('breath_id')['u_out'].shift(2)
data['u_in_lag_back2'] = data.groupby('breath_id')['u_in'].shift(-2)
data['u_out_lag_back2'] = data.groupby('breath_id')['u_out'].shift(-2)
data['u_in_lag3'] = data.groupby('breath_id')['u_in'].shift(3)
data['u_out_lag3'] = data.groupby('breath_id')['u_out'].shift(3)
data['u_in_lag_back3'] = data.groupby('breath_id')['u_in'].shift(-3)
data['u_out_lag_back3'] = data.groupby('breath_id')['u_out'].shift(-3)
data['u_in_lag4'] = data.groupby('breath_id')['u_in'].shift(4)
data['u_out_lag4'] = data.groupby('breath_id')['u_out'].shift(4)
data['u_in_lag_back4'] = data.groupby('breath_id')['u_in'].shift(-4)
data['u_out_lag_back4'] = data.groupby('breath_id')['u_out'].shift(-4)
data = data.fillna(0)
data['breath_id__u_in__max'] = data.groupby(['breath_id'])['u_in'].transform('max')
data['breath_id__u_out__max'] = data.groupby(['breath_id'])['u_out'].transform('max')
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['u_in_diff3'] = data['u_in'] - data['u_in_lag3']
data['u_out_diff3'] = data['u_out'] - data['u_out_lag3']
data['u_in_diff4'] = data['u_in'] - data['u_in_lag4']
data['u_out_diff4'] = data['u_out'] - data['u_out_lag4']
data['cross'] = data['u_in'] * data['u_out']
data['cross2'] = data['time_step'] * data['u_out']
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_cummean'] = data['u_in_cumsum'] / data['count']
data['breath_id_lag'] = data['breath_id'].shift(1).fillna(0)
data['breath_id_lag2'] = data['breath_id'].shift(2).fillna(0)
data['breath_id_lagsame'] = np.select([data['breath_id_lag'] == data['breath_id']], [1], 0)
data['breath_id_lag2same'] = np.select([data['breath_id_lag2'] == data['breath_id']], [1], 0)
data['breath_id__u_in_lag'] = data['u_in'].shift(1).fillna(0)
data['breath_id__u_in_lag'] = data['breath_id__u_in_lag'] * data['breath_id_lagsame']
data['breath_id__u_in_lag2'] = data['u_in'].shift(2).fillna(0)
data['breath_id__u_in_lag2'] = data['breath_id__u_in_lag2'] * data['breath_id_lag2same']
c_dic = {10: 0, 20: 1, 50: 2}
r_dic = {5: 0, 20: 1, 50: 2}
rc_sum_dic = {v: i for i, v in enumerate([15, 25, 30, 40, 55, 60, 70, 100])}
rc_dot_dic = {v: i for i, v in enumerate([50, 100, 200, 250, 400, 500, 2500, 1000])}
data['C_cate'] = data['C'].map(c_dic)
data['R_cate'] = data['R'].map(r_dic)
data['RC_sum'] = (data['R'] + data['C']).map(rc_sum_dic)
data['RC_dot'] = (data['R'] * data['C']).map(rc_dot_dic)
data['R_sum_c'] = (data['R'] + data['C']).astype(str)
data['R_mult_c'] = (data['R'] * data['C']).astype(str)
data['R'] = data['R'].astype(str)
data['C'] = data['C'].astype(str)
data['R__C'] = data["R"].astype(str) + '__' + data["C"].astype(str)
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_RC_diff'] = data['u_in_diff1'] - data.groupby(['R__C', 'count'])['u_in_diff1'].transform('mean')
data['u_in_RC_diff_v2'] = data['u_in_diff1'] - data.groupby(['R__C'])['u_in_diff1'].transform('mean')
data = pd.get_dummies(data)
data['u_in_lagback_diff1'] = data['u_in'] - data['u_in_lag_back1']
data['u_out_lagback_diff1'] = data['u_out'] - data['u_out_lag_back1']
data['u_in_lagback_diff2'] = data['u_in'] - data['u_in_lag_back2']
data['u_out_lagback_diff2'] = data['u_out'] - data['u_out_lag_back2']
data['u_in_lagback_diff3'] = data['u_in'] - data['u_in_lag_back3']
data['u_out_lagback_diff3'] = data['u_out'] - data['u_out_lag_back3']
data['u_in_lagback_diff4'] = data['u_in'] - data['u_in_lag_back4']
data['u_out_lagback_diff4'] = data['u_out'] - data['u_out_lag_back4']
######
data['u_in_lag_back10'] = data.groupby('breath_id')['u_in'].shift(-10)
data['u_out_lag_back10'] = data.groupby('breath_id')['u_out'].shift(-10)
data['u_in_lagback_diff10'] = data['u_in'] - data['u_in_lag_back10']
data['u_out_lagback_diff10'] = data['u_out'] - data['u_out_lag_back10']
data['time_step_diff'] = data['time_step'] - data.groupby('breath_id')['time_step'].shift().fillna(0)
data[["15_out_sum", "15_out_min", "15_out_max", "15_out_mean", "15_out_std"]] = (data \
.groupby('breath_id')['u_out'] \
.rolling(window=15, min_periods=1) \
.agg({"15_out_sum": "sum",
"15_out_min": "min",
"15_out_max": "max",
"15_out_mean": "mean",
"15_out_std": "std"
}).reset_index(level=0, drop=True))
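# Per-breath rolling statistics of u_in over several window sizes; min_periods=1 keeps
# the first rows of each breath defined instead of leaving them NaN.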
for window in [2, 4, 5, 10, 15, 20, 30, 45]:
data[[f"{window}_in_sum", f"{window}_in_min", f"{window}_in_max",
f"{window}_in_mean", f"{window}_in_std"]] = (data.groupby('breath_id')['u_in'].rolling(window=window,
min_periods=1) \
.agg({f"{window}_in_sum": "sum",
f"{window}_in_min": "min",
f"{window}_in_max": "max",
f"{window}_in_mean": "mean",
f"{window}_in_std": "std"
}).reset_index(level=0,
drop=True))
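# Exponentially weighted mean/std/corr of u_in per breath for several halflives;
# any remaining NaNs are filled with 0 when the function returns.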
for halflife in [5, 9, 10, 15, 20]:
data[f'ewm_u_in_mean_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).mean().reset_index(level=0,
drop=True)
data[f'ewm_u_in_std_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).std().reset_index(level=0, drop=True)
data[f'ewm_u_in_corr_{halflife}'] = data.groupby('breath_id')['u_in'].ewm(halflife=halflife).corr().reset_index(level=0,
drop=True)
data['expand_mean'] = data.groupby('breath_id')['u_in'].expanding(2).mean().reset_index(level=0, drop=True)
data['expand_max'] = data.groupby('breath_id')['u_in'].expanding(2).max().reset_index(level=0, drop=True)
data['expand_std'] = data.groupby('breath_id')['u_in'].expanding(2).std().reset_index(level=0, drop=True)
    return data.fillna(0).replace((np.inf, -np.inf, np.nan), 0)
def make_features18(self, data):
"""
3 and volume
Args:
data:
Returns:
"""
data['RCT'] = data['R'] * data['time_step'] / data['C']
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['area'] = data['time_step'] * data['u_in']
data['area'] = data.groupby('breath_id')['area'].cumsum()
data['time_step_cumsum'] = data.groupby(['breath_id'])['time_step'].cumsum()
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
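# First-order RC model of delivered volume (assumed interpretation of the formulas
# below): factor = exp(-1000 * time_step / (R * C)) is a decay term, vf/vt are flow-
# and area-based volume estimates, and pt is a pressure-like term computed from the
# ratio of the running volume v to the breath's first volume v0.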
data['exponent'] = (-1.0 * data['time_step'] * 1000) / (data['R'] * data['C'])
data['factor'] = np.exp(data['exponent'])
data['vf'] = (data['u_in_cumsum'] * data['R']) / data['factor']
data['vt'] = 0
data.loc[data['time_step'] != 0, 'vt'] = data['area'] / (
data['C'] * (-1.0 * data['factor'] + 1.0))
data['v'] = data['vf'] + data['vt']
data['v0'] = data.groupby(['breath_id'])['v'].transform('first')
data['pt'] = (1 - (data['v'] / data['v0']) ** 2) * (1 / (3 * (data['v'] / (4 * math.pi)) ** (1 / 3))) * (
3 * data['v0'] / (4 * math.pi)) ** (2 / 3)
data = data.drop(['exponent', 'factor', 'vf', 'vt', 'v', 'v0'], axis=1)
data['u_in_lag1'] = data.groupby('breath_id')['u_in'].shift(1)
data['u_out_lag1'] = data.groupby('breath_id')['u_out'].shift(1)
data['u_in_lag_back1'] = data.groupby('breath_id')['u_in'].shift(-1)
data['u_out_lag_back1'] = data.groupby('breath_id')['u_out'].shift(-1)
data['u_in_lag2'] = data.groupby('breath_id')['u_in'].shift(2)
data['u_out_lag2'] = data.groupby('breath_id')['u_out'].shift(2)
data['u_in_lag_back2'] = data.groupby('breath_id')['u_in'].shift(-2)
data['u_out_lag_back2'] = data.groupby('breath_id')['u_out'].shift(-2)
data['u_in_lag3'] = data.groupby('breath_id')['u_in'].shift(3)
data['u_out_lag3'] = data.groupby('breath_id')['u_out'].shift(3)
data['u_in_lag_back3'] = data.groupby('breath_id')['u_in'].shift(-3)
data['u_out_lag_back3'] = data.groupby('breath_id')['u_out'].shift(-3)
data['u_in_lag4'] = data.groupby('breath_id')['u_in'].shift(4)
data['u_out_lag4'] = data.groupby('breath_id')['u_out'].shift(4)
data['u_in_lag_back4'] = data.groupby('breath_id')['u_in'].shift(-4)
data['u_out_lag_back4'] = data.groupby('breath_id')['u_out'].shift(-4)
data = data.fillna(0)
data['breath_id__u_in__max'] = data.groupby(['breath_id'])['u_in'].transform('max')
data['breath_id__u_out__max'] = data.groupby(['breath_id'])['u_out'].transform('max')
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['u_in_diff3'] = data['u_in'] - data['u_in_lag3']
data['u_out_diff3'] = data['u_out'] - data['u_out_lag3']
data['u_in_diff4'] = data['u_in'] - data['u_in_lag4']
data['u_out_diff4'] = data['u_out'] - data['u_out_lag4']
data['cross'] = data['u_in'] * data['u_out']
data['cross2'] = data['time_step'] * data['u_out']
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_cummean'] = data['u_in_cumsum'] / data['count']
data['breath_id_lag'] = data['breath_id'].shift(1).fillna(0)
data['breath_id_lag2'] = data['breath_id'].shift(2).fillna(0)
data['breath_id_lagsame'] = np.select([data['breath_id_lag'] == data['breath_id']], [1], 0)
data['breath_id_lag2same'] = np.select([data['breath_id_lag2'] == data['breath_id']], [1], 0)
data['breath_id__u_in_lag'] = data['u_in'].shift(1).fillna(0)
data['breath_id__u_in_lag'] = data['breath_id__u_in_lag'] * data['breath_id_lagsame']
data['breath_id__u_in_lag2'] = data['u_in'].shift(2).fillna(0)
data['breath_id__u_in_lag2'] = data['breath_id__u_in_lag2'] * data['breath_id_lag2same']
data['R_sum_c'] = (data['R'] + data['C']).astype(str)
data['R_mult_c'] = (data['R'] * data['C']).astype(str)
data['R'] = data['R'].astype(str)
data['C'] = data['C'].astype(str)
data['R__C'] = data["R"].astype(str) + '__' + data["C"].astype(str)
data = pd.get_dummies(data)
data[["15_in_sum", "15_in_min", "15_in_max", "15_in_mean"]] = (data \
.groupby('breath_id')['u_in'] \
.rolling(window=15, min_periods=1) \
.agg({"15_in_sum": "sum",
"15_in_min": "min",
"15_in_max": "max",
"15_in_mean": "mean"
# "15_in_std":"std"
}) \
.reset_index(level=0, drop=True))
data['u_in_lagback_diff1'] = data['u_in'] - data['u_in_lag_back1']
data['u_out_lagback_diff1'] = data['u_out'] - data['u_out_lag_back1']
data['u_in_lagback_diff2'] = data['u_in'] - data['u_in_lag_back2']
data['u_out_lagback_diff2'] = data['u_out'] - data['u_out_lag_back2']
data['ewm_u_in_mean'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).mean().reset_index(level=0,
drop=True)
data['ewm_u_in_std'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).std().reset_index(level=0, drop=True)
data['ewm_u_in_corr'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).corr().reset_index(level=0,
drop=True)
data['rolling_10_mean'] = data.groupby('breath_id')['u_in'].rolling(window=10,
min_periods=1).mean().reset_index(
level=0, drop=True)
data['rolling_10_max'] = data.groupby('breath_id')['u_in'].rolling(window=10, min_periods=1).max().reset_index(
level=0,
drop=True)
data['rolling_10_std'] = data.groupby('breath_id')['u_in'].rolling(window=10, min_periods=1).std().reset_index(
level=0,
drop=True)
data['expand_mean'] = data.groupby('breath_id')['u_in'].expanding(2).mean().reset_index(level=0, drop=True)
data['expand_max'] = data.groupby('breath_id')['u_in'].expanding(2).max().reset_index(level=0, drop=True)
data['expand_std'] = data.groupby('breath_id')['u_in'].expanding(2).std().reset_index(level=0, drop=True)
data["u_in_rolling_mean2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).mean()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_mean4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).mean()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_mean10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).mean()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_max2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).max()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_max4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).max()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_max10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).max()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_min2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).min()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_min4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).min()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_min10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).min()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_std2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).std()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_std4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).std()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_std10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).std()[
"u_in"].reset_index(drop=True)
g = data.groupby('breath_id')['u_in']
data['ewm_u_in_mean'] = g.ewm(halflife=10).mean() \
.reset_index(level=0, drop=True)
data['ewm_u_in_std'] = g.ewm(halflife=10).std() \
.reset_index(level=0, drop=True)
data['ewm_u_in_corr'] = g.ewm(halflife=10).corr() \
.reset_index(level=0, drop=True)
data['rolling_10_mean'] = g.rolling(window=10, min_periods=1).mean() \
.reset_index(level=0, drop=True)
data['rolling_10_max'] = g.rolling(window=10, min_periods=1).max() \
.reset_index(level=0, drop=True)
data['rolling_10_std'] = g.rolling(window=10, min_periods=1).std() \
.reset_index(level=0, drop=True)
data['expand_mean'] = g.expanding(2).mean() \
.reset_index(level=0, drop=True)
data['expand_max'] = g.expanding(2).max() \
.reset_index(level=0, drop=True)
data['expand_std'] = g.expanding(2).std() \
.reset_index(level=0, drop=True)
data['u_in_lag_back10'] = data.groupby('breath_id')['u_in'].shift(-10)
data['u_out_lag_back10'] = data.groupby('breath_id')['u_out'].shift(-10)
data['time_step_diff'] = data.groupby('breath_id')['time_step'].diff().fillna(0)
### rolling window ts feats
data['ewm_u_in_mean'] = data.groupby('breath_id')['u_in'].ewm(halflife=9).mean().reset_index(level=0,
drop=True)
data['ewm_u_in_std'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).std().reset_index(level=0,
drop=True) ## could add covar?
data['ewm_u_in_corr'] = data.groupby('breath_id')['u_in'].ewm(halflife=15).corr().reset_index(level=0,
drop=True) # self umin corr
# data['ewm_u_in_corr'] = data.groupby('breath_id')['u_in'].ewm(halflife=6).corr(data.groupby('breath_id')["u_out"]).reset_index(level=0,drop=True) # corr with u_out # error
## rolling window of 15 periods
data[["15_in_sum", "15_in_min", "15_in_max", "15_in_mean", "15_out_std"]] = data.groupby('breath_id')[
'u_in'].rolling(window=15, min_periods=1).agg(
{"15_in_sum": "sum", "15_in_min": "min", "15_in_max": "max", "15_in_mean": "mean",
"15_in_std": "std"}).reset_index(level=0, drop=True)
# data[["45_in_sum","45_in_min","45_in_max","45_in_mean","45_out_std"]] = data.groupby('breath_id')['u_in'].rolling(window=45,min_periods=1).agg({"45_in_sum":"sum","45_in_min":"min","45_in_max":"max","45_in_mean":"mean","45_in_std":"std"}).reset_index(level=0,drop=True)
data[["45_in_sum", "45_in_min", "45_in_max", "45_in_mean", "45_out_std"]] = data.groupby('breath_id')[
'u_in'].rolling(window=45, min_periods=1).agg(
{"45_in_sum": "sum", "45_in_min": "min", "45_in_max": "max", "45_in_mean": "mean",
"45_in_std": "std"}).reset_index(level=0, drop=True)
data[["15_out_mean"]] = data.groupby('breath_id')['u_out'].rolling(window=15, min_periods=1).agg(
{"15_out_mean": "mean"}).reset_index(level=0, drop=True)
return data.fillna(0).replace((np.inf, -np.inf, np.nan), 0)
def make_features181(self, data):
"""
3 and volume. no rct
Args:
data:
Returns:
"""
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['area'] = data['time_step'] * data['u_in']
data['area'] = data.groupby('breath_id')['area'].cumsum()
data['time_step_cumsum'] = data.groupby(['breath_id'])['time_step'].cumsum()
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['exponent'] = (-1.0 * data['time_step'] * 1000) / (data['R'] * data['C'])
data['factor'] = np.exp(data['exponent'])
data['vf'] = (data['u_in_cumsum'] * data['R']) / data['factor']
data['vt'] = 0
data.loc[data['time_step'] != 0, 'vt'] = data['area'] / (
data['C'] * (-1.0 * data['factor'] + 1.0))
data['v'] = data['vf'] + data['vt']
data['v0'] = data.groupby(['breath_id'])['v'].transform('first')
data['pt'] = (1 - (data['v'] / data['v0']) ** 2) * (1 / (3 * (data['v'] / (4 * math.pi)) ** (1 / 3))) * (
3 * data['v0'] / (4 * math.pi)) ** (2 / 3)
data = data.drop(['exponent', 'factor', 'vf', 'vt', 'v', 'v0'], axis=1)
data['u_in_lag1'] = data.groupby('breath_id')['u_in'].shift(1)
data['u_out_lag1'] = data.groupby('breath_id')['u_out'].shift(1)
data['u_in_lag_back1'] = data.groupby('breath_id')['u_in'].shift(-1)
data['u_out_lag_back1'] = data.groupby('breath_id')['u_out'].shift(-1)
data['u_in_lag2'] = data.groupby('breath_id')['u_in'].shift(2)
data['u_out_lag2'] = data.groupby('breath_id')['u_out'].shift(2)
data['u_in_lag_back2'] = data.groupby('breath_id')['u_in'].shift(-2)
data['u_out_lag_back2'] = data.groupby('breath_id')['u_out'].shift(-2)
data['u_in_lag3'] = data.groupby('breath_id')['u_in'].shift(3)
data['u_out_lag3'] = data.groupby('breath_id')['u_out'].shift(3)
data['u_in_lag_back3'] = data.groupby('breath_id')['u_in'].shift(-3)
data['u_out_lag_back3'] = data.groupby('breath_id')['u_out'].shift(-3)
data['u_in_lag4'] = data.groupby('breath_id')['u_in'].shift(4)
data['u_out_lag4'] = data.groupby('breath_id')['u_out'].shift(4)
data['u_in_lag_back4'] = data.groupby('breath_id')['u_in'].shift(-4)
data['u_out_lag_back4'] = data.groupby('breath_id')['u_out'].shift(-4)
data = data.fillna(0)
data['breath_id__u_in__max'] = data.groupby(['breath_id'])['u_in'].transform('max')
data['breath_id__u_out__max'] = data.groupby(['breath_id'])['u_out'].transform('max')
data['u_in_diff1'] = data['u_in'] - data['u_in_lag1']
data['u_out_diff1'] = data['u_out'] - data['u_out_lag1']
data['u_in_diff2'] = data['u_in'] - data['u_in_lag2']
data['u_out_diff2'] = data['u_out'] - data['u_out_lag2']
data['breath_id__u_in__diffmax'] = data.groupby(['breath_id'])['u_in'].transform('max') - data['u_in']
data['breath_id__u_in__diffmean'] = data.groupby(['breath_id'])['u_in'].transform('mean') - data['u_in']
data['u_in_diff3'] = data['u_in'] - data['u_in_lag3']
data['u_out_diff3'] = data['u_out'] - data['u_out_lag3']
data['u_in_diff4'] = data['u_in'] - data['u_in_lag4']
data['u_out_diff4'] = data['u_out'] - data['u_out_lag4']
data['cross'] = data['u_in'] * data['u_out']
data['cross2'] = data['time_step'] * data['u_out']
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_cummean'] = data['u_in_cumsum'] / data['count']
data['breath_id_lag'] = data['breath_id'].shift(1).fillna(0)
data['breath_id_lag2'] = data['breath_id'].shift(2).fillna(0)
data['breath_id_lagsame'] = np.select([data['breath_id_lag'] == data['breath_id']], [1], 0)
data['breath_id_lag2same'] = np.select([data['breath_id_lag2'] == data['breath_id']], [1], 0)
data['breath_id__u_in_lag'] = data['u_in'].shift(1).fillna(0)
data['breath_id__u_in_lag'] = data['breath_id__u_in_lag'] * data['breath_id_lagsame']
data['breath_id__u_in_lag2'] = data['u_in'].shift(2).fillna(0)
data['breath_id__u_in_lag2'] = data['breath_id__u_in_lag2'] * data['breath_id_lag2same']
data['R_sum_c'] = (data['R'] + data['C']).astype(str)
data['R_mult_c'] = (data['R'] * data['C']).astype(str)
data['R'] = data['R'].astype(str)
data['C'] = data['C'].astype(str)
data['R__C'] = data["R"].astype(str) + '__' + data["C"].astype(str)
data = pd.get_dummies(data)
data[["15_in_sum", "15_in_min", "15_in_max", "15_in_mean"]] = (data \
.groupby('breath_id')['u_in'] \
.rolling(window=15, min_periods=1) \
.agg({"15_in_sum": "sum",
"15_in_min": "min",
"15_in_max": "max",
"15_in_mean": "mean"
# "15_in_std":"std"
}) \
.reset_index(level=0, drop=True))
data['u_in_lagback_diff1'] = data['u_in'] - data['u_in_lag_back1']
data['u_out_lagback_diff1'] = data['u_out'] - data['u_out_lag_back1']
data['u_in_lagback_diff2'] = data['u_in'] - data['u_in_lag_back2']
data['u_out_lagback_diff2'] = data['u_out'] - data['u_out_lag_back2']
data['ewm_u_in_mean'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).mean().reset_index(level=0,
drop=True)
data['ewm_u_in_std'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).std().reset_index(level=0, drop=True)
data['ewm_u_in_corr'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).corr().reset_index(level=0,
drop=True)
data['rolling_10_mean'] = data.groupby('breath_id')['u_in'].rolling(window=10,
min_periods=1).mean().reset_index(
level=0, drop=True)
data['rolling_10_max'] = data.groupby('breath_id')['u_in'].rolling(window=10, min_periods=1).max().reset_index(
level=0,
drop=True)
data['rolling_10_std'] = data.groupby('breath_id')['u_in'].rolling(window=10, min_periods=1).std().reset_index(
level=0,
drop=True)
data['expand_mean'] = data.groupby('breath_id')['u_in'].expanding(2).mean().reset_index(level=0, drop=True)
data['expand_max'] = data.groupby('breath_id')['u_in'].expanding(2).max().reset_index(level=0, drop=True)
data['expand_std'] = data.groupby('breath_id')['u_in'].expanding(2).std().reset_index(level=0, drop=True)
data["u_in_rolling_mean2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).mean()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_mean4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).mean()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_mean10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).mean()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_max2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).max()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_max4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).max()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_max10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).max()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_min2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).min()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_min4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).min()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_min10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).min()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_std2"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(2).std()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_std4"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(4).std()[
"u_in"].reset_index(drop=True)
data["u_in_rolling_std10"] = data[["breath_id", "u_in"]].groupby("breath_id").rolling(10).std()[
"u_in"].reset_index(drop=True)
g = data.groupby('breath_id')['u_in']
data['ewm_u_in_mean'] = g.ewm(halflife=10).mean() \
.reset_index(level=0, drop=True)
data['ewm_u_in_std'] = g.ewm(halflife=10).std() \
.reset_index(level=0, drop=True)
data['ewm_u_in_corr'] = g.ewm(halflife=10).corr() \
.reset_index(level=0, drop=True)
data['rolling_10_mean'] = g.rolling(window=10, min_periods=1).mean() \
.reset_index(level=0, drop=True)
data['rolling_10_max'] = g.rolling(window=10, min_periods=1).max() \
.reset_index(level=0, drop=True)
data['rolling_10_std'] = g.rolling(window=10, min_periods=1).std() \
.reset_index(level=0, drop=True)
data['expand_mean'] = g.expanding(2).mean() \
.reset_index(level=0, drop=True)
data['expand_max'] = g.expanding(2).max() \
.reset_index(level=0, drop=True)
data['expand_std'] = g.expanding(2).std() \
.reset_index(level=0, drop=True)
data['u_in_lag_back10'] = data.groupby('breath_id')['u_in'].shift(-10)
data['u_out_lag_back10'] = data.groupby('breath_id')['u_out'].shift(-10)
data['time_step_diff'] = data.groupby('breath_id')['time_step'].diff().fillna(0)
### rolling window ts feats
data['ewm_u_in_mean'] = data.groupby('breath_id')['u_in'].ewm(halflife=9).mean().reset_index(level=0,
drop=True)
data['ewm_u_in_std'] = data.groupby('breath_id')['u_in'].ewm(halflife=10).std().reset_index(level=0,
drop=True) ## could add covar?
data['ewm_u_in_corr'] = data.groupby('breath_id')['u_in'].ewm(halflife=15).corr().reset_index(level=0,
drop=True) # self umin corr
# data['ewm_u_in_corr'] = data.groupby('breath_id')['u_in'].ewm(halflife=6).corr(data.groupby('breath_id')["u_out"]).reset_index(level=0,drop=True) # corr with u_out # error
## rolling window of 15 periods
data[["15_in_sum", "15_in_min", "15_in_max", "15_in_mean", "15_out_std"]] = data.groupby('breath_id')[
'u_in'].rolling(window=15, min_periods=1).agg(
{"15_in_sum": "sum", "15_in_min": "min", "15_in_max": "max", "15_in_mean": "mean",
"15_in_std": "std"}).reset_index(level=0, drop=True)
# data[["45_in_sum","45_in_min","45_in_max","45_in_mean","45_out_std"]] = data.groupby('breath_id')['u_in'].rolling(window=45,min_periods=1).agg({"45_in_sum":"sum","45_in_min":"min","45_in_max":"max","45_in_mean":"mean","45_in_std":"std"}).reset_index(level=0,drop=True)
data[["45_in_sum", "45_in_min", "45_in_max", "45_in_mean", "45_out_std"]] = data.groupby('breath_id')[
'u_in'].rolling(window=45, min_periods=1).agg(
{"45_in_sum": "sum", "45_in_min": "min", "45_in_max": "max", "45_in_mean": "mean",
"45_in_std": "std"}).reset_index(level=0, drop=True)
data[["15_out_mean"]] = data.groupby('breath_id')['u_out'].rolling(window=15, min_periods=1).agg(
{"15_out_mean": "mean"}).reset_index(level=0, drop=True)
return data.fillna(0).replace((np.inf, -np.inf, np.nan), 0)
def make_features19(self, data):
"""
6 and volume
Args:
data:
Returns:
"""
data['RCT'] = data['R'] * data['time_step'] / data['C']
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['area'] = data['time_step'] * data['u_in']
data['area'] = data.groupby('breath_id')['area'].cumsum()
data['time_step_cumsum'] = data.groupby(['breath_id'])['time_step'].cumsum()
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['exponent'] = (-1.0 * data['time_step'] * 1000) / (data['R'] * data['C'])
data['factor'] = np.exp(data['exponent'])
data['vf'] = (data['u_in_cumsum'] * data['R']) / data['factor']
data['vt'] = 0
data.loc[data['time_step'] != 0, 'vt'] = data['area'] / (
data['C'] * (-1.0 * data['factor'] + 1.0))
data['v'] = data['vf'] + data['vt']
data['v0'] = data.groupby(['breath_id'])['v'].transform('first')
data['pt'] = (1 - (data['v'] / data['v0']) ** 2) * (1 / (3 * (data['v'] / (4 * math.pi)) ** (1 / 3))) * (
3 * data['v0'] / (4 * math.pi)) ** (2 / 3)
data = data.drop(['exponent', 'factor', 'vf', 'vt', 'v', 'v0'], axis=1)
# CATE_FEATURES = ['R_cate', 'C_cate', 'RC_dot', 'RC_sum']
CONT_FEATURES = ['u_in', 'u_out', 'time_step'] + ['u_in_cumsum', 'u_in_cummean', 'area', 'cross', 'cross2'] + [
'R_cate', 'C_cate']
LAG_FEATURES = ['breath_time']
LAG_FEATURES += [f'u_in_lag_{i}' for i in range(1, self.cfg.datamodule.use_lag + 1)]
# LAG_FEATURES += [f'u_in_lag_{i}_back' for i in range(1, USE_LAG+1)]
LAG_FEATURES += [f'u_in_time{i}' for i in range(1, self.cfg.datamodule.use_lag + 1)]
# LAG_FEATURES += [f'u_in_time{i}_back' for i in range(1, USE_LAG+1)]
LAG_FEATURES += [f'u_out_lag_{i}' for i in range(1, self.cfg.datamodule.use_lag + 1)]
# LAG_FEATURES += [f'u_out_lag_{i}_back' for i in range(1, USE_LAG+1)]
# ALL_FEATURES = CATE_FEATURES + CONT_FEATURES + LAG_FEATURES
ALL_FEATURES = CONT_FEATURES + LAG_FEATURES + ['RCT', 'pt']
for col in ['id', 'breath_id', 'one', 'count', 'breath_id_lag', 'breath_id_lag2', 'breath_id_lagsame',
'breath_id_lag2same', 'pressure', 'fold']:
if col in data.columns:
ALL_FEATURES.append(col)
data['time_delta'] = data.groupby('breath_id')['time_step'].diff().fillna(0)
data['delta'] = data['time_delta'] * data['u_in']
data['area'] = data.groupby('breath_id')['delta'].cumsum()
data['cross'] = data['u_in'] * data['u_out']
data['cross2'] = data['time_step'] * data['u_out']
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_cummean'] = data['u_in_cumsum'] / data['count']
# data = data.drop(['count', 'one'], axis=1)
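# Build lag features without leaking across breaths: breath_id_lag{lag}same is 1 only
# when the row shifted by `lag` belongs to the same breath, so lagged values coming
# from a different breath_id are zeroed out.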
for lag in range(1, self.cfg.datamodule.use_lag + 1):
data[f'breath_id_lag{lag}'] = data['breath_id'].shift(lag).fillna(0)
data[f'breath_id_lag{lag}same'] = np.select([data[f'breath_id_lag{lag}'] == data['breath_id']], [1], 0)
# u_in
data[f'u_in_lag_{lag}'] = data['u_in'].shift(lag).fillna(0) * data[f'breath_id_lag{lag}same']
# data[f'u_in_lag_{lag}_back'] = data['u_in'].shift(-lag).fillna(0) * data[f'breath_id_lag{lag}same']
data[f'u_in_time{lag}'] = data['u_in'] - data[f'u_in_lag_{lag}']
# data[f'u_in_time{lag}_back'] = data['u_in'] - data[f'u_in_lag_{lag}_back']
data[f'u_out_lag_{lag}'] = data['u_out'].shift(lag).fillna(0) * data[f'breath_id_lag{lag}same']
# data[f'u_out_lag_{lag}_back'] = data['u_out'].shift(-lag).fillna(0) * data[f'breath_id_lag{lag}same']
# breath_time
data['time_step_lag'] = data['time_step'].shift(1).fillna(0) * data[f'breath_id_lag{lag}same']
data['breath_time'] = data['time_step'] - data['time_step_lag']
drop_columns = ['time_step_lag']
drop_columns += [f'breath_id_lag{i}' for i in range(1, self.cfg.datamodule.use_lag + 1)]
drop_columns += [f'breath_id_lag{i}same' for i in range(1, self.cfg.datamodule.use_lag + 1)]
data = data.drop(drop_columns, axis=1)
# fill na by zero
data = data.fillna(0)
c_dic = {10: 0, 20: 1, 50: 2}
r_dic = {5: 0, 20: 1, 50: 2}
rc_sum_dic = {v: i for i, v in enumerate([15, 25, 30, 40, 55, 60, 70, 100])}
rc_dot_dic = {v: i for i, v in enumerate([50, 100, 200, 250, 400, 500, 2500, 1000])}
data['C_cate'] = data['C'].map(c_dic)
data['R_cate'] = data['R'].map(r_dic)
data['RC_sum'] = (data['R'] + data['C']).map(rc_sum_dic)
data['RC_dot'] = (data['R'] * data['C']).map(rc_dot_dic)
norm_features = CONT_FEATURES + LAG_FEATURES
for col in ['id', 'breath_id', 'one', 'count', 'breath_id_lag', 'breath_id_lag2', 'breath_id_lagsame',
'breath_id_lag2same', 'pressure', 'fold']:
if col in data.columns:
norm_features.append(col)
if 'fold' in data.columns:
norm_features.append('fold')
norm_features = list(set(norm_features))
ALL_FEATURES = list(set(ALL_FEATURES))
print('data.columns', data.columns)
print('ALL_FEATURES', ALL_FEATURES)
# assert norm_features == ALL_FEATURES, 'something went wrong'
return data[ALL_FEATURES].fillna(0).replace((np.inf, -np.inf, np.nan), 0)
def make_features191(self, data):
"""
6 and volume
Args:
data:
Returns:
"""
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['area'] = data['time_step'] * data['u_in']
data['area'] = data.groupby('breath_id')['area'].cumsum()
data['time_step_cumsum'] = data.groupby(['breath_id'])['time_step'].cumsum()
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['exponent'] = (-1.0 * data['time_step'] * 1000) / (data['R'] * data['C'])
data['factor'] = np.exp(data['exponent'])
data['vf'] = (data['u_in_cumsum'] * data['R']) / data['factor']
data['vt'] = 0
data.loc[data['time_step'] != 0, 'vt'] = data['area'] / (
data['C'] * (-1.0 * data['factor'] + 1.0))
data['v'] = data['vf'] + data['vt']
data['v0'] = data.groupby(['breath_id'])['v'].transform('first')
data['pt'] = (1 - (data['v'] / data['v0']) ** 2) * (1 / (3 * (data['v'] / (4 * math.pi)) ** (1 / 3))) * (
3 * data['v0'] / (4 * math.pi)) ** (2 / 3)
data = data.drop(['exponent', 'factor', 'vf', 'vt', 'v', 'v0'], axis=1)
# CATE_FEATURES = ['R_cate', 'C_cate', 'RC_dot', 'RC_sum']
CONT_FEATURES = ['u_in', 'u_out', 'time_step'] + ['u_in_cumsum', 'u_in_cummean', 'area', 'cross', 'cross2'] + [
'R_cate', 'C_cate']
LAG_FEATURES = ['breath_time']
LAG_FEATURES += [f'u_in_lag_{i}' for i in range(1, self.cfg.datamodule.use_lag + 1)]
# LAG_FEATURES += [f'u_in_lag_{i}_back' for i in range(1, USE_LAG+1)]
LAG_FEATURES += [f'u_in_time{i}' for i in range(1, self.cfg.datamodule.use_lag + 1)]
# LAG_FEATURES += [f'u_in_time{i}_back' for i in range(1, USE_LAG+1)]
LAG_FEATURES += [f'u_out_lag_{i}' for i in range(1, self.cfg.datamodule.use_lag + 1)]
# LAG_FEATURES += [f'u_out_lag_{i}_back' for i in range(1, USE_LAG+1)]
# ALL_FEATURES = CATE_FEATURES + CONT_FEATURES + LAG_FEATURES
ALL_FEATURES = CONT_FEATURES + LAG_FEATURES + ['pt']
for col in ['id', 'breath_id', 'one', 'count', 'breath_id_lag', 'breath_id_lag2', 'breath_id_lagsame',
'breath_id_lag2same', 'pressure', 'fold']:
if col in data.columns:
ALL_FEATURES.append(col)
data['time_delta'] = data.groupby('breath_id')['time_step'].diff().fillna(0)
data['delta'] = data['time_delta'] * data['u_in']
data['area'] = data.groupby('breath_id')['delta'].cumsum()
data['cross'] = data['u_in'] * data['u_out']
data['cross2'] = data['time_step'] * data['u_out']
data['u_in_cumsum'] = (data['u_in']).groupby(data['breath_id']).cumsum()
data['one'] = 1
data['count'] = (data['one']).groupby(data['breath_id']).cumsum()
data['u_in_cummean'] = data['u_in_cumsum'] / data['count']
# data = data.drop(['count', 'one'], axis=1)
for lag in range(1, self.cfg.datamodule.use_lag + 1):
data[f'breath_id_lag{lag}'] = data['breath_id'].shift(lag).fillna(0)
data[f'breath_id_lag{lag}same'] = np.select([data[f'breath_id_lag{lag}'] == data['breath_id']], [1], 0)
# u_in
data[f'u_in_lag_{lag}'] = data['u_in'].shift(lag).fillna(0) * data[f'breath_id_lag{lag}same']
# data[f'u_in_lag_{lag}_back'] = data['u_in'].shift(-lag).fillna(0) * data[f'breath_id_lag{lag}same']
data[f'u_in_time{lag}'] = data['u_in'] - data[f'u_in_lag_{lag}']
# data[f'u_in_time{lag}_back'] = data['u_in'] - data[f'u_in_lag_{lag}_back']
data[f'u_out_lag_{lag}'] = data['u_out'].shift(lag).fillna(0) * data[f'breath_id_lag{lag}same']
# data[f'u_out_lag_{lag}_back'] = data['u_out'].shift(-lag).fillna(0) * data[f'breath_id_lag{lag}same']
# breath_time
data['time_step_lag'] = data['time_step'].shift(1).fillna(0) * data[f'breath_id_lag{lag}same']
data['breath_time'] = data['time_step'] - data['time_step_lag']
drop_columns = ['time_step_lag']
drop_columns += [f'breath_id_lag{i}' for i in range(1, self.cfg.datamodule.use_lag + 1)]
drop_columns += [f'breath_id_lag{i}same' for i in range(1, self.cfg.datamodule.use_lag + 1)]
data = data.drop(drop_columns, axis=1)
# fill na by zero
data = data.fillna(0)
c_dic = {10: 0, 20: 1, 50: 2}
r_dic = {5: 0, 20: 1, 50: 2}
rc_sum_dic = {v: i for i, v in enumerate([15, 25, 30, 40, 55, 60, 70, 100])}
rc_dot_dic = {v: i for i, v in enumerate([50, 100, 200, 250, 400, 500, 2500, 1000])}
data['C_cate'] = data['C'].map(c_dic)
data['R_cate'] = data['R'].map(r_dic)
data['RC_sum'] = (data['R'] + data['C']).map(rc_sum_dic)
data['RC_dot'] = (data['R'] * data['C']).map(rc_dot_dic)
norm_features = CONT_FEATURES + LAG_FEATURES
for col in ['id', 'breath_id', 'one', 'count', 'breath_id_lag', 'breath_id_lag2', 'breath_id_lagsame',
'breath_id_lag2same', 'pressure', 'fold']:
if col in data.columns:
norm_features.append(col)
if 'fold' in data.columns:
norm_features.append('fold')
norm_features = list(set(norm_features))
ALL_FEATURES = list(set(ALL_FEATURES))
print('data.columns', data.columns)
print('ALL_FEATURES', ALL_FEATURES)
# assert norm_features == ALL_FEATURES, 'something went wrong'
return data[ALL_FEATURES].fillna(0).replace((np.inf, -np.inf, np.nan), 0)
def setup(self, stage=None):
if os.path.exists(os.path.join(self.cfg.datamodule.path, f'train_{self.cfg.datamodule.make_features_style}.csv')):
print('Reading features')
train = pd.read_csv(os.path.join(self.cfg.datamodule.path, f'train_{self.cfg.datamodule.make_features_style}.csv'))
test = pd.read_csv(os.path.join(self.cfg.datamodule.path, f'test_{self.cfg.datamodule.make_features_style}.csv'))
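# GroupKFold on breath_id keeps all 80 timesteps of a breath in the same fold; the
# pressure targets and u_out masks are then reshaped to (n_breaths, 80).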
gkf = GroupKFold(n_splits=self.cfg.datamodule.n_folds).split(train, train.pressure, groups=train.breath_id)
for fold, (_, valid_idx) in enumerate(gkf):
train.loc[valid_idx, 'fold'] = fold
train_targets = train.loc[train['fold'] != self.cfg.datamodule.fold_n, 'pressure'].copy().values.reshape(-1, 80)
valid_targets = train.loc[train['fold'] == self.cfg.datamodule.fold_n, 'pressure'].copy().values.reshape(-1, 80)
train_u_out_ = train.loc[train['fold'] != self.cfg.datamodule.fold_n, 'u_out'].copy().values.reshape(-1, 80)
valid_u_out_ = train.loc[train['fold'] == self.cfg.datamodule.fold_n, 'u_out'].copy().values.reshape(-1, 80)
test_targets = np.zeros(len(test)).reshape(-1, 80)
else:
if self.cfg.training.debug:
train = pd.read_csv(os.path.join(self.cfg.datamodule.path, 'train.csv'), nrows=196000)
test = pd.read_csv(os.path.join(self.cfg.datamodule.path, 'test.csv'), nrows=80000)
else:
train = pd.read_csv(os.path.join(self.cfg.datamodule.path, 'train.csv'))
test = pd.read_csv(os.path.join(self.cfg.datamodule.path, 'test.csv'))
gkf = GroupKFold(n_splits=self.cfg.datamodule.n_folds).split(train, train.pressure, groups=train.breath_id)
for fold, (_, valid_idx) in enumerate(gkf):
train.loc[valid_idx, 'fold'] = fold
train_targets = train.loc[train['fold'] != self.cfg.datamodule.fold_n, 'pressure'].copy().values.reshape(-1, 80)
valid_targets = train.loc[train['fold'] == self.cfg.datamodule.fold_n, 'pressure'].copy().values.reshape(-1, 80)
train_u_out_ = train.loc[train['fold'] != self.cfg.datamodule.fold_n, 'u_out'].copy().values.reshape(-1, 80)
valid_u_out_ = train.loc[train['fold'] == self.cfg.datamodule.fold_n, 'u_out'].copy().values.reshape(-1, 80)
test_targets = np.zeros(len(test)).reshape(-1, 80)
print('Making features')
if self.cfg.datamodule.make_features_style == 0:
train = self.make_features(train)
test = self.make_features(test)
elif self.cfg.datamodule.make_features_style == 1:
train = self.make_features1(train)
test = self.make_features1(test)
elif self.cfg.datamodule.make_features_style == 2:
train = self.make_features2(train)
test = self.make_features2(test)
elif self.cfg.datamodule.make_features_style == 3:
train = self.make_features3(train)
test = self.make_features3(test)
elif self.cfg.datamodule.make_features_style == 31:
train = self.make_features31(train)
test = self.make_features31(test)
elif self.cfg.datamodule.make_features_style == 32:
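# Style 32 augments the data with precomputed clustering features loaded from pickles.
# They appear to cover only the first 35 timesteps of each breath (an assumption based
# on the reshape below), so they are zero-padded to 80 timesteps to align with train/test.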
with open(os.path.join(self.cfg.datamodule.path, 'train_data_feats.pkl'), 'rb') as f:
d = pickle.load(f)
tmp = d.to_numpy().reshape(-1, 35, d.shape[-1])
tmp1 = np.zeros((tmp.shape[0], 80, tmp.shape[2]))
tmp1[:, :35, :] = tmp
tmp1 = tmp1.reshape(-1, d.shape[1])
d = pd.DataFrame(tmp1, columns=d.columns)
if self.cfg.training.debug:
d = d[:196000]
train['clusterIDeu'] = d['clusterIDeu'].values.astype(str).copy()
train['clusterIDdtw'] = d['clusterIDdtw'].values.astype(str).copy()
with open(os.path.join(self.cfg.datamodule.path, 'test_data_feats.pkl'), 'rb') as f:
d = pickle.load(f)
tmp = d.to_numpy().reshape(-1, 35, d.shape[-1])
tmp1 = np.zeros((tmp.shape[0], 80, tmp.shape[2]))
tmp1[:, :35, :] = tmp
tmp1 = tmp1.reshape(-1, d.shape[1])
d = | pd.DataFrame(tmp1, columns=d.columns) | pandas.DataFrame |
"""
# # DATE : 12 September 2017
# # AUTHOR : <EMAIL>
# # DESCRIPTION : This script creates Final_KPI_Report.xlsx inside the ./Report directory,
# # using all Bundle*.csv files present inside the ./Referral_Data directory.
"""
from string import whitespace
from openpyxl import load_workbook
import pandas as pd
import os.path
import sys
import csv
import time
import logging
import fnmatch
from Combine_Configs_Script import CreateCombineConfigs
from Constants import Constants
from Formats import Formats
from Formulas import Formulas
from Utils import Utils
class FinalKPIReport:
__Const = None
CONFIG_HEADER = [u'Test Case No.', u'Test Category', u'Test Case ID', u'Test Type', u'Configurations']
ReadTemplateFlag = True
DF_Template = None
DF_Bundles_List = []
bundleFiles_list = []
def __init__(self):
self.__Const = Constants()
# logging.basicConfig(format=self.__Const.LOGGING_FORMAT, level=logging.DEBUG)
logging.debug("constructor is called...")
Utils.ChangeDirPath(self.__Const.REFERRAL_DATA_PATH)
self.bundleFiles_list = list(
reversed(fnmatch.filter(os.listdir(os.getcwd()), self.__Const.WILDCARD_BUNDLE_FILE)))
logging.debug(self.bundleFiles_list)
if self.bundleFiles_list != []:
logging.debug(
"List of files present inside " + self.__Const.REFERRAL_DATA_PATH + ":" + str(self.bundleFiles_list))
else:
logging.error(
self.__Const.REFERRAL_DATA_PATH + " Directory should contains 'Bundle*.csv files'. Script is aborted...!!")
if os.path.isdir("..\\" + self.__Const.REPORT_PATH):
pass
else:
logging.error(
self.__Const.REPORT_PATH + " Directory is need to be created to Generate Report. Script is aborted...!!")
sys.exit(0)
if len(self.bundleFiles_list) <= 1:
sys.exit("Script is aborted...Cannot Generate Report for Single Bundle.")
self.ReadBundles(self.bundleFiles_list)
def ReadBundles(self, list_bundleFiles):
for files in list_bundleFiles:
if os.path.isfile(os.path.realpath(files)):
logging.debug(files + " File is present")
if self.ReadTemplateFlag:
self.DF_Template = Utils.ReadCSV(self.__Const.CONFIG_TEMPLATE_FILE)
self.ReadTemplateFlag = False
self.DF_Bundles_List.append(Utils.ReadCSV(files))
else:
logging.error(files + " File not present")
logging.error(
self.__Const.WILDCARD_BUNDLE_FILE + " File is needed to create Final Report. Script is aborted...!!")
sys.exit(0)
def CreateSheets(self,Max_RunCount):
df_template = self.DF_Template
AppsDict, AppNames = Utils.MakeApplicationDictionary(df_template)
AppCount = 0
Utils.ChangeDirPath(Constants.REPORT_PATH)
writer = pd.ExcelWriter(Constants.FINAL_REPORT_FILE, engine='xlsxwriter')
workbook = writer.book
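# All tabs share one xlsxwriter workbook so charts, merged ranges and cell formats can
# be attached while each sheet is written.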
# # # Create Summary Tabs
self.Create_Summary_Tab(workbook, AppNames)
# # # # Create Module Details BUNDLE_MD Tabs
self.Create_Summary_MD_Tab(workbook, Constants.SUMMARY_BUNDLE_MD_TAB_NAME)
# # # # Create Module Details CONFIG_MD Tabs
self.Create_Summary_MD_Tab(workbook, Constants.SUMMARY_CONFIG_MD_TAB_NAME)
# # # Create Module Details Tabs
self.Create_ModuleDetail_Tab(workbook, AppNames)
# # Create Module Details _MD Tabs
self.Create_ModuleDetail_MD_Tab(workbook, AppNames)
# # # Create All Application and Application_MD Tabs
self.Create_ApplicationMD_Tab(workbook, AppNames, AppsDict, df_template,Max_RunCount)
workbook.close()
def Create_Summary_MD_Tab(self, workbook, TabName):
Row_Index = 0
Col_Index = 0
if TabName == Constants.SUMMARY_BUNDLE_MD_TAB_NAME:
worksheet = workbook.add_worksheet(Constants.SUMMARY_BUNDLE_MD_TAB_NAME)
PerformanceList = [Constants.NUMBER_OF_TESTS] + Constants.PERFORMANCE_STATUS_HEADER
Color = Constants.COLOR_LIGHT_BLUE
else:
worksheet = workbook.add_worksheet(Constants.SUMMARY_CONFIG_MD_TAB_NAME)
PerformanceList = [Constants.NUMBER_OF_TESTS] + Constants.PERFORMANCE_STATUS_RESULT_HEADER
Color = Constants.COLOR_GOLD
worksheet.set_column("D:AAA", Constants.CELL_WIDTH)
worksheet.set_column("A:B", Constants.REPORT_DESCRIPTION_WIDTH)
worksheet.merge_range(Row_Index, Col_Index, Row_Index, Col_Index + 1, Constants.CONFIGURATIONS_DETAIL_TITLE,
Formats.Format_First_MD_Header(workbook, Constants.COLOR_NAVY_BLUE))
worksheet.write(Row_Index + 1, Col_Index, Constants.CONFIGURATIONS_NAME_HEADER,
Formats.Format_First_MD_Header(workbook, Constants.COLOR_LIGHT_BLUE))
worksheet.write(Row_Index + 1, Col_Index + 1, Constants.CONFIGURATIONS_DESCRIPTION_HEADER,
Formats.Format_First_MD_Header(workbook, Constants.COLOR_LIGHT_BLUE))
# # # Average Response Time_Table
if TabName == Constants.SUMMARY_BUNDLE_MD_TAB_NAME:
self.AverageResponseTime_Table(worksheet,workbook,Row_Index,Col_Index,PerformanceList)
for i in range(Constants.CONFIGURATIONS_COUNT):
for j in range(2):
if j == 0:
worksheet.write(Row_Index + 2 + i, Col_Index + j, Constants.CONFIGURATIONS_LIST[i],
Formats.Format_Cell(workbook))
else:
worksheet.write(Row_Index + 2 + i, Col_Index + j, Constants.CONFIGURATIONS_DESCRIPTION_LIST[i],
Formats.Format_Cell(workbook))
worksheet.merge_range(Row_Index + 1, Col_Index + 3, Row_Index + 1, Col_Index + (2 * len(PerformanceList)) + 3,
Constants.SUMMARY_MD_TAB_TITLE,
Formats.Format_Hyperlink(workbook))
worksheet.merge_range(Row_Index + 2, Col_Index + 3, Row_Index + 2, Col_Index + (2 * len(PerformanceList)) + 3,
"Improvement Count",
Formats.Format_First_MD_Header(workbook, Constants.COLOR_GREY))
row = Row_Index + 12
col = Col_Index + 3
for i in range(len(Constants.SUMMARY_MD_TAB_HEADER)):
if i == 0:
worksheet.merge_range(row, col, row + 1, col, Constants.CONFIGURATIONS_NAME_HEADER,
Formats.Format_First_MD_Header(workbook, Color))
else:
worksheet.merge_range(row, col + 1, row, col + len(PerformanceList), Constants.SUMMARY_MD_TAB_HEADER[i],
Formats.Format_First_MD_Header(workbook, Color))
for j in range(len(PerformanceList)):
worksheet.write(row + 1, col + 1 + j, PerformanceList[j],
Formats.Format_First_MD_Header(workbook, Color))
col += len(PerformanceList)
row = Row_Index + 14
col = Col_Index + 3
for i in range(Constants.CONFIGURATIONS_COUNT + 1):
if i < Constants.CONFIGURATIONS_COUNT:
for j in range(len(PerformanceList) * 2 + 1):
if j == 0:
worksheet.write(row + i, col + j, Constants.CONFIGURATIONS_LIST[i],
Formats.Format_Cell(workbook))
elif j == 1:
start_cell = Utils.GetColumnName(col + 1 + j)+ str(row + 1 + i)
end_cell = Utils.GetColumnName(col + len(PerformanceList) - 1 + j) + str(row + 1 + i)
formula_str = '=SUM({0}:{1})'.format(start_cell,end_cell)
worksheet.write(row + i, col + j, formula_str, Formats.Format_Cell(workbook))
elif j <= len(PerformanceList):
if TabName==Constants.SUMMARY_CONFIG_MD_TAB_NAME:
p_col = Col_Index - 1 + j + len(PerformanceList)
else:
p_col = Col_Index - 1 + j
formula_str = '={0}!{1}{2}'.format(Constants.MODULE_DETAILS_MD_TAB_NAME,
Utils.GetColumnName(p_col),
Constants.PERFORMANCE_ROW_INDEX_REFERENCE_TABLE + 1 + i)
worksheet.write(row + i, col + j, formula_str, Formats.Format_Cell(workbook))
else:
worksheet.write(row + i, col + j, "", Formats.Format_Cell(workbook))
else:
for j in range(len(PerformanceList) * 2 + 1):
if j == 0:
worksheet.write(row + i, col + j, "TOTAL",
Formats.Format_Cell(workbook))
else:
formula_str = "=SUM({0}15:{0}18)".format(Utils.GetColumnName(col + j))
worksheet.write(row + i, col + j, formula_str, Formats.Format_Cell(workbook))
#### Create Current release PIE CHART
formula_format = '${0}${1}'
f_row = row
f_col = col
start_cat_cell =formula_format.format(Utils.GetColumnName(f_col+2),f_row)
end_cat_cell =formula_format.format(Utils.GetColumnName(f_col+len(PerformanceList)),f_row)
f_row+=Constants.CONFIGURATIONS_COUNT + 1
start_val_cell =formula_format.format(Utils.GetColumnName(f_col+2),f_row)
end_val_cell =formula_format.format(Utils.GetColumnName(f_col+len(PerformanceList)),f_row)
f_col += len(PerformanceList)
# print start_cat_cell,end_cat_cell,start_val_cell,end_val_cell
chartPie = workbook.add_chart(
{'type': 'pie'}) # Configure the series. Note the use of the list syntax to define ranges:
chartPie.add_series({
'name': 'Pie data',
'data_labels': {'percentage': True},
'categories': '=' + TabName + '!{0}:{1}'.format(start_cat_cell,end_cat_cell),
'values': '=' + TabName + '!{0}:{1}'.format(start_val_cell,end_val_cell),
})
chartPie.set_size({'width': 400, 'height': 150})
# Add a title.
chartPie.set_title({'name': Constants.CURRENT_RELEASE_HEADER})
# Set an Excel chart style. Colors with white outline and shadow.
chartPie.set_style(10)
# Insert the chart into the worksheet (with an offset).
worksheet.insert_chart('F4', chartPie)
#### Create Previous release PIE CHART
s_row = row
s_col = f_col
start_cat_cell =formula_format.format(Utils.GetColumnName(s_col+2),s_row)
end_cat_cell =formula_format.format(Utils.GetColumnName(s_col+len(PerformanceList)),s_row)
s_row+=Constants.CONFIGURATIONS_COUNT + 1
start_val_cell =formula_format.format(Utils.GetColumnName(s_col+2),s_row)
end_val_cell =formula_format.format(Utils.GetColumnName(s_col+len(PerformanceList)),s_row)
# print start_cat_cell,end_cat_cell,start_val_cell,end_val_cell
chartPie = workbook.add_chart(
{'type': 'pie'}) # Configure the series. Note the use of the list syntax to define ranges:
chartPie.add_series({
'name': 'Pie data',
'data_labels': {'percentage': True},
'categories': '=' + TabName+ '!{0}:{1}'.format(start_cat_cell,end_cat_cell),
'values': '=' + TabName + '!{0}:{1}'.format(start_val_cell,end_val_cell),
})
chartPie.set_size({'width': 400, 'height': 150})
# Add a title.
chartPie.set_title({'name': Constants.PREVIOUS_RELEASE_HEADER})
# Set an Excel chart style. Colors with white outline and shadow.
chartPie.set_style(10)
# Insert the chart into the worksheet (with an offset).
worksheet.insert_chart('K4', chartPie)
def Create_Summary_Tab(self, workbook, AppNames):
Row_Index = 0
Col_Index = 0
worksheet = workbook.add_worksheet(Constants.SUMMARY_TAB_NAME)
worksheet.set_column("E:AAA", Constants.CELL_WIDTH)
worksheet.set_column("A:B", Constants.REPORT_DESCRIPTION_WIDTH)
worksheet.set_column("D:D", Constants.REPORT_DESCRIPTION_WIDTH)
# # TEST_REPORT_DESCRIPTION_TITLE
worksheet.merge_range(Row_Index, Col_Index, Row_Index + 3, Col_Index + 1,
Constants.TEST_REPORT_DESCRIPTION_TITLE,
Formats.Format_First_MD_Header(workbook, Constants.COLOR_NAVY_BLUE))
for i in range(len(Constants.TEST_REPORT_DESCRIPTION_INDEX_LIST)):
worksheet.write(Row_Index + 4 + i, Col_Index, Constants.TEST_REPORT_DESCRIPTION_INDEX_LIST[i],
Formats.Format_Cell(workbook))
worksheet.write(Row_Index + 4 + i, Col_Index + 1, "",
Formats.Format_Cell(workbook))
# # Module Detail Table
col = Col_Index
for i in range(len(Constants.SUMMARY_TAB_HEADER)):
if i == 0:
worksheet.merge_range(Row_Index + len(Constants.TEST_REPORT_DESCRIPTION_INDEX_LIST), col + 3,
Row_Index + len(Constants.TEST_REPORT_DESCRIPTION_INDEX_LIST) + 1, Col_Index + 3,
Constants.SUMMARY_TAB_HEADER[i],
Formats.Format_First_MD_Header(workbook, Constants.COLOR_LIGHT_BLUE))
else:
worksheet.merge_range(Row_Index + len(Constants.TEST_REPORT_DESCRIPTION_INDEX_LIST),
col,
Row_Index + len(Constants.TEST_REPORT_DESCRIPTION_INDEX_LIST),
col + len(Constants.SUMMARY_TAB_SUB_HEADER) - 1,
Constants.SUMMARY_TAB_HEADER[i],
Formats.Format_First_MD_Header(workbook, Constants.COLOR_LIGHT_BLUE))
for j in range(len(Constants.SUMMARY_TAB_SUB_HEADER)):
worksheet.write(Row_Index + len(Constants.TEST_REPORT_DESCRIPTION_INDEX_LIST) + 1, col + j,
Constants.SUMMARY_TAB_SUB_HEADER[j],
Formats.Format_First_MD_Header(workbook, Constants.COLOR_LIGHT_BLUE))
col += len(Constants.SUMMARY_TAB_SUB_HEADER)
Write_Col = Col_Index + len(Constants.SUMMARY_TAB_SUB_HEADER) - 1
Write_Row = Row_Index + len(Constants.TEST_REPORT_DESCRIPTION_INDEX_LIST) + 2
col = Write_Col
row = Write_Row
for i in range(len(AppNames) + 1):
Reference_SheetName = Constants.MODULE_DETAILS_TAB_NAME
if i < len(AppNames):
for j in range(2 * len(Constants.SUMMARY_TAB_SUB_HEADER) + 1):
if j == 0:
worksheet.write(Write_Row + i, Write_Col + j, AppNames[i], Formats.Format_Cell(workbook))
elif j <= len(Constants.SUMMARY_TAB_SUB_HEADER):
# print Write_Row+i,Write_Col+j
# =SUM(Module_Details!H3:H5)
formula_str = '=SUM({0}!{1}{2}:{1}{3})'.format(Reference_SheetName, Utils.GetColumnName(3 + j),
col, col + 2)
worksheet.write(Write_Row + i, Write_Col + j, formula_str, Formats.Format_Cell(workbook))
# print formula_str
else:
worksheet.write(Write_Row + i, Write_Col + j, "", Formats.Format_Cell(workbook))
col += 3
else:
for j in range(2 * len(Constants.SUMMARY_TAB_SUB_HEADER) + 1):
if j == 0:
worksheet.write(Write_Row + i, Write_Col + j, "TOTAL", Formats.Format_Cell(workbook))
else:
formula_str = "=SUM({}{}:{}{})".format(Utils.GetColumnName(Write_Col + j),
Write_Row + i - len(AppNames) + 1,
Utils.GetColumnName(Write_Col + j), Write_Row + i)
# print formula_str
worksheet.write(Write_Row + i, Write_Col + j, formula_str, Formats.Format_Cell(workbook))
row = Write_Row + i
#### Create Current release PIE CHART
chartPie = workbook.add_chart({'type': 'pie'})
# Configure the series. Note the use of the list syntax to define ranges:
chartPie.add_series({
'name': 'Pie data',
'data_labels': {'percentage': True},
'categories': '=' + Constants.SUMMARY_TAB_NAME + '!$F$14:$H$14',
'values': '=' + Constants.SUMMARY_TAB_NAME + '!$F$' + str(row + 1) + ':$H$' + str(row + 1),
})
chartPie.set_size({'width': 420, 'height': 220})
# Add a title.
chartPie.set_title({'name': Constants.CURRENT_RELEASE_HEADER})
# Set an Excel chart style. Colors with white outline and shadow.
chartPie.set_style(10)
# Insert the chart into the worksheet (with an offset).
worksheet.insert_chart('F1', chartPie, {'x_offset': 1, 'y_offset': 10})
#### Create Previous release PIE CHART
chartPie = workbook.add_chart({'type': 'pie'})
# Configure the series. Note the use of the list syntax to define ranges:
chartPie.add_series({
'name': 'Pie data',
'data_labels': {'percentage': True},
'categories': '=' + Constants.SUMMARY_TAB_NAME + '!$J$14:$L$14',
'values': '=' + Constants.SUMMARY_TAB_NAME + '!$J$' + str(row + 1) + ':$L$' + str(row + 1),
})
chartPie.set_size({'width': 420, 'height': 220})
# Add a title.
chartPie.set_title({'name': Constants.PREVIOUS_RELEASE_HEADER})
# Set an Excel chart style. Colors with white outline and shadow.
chartPie.set_style(10)
# Insert the chart into the worksheet (with an offset).
worksheet.insert_chart('J1', chartPie, {'x_offset': 1, 'y_offset': 10})
def Create_ModuleDetail_MD_Tab(self, workbook, AppNames):
# print AppNames
Row_Index = 0
Col_Index = 0
BundleName_list = Utils.GetBundlesList(self.bundleFiles_list)
CurrentBundleName = BundleName_list[0]
worksheet = workbook.add_worksheet(Constants.MODULE_DETAILS_MD_TAB_NAME)
worksheet.set_column("A:AAA", Constants.CELL_WIDTH)
Header_Col_Index = Col_Index
# Application Name Header
worksheet.merge_range(Row_Index, Header_Col_Index, Row_Index + 1, Header_Col_Index,
Constants.APPLICATION_NAME_HEADER,
Formats.Format_First_MD_Header(workbook, Constants.COLOR_GREY))
# Configuration Header
Header_Col_Index += 1
worksheet.merge_range(Row_Index, Header_Col_Index, Row_Index + 1, Header_Col_Index,
Constants.CONFIGURATIONS_HEADER,
Formats.Format_First_MD_Header(workbook, Constants.COLOR_GREY))
# Bundle to Bundle Performance Result Comparison for Current Bundle Header
Header_Col_Index += 1
worksheet.merge_range(Row_Index, Header_Col_Index, Row_Index,
Header_Col_Index + len(Constants.PERFORMANCE_STATUS_HEADER) - 1,
Constants.BUNDLE_TO_BUNDLE_COMPARISON_HEADER.format(CurrentBundleName),
Formats.Format_First_MD_Header(workbook, Constants.COLOR_LIGHT_BLUE))
for i in range(len(Constants.PERFORMANCE_STATUS_HEADER)):
worksheet.write(Row_Index + 1, Header_Col_Index + i, Constants.PERFORMANCE_STATUS_HEADER[i],
Formats.Format_First_MD_Header(workbook, Constants.COLOR_LIGHT_BLUE))
Header_Col_Index += len(Constants.PERFORMANCE_STATUS_HEADER)
# Config to Config Comparison Performance Result for Current Bundle Header
worksheet.merge_range(Row_Index, Header_Col_Index, Row_Index,
Header_Col_Index + len(Constants.PERFORMANCE_STATUS_RESULT_HEADER) - 1,
Constants.CONFIG_TO_CONFIG_COMPARISON_HEADER.format(CurrentBundleName),
Formats.Format_First_MD_Header(workbook, Constants.COLOR_GOLD))
for i in range(len(Constants.PERFORMANCE_STATUS_RESULT_HEADER)):
worksheet.write(Row_Index + 1, Header_Col_Index + i, Constants.PERFORMANCE_STATUS_RESULT_HEADER[i],
Formats.Format_First_MD_Header(workbook, Constants.COLOR_GOLD))
Header_Col_Index += len(Constants.PERFORMANCE_STATUS_RESULT_HEADER)
# Bundle Performance header
worksheet.merge_range(Row_Index, Header_Col_Index, Row_Index, Header_Col_Index + 1,
Constants.BUNDLES_PERFORMANCE_TITLE,
Formats.Format_First_MD_Header(workbook, Constants.COLOR_LIGHT_ORANGE))
for i in range(len(Constants.BUNDLES_PERFORMANCE_HEADER)):
worksheet.write(Row_Index + 1, Header_Col_Index + i, Constants.BUNDLES_PERFORMANCE_HEADER[i],
Formats.Format_First_MD_Header(workbook, Constants.COLOR_LIGHT_ORANGE))
Header_Col_Index += i
# Average Response Time
self.BundleAverageResTimeHeader_Col(worksheet, workbook, Row_Index, Header_Col_Index + 1,
(Constants.AVERAGE_RESULTS_HEADER + BundleName_list))
Write_Row_Index = Row_Index + 2
Write_Col_Index = Col_Index
row = Write_Row_Index
col = Write_Col_Index
AppCount = 0
p_row = row + 1
p_col = 2 + len(Constants.PERFORMANCE_STATUS_HEADER) \
+ len(Constants.PERFORMANCE_STATUS_RESULT_HEADER)\
+ len(Constants.BUNDLES_PERFORMANCE_HEADER)\
+len(Constants.AVERAGE_RESULTS_HEADER)
bundle_count = len(BundleName_list)
Count = 0
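# One block of rows per application: the first cell hyperlinks to that app's "_MD"
# sheet, and the remaining cells are Excel formulas pulling values from that sheet
# and from the averages reference table.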
for appName in AppNames:
Updated_AppName = Utils.SupportedSheetName(appName, 11)
AppCount += 1
SheetName_MD = Utils.SheetNameFormator(AppCount, Updated_AppName, "_MD")
formula_str = Constants.HYPERLINK_FORMULA_STRING_FORMAT.format(SheetName_MD + "!A1", appName)
worksheet.merge_range(row, col, row + Constants.CONFIGURATIONS_COUNT - 1, col, formula_str,
Formats.Format_Hyperlink(workbook))
for i in range(len(Constants.CONFIGURATIONS_LIST)):
# CONFIG
worksheet.write(row + i, col + 1, Constants.CONFIGURATIONS_LIST[i], Formats.Format_Cell(workbook))
for j in range(len(Constants.PERFORMANCE_STATUS_HEADER) + len(
Constants.PERFORMANCE_STATUS_RESULT_HEADER) + len(Constants.BUNDLES_PERFORMANCE_HEADER) + len(
Constants.AVERAGE_RESULTS_HEADER + BundleName_list)):
if j < len(Constants.PERFORMANCE_STATUS_HEADER + Constants.PERFORMANCE_STATUS_RESULT_HEADER):
formula_str = '={}!{}{}'.format(SheetName_MD, Utils.GetColumnName(col + j + 2),
(Constants.PERFORMANCE_ROW_INDEX_REFERENCE_TABLE + 1 + i))
elif j == len(Constants.PERFORMANCE_STATUS_HEADER + Constants.PERFORMANCE_STATUS_RESULT_HEADER):
formula_str = Formulas.Formula_PerformanceComparedToAllReleases(p_row+Count,p_col,bundle_count,Constants.CONFIGURATIONS_COUNT,False)[0]
elif j == len(Constants.PERFORMANCE_STATUS_HEADER + Constants.PERFORMANCE_STATUS_RESULT_HEADER) + 1:
formula_str = Formulas.Formula_PerformanceComparedToLastRelease(p_row+Count,p_col)
Count+=1
else:
formula_str = '={}!{}{}'.format(SheetName_MD, Utils.GetColumnName(col + j),
(Constants.PERFORMANCE_ROW_INDEX_REFERENCE_TABLE + 1 + i))
worksheet.write(row + i, col + j + 2, formula_str, Formats.Format_Cell(workbook))
row += Constants.CONFIGURATIONS_COUNT
# # Performance Count Reference Table
Col_Index_Refer = 0
Row_Index_Refer = Constants.PERFORMANCE_ROW_INDEX_REFERENCE_TABLE
Read_Row_Index = 3
for i in range(Constants.CONFIGURATIONS_COUNT):
worksheet.write(Row_Index_Refer + i, Col_Index_Refer, Constants.CONFIGURATIONS_LIST[i],
Formats.Format_First_MD_Header(workbook, Constants.COLOR_GREY))
Col_Index_Refer += 1
# BUNDLE_TO_BUNDLE_COMPARISON_HEADER
worksheet.merge_range(Row_Index_Refer - 2, Col_Index_Refer, Row_Index_Refer - 2,
Col_Index_Refer + len(Constants.PERFORMANCE_STATUS_HEADER) - 1,
Constants.BUNDLE_TO_BUNDLE_COMPARISON_HEADER.format(Utils.GetBundlesList(self.bundleFiles_list)[0]),
Formats.Format_First_MD_Header(workbook, Constants.COLOR_LIGHT_BLUE))
for i in range(len(Constants.PERFORMANCE_STATUS_HEADER)):
worksheet.write(Row_Index_Refer - 1, Col_Index_Refer + i, Constants.PERFORMANCE_STATUS_HEADER[i],
Formats.Format_First_MD_Header(workbook, Constants.COLOR_LIGHT_BLUE))
Col_Index_Refer += len(Constants.PERFORMANCE_STATUS_HEADER)
# CONFIG_TO_CONFIG_COMPARISON_HEADER
worksheet.merge_range(Row_Index_Refer - 2, Col_Index_Refer, Row_Index_Refer - 2,
Col_Index_Refer + len(Constants.PERFORMANCE_STATUS_HEADER) - 2,
Constants.CONFIG_TO_CONFIG_COMPARISON_HEADER.format(Utils.GetBundlesList(self.bundleFiles_list)[0]),
Formats.Format_First_MD_Header(workbook, Constants.COLOR_GOLD))
for i in range(len(Constants.PERFORMANCE_STATUS_RESULT_HEADER)):
worksheet.write(Row_Index_Refer - 1, Col_Index_Refer + i, Constants.PERFORMANCE_STATUS_RESULT_HEADER[i],
Formats.Format_First_MD_Header(workbook, Constants.COLOR_GOLD))
Col_Index_Refer += len(Constants.PERFORMANCE_STATUS_RESULT_HEADER)
# Average Response Time for All Bundles
worksheet.merge_range(Row_Index_Refer - 2, Col_Index_Refer, Row_Index_Refer - 2,
Col_Index_Refer + len(BundleName_list)+len(Constants.AVERAGE_RESULTS_HEADER) - 1,
Constants.BUNDLES_AVERAGE_RESPONSE_TIME,
Formats.Format_First_MD_Header(workbook, Constants.COLOR_LIGHT_GREEN))
bundles_results_list = Constants.AVERAGE_RESULTS_HEADER+BundleName_list
for i in range(len(bundles_results_list)):
worksheet.write(Row_Index_Refer - 1, Col_Index_Refer + i,
bundles_results_list[i],
Formats.Format_First_MD_Header(workbook, Constants.COLOR_LIGHT_GREEN))
data_len = len(Constants.PERFORMANCE_STATUS_HEADER) + len(Constants.PERFORMANCE_STATUS_RESULT_HEADER) + len(
bundles_results_list)
Col_Index_Refer = 1
# print data_len
for i in range(Constants.CONFIGURATIONS_COUNT):
for j in range(data_len):
if j < data_len-len(bundles_results_list):
formula_str = Formulas.Formula_BundleWiseAverage(Utils.GetColumnName(Col_Index_Refer + j + 1),
Read_Row_Index + i, len(AppNames),
Constants.CONFIGURATIONS_COUNT, Constants.SUM_KEY)
worksheet.write(Row_Index_Refer + i, Col_Index_Refer + j, formula_str,
Formats.Format_Cell(workbook))
elif j >= data_len-len(bundles_results_list) and j < data_len-len(bundles_results_list)+2:
formula_str = Formulas.Formula_BundleWiseAverage(Utils.GetColumnName(Col_Index_Refer + j + 3),
Read_Row_Index + i, len(AppNames),
Constants.CONFIGURATIONS_COUNT, Constants.SUM_KEY)
worksheet.write(Row_Index_Refer + i, Col_Index_Refer + j, formula_str,
Formats.Format_Cell(workbook))
else:
formula_str = Formulas.Formula_BundleWiseAverage(Utils.GetColumnName(Col_Index_Refer + j + 3),
Read_Row_Index + i, len(AppNames),
Constants.CONFIGURATIONS_COUNT,
Constants.AVERAGE_KEY)
worksheet.write(Row_Index_Refer + i, Col_Index_Refer + j, formula_str,
Formats.Format_Cell(workbook))
def Create_ApplicationMD_Tab(self, workbook, AppNames, AppsDict, df_template,Max_RunCount):
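        # For every application this builds two worksheets through Create_TAB:
        # a result tab (SheetName_APP, one block per test-case description) and a
        # "_MD" measurement tab (one row per configuration) fed by RearrangeDataFrame.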
AppCount = 0
for appName in AppNames:
# for appName, tc_count in AppsDict.items():
tc_count = AppsDict.get(appName)
AppCount += 1
Updated_AppName = Utils.SupportedSheetName(appName, 11)
df_sorted = df_template.loc[[appName], :]
df_app_list = self.RearrangeDataFrame(appName,Max_RunCount)
run_count = Max_RunCount
# print df_app_list
SheetName_MD = Utils.SheetNameFormator(AppCount, Updated_AppName, "_MD")
SheetName_APP = Utils.SheetNameFormator(AppCount, Updated_AppName)
# # APP TAB CREATION
self.Create_TAB(workbook, tc_count, df_sorted, df_app_list, run_count,
Constants.TEST_CASE_DESCRIPTION_COUNT,
SheetName_APP,
Constants.TEST_CASE_DESCRIPTION_LIST,
Constants.APP_TAB_HEADER,
Constants.TC_DESCRIPTION_WIDTH)
# # MD TAB CREATION
self.Create_TAB(workbook, tc_count, df_sorted, df_app_list, run_count,
Constants.CONFIGURATIONS_COUNT,
SheetName_MD,
Constants.CONFIGURATIONS_LIST,
Constants.MD_TAB_HEADER)
# break
def Create_ModuleDetail_Tab(self, workbook, AppNames):
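        # Summary tab: one merged hyperlink cell per application plus Total/Pass/Fail/Open
        # counts pulled by formula from rows 4-7 of that application's worksheet.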
col_index = 0
row_index = 0
ref_col_index = col_index
ref_row_index = row_index
worksheet = workbook.add_worksheet(Constants.MODULE_DETAILS_TAB_NAME)
worksheet.set_column("A:AAA", Constants.CELL_WIDTH)
len_ModuleDetailTabHeader = len(Constants.MODULE_DETAILS_TAB_HEADER)
for i in range(len_ModuleDetailTabHeader - 1):
worksheet.merge_range(row_index, ref_col_index, row_index + 1, ref_col_index,
Constants.MODULE_DETAILS_TAB_HEADER[i],
Formats.Format_First_MD_Header(workbook, Constants.COLOR_NAVY_BLUE))
ref_col_index += 1
worksheet.merge_range(row_index, ref_col_index, row_index, ref_col_index + 2,
Constants.MODULE_DETAILS_TAB_HEADER[-1],
Formats.Format_First_MD_Header(workbook, Constants.COLOR_NAVY_BLUE))
result_index_list = Constants.TEST_LEVEL_INDEX_LIST[1:-1]
for i in range(len(result_index_list)):
worksheet.write(row_index + 1, ref_col_index + i, result_index_list[i],
Formats.Format_First_MD_Header(workbook, Constants.COLOR_NAVY_BLUE))
write_row_index = row_index + 2
for i in range(len(AppNames)):
SheetName_APP = Utils.SheetNameFormator(i + 1, Utils.SupportedSheetName(AppNames[i], 11))
col_name = 9
for j in range(len(Constants.TEST_LEVEL_HEADER_LIST)):
worksheet.write(write_row_index + j, col_index + 1, Constants.TEST_LEVEL_HEADER_LIST[j],
Formats.Format_Cell(workbook))
worksheet.write(write_row_index + j, col_index + 2, "", Formats.Format_Cell(workbook))
worksheet.write(write_row_index + j, col_index + 3, "", Formats.Format_Cell(workbook))
# TOTAL
formula_str = "={}!{}7".format(SheetName_APP, Utils.GetColumnName(col_name + j))
worksheet.write(write_row_index + j, col_index + 4, formula_str, Formats.Format_Cell(workbook))
# PASS
formula_str = "={}!{}4".format(SheetName_APP, Utils.GetColumnName(col_name + j))
worksheet.write(write_row_index + j, col_index + 5, formula_str, Formats.Format_Cell(workbook))
# FAIL
formula_str = "={}!{}5".format(SheetName_APP, Utils.GetColumnName(col_name + j))
worksheet.write(write_row_index + j, col_index + 6, formula_str, Formats.Format_Cell(workbook))
# OPEN
formula_str = "={}!{}6".format(SheetName_APP, Utils.GetColumnName(col_name + j))
worksheet.write(write_row_index + j, col_index + 7, formula_str, Formats.Format_Cell(workbook))
# HYPERLINK FOR APPNAME
formula_str = Constants.HYPERLINK_FORMULA_STRING_FORMAT.format(SheetName_APP + "!A1", AppNames[i])
worksheet.merge_range(write_row_index, col_index, write_row_index + 2, col_index, formula_str,
Formats.Format_Hyperlink(workbook))
write_row_index += 3
def RearrangeDataFrame(self, appName,Max_RunCount):
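        # Slices each bundle DataFrame down to this application, drops the fixed header
        # columns and splits every row into chunks of (ISSUE_HEADER_COUNT + Max_RunCount).
        # The first bundle keeps all chunk columns; later bundles keep only the
        # third-from-last column (iloc[:, [-3]]). Returns one concatenated frame per bundle.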
first_time_flag = True
df_FinalFrames_list = []
current_run_count = 0
for df in self.DF_Bundles_List:
df_sorted = df.loc[[appName], :]
if first_time_flag:
current_run_count = Utils.CalculateRunCount(df_sorted)
dataframe_list = []
for i in range(len(df_sorted)):
row = list(df_sorted.iloc[i, :])[Constants.BUNDLE_FIXED_HEADER_COUNT:]
# print appName,len(row)
split_list_count = Constants.ISSUE_HEADER_COUNT + Max_RunCount
                temp = [row[i:i + split_list_count] for i in range(0, len(row), split_list_count)]
if first_time_flag:
dataframe_list.append(pd.DataFrame(zip(*temp)).transpose())
else:
# pd.DataFrame(zip(*temp)).transpose()
dataframe_list.append(pd.DataFrame(zip(*temp)).transpose().iloc[:, [-3]])
# dataframe_list.append(pd.DataFrame(zip(*temp)).transpose())
first_time_flag = False
finalFrame = pd.concat(dataframe_list, ignore_index=False)
df_FinalFrames_list.append(finalFrame)
return df_FinalFrames_list
def Create_TAB(self, workbook, tc_count, df_sorted, df_app_list, run_count,
ConfigPerTC_Count,
SheetName,
Test_Decription_list,
header,
DescWidth=Constants.CELL_WIDTH):
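        # Shared builder for both tab types: writes the header row and the test-case
        # number/category/ID/type/config columns, then either the "_MD" performance and
        # measurement blocks plus their reference table, or the PASS/FAIL/OPEN result
        # column with conditional formatting and a result reference table.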
worksheet = workbook.add_worksheet(SheetName)
worksheet.set_column("A:AAA", Constants.CELL_WIDTH)
worksheet.set_column("E:E", DescWidth)
worksheet.set_column("G:G", DescWidth)
Col_Index = 0
Row_Index = 0
Row_Index_Write = 2
self.Header_MD(worksheet, workbook, header, Row_Index, Col_Index) # r = 0; c = 0;
self.TestCaseNumber_Col(worksheet, workbook, tc_count, ConfigPerTC_Count, Row_Index_Write,
Col_Index) # r = 2; c = 0
Col_Index += 1
self.TestCategory_Col(worksheet, workbook, df_sorted.iloc[:, 1], ConfigPerTC_Count,
Row_Index_Write, Col_Index) # r = 2; c = 1
Col_Index += 1
self.TestCaseID_Col(worksheet, workbook, df_sorted.iloc[:, 0], ConfigPerTC_Count, Row_Index_Write,
Col_Index) # r = 2;c = 2
Col_Index += 1
self.TestType_Col(worksheet, workbook, df_sorted.iloc[:, 2], ConfigPerTC_Count, Row_Index_Write,
Col_Index) # r = 2;c = 3
Col_Index += 1
self.Config_Col(worksheet, workbook, tc_count, Test_Decription_list, Row_Index_Write, Col_Index) # r = 2;c = 4
Col_Index += 1
# print "df_app_list",len(df_app_list)
# print i,Utils.GetColumnName(i)
bundles_List = Utils.GetBundlesList(self.bundleFiles_list)
BundleCount = len(bundles_List)
currentBundle_Name = bundles_List[0]
df_currentBundle = df_app_list[0]
if "_MD" in SheetName:
Col_Index = self.PerformanceHeader_Col(worksheet, workbook, Row_Index, Col_Index, currentBundle_Name,
tc_count, run_count, BundleCount, df_currentBundle)
self.MeasurementsAndAvgResponseTime(worksheet, workbook, Row_Index_Write, Col_Index, df_app_list, run_count)
Col_Index = self.MeasurementHeader_Col(worksheet, workbook, Row_Index, Col_Index, currentBundle_Name,
run_count)
Col_Index = self.BundleAverageResTimeHeader_Col(worksheet, workbook, Row_Index, Col_Index, bundles_List)
self.PerformanceCountReferenceTable(worksheet, workbook, Row_Index_Write, Col_Index, tc_count, bundles_List,run_count)
else:
result_list = self.TCResult_list(Row_Index_Write, SheetName, tc_count)
# print len(result_list)
start_cell = (Utils.GetColumnName(Col_Index)+str(Row_Index_Write + 1))
end_cell = (Utils.GetColumnName(Col_Index)+str(Row_Index_Write + 1+len(result_list)))
cell_range = "{0}:{1}".format(start_cell,end_cell)
# print cell_range
for i in range(len(result_list)):
worksheet.write(Row_Index_Write + i, Col_Index, result_list[i], Formats.Format_Cell(workbook))
worksheet.write(Row_Index_Write + i, Col_Index + 1, " ", Formats.Format_Cell(workbook))
worksheet.conditional_format(cell_range,
{'type': 'cell',
'criteria': 'equal to',
'value': 'FAIL',
'format': Formats.Format_First_MD_Header(workbook,
Constants.COLOR_LIGHT_ORANGE)})
worksheet.conditional_format(cell_range,
{'type': 'cell',
'criteria': 'equal to',
'value': 'PASS',
'format': Formats.Format_First_MD_Header(workbook,
Constants.COLOR_LIGHT_GREEN)})
worksheet.conditional_format(cell_range,
{'type': 'cell',
'criteria': 'equal to',
'value': 'OPEN',
'format': Formats.Format_First_MD_Header(workbook,
Constants.COLOR_NAVY_BLUE)})
TestType_Dict = Utils.AppsNameWithTestCount(list(df_sorted.iloc[:, 1]))
self.ResultReferenceTable(worksheet, workbook, Row_Index_Write, Col_Index, TestType_Dict)
def ResultReferenceTable(self, worksheet, workbook, Row_Index_Write, Col_Index, TestType_Dict):
Read_Col_Index = Col_Index
Read_Row_Index = Row_Index_Write
write_col_index = Col_Index + 3
for i in range(len(Constants.TEST_LEVEL_INDEX_LIST)):
worksheet.write(Row_Index_Write + i, write_col_index, Constants.TEST_LEVEL_INDEX_LIST[i],
Formats.Format_First_MD_Header(workbook, Constants.COLOR_GREY))
write_col_index += 1
for i in range(len(Constants.TEST_LEVEL_HEADER_LIST)):
worksheet.write(Row_Index_Write, write_col_index + i, Constants.TEST_LEVEL_HEADER_LIST[i],
Formats.Format_First_MD_Header(workbook, Constants.COLOR_GREY))
for i in range(len(Constants.TEST_LEVEL_HEADER_LIST)):
Read_Col_Name = Utils.GetColumnName(Read_Col_Index)
# print Read_Row_Index
TC_count = 0
result = Constants.TEST_LEVEL_INDEX_LIST[1:-1]
for j in range(len(result)):
status = result[j]
row = Row_Index_Write + 1 + j
col = write_col_index + i
                if Constants.TEST_LEVEL_HEADER_LIST[i] in TestType_Dict:
# print Constants.TEST_LEVEL_HEADER_LIST[i]
TC_count = TestType_Dict[Constants.TEST_LEVEL_HEADER_LIST[i]]
RowCount = TC_count * Constants.TEST_CASE_DESCRIPTION_COUNT
startRow = Read_Col_Name + str(Read_Row_Index + 1)
endRow = Read_Col_Name + str(Read_Row_Index + RowCount)
# print startRow,endRow
formula_str = '=COUNTIF({0}:{1}, "{2}")'.format(startRow, endRow, status)
# print row,col
worksheet.write(row, col, formula_str, Formats.Format_Cell(workbook))
# print formula_str
else:
TC_count = 0
# print TC_count
# print row, col
worksheet.write(row, col, TC_count, Formats.Format_Cell(workbook))
formula_str = '=SUM({0}4:{0}6)'.format(Utils.GetColumnName(write_col_index + i))
worksheet.write(row + 1, write_col_index + i, formula_str, Formats.Format_Cell(workbook))
# break
def PerformanceCountReferenceTable(self, worksheet, workbook, Row_Index_Write, Col_Index, tc_count, bundles_List,run_count):
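        # Per-configuration summary written at PERFORMANCE_ROW_INDEX_REFERENCE_TABLE:
        # total test cases, bundle-to-bundle and config-to-config status counts
        # (Formulas.Formula_PerformanceWiseCount), and per-bundle average response times.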
Col_Index_Refer = 0
Row_Index_Refer = Constants.PERFORMANCE_ROW_INDEX_REFERENCE_TABLE
d_col = Col_Index - 12
g_col = Col_Index - 11
c_col = Col_Index - 3
BundleCount = len(bundles_List)
for i in range(Constants.CONFIGURATIONS_COUNT):
worksheet.write(Row_Index_Refer + i, Col_Index_Refer, Constants.CONFIGURATIONS_LIST[i],
Formats.Format_First_MD_Header(workbook, Constants.COLOR_GREY))
Col_Index_Refer += 1
worksheet.merge_range(Row_Index_Refer - 2, Col_Index_Refer, Row_Index_Refer - 1, Col_Index_Refer,
Constants.TOTAL_TEST_CASES,
Formats.Format_First_MD_Header(workbook, Constants.COLOR_GREY))
for i in range(Constants.CONFIGURATIONS_COUNT):
worksheet.write(Row_Index_Refer + i, Col_Index_Refer, tc_count, Formats.Format_Cell(workbook))
Col_Index_Refer += 1
worksheet.merge_range(Row_Index_Refer - 2, Col_Index_Refer, Row_Index_Refer - 2,
Col_Index_Refer + len(Constants.PERFORMANCE_STATUS_HEADER) - 1,
Constants.BUNDLE_TO_BUNDLE_COMPARISON_HEADER.format(
Utils.GetBundlesList(self.bundleFiles_list)[0]),
Formats.Format_First_MD_Header(workbook, Constants.COLOR_LIGHT_BLUE))
e_col = Col_Index_Refer + len(Constants.PERFORMANCE_STATUS_HEADER)
worksheet.merge_range(Row_Index_Refer - 2, e_col, Row_Index_Refer - 2,
e_col + len(Constants.PERFORMANCE_STATUS_RESULT_HEADER) - 1,
Constants.CONFIG_TO_CONFIG_COMPARISON_HEADER.format(
Utils.GetBundlesList(self.bundleFiles_list)[0]),
Formats.Format_First_MD_Header(workbook, Constants.COLOR_GOLD))
for i in range(len(Constants.PERFORMANCE_STATUS_RESULT_HEADER)):
worksheet.write(Row_Index_Refer - 1, e_col + i, Constants.PERFORMANCE_STATUS_RESULT_HEADER[i] + " Count",
Formats.Format_First_MD_Header(workbook, Constants.COLOR_GOLD))
last_Col_index = 0
c2c_col = Constants.MD_TAB_HEADER_COUNT+1
for j in range(len(Constants.PERFORMANCE_STATUS_RESULT_HEADER)):
for i in range(Constants.CONFIGURATIONS_COUNT):
formula_str = Formulas.Formula_PerformanceWiseCount(Row_Index_Write + 1 + i, Utils.GetColumnName(c2c_col),
tc_count,
Constants.CONFIGURATIONS_COUNT,
Constants.PERFORMANCE_STATUS_RESULT_HEADER_DICT.get(
Constants.PERFORMANCE_STATUS_RESULT_HEADER[j]))
worksheet.write(Row_Index_Refer + i, e_col + j, formula_str, Formats.Format_Cell(workbook))
last_Col_index = e_col + j
worksheet.write(Constants.NA_ROW_INDEX_REFERENCE-1, 0, "NA", Formats.Format_Cell(workbook))
e_col = Col_Index_Refer + len(Constants.PERFORMANCE_STATUS_HEADER) + len(
Constants.PERFORMANCE_STATUS_RESULT_HEADER)
worksheet.merge_range(Row_Index_Refer - 2, e_col, Row_Index_Refer - 2,
e_col + len(self.bundleFiles_list) + len(Constants.AVERAGE_RESULTS_HEADER) - 1,
Constants.BUNDLES_AVERAGE_RESPONSE_TIME,
Formats.Format_First_MD_Header(workbook, Constants.COLOR_LIGHT_GREEN))
last_Col_index = 0
b2b_col = Constants.MD_TAB_HEADER_COUNT
for j in range(len(Constants.PERFORMANCE_STATUS_HEADER)):
for i in range(Constants.CONFIGURATIONS_COUNT):
formula_str = Formulas.Formula_PerformanceWiseCount(Row_Index_Write + 1 + i, Utils.GetColumnName(b2b_col),
tc_count,
Constants.CONFIGURATIONS_COUNT,
Constants.PERFORMANCE_STATUS_HEADER_DICT.get(
Constants.PERFORMANCE_STATUS_HEADER[j]))
worksheet.write(Row_Index_Refer + i, Col_Index_Refer + j, formula_str, Formats.Format_Cell(workbook))
last_Col_index = Col_Index_Refer + j
for i in range(len(Constants.PERFORMANCE_STATUS_HEADER)):
worksheet.write(Row_Index_Refer - 1, Col_Index_Refer + i, Constants.PERFORMANCE_STATUS_HEADER[i] + " Count",
Formats.Format_First_MD_Header(workbook, Constants.COLOR_LIGHT_BLUE))
Col_Index_Refer += len(Constants.PERFORMANCE_STATUS_HEADER)
for i in range(len(Constants.AVERAGE_RESULTS_HEADER)):
worksheet.write(Row_Index_Refer - 1, e_col + i, Constants.AVERAGE_RESULTS_HEADER[i],
Formats.Format_First_MD_Header(workbook, Constants.COLOR_LIGHT_GREEN))
a_col = Constants.PERFORMANCE_HEADER_COUNT + Constants.MD_TAB_HEADER_COUNT + run_count
for i in range(Constants.CONFIGURATIONS_COUNT):
formula_str = Formulas.Formula_TestCasesComparedForAverageResults(Row_Index_Write + 1 + i, a_col,
BundleCount,
Constants.CONFIGURATIONS_COUNT, tc_count)
worksheet.write(Row_Index_Refer + i, e_col, formula_str, Formats.Format_Cell(workbook))
# print formula_str
formula_str = "={}-{}".format(tc_count, Utils.GetColumnName(e_col) + str(Row_Index_Refer + 1 + i))
worksheet.write(Row_Index_Refer + i, e_col + 1, formula_str, Formats.Format_Cell(workbook))
e_col = e_col + len(Constants.AVERAGE_RESULTS_HEADER)
for i in range(BundleCount):
worksheet.write(Row_Index_Refer - 1, e_col + i, bundles_List[i],
Formats.Format_First_MD_Header(workbook, Constants.COLOR_LIGHT_GREEN))
c_col = Constants.PERFORMANCE_HEADER_COUNT + Constants.MD_TAB_HEADER_COUNT + run_count
for i in range(Constants.CONFIGURATIONS_COUNT):
for j in range(BundleCount):
formula_str = Formulas.Formula_Average(Row_Index_Write + 1 + i, c_col, c_col + j, BundleCount,
Constants.CONFIGURATIONS_COUNT, tc_count)
worksheet.write(Row_Index_Refer + i, e_col + j, formula_str, Formats.Format_Cell(workbook))
def TCResult_list(self, Row_Index_Write, SheetName, tc_count):
SheetName = SheetName + "_MD"
result_list = []
for i in range(tc_count):
result_list += Formulas.Formula_TestCaseResult(Row_Index_Write+1, SheetName)
Row_Index_Write += Constants.CONFIGURATIONS_COUNT
return result_list
def MeasurementsAndAvgResponseTime(self, worksheet, workbook, Row_Index_Write, Col_Index, df_app_list, run_count):
first = True
c = 0
all_data_list = []
for df in df_app_list:
list_df = df.iloc[:, :run_count].fillna("NA").values.tolist()
if first:
first = False
for i in range(len(list_df)):
for j in range(len(list_df[i])):
worksheet.write(Row_Index_Write + i, Col_Index + j, list_df[i][j],
Formats.Format_Cell(workbook))
df = df.iloc[:, [run_count]].fillna("NA")
list_df = df.iloc[:, :].fillna("NA").values.tolist()
all_data_list.append(list_df)
for i in range(len(list_df)):
for j in range(len(list_df[i])):
worksheet.write(Row_Index_Write + i, Col_Index + run_count + c, list_df[i][j],
Formats.Format_Cell(workbook))
c += 1
def AppFreezeAndFirstMeasurement_list(self, df_CurrentBundle):
return df_CurrentBundle.iloc[:, -2:].fillna("NA").values.tolist()
def PerformanceComparedBetweenAllConfiguration_list(self, Row_Index_Write, Col_Index, RunCount, TC_Count):
Row_Index_Write += 1
str_list = []
for i in range(TC_Count):
str_list += Formulas.Formula_PerformanceComparedBetweenAllConfiguration(
Utils.GetColumnName(Col_Index + Constants.PERFORMANCE_HEADER_COUNT + RunCount),
Row_Index_Write,
Constants.CONFIGURATIONS_COUNT)
Row_Index_Write += Constants.CONFIGURATIONS_COUNT
# print str_list
return str_list
def performanceCompareToAllReleases_list(self, Row_index, Col_index, TC_Count, RunCount, BundleCount):
PCR_List = []
Row_index += 1
Col_index = Col_index + Constants.PERFORMANCE_HEADER_COUNT + RunCount
for i in range(TC_Count):
PCR_List += Formulas.Formula_PerformanceComparedToAllReleases(Row_index, Col_index, BundleCount,
Constants.CONFIGURATIONS_COUNT)
Row_index += Constants.CONFIGURATIONS_COUNT
return PCR_List
def BundleAverageResTimeHeader_Col(self, worksheet, workbook, Row_Index, Col_Index, bundles_List):
worksheet.merge_range(Row_Index, Col_Index, Row_Index, Col_Index + len(bundles_List) - 1,
Constants.BUNDLES_AVERAGE_RESPONSE_TIME,
Formats.Format_First_MD_Header(workbook, Constants.COLOR_LIGHT_GREEN))
for bundle in bundles_List:
worksheet.write(Row_Index + 1, Col_Index, bundle,
Formats.Format_First_MD_Header(workbook, Constants.COLOR_LIGHT_GREEN))
Col_Index += 1
return Col_Index
def MeasurementHeader_Col(self, worksheet, workbook, Row_Index, Col_Index, currentBundle_Name, run_count):
worksheet.merge_range(Row_Index, Col_Index, Row_Index, Col_Index + run_count - 1, currentBundle_Name,
Formats.Format_First_MD_Header(workbook, Constants.COLOR_LIGHT_ORANGE))
for i in range(run_count):
worksheet.write(Row_Index + 1, Col_Index, Constants.MEASUREMENT_STRING_FORMAT % (i + 1),
Formats.Format_First_MD_Header(workbook, Constants.COLOR_LIGHT_ORANGE))
Col_Index += 1
return Col_Index
def PerformanceHeader_Col(self, worksheet, workbook, Row_Index, Col_Index, currentBundle_Name, tc_count, RunCount,
BundleCount, DF_CurrentBundle):
Reference_Index = Col_Index
# print "Reference_Index",Reference_Index
Row_Index_Write = Row_Index + 2
###### PerformanceComparedToAllReleases
PCR_list = self.performanceCompareToAllReleases_list(Row_Index_Write, Col_Index, tc_count, RunCount,
BundleCount)
# print PCR_list
######## PerformanceComparedBetweenAllConfiguration
PCC_list = self.PerformanceComparedBetweenAllConfiguration_list(Row_Index_Write, Col_Index, RunCount, tc_count)
######## AppFreezeAndFirstMeasurement
AFFM_list = self.AppFreezeAndFirstMeasurement_list(DF_CurrentBundle)
        dummyList = list(zip(PCR_list, PCC_list))  # materialize so len() works on Python 3
for i in range(len(dummyList)):
for j in range(len(dummyList[i])):
worksheet.write(Row_Index_Write + i, Reference_Index + j, dummyList[i][j],
Formats.Format_Cell(workbook))
Reference_Index += 2
for i in range(len(AFFM_list)):
for j in range(len(AFFM_list[i])+1):
if j < 2:
worksheet.write(Row_Index_Write + i, Reference_Index + j, AFFM_list[i][j],
Formats.Format_Cell(workbook))
else:
e_col = len(Constants.PERFORMANCE_HEADER_LIST) + RunCount + Col_Index
formula_str = Formulas.Formula_ImprovementPercentageComparedToCurrentRelease(Row_Index_Write+1+i,e_col)
worksheet.write(Row_Index_Write + i, Reference_Index + j,formula_str ,
Formats.Format_Percentage(workbook))
worksheet.merge_range(Row_Index, Col_Index, Row_Index, Col_Index + Constants.PERFORMANCE_HEADER_COUNT - 1,
currentBundle_Name,
Formats.Format_First_MD_Header(workbook, Constants.COLOR_LIGHT_BLUE))
for performanceTitle in Constants.PERFORMANCE_HEADER_LIST:
worksheet.write(Row_Index + 1, Col_Index, performanceTitle,
Formats.Format_First_MD_Header(workbook, Constants.COLOR_LIGHT_BLUE))
Col_Index += 1
return Col_Index
def Header_MD(self, worksheet, workbook, header_list, r, c):
for v in range(len(header_list)):
worksheet.merge_range(0, c, 1, c, header_list[v],
Formats.Format_First_MD_Header(workbook, Constants.COLOR_GREY))
c += 1
def TestCaseNumber_Col(self, worksheet, workbook, tc_count, count, r, c):
for v in range(tc_count * count):
worksheet.write(r, 0, v + 1, Formats.Format_Cell(workbook))
r += 1
def TestCategory_Col(self, worksheet, workbook, category_list, ConfigPerTC_Count, r, c):
# 0 1 2 3
# "Test Case ID" "Test Case Level" "Test Case Category" "Test Case Description"
# r = 2; c = 1
for k, v in Utils.AppsNameWithTestCount(category_list).items():
# print k, v
worksheet.merge_range(r, c, r + (v * ConfigPerTC_Count) - 1, c, k, Formats.Format_Cell(workbook))
r = r + (v * ConfigPerTC_Count)
def TestCaseID_Col(self, worksheet, workbook, TestcaseId_list, ConfigPerTC_Count, r, c):
# df_sorted.iloc[:, 0]
# r = 2;c = 2
for v in TestcaseId_list:
# print v
worksheet.merge_range(r, c, r + (1 * ConfigPerTC_Count) - 1, c, v, Formats.Format_Cell(workbook))
r = r + (1 * ConfigPerTC_Count)
def TestType_Col(self, worksheet, workbook, testType_list, ConfigPerTC_Count, r, c):
for v in testType_list:
# print v
worksheet.merge_range(r, c, r + ConfigPerTC_Count - 1, c, v, Formats.Format_Cell(workbook))
r = r + ConfigPerTC_Count
def Config_Col(self, worksheet, workbook, tc_count, Config_list, r, c):
for v in range(tc_count):
for config in Config_list:
worksheet.write(r, c, config, Formats.Format_Cell(workbook))
r += 1
def list_to_merge_cells(self, worksheet, targetList, TargetColumn, TestCaseId_List=[]):
if TestCaseId_List:
set_list = set(TestCaseId_List[1:])
for item in set_list:
first_index = TestCaseId_List.index(item)
last_index = len(TestCaseId_List) - 1 - TestCaseId_List[::-1].index(item)
worksheet.merge_range(TargetColumn + str(first_index + 2) + ':' + TargetColumn + str(last_index + 2),
targetList[first_index])
else:
set_list = set(targetList[1:])
for item in set_list:
first_index = targetList.index(item)
last_index = len(targetList) - 1 - targetList[::-1].index(item)
worksheet.merge_range(TargetColumn + str(first_index + 2) + ':' + TargetColumn + str(last_index + 2),
targetList[first_index])
# logging.debug( first_index, last_index)
def CreateApplicationMDTab(self):
logging.debug("Creating Application MD tabs for Final KPI Report")
# self.__Utils.ChangeDirPath(self.REPORT_PATH)
filePath = os.path.abspath(self.__Const.FINAL_REOPRT_FILE)
logging.debug(filePath)
writer = | pd.ExcelWriter(filePath, engine='xlsxwriter') | pandas.ExcelWriter |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'chengzhi'
"""
The tqsdk.ta module contains a set of commonly used technical indicator calculation functions
"""
import numpy as np
import pandas as pd
import numba
from tqsdk import ta_func
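# Usage sketch (not part of the library): each function below expects a K-line
# DataFrame with columns such as "open"/"high"/"low"/"close"/"volume" (e.g. obtained
# from TqApi.get_kline_serial) and returns a new DataFrame of indicator columns:
#     atr = ATR(klines, 14)
#     print(atr["atr"].iloc[-1])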
def ATR(df, n):
"""平均真实波幅"""
new_df = pd.DataFrame()
pre_close = df["close"].shift(1)
new_df["tr"] = np.where(df["high"] - df["low"] > np.absolute(pre_close - df["high"]),
np.where(df["high"] - df["low"] > np.absolute(pre_close - df["low"]),
df["high"] - df["low"], np.absolute(pre_close - df["low"])),
np.where(np.absolute(pre_close - df["high"]) > np.absolute(pre_close - df["low"]),
np.absolute(pre_close - df["high"]), np.absolute(pre_close - df["low"])))
new_df["atr"] = ta_func.ma(new_df["tr"], n)
return new_df
def BIAS(df, n):
"""乖离率"""
ma1 = ta_func.ma(df["close"], n)
new_df = pd.DataFrame(data=list((df["close"] - ma1) / ma1 * 100), columns=["bias"])
return new_df
def BOLL(df, n, p):
"""布林线"""
new_df = pd.DataFrame()
mid = ta_func.ma(df["close"], n)
std = df["close"].rolling(n).std()
new_df["mid"] = mid
new_df["top"] = mid + p * std
new_df["bottom"] = mid - p * std
return new_df
def DMI(df, n, m):
"""动向指标"""
new_df = pd.DataFrame()
new_df["atr"] = ATR(df, n)["atr"]
pre_high = df["high"].shift(1)
pre_low = df["low"].shift(1)
hd = df["high"] - pre_high
ld = pre_low - df["low"]
admp = ta_func.ma(pd.Series(np.where((hd > 0) & (hd > ld), hd, 0)), n)
admm = ta_func.ma(pd.Series(np.where((ld > 0) & (ld > hd), ld, 0)), n)
new_df["pdi"] = pd.Series(np.where(new_df["atr"] > 0, admp / new_df["atr"] * 100, np.NaN)).ffill()
new_df["mdi"] = pd.Series(np.where(new_df["atr"] > 0, admm / new_df["atr"] * 100, np.NaN)).ffill()
ad = pd.Series(np.absolute(new_df["mdi"] - new_df["pdi"]) / (new_df["mdi"] + new_df["pdi"]) * 100)
new_df["adx"] = ta_func.ma(ad, m)
new_df["adxr"] = (new_df["adx"] + new_df["adx"].shift(m)) / 2
return new_df
def KDJ(df, n, m1, m2):
"""随机指标"""
new_df = pd.DataFrame()
hv = df["high"].rolling(n).max()
lv = df["low"].rolling(n).min()
rsv = pd.Series(np.where(hv == lv, 0, (df["close"] - lv) / (hv - lv) * 100))
new_df["k"] = ta_func.sma(rsv, m1, 1)
new_df["d"] = ta_func.sma(new_df["k"], m2, 1)
new_df["j"] = 3 * new_df["k"] - 2 * new_df["d"]
return new_df
def MACD(df, short, long, m):
"""异同移动平均线"""
new_df = pd.DataFrame()
eshort = ta_func.ema(df["close"], short)
elong = ta_func.ema(df["close"], long)
new_df["diff"] = eshort - elong
new_df["dea"] = ta_func.ema(new_df["diff"], m)
new_df["bar"] = 2 * (new_df["diff"] - new_df["dea"])
return new_df
@numba.njit
def _sar(open, high, low, close, range_high, range_low, n, step, maximum):
sar = np.empty_like(close)
sar[:n] = np.NAN
af = 0
ep = 0
trend = 1 if (close[n] - open[n]) > 0 else -1
if trend == 1:
sar[n] = min(range_low[n - 2], low[n - 1])
else:
sar[n] = max(range_high[n - 2], high[n - 1])
for i in range(n, len(sar)):
if i != n:
if abs(trend) > 1:
sar[i] = sar[i - 1] + af * (ep - sar[i - 1])
elif trend == 1:
sar[i] = min(range_low[i - 2], low[i - 1])
elif trend == -1:
sar[i] = max(range_high[i - 2], high[i - 1])
if trend > 0:
if sar[i - 1] > low[i]:
ep = low[i]
af = step
trend = -1
else:
ep = high[i]
af = min(af + step, maximum) if ep > range_high[i - 1] else af
trend += 1
else:
if sar[i - 1] < high[i]:
ep = high[i]
af = step
trend = 1
else:
ep = low[i]
af = min(af + step, maximum) if ep < range_low[i - 1] else af
trend -= 1
return sar
def SAR(df, n, step, max):
"""抛物转向"""
range_high = df["high"].rolling(n - 1).max()
range_low = df["low"].rolling(n - 1).min()
sar = _sar(df["open"].values, df["high"].values, df["low"].values, df["close"].values, range_high.values,
range_low.values, n, step, max)
new_df = pd.DataFrame(data=sar, columns=["sar"])
return new_df
def WR(df, n):
"""威廉指标"""
hn = df["high"].rolling(n).max()
ln = df["low"].rolling(n).min()
new_df = pd.DataFrame(data=list((hn - df["close"]) / (hn - ln) * (-100)), columns=["wr"])
return new_df
def RSI(df, n):
"""相对强弱指标"""
lc = df["close"].shift(1)
rsi = ta_func.sma(pd.Series(np.where(df["close"] - lc > 0, df["close"] - lc, 0)), n, 1) / \
ta_func.sma(np.absolute(df["close"] - lc), n, 1) * 100
new_df = pd.DataFrame(data=rsi, columns=["rsi"])
return new_df
def ASI(df):
"""振动升降指标"""
lc = df["close"].shift(1) # 上一交易日的收盘价
aa = np.absolute(df["high"] - lc)
bb = np.absolute(df["low"] - lc)
cc = np.absolute(df["high"] - df["low"].shift(1))
dd = np.absolute(lc - df["open"].shift(1))
r = np.where((aa > bb) & (aa > cc), aa + bb / 2 + dd / 4,
np.where((bb > cc) & (bb > aa), bb + aa / 2 + dd / 4, cc + dd / 4))
x = df["close"] - lc + (df["close"] - df["open"]) / 2 + lc - df["open"].shift(1)
si = np.where(r == 0, 0, 16 * x / r * np.where(aa > bb, aa, bb))
new_df = pd.DataFrame(data=list(pd.Series(si).cumsum()), columns=["asi"])
return new_df
def VR(df, n):
"""VR 容量比率"""
lc = df["close"].shift(1)
vr = pd.Series(np.where(df["close"] > lc, df["volume"], 0)).rolling(n).sum() / pd.Series(
np.where(df["close"] <= lc, df["volume"], 0)).rolling(n).sum() * 100
new_df = pd.DataFrame(data=list(vr), columns=["vr"])
return new_df
def ARBR(df, n):
"""人气意愿指标"""
new_df = | pd.DataFrame() | pandas.DataFrame |
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Tests for PySMO's family of SurrogateTrainer (PysmoPolyTrainer, PysmoRBFTrainer and PysmoKrigingTrainer)
"""
import pytest
import numpy as np
import pandas as pd
import io
import os
from math import sin, cos, log, exp
from pathlib import Path
from io import StringIO
import re
import pyomo as pyo
from pyomo.environ import ConcreteModel, Var, Constraint
from pyomo.common.tempfiles import TempfileManager
from idaes.core.surrogate.pysmo import (
polynomial_regression as pr,
radial_basis_function as rbf,
kriging as krg,
)
from idaes.core.surrogate.pysmo_surrogate import (
PysmoTrainer,
PysmoPolyTrainer,
PysmoRBFTrainer,
PysmoKrigingTrainer,
PysmoSurrogate,
PysmoSurrogateTrainingResult,
PysmoTrainedSurrogate,
)
from idaes.core.surrogate.surrogate_block import SurrogateBlock
from idaes.core.util.exceptions import ConfigurationError
from idaes.core.surrogate.metrics import compute_fit_metrics
dirpath = Path(__file__).parent.resolve()
# String representation of json output for testing
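# The fixtures below cover: poly_1 (single output, input bounds), poly_2 (two outputs,
# no bounds), poly_3 (extra log/sin terms), poly_4 (extra x1/x2 ratio term),
# rbf (two-output Gaussian RBF) and krg (two-output Kriging), each serialized as the
# JSON payload the tests feed to the PySMO surrogate classes.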
jstring_poly_1 = (
'{"model_encoding": '
'{"z1": {"attr": {"regression_data_columns": ["x1", "x2"], '
'"multinomials": 1, "additional_term_expressions": [], '
'"optimal_weights_array": [[-75.26111111111476], [-8.815277777775934], [18.81527777777826], [-2.2556956302821618e-13]], '
'"final_polynomial_order": 1, '
'"errors": {"MAE": 3.772981926886132e-13, "MSE": 1.5772926701095834e-25, "R2": 1.0, "Adjusted R2": 1.0},'
' "extra_terms_feature_vector": ["IndexedParam[x1]", "IndexedParam[x2]"]}, '
'"map": {"regression_data_columns": "list", "multinomials": "str", '
'"additional_term_expressions": "list", "optimal_weights_array": "numpy", '
'"final_polynomial_order": "str", "errors": "str", "extra_terms_feature_vector": "other"}}}, '
'"input_labels": ["x1", "x2"], '
'"output_labels": ["z1"], '
'"input_bounds": {"x1": [0, 5], "x2": [0, 10]}, '
'"surrogate_type": "poly"}'
)
jstring_poly_2 = (
'{"model_encoding": '
'{"z1": {"attr": {"regression_data_columns": ["x1", "x2"], '
'"multinomials": 1, "additional_term_expressions": [], '
'"optimal_weights_array": [[-75.26111111111476], [-8.815277777775934], [18.81527777777826], [-2.2556956302821618e-13]], '
'"final_polynomial_order": 1, '
'"errors": {"MAE": 3.772981926886132e-13, "MSE": 1.5772926701095834e-25, "R2": 1.0, "Adjusted R2": 1.0},'
' "extra_terms_feature_vector": ["IndexedParam[x1]", "IndexedParam[x2]"]}, '
'"map": {"regression_data_columns": "list", "multinomials": "str", '
'"additional_term_expressions": "list", "optimal_weights_array": "numpy", '
'"final_polynomial_order": "str", "errors": "str", "extra_terms_feature_vector": "other"}}, '
'"z2": {"attr": {"regression_data_columns": ["x1", "x2"], '
'"multinomials": 1, "additional_term_expressions": [], '
'"optimal_weights_array": [[-3.0033074724377813], [0.2491731318906352], [1.7508268681094337], [-6.786238238021269e-15]], '
'"final_polynomial_order": 1, "errors": {"MAE": 1.1901590823981678e-14, "MSE": 1.5225015470765528e-28, "R2": 1.0, "Adjusted R2": 1.0}, '
'"extra_terms_feature_vector": ["IndexedParam[x1]", "IndexedParam[x2]"]}, '
'"map": {"regression_data_columns": "list", "multinomials": "str", '
'"additional_term_expressions": "list", "optimal_weights_array": "numpy", '
'"final_polynomial_order": "str", "errors": "str", "extra_terms_feature_vector": "other"}}}, '
'"input_labels": ["x1", "x2"], '
'"output_labels": ["z1", "z2"], '
'"input_bounds": null, '
'"surrogate_type": "poly"}'
)
jstring_poly_3 = (
'{"model_encoding": '
'{"z1": {"attr": {"regression_data_columns": ["x1", "x2"], '
'"multinomials": 0, "additional_term_expressions": ["log(IndexedParam[x1])", "sin(IndexedParam[x2])"], '
'"optimal_weights_array": [[-14.290243902439855], [6.4274390243899795], [3.572560975609962], [1.9753643165643098e-13], [-4.4048098502003086e-14]], '
'"final_polynomial_order": 1, '
'"errors": {"MAE": 1.4210854715202004e-14, "MSE": 2.8188629679897487e-28, "R2": 1.0},'
' "extra_terms_feature_vector": ["IndexedParam[x1]", "IndexedParam[x2]"]}, '
'"map": {"regression_data_columns": "list", "multinomials": "str", '
'"additional_term_expressions": "list", "optimal_weights_array": "numpy", '
'"final_polynomial_order": "str", "errors": "str", "extra_terms_feature_vector": "other"}}, '
'"z2": {"attr": {"regression_data_columns": ["x1", "x2"], '
'"multinomials": 0, "additional_term_expressions": ["log(IndexedParam[x1])", "sin(IndexedParam[x2])"], '
'"optimal_weights_array": [[5.704971042443143], [2.4262427606248815], [-0.42624276060821653], [-5.968545102597034e-11], [6.481176706429892e-12]], '
'"final_polynomial_order": 1, "errors": {"MAE": 3.869645344896829e-12, "MSE": 7.189162598662876e-23, "R2": 1.0}, '
'"extra_terms_feature_vector": ["IndexedParam[x1]", "IndexedParam[x2]"]}, '
'"map": {"regression_data_columns": "list", "multinomials": "str", '
'"additional_term_expressions": "list", "optimal_weights_array": "numpy", '
'"final_polynomial_order": "str", "errors": "str", "extra_terms_feature_vector": "other"}}}, '
'"input_labels": ["x1", "x2"], '
'"output_labels": ["z1", "z2"], '
'"input_bounds": null, '
'"surrogate_type": "poly"}'
)
jstring_poly_4 = (
'{"model_encoding": '
'{"z1": {"attr": {"regression_data_columns": ["x1", "x2"], '
'"multinomials": 0, "additional_term_expressions": ["IndexedParam[x1]/IndexedParam[x2]"], '
'"optimal_weights_array": [[-110.15000000001504], [-17.53750000000189], [27.537500000006148], [-5.3967136315336006e-11]], '
'"final_polynomial_order": 1, '
'"errors": {"MAE": 1.0317080523236656e-12, "MSE": 2.126880072091303e-24, "R2": 1.0},'
' "extra_terms_feature_vector": ["IndexedParam[x1]", "IndexedParam[x2]"]}, '
'"map": {"regression_data_columns": "list", "multinomials": "str", '
'"additional_term_expressions": "other", "optimal_weights_array": "numpy", '
'"final_polynomial_order": "str", "errors": "str", "extra_terms_feature_vector": "other"}}, '
'"z2": {"attr": {"regression_data_columns": ["x1", "x2"], '
'"multinomials": 0, "additional_term_expressions": ["IndexedParam[x1]/IndexedParam[x2]"], '
'"optimal_weights_array": [[-12.523574144487087], [-2.1308935361219556], [4.1308935361216435], [3.6347869158959156e-12]], '
'"final_polynomial_order": 1, "errors": {"MAE": 7.762679388179095e-14, "MSE": 6.506051429719772e-27, "R2": 1.0}, '
'"extra_terms_feature_vector": ["IndexedParam[x1]", "IndexedParam[x2]"]}, '
'"map": {"regression_data_columns": "list", "multinomials": "str", '
'"additional_term_expressions": "other", "optimal_weights_array": "numpy", '
'"final_polynomial_order": "str", "errors": "str", "extra_terms_feature_vector": "other"}}}, '
'"input_labels": ["x1", "x2"], '
'"output_labels": ["z1", "z2"], '
'"input_bounds": null, '
'"surrogate_type": "poly"}'
)
jstring_rbf = (
'{"model_encoding": '
'{"z1": {"attr": {"x_data_columns": ["x1", "x2"], '
'"x_data": [[0.0, 0.0], [0.25, 0.25], [0.5, 0.5], [0.75, 0.75], [1.0, 1.0]], '
'"centres": [[0.0, 0.0], [0.25, 0.25], [0.5, 0.5], [0.75, 0.75], [1.0, 1.0]], '
'"basis_function": "gaussian", '
'"weights": [[-69.10791015625], [-319807.1317138672], [959336.2551269531], [-959973.7440185547], [320514.66677856445]], '
'"sigma": 0.05, "regularization_parameter": 0.0, '
'"rmse": 0.0005986693684275349, "R2": 0.9999971327598984, '
'"x_data_min": [[1, 5]], "x_data_max": [[5, 9]], "y_data_min": [10], "y_data_max": [50]}, '
'"map": {"x_data_columns": "list", "x_data": "numpy", "centres": "numpy", '
'"basis_function": "str", "weights": "numpy", "sigma": "str", "regularization_parameter": "str", '
'"rmse": "str", "R2": "str", "x_data_min": "numpy", "x_data_max": "numpy", "y_data_min": "numpy", '
'"y_data_max": "numpy"}}, '
'"z2": {"attr": {"x_data_columns": ["x1", "x2"], '
'"x_data": [[0.0, 0.0], [0.25, 0.25], [0.5, 0.5], [0.75, 0.75], [1.0, 1.0]], '
'"centres": [[0.0, 0.0], [0.25, 0.25], [0.5, 0.5], [0.75, 0.75], [1.0, 1.0]], '
'"basis_function": "gaussian", "weights": [[-69.10791015625], [-319807.1317138672], [959336.2551269531], [-959973.7440185547], [320514.66677856445]], '
'"sigma": 0.05, "regularization_parameter": 0.0, '
'"rmse": 0.0005986693684275349, "R2": 0.9999971327598984, '
'"x_data_min": [[1, 5]], "x_data_max": [[5, 9]], "y_data_min": [6], "y_data_max": [14]}, '
'"map": {"x_data_columns": "list", "x_data": "numpy", "centres": "numpy", '
'"basis_function": "str", "weights": "numpy", "sigma": "str", "regularization_parameter": "str", '
'"rmse": "str", "R2": "str", "x_data_min": "numpy", "x_data_max": "numpy", "y_data_min": "numpy", '
'"y_data_max": "numpy"}}}, '
'"input_labels": ["x1", "x2"], '
'"output_labels": ["z1", "z2"], '
'"input_bounds": {"x1": [0, 5], "x2": [0, 10]}, '
'"surrogate_type": "rbf"}'
)
jstring_krg = (
'{"model_encoding": '
'{"z1": {"attr": {"x_data_columns": ["x1", "x2"], '
'"x_data": [[1, 5], [2, 6], [3, 7], [4, 8], [5, 9]], "x_data_min": [[1, 5]], "x_data_max": [[5, 9]], '
'"x_data_scaled": [[0.0, 0.0], [0.25, 0.25], [0.5, 0.5], [0.75, 0.75], [1.0, 1.0]], '
'"optimal_weights": [0.027452451845611077, 0.0010443446337808024], '
'"optimal_p": 2, "optimal_mean": [[30.00000000077694]], "optimal_variance": [[6503.3113222215325]], '
'"regularization_parameter": 1.000000000001e-06, '
'"optimal_covariance_matrix": [[1.000001, 0.9982205353479938, 0.9929011178300284, 0.9840983398813247, 0.971905407660152], '
"[0.9982205353479938, 1.000001, 0.9982205353479938, 0.9929011178300284, 0.9840983398813247], "
"[0.9929011178300284, 0.9982205353479938, 1.000001, 0.9982205353479938, 0.9929011178300284], "
"[0.9840983398813247, 0.9929011178300284, 0.9982205353479938, 1.000001, 0.9982205353479938], "
"[0.971905407660152, 0.9840983398813247, 0.9929011178300284, 0.9982205353479938, 1.000001]], "
'"covariance_matrix_inverse": [[108728.9916945844, -240226.85108007095, 82932.18571364644, 121970.72026795016, -73364.51387189297], '
"[-240226.85108202277, 589985.9891969847, -341158.67300272395, -130592.8567227173, 121970.72027126199], "
"[82932.18571952915, -341158.67301448685, 516416.75018761755, -341158.6729826693, 82932.18570353556], "
"[121970.72026201998, -130592.85670691582, -341158.6729945546, 589985.9891699858, -240226.8510697507], "
"[-73364.51386989365, 121970.72026527137, 82932.18570954115, -240226.85107176506, 108728.99169106234]], "
'"optimal_y_mu": [[-20.00000000077694], [-10.00000000077694], [-7.769394017032027e-10], [9.99999999922306], [19.99999999922306]], '
'"training_R2": 0.9999962956016578, "training_rmse": 0.02721910484270722}, '
'"map": {"x_data_columns": "list", "x_data": "numpy", "x_data_min": "numpy", "x_data_max": "numpy", '
'"x_data_scaled": "numpy", "optimal_weights": "numpy", "optimal_p": "str", "optimal_mean": "numpy", '
'"optimal_variance": "numpy", "regularization_parameter": "str", "optimal_covariance_matrix": "numpy", '
'"covariance_matrix_inverse": "numpy", "optimal_y_mu": "numpy", "training_R2": "str", "training_rmse": "str"}}, '
'"z2": {"attr": {"x_data_columns": ["x1", "x2"], '
'"x_data": [[1, 5], [2, 6], [3, 7], [4, 8], [5, 9]], "x_data_min": [[1, 5]], "x_data_max": [[5, 9]], '
'"x_data_scaled": [[0.0, 0.0], [0.25, 0.25], [0.5, 0.5], [0.75, 0.75], [1.0, 1.0]], '
'"optimal_weights": [0.02749666901085125, 0.001000000000000049], '
'"optimal_p": 2, "optimal_mean": [[9.999999999902883]], "optimal_variance": [[260.13320726701056]], '
'"regularization_parameter": 1e-06, '
'"optimal_covariance_matrix": [[1.000001, 0.998220543300601, 0.9929011494709431, 0.9840984104422155, 0.9719055315475238], '
"[0.998220543300601, 1.000001, 0.998220543300601, 0.9929011494709431, 0.9840984104422155], "
"[0.9929011494709431, 0.998220543300601, 1.000001, 0.998220543300601, 0.9929011494709431], "
"[0.9840984104422155, 0.9929011494709431, 0.998220543300601, 1.000001, 0.998220543300601], "
"[0.9719055315475238, 0.9840984104422155, 0.9929011494709431, 0.998220543300601, 1.000001]], "
'"covariance_matrix_inverse": [[108729.13455237681, -240227.09704128528, 82932.15558036882, 121970.94143487987, -73364.601633614], '
"[-240227.0970392892, 589986.4681472526, -341158.6596781079, -130593.32427863385, 121970.94144222786], "
"[82932.15557448889, -341158.6596663887, 516416.7835787105, -341158.659633822, 82932.15555811858], "
"[121970.94144067129, -130593.32429416949, -341158.6596220617, 589986.4680877628, -240227.09701875152], "
"[-73364.60163552182, 121970.94144804058, 82932.15555219717, -240227.09701673465, 108729.13454474375]], "
'"optimal_y_mu": [[-3.999999999902883], [-1.999999999902883], [9.711698112369049e-11], [2.000000000097117], [4.000000000097117]], '
'"training_R2": 0.9999962956250228, "training_rmse": 0.005443803800474329}, '
'"map": {"x_data_columns": "list", "x_data": "numpy", "x_data_min": "numpy", "x_data_max": "numpy", '
'"x_data_scaled": "numpy", "optimal_weights": "numpy", "optimal_p": "str", "optimal_mean": "numpy", '
'"optimal_variance": "numpy", "regularization_parameter": "str", "optimal_covariance_matrix": "numpy", '
'"covariance_matrix_inverse": "numpy", "optimal_y_mu": "numpy", "training_R2": "str", "training_rmse": "str"}}}, '
'"input_labels": ["x1", "x2"], '
'"output_labels": ["z1", "z2"], '
'"input_bounds": {"x1": [0, 5], "x2": [0, 10]}, '
'"surrogate_type": "kriging"}'
)
class TestSurrogateTrainingResult:
@pytest.fixture
def pysmo_output_pr(self):
data = {
"x1": [1, 2, 3, 4, 5],
"x2": [5, 6, 7, 8, 9],
"z1": [10, 20, 30, 40, 50],
}
data = pd.DataFrame(data)
init_pr = pr.PolynomialRegression(
data, data, maximum_polynomial_order=1, overwrite=True, multinomials=True
)
vars = init_pr.get_feature_vector()
init_pr.training()
return init_pr, vars
@pytest.fixture
def pysmo_output_rbf(self):
data = {
"x1": [1, 2, 3, 4, 5],
"x2": [5, 6, 7, 8, 9],
"z1": [10, 20, 30, 40, 50],
}
data = pd.DataFrame(data)
init_rbf = rbf.RadialBasisFunctions(
data, basis_function="linear", overwrite=True
)
vars = init_rbf.get_feature_vector()
init_rbf.training()
return init_rbf, vars
@pytest.fixture
def pysmo_output_krg(self):
data = {
"x1": [1, 2, 3, 4, 5],
"x2": [5, 6, 7, 8, 9],
"z1": [10, 20, 30, 40, 50],
}
data = pd.DataFrame(data)
init_krg = krg.KrigingModel(data, numerical_gradients=True, overwrite=True)
vars = init_krg.get_feature_vector()
init_krg.training()
return init_krg, vars
@pytest.mark.unit
def test_init(self):
init_func = PysmoSurrogateTrainingResult()
assert init_func.metrics == {}
assert init_func._model == None
assert init_func.expression_str == ""
@pytest.mark.unit
def test_model_poly(self, pysmo_output_pr):
out1, vars = pysmo_output_pr
init_func_poly = PysmoSurrogateTrainingResult()
init_func_poly.model = out1
assert init_func_poly.expression_str == str(
out1.generate_expression([vars[i] for i in vars.keys()])
)
assert init_func_poly._model is not None
assert isinstance(init_func_poly._model, pr.PolynomialRegression)
assert init_func_poly._model == out1
@pytest.mark.unit
def test_model_rbf(self, pysmo_output_rbf):
out2, vars = pysmo_output_rbf
init_func_rbf = PysmoSurrogateTrainingResult()
init_func_rbf.model = out2
assert init_func_rbf.expression_str == str(
out2.generate_expression([vars[i] for i in vars.keys()])
)
assert init_func_rbf._model is not None
assert isinstance(init_func_rbf._model, rbf.RadialBasisFunctions)
assert init_func_rbf._model == out2
@pytest.mark.unit
def test_model_krg(self, pysmo_output_krg):
out3, vars = pysmo_output_krg
init_func_krg = PysmoSurrogateTrainingResult()
init_func_krg.model = out3
assert init_func_krg.expression_str == str(
out3.generate_expression([vars[i] for i in vars.keys()])
)
assert init_func_krg._model is not None
assert isinstance(init_func_krg._model, krg.KrigingModel)
assert init_func_krg._model == out3
class TestTrainedSurrogate:
@pytest.fixture
def pysmo_outputs(self):
data = {
"x1": [1, 2, 3, 4, 5],
"x2": [5, 6, 7, 8, 9],
"z1": [10, 20, 30, 40, 50],
}
data = pd.DataFrame(data)
init_pr = pr.PolynomialRegression(
data, data, maximum_polynomial_order=1, overwrite=True, multinomials=True
)
vars = init_pr.get_feature_vector()
init_pr.training()
init_rbf = rbf.RadialBasisFunctions(
data, basis_function="linear", overwrite=True
)
init_rbf.get_feature_vector()
init_rbf.training()
init_krg = krg.KrigingModel(data, numerical_gradients=True, overwrite=True)
init_krg.get_feature_vector()
init_krg.training()
return init_pr, init_rbf, init_krg, vars
@pytest.mark.unit
def test_init(self):
init_func = PysmoTrainedSurrogate()
assert init_func._data == {}
assert init_func.model_type == ""
assert init_func.num_outputs == 0
assert init_func.output_labels == []
assert init_func.input_labels == None
assert init_func.input_bounds == None
init_func1 = PysmoTrainedSurrogate(model_type="poly")
assert init_func1._data == {}
assert init_func1.model_type == "poly"
assert init_func1.num_outputs == 0
assert init_func1.output_labels == []
assert init_func1.input_labels == None
assert init_func1.input_bounds == None
@pytest.mark.unit
def test_add_result(self, pysmo_outputs):
        # These need to be tested this way to make sure ``add_result`` builds out the model object properly.
out1, out2, out3, vars = pysmo_outputs
init_func = PysmoTrainedSurrogate()
outvar = "z1"
init_func.add_result(outvar, out1)
assert init_func.output_labels == ["z1"]
assert init_func._data[outvar] == out1
outvar = "z2"
init_func.add_result(outvar, out2)
assert init_func.output_labels == ["z1", "z2"]
assert init_func._data[outvar] == out2
outvar = "z3"
init_func.add_result(outvar, out3)
assert init_func.output_labels == ["z1", "z2", "z3"]
assert init_func._data[outvar] == out3
@pytest.mark.unit
def test_get_result(self, pysmo_outputs):
out1, out2, out3, vars = pysmo_outputs
init_func = PysmoTrainedSurrogate()
outvar = "z1"
init_func.add_result(outvar, out1)
outvar = "z2"
init_func.add_result(outvar, out2)
outvar = "z3"
init_func.add_result(outvar, out3)
for i in range(len(init_func.output_labels)):
assert init_func.get_result(init_func.output_labels[i]) == pysmo_outputs[i]
class TestPysmoPolyTrainer:
@pytest.fixture
def pysmo_poly_trainer(self):
data = {"x1": [1, 2, 3, 4], "x2": [5, 6, 7, 8], "z1": [10, 20, 30, 40]}
data = pd.DataFrame(data)
input_labels = ["x1", "x2"]
output_labels = ["z1"]
bnds = {"x1": (0, 5), "x2": (0, 10)}
poly_trainer = PysmoPolyTrainer(
input_labels=input_labels,
output_labels=output_labels,
input_bounds=bnds,
training_dataframe=data,
)
return poly_trainer
@pytest.mark.unit
def test_defaults(self, pysmo_poly_trainer):
# Check all defaults
assert pysmo_poly_trainer.model_type == "poly"
assert pysmo_poly_trainer.config.maximum_polynomial_order == None
assert pysmo_poly_trainer.config.multinomials == False
assert pysmo_poly_trainer.config.number_of_crossvalidations == 3
assert pysmo_poly_trainer.config.training_split == 0.8
assert pysmo_poly_trainer.config.solution_method == None
assert pysmo_poly_trainer.config.extra_features == None
@pytest.mark.unit
def test_set_polynomial_order_righttype(self, pysmo_poly_trainer):
pysmo_poly_trainer.config.maximum_polynomial_order = 3
assert pysmo_poly_trainer.config.maximum_polynomial_order == 3
@pytest.mark.unit
def test_set_polynomial_order_wrongtype(self, pysmo_poly_trainer):
with pytest.raises(
ValueError,
match="invalid value for configuration 'maximum_polynomial_order'",
):
pysmo_poly_trainer.config.maximum_polynomial_order = 3.1
@pytest.mark.unit
def test_set_polynomial_order_wrongbounds(self, pysmo_poly_trainer):
with pytest.raises(
ValueError,
match="invalid value for configuration 'maximum_polynomial_order'",
):
pysmo_poly_trainer.config.maximum_polynomial_order = 0
@pytest.mark.unit
def test_set_number_of_crossvalidations_righttype(self, pysmo_poly_trainer):
pysmo_poly_trainer.config.number_of_crossvalidations = 5
assert pysmo_poly_trainer.config.number_of_crossvalidations == 5
@pytest.mark.unit
def test_set_number_of_crossvalidations_wrongtype(self, pysmo_poly_trainer):
with pytest.raises(
ValueError,
match="invalid value for configuration 'number_of_crossvalidations'",
):
pysmo_poly_trainer.config.number_of_crossvalidations = 3.1
@pytest.mark.unit
def test_set_number_of_crossvalidations_wrongbounds(self, pysmo_poly_trainer):
with pytest.raises(
ValueError,
match="invalid value for configuration 'number_of_crossvalidations'",
):
pysmo_poly_trainer.config.number_of_crossvalidations = 0
@pytest.mark.unit
def test_set_training_split_righttype(self, pysmo_poly_trainer):
pysmo_poly_trainer.config.training_split = 0.5
assert pysmo_poly_trainer.config.training_split == 0.5
@pytest.mark.unit
def test_set_training_split_wrongbounds(self, pysmo_poly_trainer):
with pytest.raises(
ValueError, match="invalid value for configuration 'training_split'"
):
pysmo_poly_trainer.config.training_split = -0.5
@pytest.mark.unit
def test_set_solution_method_righttype_1(self, pysmo_poly_trainer):
pysmo_poly_trainer.config.solution_method = "mle"
assert pysmo_poly_trainer.config.solution_method == "mle"
@pytest.mark.unit
def test_set_solution_method_righttype_2(self, pysmo_poly_trainer):
pysmo_poly_trainer.config.solution_method = "pyomo"
assert pysmo_poly_trainer.config.solution_method == "pyomo"
@pytest.mark.unit
def test_set_solution_method_righttype_3(self, pysmo_poly_trainer):
pysmo_poly_trainer.config.solution_method = "bfgs"
assert pysmo_poly_trainer.config.solution_method == "bfgs"
@pytest.mark.unit
def test_set_solution_method_wrongtype(self, pysmo_poly_trainer):
with pytest.raises(
ValueError, match="invalid value for configuration 'solution_method'"
):
pysmo_poly_trainer.config.solution_method = "bfgh"
@pytest.mark.unit
def test_set_multinomials_righttype_1(self, pysmo_poly_trainer):
pysmo_poly_trainer.config.multinomials = True
assert pysmo_poly_trainer.config.multinomials == True
@pytest.mark.unit
def test_set_multinomials_righttype_2(self, pysmo_poly_trainer):
pysmo_poly_trainer.config.multinomials = False
assert pysmo_poly_trainer.config.multinomials == False
@pytest.mark.unit
def test_set_multinomials_righttype_3(self, pysmo_poly_trainer):
pysmo_poly_trainer.config.multinomials = "False"
assert pysmo_poly_trainer.config.multinomials == False
@pytest.mark.unit
def test_set_multinomials_righttype_4(self, pysmo_poly_trainer):
pysmo_poly_trainer.config.multinomials = "True"
assert pysmo_poly_trainer.config.multinomials == True
@pytest.mark.unit
def test_set_multinomials_righttype_5(self, pysmo_poly_trainer):
pysmo_poly_trainer.config.multinomials = 1
assert pysmo_poly_trainer.config.multinomials == True
@pytest.mark.unit
def test_set_multinomials_righttype_6(self, pysmo_poly_trainer):
pysmo_poly_trainer.config.multinomials = 0
assert pysmo_poly_trainer.config.multinomials == False
@pytest.mark.unit
def test_set_multinomials_wrongtype(self, pysmo_poly_trainer):
with pytest.raises(ValueError):
pysmo_poly_trainer.config.multinomials = 2
@pytest.mark.unit
    def test_set_extra_features_righttype_1(self, pysmo_poly_trainer):
pysmo_poly_trainer.config.extra_features = ["x1 / x2"]
assert pysmo_poly_trainer.config.extra_features == ["x1 / x2"]
@pytest.mark.unit
def test_set_extra_features_righttype_2(self, pysmo_poly_trainer):
pysmo_poly_trainer.config.extra_features = ["x1 / x2", "sin(x1)"]
assert pysmo_poly_trainer.config.extra_features == ["x1 / x2", "sin(x1)"]
@pytest.mark.unit
    def test_set_extra_features_wrongtype_1(self, pysmo_poly_trainer):
with pytest.raises(NameError):
pysmo_poly_trainer.config.extra_features = x1 / x2
@pytest.mark.unit
    def test_set_extra_features_wrongtype_2(self, pysmo_poly_trainer):
with pytest.raises(ValueError):
pysmo_poly_trainer.config.extra_features = 10
@pytest.mark.unit
def test_create_model_no_extra_features(self, pysmo_poly_trainer):
pysmo_poly_trainer.config.multinomials = 1
pysmo_poly_trainer.config.maximum_polynomial_order = 1
pysmo_poly_trainer.config.solution_method = "mle"
pysmo_poly_trainer.config.number_of_crossvalidations = 2
pysmo_poly_trainer.config.training_split = 0.9
output_label = "z5"
data = {"x1": [1, 2, 3, 4], "x2": [5, 6, 7, 8], "z1": [10, 20, 30, 40]}
data = pd.DataFrame(data)
model = pysmo_poly_trainer._create_model(data, output_label)
assert (
model.max_polynomial_order
== pysmo_poly_trainer.config.maximum_polynomial_order
)
assert model.overwrite == True
assert model.multinomials == pysmo_poly_trainer.config.multinomials
assert model.solution_method == "mle"
assert (
model.number_of_crossvalidations
== pysmo_poly_trainer.config.number_of_crossvalidations
)
assert model.fraction_training == 0.9
assert model.filename == "solution.pickle"
assert model.number_of_x_vars == data.shape[1] - 1
assert model.additional_term_expressions == []
assert model.extra_terms_feature_vector == None
np.testing.assert_array_equal(model.original_data, data.values)
np.testing.assert_array_equal(model.regression_data, data.values)
assert model.regression_data_columns == data.columns.tolist()[:-1]
assert list(model.feature_list._data.keys()) == data.columns.tolist()[:-1]
@pytest.mark.unit
def test_create_model_with_extra_features(self, pysmo_poly_trainer):
pysmo_poly_trainer.config.multinomials = 0
pysmo_poly_trainer.config.maximum_polynomial_order = 2
pysmo_poly_trainer.config.solution_method = "mle"
pysmo_poly_trainer.config.number_of_crossvalidations = 2
pysmo_poly_trainer.config.training_split = 0.9
pysmo_poly_trainer.config.extra_features = [
"sin(x1)/cos(x2)",
"log(x1)*sin(x2)",
"x1/x2",
]
output_label = "z1"
data = {
"x1": [1, 2, 3, 4, 5, 6, 7, 8],
"x2": [5, 6, 7, 8, 9, 10, 11, 12],
"z1": [10, 20, 30, 40, 50, 60, 70, 80],
}
data = pd.DataFrame(data)
model = pysmo_poly_trainer._create_model(data, output_label)
assert model.overwrite == True
assert model.multinomials == pysmo_poly_trainer.config.multinomials
assert model.solution_method == "mle"
assert (
model.number_of_crossvalidations
== pysmo_poly_trainer.config.number_of_crossvalidations
)
assert (
model.max_polynomial_order
== pysmo_poly_trainer.config.maximum_polynomial_order
)
assert model.fraction_training == 0.9
assert model.filename == "solution.pickle"
assert model.number_of_x_vars == data.shape[1] - 1
np.testing.assert_array_equal(model.original_data, data.values)
np.testing.assert_array_equal(model.regression_data, data.values)
assert model.regression_data_columns == data.columns.tolist()[:-1]
assert list(model.feature_list._data.keys()) == data.columns.tolist()[:-1]
assert len(model.additional_term_expressions) == 3
assert isinstance(model.additional_term_expressions, list)
assert isinstance(
model.additional_term_expressions[0],
pyo.core.expr.numeric_expr.NPV_DivisionExpression,
)
assert isinstance(
model.additional_term_expressions[1],
pyo.core.expr.numeric_expr.ProductExpression,
)
assert isinstance(
model.additional_term_expressions[2],
pyo.core.expr.numeric_expr.NPV_DivisionExpression,
)
assert (
str(model.additional_term_expressions[0])
== "sin(IndexedParam[x1])/cos(IndexedParam[x2])"
)
assert (
str(model.additional_term_expressions[1])
== "log(IndexedParam[x1])*sin(IndexedParam[x2])"
)
assert (
str(model.additional_term_expressions[2])
== "IndexedParam[x1]/IndexedParam[x2]"
)
assert model.extra_terms_feature_vector == None
class TestPysmoRBFTrainer:
@pytest.fixture
def pysmo_rbf_trainer(self):
data = {"x1": [1, 2, 3, 4], "x2": [5, 6, 7, 8], "z1": [10, 20, 30, 40]}
data = pd.DataFrame(data)
input_labels = ["x1", "x2"]
output_labels = ["z1"]
bnds = {"x1": (0, 5), "x2": (0, 10)}
rbf_trainer = PysmoRBFTrainer(
input_labels=input_labels,
output_labels=output_labels,
input_bounds=bnds,
training_dataframe=data,
)
return rbf_trainer
@pytest.mark.unit
def test_defaults(self, pysmo_rbf_trainer):
# Check all defaults
assert pysmo_rbf_trainer.model_type == "None rbf"
assert pysmo_rbf_trainer.config.basis_function == None
assert pysmo_rbf_trainer.config.regularization == None
assert pysmo_rbf_trainer.config.solution_method == None
@pytest.mark.unit
def test_set_basis_function_righttype_1(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.basis_function = "linear"
assert pysmo_rbf_trainer.config.basis_function == "linear"
@pytest.mark.unit
def test_set_basis_function_righttype_2(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.basis_function = "cubic"
assert pysmo_rbf_trainer.config.basis_function == "cubic"
@pytest.mark.unit
def test_set_basis_function_righttype_3(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.basis_function = "imq"
assert pysmo_rbf_trainer.config.basis_function == "imq"
@pytest.mark.unit
def test_set_basis_function_righttype_4(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.basis_function = "mq"
assert pysmo_rbf_trainer.config.basis_function == "mq"
@pytest.mark.unit
def test_set_basis_function_righttype_5(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.basis_function = "gaussian"
assert pysmo_rbf_trainer.config.basis_function == "gaussian"
@pytest.mark.unit
def test_set_basis_function_righttype_6(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.basis_function = "spline"
assert pysmo_rbf_trainer.config.basis_function == "spline"
@pytest.mark.unit
def test_set_basis_function_outdomain(self, pysmo_rbf_trainer):
with pytest.raises(
ValueError, match="invalid value for configuration 'basis_function'"
):
pysmo_rbf_trainer.config.basis_function = "mqimq"
@pytest.mark.unit
def test_set_solution_method_righttype_1(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.solution_method = "algebraic"
assert pysmo_rbf_trainer.config.solution_method == "algebraic"
@pytest.mark.unit
def test_set_solution_method_righttype_2(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.solution_method = "pyomo"
assert pysmo_rbf_trainer.config.solution_method == "pyomo"
@pytest.mark.unit
def test_set_solution_method_righttype_3(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.solution_method = "bfgs"
assert pysmo_rbf_trainer.config.solution_method == "bfgs"
@pytest.mark.unit
def test_set_solution_method_wrongtype(self, pysmo_rbf_trainer):
with pytest.raises(
ValueError, match="invalid value for configuration 'solution_method'"
):
pysmo_rbf_trainer.config.solution_method = "mle"
@pytest.mark.unit
def test_set_regularization_righttype_1(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.regularization = True
assert pysmo_rbf_trainer.config.regularization == True
@pytest.mark.unit
def test_set_regularization_righttype_2(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.regularization = False
assert pysmo_rbf_trainer.config.regularization == False
@pytest.mark.unit
def test_set_regularization_righttype_3(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.regularization = "False"
assert pysmo_rbf_trainer.config.regularization == False
@pytest.mark.unit
def test_set_regularization_righttype_4(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.regularization = "True"
assert pysmo_rbf_trainer.config.regularization == True
@pytest.mark.unit
def test_set_regularization_righttype_5(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.regularization = 1
assert pysmo_rbf_trainer.config.regularization == True
@pytest.mark.unit
def test_set_regularization_righttype_6(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.regularization = 0
assert pysmo_rbf_trainer.config.regularization == False
@pytest.mark.unit
def test_set_regularization_wrongtype(self, pysmo_rbf_trainer):
with pytest.raises(ValueError):
pysmo_rbf_trainer.config.regularization = 2
@pytest.mark.unit
def test_create_model_defaults(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.basis_function = None
pysmo_rbf_trainer.config.regularization = "True"
pysmo_rbf_trainer.config.solution_method = None
output_label = "z5"
data = {"x1": [1, 2, 3, 4], "x2": [5, 6, 7, 8], "z1": [10, 20, 30, 40]}
data = pd.DataFrame(data)
model = pysmo_rbf_trainer._create_model(data, output_label)
assert model.x_data_columns == ["x1", "x2"]
np.testing.assert_array_equal(model.x_data_unscaled, data.values[:, :-1])
np.testing.assert_array_equal(model.y_data_unscaled[:, 0], data.values[:, -1])
assert model.overwrite == True
assert model.basis_function == "gaussian"
assert model.regularization == True
assert model.solution_method == "algebraic"
# assert model.filename == 'pysmo_Nonerbf_z5.pickle'
assert list(model.feature_list._data.keys()) == data.columns.tolist()[:-1]
@pytest.mark.unit
def test_create_model_cubic(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.basis_function = "cubic"
pysmo_rbf_trainer.config.regularization = "False"
pysmo_rbf_trainer.config.solution_method = "pyomo"
output_label = "z5"
data = {"x1": [1, 2, 3, 4], "x2": [5, 6, 7, 8], "z1": [10, 20, 30, 40]}
data = pd.DataFrame(data)
model = pysmo_rbf_trainer._create_model(data, output_label)
assert model.x_data_columns == ["x1", "x2"]
np.testing.assert_array_equal(model.x_data_unscaled, data.values[:, :-1])
np.testing.assert_array_equal(model.y_data_unscaled[:, 0], data.values[:, -1])
assert model.overwrite == True
assert model.basis_function == "cubic"
assert model.regularization == False
assert model.solution_method == "pyomo"
assert model.filename == "solution.pickle"
assert list(model.feature_list._data.keys()) == data.columns.tolist()[:-1]
@pytest.mark.unit
def test_create_model_imq(self, pysmo_rbf_trainer):
pysmo_rbf_trainer.config.basis_function = "imq"
pysmo_rbf_trainer.config.regularization = True
pysmo_rbf_trainer.config.solution_method = "bfgs"
output_label = "z5"
data = {"x1": [1, 2, 3, 4], "x2": [5, 6, 7, 8], "z1": [10, 20, 30, 40]}
data = pd.DataFrame(data)
model = pysmo_rbf_trainer._create_model(data, output_label)
assert model.x_data_columns == ["x1", "x2"]
np.testing.assert_array_equal(model.x_data_unscaled, data.values[:, :-1])
np.testing.assert_array_equal(model.y_data_unscaled[:, 0], data.values[:, -1])
assert model.overwrite == True
assert model.basis_function == "imq"
assert model.regularization == True
assert model.solution_method == "bfgs"
# assert model.filename == 'pysmo_Nonerbf_z5.pickle'
assert list(model.feature_list._data.keys()) == data.columns.tolist()[:-1]
class TestPysmoKrigingTrainer:
@pytest.fixture
def pysmo_krg_trainer(self):
data = {"x1": [1, 2, 3, 4], "x2": [5, 6, 7, 8], "z1": [10, 20, 30, 40]}
data = pd.DataFrame(data)
input_labels = ["x1", "x2"]
output_labels = ["z1"]
bnds = {"x1": (0, 5), "x2": (0, 10)}
krg_trainer = PysmoKrigingTrainer(
input_labels=input_labels,
output_labels=output_labels,
input_bounds=bnds,
training_dataframe=data,
)
return krg_trainer
@pytest.mark.unit
def test_defaults(self, pysmo_krg_trainer):
# Check all defaults
assert pysmo_krg_trainer.model_type == "kriging"
assert pysmo_krg_trainer.config.numerical_gradients == True
assert pysmo_krg_trainer.config.regularization == True
@pytest.mark.unit
def test_set_regularization_righttype_1(self, pysmo_krg_trainer):
pysmo_krg_trainer.config.regularization = True
assert pysmo_krg_trainer.config.regularization == True
@pytest.mark.unit
def test_set_regularization_righttype_2(self, pysmo_krg_trainer):
pysmo_krg_trainer.config.regularization = False
assert pysmo_krg_trainer.config.regularization == False
@pytest.mark.unit
def test_set_regularization_righttype_3(self, pysmo_krg_trainer):
pysmo_krg_trainer.config.regularization = "False"
assert pysmo_krg_trainer.config.regularization == False
@pytest.mark.unit
def test_set_regularization_righttype_4(self, pysmo_krg_trainer):
pysmo_krg_trainer.config.regularization = "True"
assert pysmo_krg_trainer.config.regularization == True
@pytest.mark.unit
def test_set_regularization_righttype_5(self, pysmo_krg_trainer):
pysmo_krg_trainer.config.regularization = 1
assert pysmo_krg_trainer.config.regularization == True
@pytest.mark.unit
def test_set_regularization_righttype_6(self, pysmo_krg_trainer):
pysmo_krg_trainer.config.regularization = 0
assert pysmo_krg_trainer.config.regularization == False
@pytest.mark.unit
def test_set_regularization_wrongtype(self, pysmo_krg_trainer):
with pytest.raises(ValueError):
pysmo_krg_trainer.config.regularization = 2
@pytest.mark.unit
def test_set_numerical_gradients_righttype_1(self, pysmo_krg_trainer):
pysmo_krg_trainer.config.numerical_gradients = True
assert pysmo_krg_trainer.config.numerical_gradients == True
@pytest.mark.unit
def test_set_numerical_gradients_righttype_2(self, pysmo_krg_trainer):
pysmo_krg_trainer.config.numerical_gradients = False
assert pysmo_krg_trainer.config.numerical_gradients == False
@pytest.mark.unit
def test_set_numerical_gradients_righttype_3(self, pysmo_krg_trainer):
pysmo_krg_trainer.config.numerical_gradients = "False"
assert pysmo_krg_trainer.config.numerical_gradients == False
@pytest.mark.unit
def test_set_numerical_gradients_righttype_4(self, pysmo_krg_trainer):
pysmo_krg_trainer.config.numerical_gradients = "True"
assert pysmo_krg_trainer.config.numerical_gradients == True
@pytest.mark.unit
def test_set_numerical_gradients_righttype_5(self, pysmo_krg_trainer):
pysmo_krg_trainer.config.numerical_gradients = 1
assert pysmo_krg_trainer.config.numerical_gradients == True
@pytest.mark.unit
def test_set_numerical_gradients_righttype_6(self, pysmo_krg_trainer):
pysmo_krg_trainer.config.numerical_gradients = 0
assert pysmo_krg_trainer.config.numerical_gradients == False
@pytest.mark.unit
def test_set_numerical_gradients_wrongtype(self, pysmo_krg_trainer):
with pytest.raises(ValueError):
pysmo_krg_trainer.config.numerical_gradients = 2
@pytest.mark.unit
def test_create_model_defaults(self, pysmo_krg_trainer):
output_label = "z5"
data = {"x1": [1, 2, 3, 4], "x2": [5, 6, 7, 8], "z1": [10, 20, 30, 40]}
data = | pd.DataFrame(data) | pandas.DataFrame |
import seaborn as sb
from matplotlib import pyplot as plt
import pandas as pd
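# MultipleResults: accumulates per-round records of the tracked state variables
# across trials for a single model and exposes seaborn-based plotting helpers.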
class MultipleResults:
index_keys = ['round', 'trial']
def __init__(self, model_name, **initial_state):
self.model_name = model_name
self.state_vars = initial_state
for k, v in initial_state.items():
vars(self)[k] = list()
def add_state(self, trial=None, **update_state):
for k in self.state_vars.keys():
vars(self)[k].extend([{'round':i, 'trial':trial, k:j} for i, j in enumerate(update_state[k])])
def add_results(self, **results):
for k in self.state_vars.keys():
vars(self)[k].extend(results[k])
@property
def get_state(self):
return {k: vars(self)[k] for k in self.state_vars}
@staticmethod
def lineplot(data, x, y, **kwargs):
return sb.lineplot(data=data, x=x, y=y, legend='brief', **kwargs)
@staticmethod
def histplot(data, x, y, **kwargs):
return sb.histplot(data=data, x=y, stat="probability", legend=True, **kwargs)
def save_state(self, path):
data_keys = self.get_state.keys()
data = | pd.DataFrame(columns=MultipleResults.index_keys) | pandas.DataFrame |
import json
import logging
import os
import sys
from pathlib import Path
from typing import Union
import fire
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from smart_open import open
from tqdm import tqdm
from cord19.preprocessing.negative_sampling import get_cocitations
from cord19.utils import get_sorted_pair, to_label
from cord19.preprocessing.cord19_reader import get_papers_and_citations_from_cord19, merge_cord19_and_s2_papers
from cord19.preprocessing.negative_sampling import get_negative_pairs
from cord19.utils import normalize_section, resolve_and_sect_titles, get_text_from_doi
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
def save_dataset(input_dir: Union[str, Path], output_dir: Union[str, Path], cv_folds: int = 4):
"""
Run with: $ python -m cord19.dataset save_dataset <input_dir> <output_dir>
input_dir = '/home/mostendorff/datasets/cord-19/'
output_dir = '/home/mostendorff/datasets/cord-19/dataset/'
cv_folds = 4
input_dir/metadata.csv
input_dir/doi2paper.json.gz
input_dir/<subsets> = ['biorxiv_medrxiv', 'comm_use_subset', 'custom_license', 'noncomm_use_subset']
output_dir/docs.jsonl
output_dir/folds/1/train.csv
output_dir/folds/1/test.csv
tar -cvzf cord19_docrel.tar.gz docs.jsonl folds/
curl --upload-file cord19_docrel.tar.gz ftp://$FTP_LOGIN:[email protected]/cloud.ostendorff.org/static/
:param input_dir: Path to directory with input files
:param output_dir: Output files are written to this dir
:param cv_folds: Number of folds in k-fold cross validation
"""
label_col = 'label'
negative_label = 'none'
min_text_length = 50
negative_sampling_ratio = 0.5
doc_a_col = 'from_doi'
doc_b_col = 'to_doi'
labels = [
'discussion',
'introduction',
'conclusion',
'results',
'methods',
'background',
'materials',
'virus',
'future work'
]
# input_dir = os.path.join(env['datasets_dir'], 'cord-19')
# Convert dirs to Path if is string
if isinstance(output_dir, str):
output_dir = Path(output_dir)
if isinstance(input_dir, str):
input_dir = Path(input_dir)
# Read meta data
meta_df = | pd.read_csv(input_dir / 'metadata.csv', dtype={'doi': str, 'journal': str}) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# # Project 5: NLP on Financial Statements
# ## Instructions
# Each problem consists of a function to implement and instructions on how to implement the function. The parts of the function that need to be implemented are marked with a `# TODO` comment. After implementing the function, run the cell to test it against the unit tests we've provided. For each problem, we provide one or more unit tests from our `project_tests` package. These unit tests won't tell you if your answer is correct, but will warn you of any major errors. Your code will be checked for the correct solution when you submit it to Udacity.
#
# ## Packages
# When you implement the functions, you'll only need to use the packages you've used in the classroom, like [Pandas](https://pandas.pydata.org/) and [Numpy](http://www.numpy.org/). These packages will be imported for you. We recommend you don't add any import statements, otherwise the grader might not be able to run your code.
#
# The other packages that we're importing are `project_helper` and `project_tests`. These are custom packages built to help you solve the problems. The `project_helper` module contains utility functions and graph functions. The `project_tests` contains the unit tests for all the problems.
#
# ### Install Packages
# In[1]:
import sys
get_ipython().system('{sys.executable} -m pip install -r requirements.txt')
# ### Load Packages
# In[2]:
import nltk
import numpy as np
import pandas as pd
import pickle
import pprint
import project_helper
import project_tests
from tqdm import tqdm
# ### Download NLP Corpora
# You'll need two corpora to run this project: the stopwords corpus for removing stopwords and wordnet for lemmatizing.
# In[3]:
nltk.download('stopwords')
nltk.download('wordnet')
# ## Get 10ks
# We'll be running NLP analysis on 10-k documents. To do that, we first need to download the documents. For this project, we'll download 10-ks for a few companies. To lookup documents for these companies, we'll use their CIK. If you would like to run this against other stocks, we've provided the dict `additional_cik` for more stocks. However, the more stocks you try, the longer it will take to run.
# In[4]:
cik_lookup = {
'AMZN': '0001018724',
'BMY': '0000014272',
'CNP': '0001130310',
'CVX': '0000093410',
'FL': '0000850209',
'FRT': '0000034903',
'HON': '0000773840'}
additional_cik = {
'AEP': '0000004904',
'AXP': '0000004962',
'BA': '0000012927',
'BK': '0001390777',
'CAT': '0000018230',
'DE': '0000315189',
'DIS': '0001001039',
'DTE': '0000936340',
'ED': '0001047862',
'EMR': '0000032604',
'ETN': '0001551182',
'GE': '0000040545',
'IBM': '0000051143',
'IP': '0000051434',
'JNJ': '0000200406',
'KO': '0000021344',
'LLY': '0000059478',
'MCD': '0000063908',
'MO': '0000764180',
'MRK': '0000310158',
'MRO': '0000101778',
'PCG': '0001004980',
'PEP': '0000077476',
'PFE': '0000078003',
'PG': '0000080424',
'PNR': '0000077360',
'SYY': '0000096021',
'TXN': '0000097476',
'UTX': '0000101829',
'WFC': '0000072971',
'WMT': '0000104169',
'WY': '0000106535',
'XOM': '0000034088'}
# ### Get list of 10-ks
# The SEC has a limit on the number of calls you can make to the website per second. In order to avoid hiding that limit, we've created the `SecAPI` class. This will cache data from the SEC and prevent you from going over the limit.
# In[5]:
sec_api = project_helper.SecAPI()
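# For intuition only: a minimal sketch of what a caching, rate-limited GET helper
# like `SecAPI` might look like. This is NOT the actual `project_helper.SecAPI`
# implementation; the request limit and names below are assumptions.
import time


class _IllustrativeSecAPI:
    def __init__(self, fetch, max_calls_per_sec=10):
        self._fetch = fetch                   # callable: url -> response text
        self._cache = {}                      # url -> cached response text
        self._min_interval = 1.0 / max_calls_per_sec
        self._last_call = 0.0

    def get(self, url):
        if url in self._cache:                # repeated requests never hit the SEC
            return self._cache[url]
        wait = self._min_interval - (time.time() - self._last_call)
        if wait > 0:                          # throttle to stay under the limit
            time.sleep(wait)
        self._last_call = time.time()
        self._cache[url] = self._fetch(url)
        return self._cache[url]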
# With the class constructed, let's pull a list of filled 10-ks from the SEC for each company.
# In[6]:
from bs4 import BeautifulSoup
def get_sec_data(cik, doc_type, start=0, count=60):
newest_pricing_data = pd.to_datetime('2018-01-01')
rss_url = 'https://www.sec.gov/cgi-bin/browse-edgar?action=getcompany' '&CIK={}&type={}&start={}&count={}&owner=exclude&output=atom' .format(cik, doc_type, start, count)
sec_data = sec_api.get(rss_url)
feed = BeautifulSoup(sec_data.encode('ascii'), 'xml').feed
entries = [
(
entry.content.find('filing-href').getText(),
entry.content.find('filing-type').getText(),
entry.content.find('filing-date').getText())
for entry in feed.find_all('entry', recursive=False)
if pd.to_datetime(entry.content.find('filing-date').getText()) <= newest_pricing_data]
return entries
# Let's pull the list using the `get_sec_data` function, then display some of the results. For displaying some of the data, we'll use Amazon as an example.
# In[7]:
example_ticker = 'AMZN'
sec_data = {}
for ticker, cik in cik_lookup.items():
sec_data[ticker] = get_sec_data(cik, '10-K')
pprint.pprint(sec_data[example_ticker][:5])
# ### Download 10-ks
# As you see, this is a list of urls. These urls point to a file that contains metadata related to each filling. Since we don't care about the metadata, we'll pull the filling by replacing the url with the filling url.
# In[8]:
raw_fillings_by_ticker = {}
for ticker, data in sec_data.items():
raw_fillings_by_ticker[ticker] = {}
for index_url, file_type, file_date in tqdm(data, desc='Downloading {} Fillings'.format(ticker), unit='filling'):
if (file_type == '10-K'):
file_url = index_url.replace('-index.htm', '.txt').replace('.txtl', '.txt')
raw_fillings_by_ticker[ticker][file_date] = sec_api.get(file_url)
print('Example Document:\n\n{}...'.format(next(iter(raw_fillings_by_ticker[example_ticker].values()))[:1000]))
# ### Get Documents
# With these fillings downloaded, we want to break them into their associated documents. These documents are sectioned off in the fillings with the tags `<DOCUMENT>` for the start of each document and `</DOCUMENT>` for the end of each document. There's no overlap with these documents, so each `</DOCUMENT>` tag should come after the `<DOCUMENT>` with no `<DOCUMENT>` tag in between.
#
# Implement `get_documents` to return a list of these documents from a filling. Make sure not to include the tag in the returned document text.
# In[9]:
import re
def get_documents(text):
"""
Extract the documents from the text
Parameters
----------
text : str
The text with the document strings inside
Returns
-------
extracted_docs : list of str
The document strings found in `text`
"""
# TODO: Implement
document_start= re.compile(r'<DOCUMENT>')
document_end= re.compile(r'</DOCUMENT>')
document_start_is = [x.end() for x in document_start.finditer(text)]
document_end_is = [x.start() for x in document_end.finditer(text)]
text_=[]
for document_start_i, document_end_i in zip(document_start_is, document_end_is):
text_.append(text[document_start_i:document_end_i])
return text_
project_tests.test_get_documents(get_documents)
# With the `get_documents` function implemented, let's extract all the documents.
# In[10]:
filling_documents_by_ticker = {}
for ticker, raw_fillings in raw_fillings_by_ticker.items():
filling_documents_by_ticker[ticker] = {}
for file_date, filling in tqdm(raw_fillings.items(), desc='Getting Documents from {} Fillings'.format(ticker), unit='filling'):
filling_documents_by_ticker[ticker][file_date] = get_documents(filling)
print('\n\n'.join([
'Document {} Filed on {}:\n{}...'.format(doc_i, file_date, doc[:200])
for file_date, docs in filling_documents_by_ticker[example_ticker].items()
for doc_i, doc in enumerate(docs)][:3]))
# ### Get Document Types
# Now that we have all the documents, we want to find the 10-k form in this 10-k filing. Implement the `get_document_type` function to return the type of document given. The document type is located on a line with the `<TYPE>` tag. For example, a form of type "TEST" would have the line `<TYPE>TEST`. Make sure to return the type as lowercase, so this example would be returned as "test".
# In[11]:
def get_document_type(doc):
"""
Return the document type lowercased
Parameters
----------
doc : str
The document string
Returns
-------
doc_type : str
The document type lowercased
"""
type_pattern=re.compile(r'<TYPE>[^\n]+')
type_i=[x[len('<TYPE>'):] for x in type_pattern.findall(doc) ]
# TODO: Implement
return type_i[0].lower()
project_tests.test_get_document_type(get_document_type)
# With the `get_document_type` function, we'll filter out all non 10-k documents.
# In[12]:
ten_ks_by_ticker = {}
for ticker, filling_documents in filling_documents_by_ticker.items():
ten_ks_by_ticker[ticker] = []
for file_date, documents in filling_documents.items():
for document in documents:
if get_document_type(document) == '10-k':
ten_ks_by_ticker[ticker].append({
'cik': cik_lookup[ticker],
'file': document,
'file_date': file_date})
project_helper.print_ten_k_data(ten_ks_by_ticker[example_ticker][:5], ['cik', 'file', 'file_date'])
# ## Preprocess the Data
# ### Clean Up
# As you can see, the text for the documents are very messy. To clean this up, we'll remove the html and lowercase all the text.
# In[13]:
def remove_html_tags(text):
text = BeautifulSoup(text, 'html.parser').get_text()
return text
def clean_text(text):
text = text.lower()
text = remove_html_tags(text)
return text
# Using the `clean_text` function, we'll clean up all the documents.
# In[14]:
for ticker, ten_ks in ten_ks_by_ticker.items():
for ten_k in tqdm(ten_ks, desc='Cleaning {} 10-Ks'.format(ticker), unit='10-K'):
ten_k['file_clean'] = clean_text(ten_k['file'])
project_helper.print_ten_k_data(ten_ks_by_ticker[example_ticker][:5], ['file_clean'])
# ### Lemmatize
# With the text cleaned up, it's time to distill the verbs down. Implement the `lemmatize_words` function to lemmatize verbs in the list of words provided.
# In[15]:
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet
def lemmatize_words(words):
"""
Lemmatize words
Parameters
----------
words : list of str
List of words
Returns
-------
lemmatized_words : list of str
List of lemmatized words
"""
return [WordNetLemmatizer().lemmatize(x, wordnet.VERB) for x in words]
project_tests.test_lemmatize_words(lemmatize_words)
# With the `lemmatize_words` function implemented, let's lemmatize all the data.
# In[16]:
word_pattern = re.compile(r'\w+')
for ticker, ten_ks in ten_ks_by_ticker.items():
for ten_k in tqdm(ten_ks, desc='Lemmatize {} 10-Ks'.format(ticker), unit='10-K'):
ten_k['file_lemma'] = lemmatize_words(word_pattern.findall(ten_k['file_clean']))
project_helper.print_ten_k_data(ten_ks_by_ticker[example_ticker][:5], ['file_lemma'])
# ### Remove Stopwords
# In[17]:
from nltk.corpus import stopwords
lemma_english_stopwords = lemmatize_words(stopwords.words('english'))
for ticker, ten_ks in ten_ks_by_ticker.items():
for ten_k in tqdm(ten_ks, desc='Remove Stop Words for {} 10-Ks'.format(ticker), unit='10-K'):
ten_k['file_lemma'] = [word for word in ten_k['file_lemma'] if word not in lemma_english_stopwords]
print('Stop Words Removed')
# ## Analysis on 10ks
# ### Loughran McDonald Sentiment Word Lists
# We'll be using the Loughran and McDonald sentiment word lists. These word lists cover the following sentiment:
# - Negative
# - Positive
# - Uncertainty
# - Litigious
# - Constraining
# - Superfluous
# - Modal
#
# This will allow us to do the sentiment analysis on the 10-ks. Let's first load these word lists. We'll be looking into a few of these sentiments.
# In[18]:
import os
sentiments = ['negative', 'positive', 'uncertainty', 'litigious', 'constraining', 'interesting']
sentiment_df = pd.read_csv(os.path.join('..', '..', 'data', 'project_5_loughran_mcdonald', 'loughran_mcdonald_master_dic_2016.csv'))
sentiment_df.columns = [column.lower() for column in sentiment_df.columns] # Lowercase the columns for ease of use
# Remove unused information
sentiment_df = sentiment_df[sentiments + ['word']]
sentiment_df[sentiments] = sentiment_df[sentiments].astype(bool)
sentiment_df = sentiment_df[sentiment_df[sentiments].any(axis=1)]
# Apply the same preprocessing to these words as the 10-k words
sentiment_df['word'] = lemmatize_words(sentiment_df['word'].str.lower())
sentiment_df = sentiment_df.drop_duplicates('word')
sentiment_df.head()
# ### Bag of Words
# Using the sentiment word lists, let's generate sentiment bag of words from the 10-k documents. Implement `get_bag_of_words` to generate a bag of words that counts the number of sentiment words in each doc. You can ignore words that are not in `sentiment_words`.
# In[56]:
from collections import defaultdict, Counter
from sklearn.feature_extraction.text import CountVectorizer
def get_bag_of_words(sentiment_words, docs):
"""
Generate a bag of words from documents for a certain sentiment
Parameters
----------
sentiment_words: Pandas Series
Words that signify a certain sentiment
docs : list of str
List of documents used to generate bag of words
Returns
-------
bag_of_words : 2-d Numpy Ndarray of int
Bag of words sentiment for each document
The first dimension is the document.
The second dimension is the word.
"""
# TODO: Implement
v = CountVectorizer()
v.fit(sentiment_words)
return v.transform(docs).toarray()
project_tests.test_get_bag_of_words(get_bag_of_words)
# Using the `get_bag_of_words` function, we'll generate a bag of words for all the documents.
# In[57]:
sentiment_bow_ten_ks = {}
for ticker, ten_ks in ten_ks_by_ticker.items():
lemma_docs = [' '.join(ten_k['file_lemma']) for ten_k in ten_ks]
sentiment_bow_ten_ks[ticker] = {
sentiment: get_bag_of_words(sentiment_df[sentiment_df[sentiment]]['word'], lemma_docs)
for sentiment in sentiments}
project_helper.print_ten_k_data([sentiment_bow_ten_ks[example_ticker]], sentiments)
# ### Jaccard Similarity
# Using the bag of words, let's calculate the jaccard similarity on the bag of words and plot it over time. Implement `get_jaccard_similarity` to return the jaccard similarities between each tick in time. Since the input, `bag_of_words_matrix`, is a bag of words for each time period in order, you just need to compute the jaccard similarities for each neighboring bag of words. Make sure to turn the bag of words into a boolean array when calculating the jaccard similarity.
# In[59]:
from sklearn.metrics import jaccard_similarity_score
def get_jaccard_similarity(bag_of_words_matrix):
"""
Get jaccard similarities for neighboring documents
Parameters
----------
bag_of_words : 2-d Numpy Ndarray of int
Bag of words sentiment for each document
The first dimension is the document.
The second dimension is the word.
Returns
-------
jaccard_similarities : list of float
Jaccard similarities for neighboring documents
"""
# TODO: Implement
j_s=[]
for i in range(bag_of_words_matrix.shape[0]-1):
score_=jaccard_similarity_score(bag_of_words_matrix.astype('bool')[i],bag_of_words_matrix.astype('bool')[i+1])
j_s.append(score_)
return j_s
project_tests.test_get_jaccard_similarity(get_jaccard_similarity)
# Using the `get_jaccard_similarity` function, let's plot the similarities over time.
# In[60]:
# Get dates for the universe
file_dates = {
ticker: [ten_k['file_date'] for ten_k in ten_ks]
for ticker, ten_ks in ten_ks_by_ticker.items()}
jaccard_similarities = {
ticker: {
sentiment_name: get_jaccard_similarity(sentiment_values)
for sentiment_name, sentiment_values in ten_k_sentiments.items()}
for ticker, ten_k_sentiments in sentiment_bow_ten_ks.items()}
project_helper.plot_similarities(
[jaccard_similarities[example_ticker][sentiment] for sentiment in sentiments],
file_dates[example_ticker][1:],
'Jaccard Similarities for {} Sentiment'.format(example_ticker),
sentiments)
# ### TFIDF
# Using the sentiment word lists, let's generate sentiment TFIDF from the 10-k documents. Implement `get_tfidf` to generate TFIDF from each document, using sentiment words as the terms. You can ignore words that are not in `sentiment_words`.
# In[63]:
from sklearn.feature_extraction.text import TfidfVectorizer
def get_tfidf(sentiment_words, docs):
"""
Generate TFIDF values from documents for a certain sentiment
Parameters
----------
sentiment_words: Pandas Series
Words that signify a certain sentiment
docs : list of str
List of documents used to generate bag of words
Returns
-------
tfidf : 2-d Numpy Ndarray of float
TFIDF sentiment for each document
The first dimension is the document.
The second dimension is the word.
"""
# TODO: Implement
tf = TfidfVectorizer()
tf.fit(sentiment_words)
return tf.transform(docs).toarray()
project_tests.test_get_tfidf(get_tfidf)
# Using the `get_tfidf` function, let's generate the TFIDF values for all the documents.
# In[64]:
sentiment_tfidf_ten_ks = {}
for ticker, ten_ks in ten_ks_by_ticker.items():
lemma_docs = [' '.join(ten_k['file_lemma']) for ten_k in ten_ks]
sentiment_tfidf_ten_ks[ticker] = {
sentiment: get_tfidf(sentiment_df[sentiment_df[sentiment]]['word'], lemma_docs)
for sentiment in sentiments}
project_helper.print_ten_k_data([sentiment_tfidf_ten_ks[example_ticker]], sentiments)
# ### Cosine Similarity
# Using the TFIDF values, we'll calculate the cosine similarity and plot it over time. Implement `get_cosine_similarity` to return the cosine similarities between each tick in time. Since the input, `tfidf_matrix`, is a TFIDF vector for each time period in order, you just need to compute the cosine similarities for each neighboring vector.
# In[68]:
from sklearn.metrics.pairwise import cosine_similarity
def get_cosine_similarity(tfidf_matrix):
"""
Get cosine similarities for each neighboring TFIDF vector/document
Parameters
----------
tfidf : 2-d Numpy Ndarray of float
TFIDF sentiment for each document
The first dimension is the document.
The second dimension is the word.
Returns
-------
cosine_similarities : list of float
Cosine similarities for neighboring documents
"""
    # TODO: Implement
    # compare each document only with its immediate successor in time and keep the
    # diagonal, i.e. the similarity of each neighboring pair of filings
    x = tfidf_matrix[:-1]
    y = tfidf_matrix[1:]
    return np.diag(cosine_similarity(x, y)).tolist()
project_tests.test_get_cosine_similarity(get_cosine_similarity)
# Let's plot the cosine similarities over time.
# In[69]:
cosine_similarities = {
ticker: {
sentiment_name: get_cosine_similarity(sentiment_values)
for sentiment_name, sentiment_values in ten_k_sentiments.items()}
for ticker, ten_k_sentiments in sentiment_tfidf_ten_ks.items()}
project_helper.plot_similarities(
[cosine_similarities[example_ticker][sentiment] for sentiment in sentiments],
file_dates[example_ticker][1:],
'Cosine Similarities for {} Sentiment'.format(example_ticker),
sentiments)
#
# ## Evaluate Alpha Factors
# Just like we did in project 4, let's evaluate the alpha factors. For this section, we'll just be looking at the cosine similarities, but it can be applied to the jaccard similarities as well.
# ### Price Data
# Let's get yearly pricing to run the factor against, since 10-Ks are produced annually.
# In[70]:
pricing = pd.read_csv('../../data/project_5_yr/yr-quotemedia.csv', parse_dates=['date'])
pricing = pricing.pivot(index='date', columns='ticker', values='adj_close')
pricing
# ### Dict to DataFrame
# The alphalens library uses dataframes, so we'll need to turn our dictionary into a dataframe.
# In[71]:
cosine_similarities_df_dict = {'date': [], 'ticker': [], 'sentiment': [], 'value': []}
for ticker, ten_k_sentiments in cosine_similarities.items():
for sentiment_name, sentiment_values in ten_k_sentiments.items():
        for sentiment_i, sentiment_value in enumerate(sentiment_values):
            cosine_similarities_df_dict['ticker'].append(ticker)
            cosine_similarities_df_dict['sentiment'].append(sentiment_name)
            cosine_similarities_df_dict['value'].append(sentiment_value)
            cosine_similarities_df_dict['date'].append(file_dates[ticker][1:][sentiment_i])
cosine_similarities_df = | pd.DataFrame(cosine_similarities_df_dict) | pandas.DataFrame |
from torchtools import *
from collections import OrderedDict
import math
import os
import numpy as np
import pandas as pd
# encoder for imagenet dataset: four conv blocks (64 -> 96 -> 128 -> 256 channels)
# that map an 84x84 RGB image to an emb_size-dimensional embedding
class EmbeddingImagenet(nn.Module):
def __init__(self,
emb_size):
super(EmbeddingImagenet, self).__init__()
# set size
self.hidden = 64
self.last_hidden = self.hidden * 25
self.emb_size = emb_size
# set layers
self.conv_1 = nn.Sequential(nn.Conv2d(in_channels=3,
out_channels=self.hidden,
kernel_size=3,
padding=1,
bias=False),
nn.BatchNorm2d(num_features=self.hidden),
nn.MaxPool2d(kernel_size=2),
nn.LeakyReLU(negative_slope=0.2, inplace=True))
self.conv_2 = nn.Sequential(nn.Conv2d(in_channels=self.hidden,
out_channels=int(self.hidden*1.5),
kernel_size=3,
bias=False),
nn.BatchNorm2d(num_features=int(self.hidden*1.5)),
nn.MaxPool2d(kernel_size=2),
nn.LeakyReLU(negative_slope=0.2, inplace=True))
self.conv_3 = nn.Sequential(nn.Conv2d(in_channels=int(self.hidden*1.5),
out_channels=self.hidden*2,
kernel_size=3,
padding=1,
bias=False),
nn.BatchNorm2d(num_features=self.hidden * 2),
nn.MaxPool2d(kernel_size=2),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Dropout2d(0.4))
self.conv_4 = nn.Sequential(nn.Conv2d(in_channels=self.hidden*2,
out_channels=self.hidden*4,
kernel_size=3,
padding=1,
bias=False),
nn.BatchNorm2d(num_features=self.hidden * 4),
nn.MaxPool2d(kernel_size=2),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Dropout2d(0.5))
self.layer_last = nn.Sequential(nn.Linear(in_features=self.last_hidden * 4,
out_features=self.emb_size, bias=True),
nn.BatchNorm1d(self.emb_size))
def forward(self, input_data):
output_data = self.conv_4(self.conv_3(self.conv_2(self.conv_1(input_data))))
return self.layer_last(output_data.view(output_data.size(0), -1))
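# GraphUnpool: scatters pooled node features back into their original node slots
# (zeros elsewhere), using the indices recorded by the matching GraphPool layer.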
class GraphUnpool(nn.Module):
def __init__(self):
super(GraphUnpool, self).__init__()
def forward(self, A, X, idx_batch):
# optimized by Gai
batch = X.shape[0]
new_X = torch.zeros(batch, A.shape[1], X.shape[-1]).to(tt.arg.device)
new_X[torch.arange(idx_batch.shape[0]).unsqueeze(-1), idx_batch] = X
#
return A, new_X
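# GraphPool: learns a score per node (linear projection + sigmoid) and keeps a
# fraction k of the support nodes according to tt.arg.pool_mode ('way', 'support',
# 'way&kn' or 'kn'), while always keeping the query nodes. Kept features are
# rescaled by their scores and the adjacency is sliced to the surviving nodes;
# when tt.arg.visual is set, the pooling decisions are also logged to CSV.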
class GraphPool(nn.Module):
def __init__(self, k, in_dim, num_classes, num_queries):
super(GraphPool, self).__init__()
self.k = k
self.num_queries = num_queries
self.num_classes = num_classes
self.proj = nn.Linear(in_dim, 1).to(tt.arg.device)
self.sigmoid = nn.Sigmoid()
def forward(self, A, X):
batch = X.shape[0]
idx_batch = []
new_X_batch = []
new_A_batch = []
if tt.arg.visual == True:
if tt.arg.pool_count == None:
tt.arg.pool_count = 0
# for each batch
for i in range(batch):
num_nodes = A[i, 0].shape[0]
scores = self.proj(X[i])
scores = torch.squeeze(scores)
scores = self.sigmoid(scores / 100)
#visual scores
if tt.arg.visual == True:
np_scores = scores.detach().cpu().numpy()
if tt.arg.pool_count == 0:
np_idx = np.arange(scores.size(0))
data = [['idx_%d' % tt.arg.pool_count]+list(np_idx),
['pool_%d_scores' % tt.arg.pool_count] + list(np_scores)]
else:
data = [['pool_%d_scores' % tt.arg.pool_count] + list(np_scores)]
df = pd.DataFrame(data)
if tt.arg.pool_count == 0:
if os.path.exists('visual_%s/%03d/pool_record.csv' % (tt.arg.exp_name, tt.arg.iter * batch + i)):
os.remove('visual_%s/%03d/pool_record.csv' % (tt.arg.exp_name, tt.arg.iter * batch + i))
df.to_csv('visual_%s/%03d/pool_record.csv' % (tt.arg.exp_name, tt.arg.iter*batch + i),header=False,index=False,mode='a')
if tt.arg.pool_mode == 'way':
num_spports = int((num_nodes - self.num_queries) / self.num_classes)
idx = []
values = []
# pooling by each way
for j in range(self.num_classes):
way_values, way_idx = torch.topk(scores[j * num_spports:(j + 1) * num_spports],
int(self.k * num_spports))
way_idx = way_idx + j * num_spports
idx.append(way_idx)
values.append(way_values)
query_values = scores[num_nodes - self.num_queries:]
query_idx = torch.arange(num_nodes - self.num_queries, num_nodes).long().to(tt.arg.device)
values = torch.cat(values + [query_values], dim=0)
idx = torch.cat(idx + [query_idx], dim=0)
elif tt.arg.pool_mode == 'support':
num_supports = num_nodes - self.num_queries
support_values, support_idx = torch.topk(scores[:num_supports], int(self.k * num_supports),
largest=True)
query_values = scores[num_supports:]
query_idx = torch.arange(num_nodes - self.num_queries, num_nodes).long().to(tt.arg.device)
values = torch.cat([support_values, query_values], dim=0)
idx = torch.cat([support_idx, query_idx], dim=0)
elif tt.arg.pool_mode == 'way&kn':
num_supports = int((num_nodes - self.num_queries) / self.num_classes)
idx = []
values = []
# pooling by each way
for j in range(self.num_classes):
way_scores = scores[j * num_supports:(j + 1) * num_supports]
intra_scores = way_scores - way_scores.mean()
_, way_idx = torch.topk(intra_scores,
int(self.k * num_supports), largest=False)
way_values = way_scores[way_idx]
way_idx = way_idx + j * num_supports
idx.append(way_idx)
values.append(way_values)
query_values = scores[num_nodes - self.num_queries:]
query_idx = torch.arange(num_nodes - self.num_queries, num_nodes).long().to(tt.arg.device)
values = torch.cat(values + [query_values], dim=0)
idx = torch.cat(idx + [query_idx], dim=0)
elif tt.arg.pool_mode == 'kn':
num_supports = num_nodes - self.num_queries
support_scores = scores[:num_supports]
intra_scores = support_scores - support_scores.mean()
_, support_idx = torch.topk(intra_scores,
int(self.k * num_supports), largest=False)
support_values = support_scores[support_idx]
query_values = scores[num_nodes - self.num_queries:]
query_idx = torch.arange(num_nodes - self.num_queries, num_nodes).long().to(tt.arg.device)
values = torch.cat([support_values, query_values], dim=0)
idx = torch.cat([support_idx, query_idx], dim=0)
else:
print('wrong pool_mode setting!!!')
raise NameError('wrong pool_mode setting!!!')
new_X = X[i, idx, :]
values = torch.unsqueeze(values, -1)
new_X = torch.mul(new_X, values)
new_A = A[i, idx, :]
new_A = new_A[:, idx]
idx_batch.append(idx)
new_X_batch.append(new_X)
new_A_batch.append(new_A)
A = torch.stack(new_A_batch, dim=0).to(tt.arg.device)
new_X = torch.stack(new_X_batch, dim=0).to(tt.arg.device)
idx_batch = torch.stack(idx_batch, dim=0).to(tt.arg.device)
# visual pool idx result
if tt.arg.visual == True:
for i in range(batch):
old_idx = pd.read_csv('visual_%s/%03d/pool_record.csv' % (tt.arg.exp_name, tt.arg.iter * batch + i),
skiprows=tt.arg.pool_count*2,nrows=1,header=None).to_numpy(copy=True).reshape(-1)[1:].astype(np.int32)
np_idx = old_idx[idx_batch[i].cpu().numpy()]
data = [['idx_%d' % (tt.arg.pool_count+1)] + list(np_idx)]
df = pd.DataFrame(data)
df.to_csv('visual_%s/%03d/pool_record.csv' % (tt.arg.exp_name, tt.arg.iter * batch + i), header=False,
index=False, mode='a')
tt.arg.pool_count = tt.arg.pool_count + 1
return A, new_X, idx_batch
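# Unet: graph U-Net over the episode graph. A start MLP/GCN block is followed by
# len(ks) down stages (adjacency MLP + GCN + GraphPool), a bottom block, and
# mirrored up stages (GraphUnpool + GCN) with additive skip connections from the
# down path; the start features are concatenated back in before the output GCN,
# which returns per-node log-softmax class scores.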
class Unet(nn.Module):
def __init__(self, ks, in_dim, num_classes, num_queries):
super(Unet, self).__init__()
l_n = len(ks)
self.l_n = l_n
start_mlp = MLP(in_dim=in_dim)
start_gcn = GCN(in_dim=in_dim, out_dim=in_dim)
self.add_module('start_mlp', start_mlp)
self.add_module('start_gcn', start_gcn)
for l in range(l_n):
down_mlp = MLP(in_dim=in_dim)
down_gcn = GCN(in_dim=in_dim, out_dim=in_dim)
up_mlp = MLP(in_dim=in_dim)
up_gcn = GCN(in_dim=in_dim, out_dim=in_dim)
pool = GraphPool(ks[l], in_dim=in_dim, num_classes=num_classes, num_queries=num_queries)
unpool = GraphUnpool()
self.add_module('down_mlp_{}'.format(l), down_mlp)
self.add_module('down_gcn_{}'.format(l), down_gcn)
self.add_module('up_mlp_{}'.format(l), up_mlp)
self.add_module('up_gcn_{}'.format(l), up_gcn)
self.add_module('pool_{}'.format(l), pool)
self.add_module('unpool_{}'.format(l), unpool)
bottom_mlp = MLP(in_dim=in_dim)
bottom_gcn = GCN(in_dim=in_dim, out_dim=in_dim)
self.add_module('bottom_mlp', bottom_mlp)
self.add_module('bottom_gcn', bottom_gcn)
out_mlp = MLP(in_dim=in_dim * 2)
out_gcn = GCN(in_dim=in_dim * 2, out_dim=num_classes)
self.add_module('out_mlp', out_mlp)
self.add_module('out_gcn', out_gcn)
def forward(self, A_init, X):
adj_ms = []
indices_list = []
down_outs = []
A_old = A_init
# visual_X(1)
if tt.arg.visual == True:
batch = X.size(0)
for j in range(batch):
np_X = X[j].detach().cpu().numpy()
data = [['Input_feature'] + list(np_X)]
df = pd.DataFrame(data)
df.to_csv('visual_%s/%03d/feature_record.csv' % (tt.arg.exp_name, tt.arg.iter * batch + j),
header=False,
index=False, mode='a')
A_new = self._modules['start_mlp'](X)
X = self._modules['start_gcn'](A_new, A_old, X)
org_X = X
# visual_X(2)
if tt.arg.visual == True:
for j in range(batch):
np_X = X[j].detach().cpu().numpy()
data = [['start_gcn'] + list(np_X)]
df = pd.DataFrame(data)
df.to_csv('visual_%s/%03d/feature_record.csv' % (tt.arg.exp_name, tt.arg.iter * batch + j),
header=False,
index=False, mode='a')
for i in range(self.l_n):
A_old = A_new
A_new = self._modules['down_mlp_{}'.format(i)](X)
X = self._modules['down_gcn_{}'.format(i)](A_new, A_old, X)
adj_ms.append(A_new)
down_outs.append(X)
# visual_X(3.1)
if tt.arg.visual == True:
batch = X.size(0)
for j in range(batch):
np_X = X[j].detach().cpu().numpy()
data = [['down_mlp_pool_before_{}'.format(i)] + list(np_X)]
df = pd.DataFrame(data)
df.to_csv('visual_%s/%03d/feature_record.csv' % (tt.arg.exp_name, tt.arg.iter * batch + j),
header=False,
index=False, mode='a')
A_new, X, idx_batch = self._modules['pool_{}'.format(i)](A_new, X)
indices_list.append(idx_batch)
# visual_X(3.2)
if tt.arg.visual == True:
batch = X.size(0)
for j in range(batch):
if i == 0:
old_idx = pd.read_csv(
'visual_%s/%03d/feature_record.csv' % (tt.arg.exp_name, tt.arg.iter * batch + j),
skiprows=0, nrows=1, header=None).to_numpy(
copy=True).reshape(-1)[1:].astype(np.int32)
else:
old_idx = pd.read_csv('visual_%s/%03d/feature_record.csv' % (tt.arg.exp_name, tt.arg.iter * batch + j),
skiprows=4 + (i-1) * 3, nrows=1, header=None).to_numpy(
copy=True).reshape(-1)[1:].astype(np.int32)
np_idx = old_idx[idx_batch[j].cpu().numpy()]
np_X = X[j].detach().cpu().numpy()
data = [['label'] + list(np_idx),
['down_mlp_pool_after_{}'.format(i)] + list(np_X)]
df = pd.DataFrame(data)
df.to_csv('visual_%s/%03d/feature_record.csv' % (tt.arg.exp_name, tt.arg.iter * batch + j),
header=False,
index=False, mode='a')
A_old = A_new
A_new = self._modules['bottom_mlp'](X)
X = self._modules['bottom_gcn'](A_new, A_old, X)
# visual_X(4)
if tt.arg.visual == True:
for j in range(batch):
np_X = X[j].detach().cpu().numpy()
data = [['bottom_mlp'] + list(np_X)]
df = pd.DataFrame(data)
df.to_csv('visual_%s/%03d/feature_record.csv' % (tt.arg.exp_name, tt.arg.iter * batch + j),
header=False,
index=False, mode='a')
for i in range(self.l_n):
up_idx = self.l_n - i - 1
A_old, idx_batch = adj_ms[up_idx], indices_list[up_idx]
A_old, X = self._modules['unpool_{}'.format(i)](A_old, X, idx_batch)
X = X.add(down_outs[up_idx])
A_new = self._modules['up_mlp_{}'.format(up_idx)](X)
X = self._modules['up_gcn_{}'.format(up_idx)](A_new, A_old, X)
# visual_X(5)
if tt.arg.visual == True:
for j in range(batch):
if up_idx == 0:
np_idx = pd.read_csv(
'visual_%s/%03d/feature_record.csv' % (tt.arg.exp_name, tt.arg.iter * batch + j),
skiprows=0, nrows=1, header=None).to_numpy(
copy=True).reshape(-1)[1:].astype(np.int32)
else:
np_idx = pd.read_csv(
'visual_%s/%03d/feature_record.csv' % (tt.arg.exp_name, tt.arg.iter * batch + j),
skiprows=1 + up_idx * 3, nrows=1, header=None).to_numpy(
copy=True).reshape(-1)[1:].astype(np.int32)
np_X = X[j].detach().cpu().numpy()
data = [['label'] + list(np_idx),
['unpool_{}'.format(i)] + list(np_X)]
df = pd.DataFrame(data)
df.to_csv('visual_%s/%03d/feature_record.csv' % (tt.arg.exp_name, tt.arg.iter * batch + j),
header=False,
index=False, mode='a')
X = torch.cat([X, org_X], -1)
# visual_X(6)
if tt.arg.visual == True:
for j in range(batch):
np_X = X[j].detach().cpu().numpy()
data = [['skip_connection'] + list(np_X)]
df = pd.DataFrame(data)
df.to_csv('visual_%s/%03d/feature_record.csv' % (tt.arg.exp_name, tt.arg.iter * batch + j),
header=False,
index=False, mode='a')
A_old = A_new
A_new = self._modules['out_mlp'](X)
X = self._modules['out_gcn'](A_new, A_old, X)
# visual_X(7)
if tt.arg.visual == True:
for j in range(batch):
np_X = X[j].detach().cpu().numpy()
data = [['out'] + list(np_X)]
df = pd.DataFrame(data)
df.to_csv('visual_%s/%03d/feature_record.csv' % (tt.arg.exp_name, tt.arg.iter * batch + j),
header=False,
index=False, mode='a')
out = F.log_softmax(X,dim=-1)
return out
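# MLP: builds a soft adjacency matrix from the pairwise absolute feature
# differences |x_i - x_j| using a stack of 1x1 convolutions, then normalizes
# each row with a softmax.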
class MLP(nn.Module):
def __init__(self,in_dim,hidden=96,ratio=[2,2,1,1]):
super(MLP, self).__init__()
# set layers
self.conv_1 = nn.Sequential(nn.Conv2d(in_channels=in_dim,
out_channels=hidden*ratio[0],
kernel_size=1,
bias=False),
nn.BatchNorm2d(num_features=hidden*ratio[0]),
nn.LeakyReLU())
self.conv_2 = nn.Sequential(nn.Conv2d(in_channels=hidden*ratio[0],
out_channels=hidden*ratio[1],
kernel_size=1,
bias=False),
nn.BatchNorm2d(num_features=hidden*ratio[1]),
nn.LeakyReLU())
self.conv_3 = nn.Sequential(nn.Conv2d(in_channels=hidden * ratio[1],
out_channels=hidden * ratio[2],
kernel_size=1,
bias=False),
nn.BatchNorm2d(num_features=hidden * ratio[2]),
nn.LeakyReLU())
self.conv_4 = nn.Sequential(nn.Conv2d(in_channels=hidden * ratio[2],
out_channels=hidden * ratio[3],
kernel_size=1,
bias=False),
nn.BatchNorm2d(num_features=hidden * ratio[3]),
nn.LeakyReLU())
self.conv_last = nn.Conv2d(in_channels=hidden * ratio[3],
out_channels=1,
kernel_size=1)
def forward(self,X):
# compute abs(x_i, x_j)
x_i = X.unsqueeze(2)
x_j = torch.transpose(x_i, 1, 2)
x_ij = torch.abs(x_i - x_j)
        # parallel: make sure inputs live on the same device as this layer's weights
x_ij = torch.transpose(x_ij, 1, 3).to(self.conv_last.weight.device)
#
A_new = self.conv_last(self.conv_4(self.conv_3(self.conv_2(self.conv_1(x_ij))))).squeeze(1)
A_new = F.softmax(A_new, dim=-1)
return A_new
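# GCN: a single graph-convolution step X' = proj(A_new @ X) with dropout on the
# input; in 'addold' mode the propagation over the previous adjacency A_old is
# concatenated before the projection.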
class GCN(nn.Module):
def __init__(self, in_dim, out_dim=133,dropout=0.0):
super(GCN, self).__init__()
if tt.arg.unet_mode == 'addold':
self.proj = nn.Linear(in_dim * 2, out_dim)
else:
self.proj = nn.Linear(in_dim, out_dim)
self.drop = nn.Dropout(p=dropout)
def forward(self,A_new, A_old, X):
        # parallel: make sure inputs live on the same device as this layer's weights
X = X.to(self.proj.weight.device)
A_new = A_new.to(X.device)
A_old = A_old.to(X.device)
#
X = self.drop(X)
if tt.arg.unet_mode == 'addold':
X1 = torch.bmm(A_new, X)
X2 = torch.bmm(A_old, X)
X = torch.cat([X1, X2], dim=-1)
else:
X = torch.bmm(A_new, X)
X = self.proj(X)
return X
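# Unet2: variant of Unet whose pooling layers are split into two groups with
# separate pooling ratios (ks_1, ks_2) and pooling modes (mode_1, mode_2).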
class Unet2(nn.Module):
def __init__(self, ks_1,ks_2,mode_1,mode_2, in_dim, num_classes, num_queries):
super(Unet2, self).__init__()
l_n_1 = len(ks_1)
l_n_2 = len(ks_2)
l_n = l_n_1 + l_n_2
self.l_n_1 = l_n_1
self.l_n_2 = l_n_2
self.l_n = l_n
self.mode_1 = mode_1
self.mode_2 = mode_2
start_mlp = MLP(in_dim=in_dim)
start_gcn = GCN(in_dim=in_dim, out_dim=in_dim)
self.add_module('start_mlp', start_mlp)
self.add_module('start_gcn', start_gcn)
for l in range(l_n):
down_mlp = MLP(in_dim=in_dim)
down_gcn = GCN(in_dim=in_dim, out_dim=in_dim)
up_mlp = MLP(in_dim=in_dim)
up_gcn = GCN(in_dim=in_dim, out_dim=in_dim)
if l < l_n_1:
pool = GraphPool(ks_1[l], in_dim=in_dim, num_classes=num_classes, num_queries=num_queries)
else:
pool = GraphPool(ks_2[l - l_n_1], in_dim=in_dim, num_classes=num_classes, num_queries=num_queries)
unpool = GraphUnpool()
self.add_module('down_mlp_{}'.format(l), down_mlp)
self.add_module('down_gcn_{}'.format(l), down_gcn)
self.add_module('up_mlp_{}'.format(l), up_mlp)
self.add_module('up_gcn_{}'.format(l), up_gcn)
self.add_module('pool_{}'.format(l), pool)
self.add_module('unpool_{}'.format(l), unpool)
bottom_mlp = MLP(in_dim=in_dim)
bottom_gcn = GCN(in_dim=in_dim, out_dim=in_dim)
self.add_module('bottom_mlp', bottom_mlp)
self.add_module('bottom_gcn', bottom_gcn)
out_mlp = MLP(in_dim=in_dim * 2)
out_gcn = GCN(in_dim=in_dim * 2, out_dim=num_classes)
self.add_module('out_mlp', out_mlp)
self.add_module('out_gcn', out_gcn)
def forward(self, A_init, X):
adj_ms = []
indices_list = []
down_outs = []
A_old = A_init
# visual_X(1)
if tt.arg.visual == True:
batch = X.size(0)
for j in range(batch):
np_X = X[j].detach().cpu().numpy()
data = [['Input_feature'] + list(np_X)]
df = pd.DataFrame(data)
df.to_csv('visual_%s/%03d/feature_record.csv' % (tt.arg.exp_name, tt.arg.iter * batch + j),
header=False,
index=False, mode='a')
A_new = self._modules['start_mlp'](X)
X = self._modules['start_gcn'](A_new, A_old, X)
org_X = X
# visual_X(2)
if tt.arg.visual == True:
for j in range(batch):
np_X = X[j].detach().cpu().numpy()
data = [['start_gcn'] + list(np_X)]
df = | pd.DataFrame(data) | pandas.DataFrame |
"""
30 May 2020
Author: <NAME>
After we have cleaned all the datasets, we will now combine everything into a
single dataframe.
Saving it as csv for the moment as we are still trying to figure out how we can
best share this data.
"""
import pandas as pd
#Simple calling of all the cleaned csv files with the file path censored
df1 = pd.read_csv(r"file_path\API_2005_2013_cleaned.csv")
df2 = pd.read_csv(r"file_path\API_2013_2014_cleaned.csv")
df3 = pd.read_csv(r"file_path\API_2014_2015_cleaned.csv")
df4 = pd.read_csv(r"file_path\API_2015_cleaned.csv")
df5 = pd.read_csv(r"file_path\API_2016_cleaned.csv")
df6 = pd.read_csv(r"file_path\API_Johor_2017_cleaned.csv")
df7 = pd.read_csv(r"file_path\API_Johor_2018_cleaned.csv")
df8 = pd.read_csv(r"file_path\API_Johor_2019_cleaned.csv")
df9 = pd.read_csv(r"file_path\API_Kedah_2017_cleaned.csv")
df10 = pd.read_csv(r"file_path\API_Kedah_2018_cleaned.csv")
df11 = pd.read_csv(r"file_path\API_Kedah_2019_cleaned.csv")
df12 = pd.read_csv(r"file_path\API_Kelantan_2017_cleaned.csv")
df13 = pd.read_csv(r"file_path\API_Kelantan_2018_cleaned.csv")
df14 = pd.read_csv(r"file_path\API_Kelantan_2019_cleaned.csv")
df15 = pd.read_csv(r"file_path\API_KL_2017_cleaned.csv")
df16 = pd.read_csv(r"file_path\API_Melaka_2017_cleaned.csv")
df17 = pd.read_csv(r"file_path\API_Melaka_2018_cleaned.csv")
df18 = pd.read_csv(r"file_path\API_NS_2017_cleaned.csv")
df19 = pd.read_csv(r"file_path\API_NS_2018_cleaned.csv")
df20 = pd.read_csv(r"file_path\API_Pahang_2017_cleaned.csv")
df21 = pd.read_csv(r"file_path\API_Pahang_2018_cleaned.csv")
df22 = pd.read_csv(r"file_path\API_Penang_2017_cleaned.csv")
df23 = pd.read_csv(r"file_path\API_Penang_2018_cleaned.csv")
df24 = pd.read_csv(r"file_path\API_Perak_2017_cleaned.csv")
df25 = pd.read_csv(r"file_path\API_Perak_2018_cleaned.csv")
df26 = pd.read_csv(r"file_path\API_Perlis_2017_cleaned.csv")
df27 = | pd.read_csv(r"file_path\API_Perlis_2018_cleaned.csv") | pandas.read_csv |
import json
from unittest.mock import patch
import numpy as np
import pandas as pd
import pytest
from evalml.model_understanding import find_confusion_matrix_per_thresholds
from evalml.model_understanding.decision_boundary import (
_accuracy,
_balanced_accuracy,
_f1,
_find_confusion_matrix_objective_threshold,
_find_data_between_ranges,
_precision,
_recall,
)
from evalml.objectives import AccuracyBinary
@pytest.mark.parametrize(
"val_list,expected_val",
[
([0, 0, 100, 100], 0.0),
([100, 0, 0, 100], 0.5),
([50, 50, 50, 50], 0.5),
([40, 20, 10, 30], 0.6),
],
)
def test_accuracy(val_list, expected_val):
val = _accuracy(val_list)
assert val == expected_val
@pytest.mark.parametrize(
"val_list,expected_val",
[
([0, 0, 100, 100], 0.0),
([100, 0, 0, 100], 0.25),
([50, 50, 50, 50], 0.5),
([40, 20, 10, 30], 13 / 21),
],
)
def test_balanced_accuracy(val_list, expected_val):
val = _balanced_accuracy(val_list)
assert val == expected_val
@pytest.mark.parametrize(
"val_list,expected_val",
[
([0, 0, 100, 100], 0.0),
([100, 0, 0, 100], 0.5),
([50, 50, 50, 50], 0.5),
([40, 20, 10, 30], 4 / 7),
],
)
def test_recall(val_list, expected_val):
val = _recall(val_list)
assert val == expected_val
@pytest.mark.parametrize(
"val_list,expected_val",
[
([0, 0, 100, 100], 0.0),
([100, 0, 0, 100], 1.0),
([50, 50, 50, 50], 0.5),
([40, 20, 10, 30], 0.8),
],
)
def test_precision(val_list, expected_val):
val = _precision(val_list)
assert val == expected_val
@pytest.mark.parametrize(
"val_list,expected_val",
[
([0, 0, 100, 100], 0.0),
([100, 0, 0, 100], 2 / 3),
([50, 50, 50, 50], 0.5),
([40, 20, 10, 30], 2 / 3),
],
)
def test_f1(val_list, expected_val):
val = _f1(val_list)
assert val == expected_val
def test_find_confusion_matrix_per_threshold_errors(
dummy_binary_pipeline, dummy_multiclass_pipeline
):
X = pd.DataFrame()
y = pd.Series()
with pytest.raises(
ValueError, match="Expected a fitted binary classification pipeline"
):
find_confusion_matrix_per_thresholds(dummy_binary_pipeline, X, y)
with pytest.raises(
ValueError, match="Expected a fitted binary classification pipeline"
):
find_confusion_matrix_per_thresholds(dummy_multiclass_pipeline, X, y)
dummy_multiclass_pipeline._is_fitted = True
with pytest.raises(
ValueError, match="Expected a fitted binary classification pipeline"
):
find_confusion_matrix_per_thresholds(dummy_multiclass_pipeline, X, y)
@patch("evalml.pipelines.BinaryClassificationPipeline.fit")
@patch("evalml.pipelines.BinaryClassificationPipeline.predict_proba")
@patch(
"evalml.model_understanding.decision_boundary._find_confusion_matrix_objective_threshold"
)
@patch("evalml.model_understanding.decision_boundary._find_data_between_ranges")
def test_find_confusion_matrix_per_threshold_args_pass_through(
mock_ranges, mock_threshold, mock_pred_proba, mock_fit, dummy_binary_pipeline
):
n_bins = 100
X = pd.DataFrame()
y = pd.Series([0] * 500 + [1] * 500)
dummy_binary_pipeline._is_fitted = True
# set return predicted proba
preds = [0.1] * 250 + [0.8] * 500 + [0.6] * 250
pred_proba = pd.DataFrame({0: [1 - v for v in preds], 1: preds})
mock_pred_proba.return_value = pred_proba
# set the output for the thresholding private method
obj_dict = {
"accuracy": [{"objective score": 0.5, "threshold value": 0.5}, "some function"],
"balanced_accuracy": [
{"objective score": 0.5, "threshold value": 0.25},
"some function",
],
}
conf_matrix = np.array([[0, 100, 280, 0] for i in range(n_bins)])
mock_threshold.return_value = (conf_matrix, obj_dict)
# set the output for data between ranges
range_result = [[range(5)] for i in range(n_bins)]
mock_ranges.return_value = range_result
# calculate the expected output results
bins = [i / n_bins for i in range(n_bins + 1)]
expected_pos_skew, pos_range = np.histogram(pred_proba.iloc[:, -1][500:], bins=bins)
expected_neg_skew, _ = np.histogram(pred_proba.iloc[:, -1][:500], bins=bins)
expected_result_df = pd.DataFrame(
{
"true_pos_count": expected_pos_skew,
"true_neg_count": expected_neg_skew,
"true_positives": conf_matrix[:, 0].tolist(),
"true_negatives": conf_matrix[:, 1].tolist(),
"false_positives": conf_matrix[:, 2].tolist(),
"false_negatives": conf_matrix[:, 3].tolist(),
"data_in_bins": range_result,
},
index=pos_range[1:],
)
final_obj_dict = {
"accuracy": {"objective score": 0.5, "threshold value": 0.5},
"balanced_accuracy": {"objective score": 0.5, "threshold value": 0.25},
}
returned_result = find_confusion_matrix_per_thresholds(
dummy_binary_pipeline, X, y, n_bins
)
call_args = mock_threshold.call_args
assert all(call_args[0][0] == expected_pos_skew)
assert all(call_args[0][1] == expected_neg_skew)
assert all(call_args[0][2] == pos_range)
assert isinstance(returned_result, tuple)
pd.testing.assert_frame_equal(returned_result[0], expected_result_df)
assert returned_result[1] == final_obj_dict
@patch("evalml.pipelines.BinaryClassificationPipeline.fit")
@patch("evalml.pipelines.BinaryClassificationPipeline.predict_proba")
@pytest.mark.parametrize("n_bins", [100, 10, None])
def test_find_confusion_matrix_per_threshold_n_bins(
mock_pred_proba, mock_fit, n_bins, dummy_binary_pipeline
):
X = pd.DataFrame()
y = pd.Series([0] * 1200 + [1] * 800)
dummy_binary_pipeline._is_fitted = True
top_k = 5
# set return predicted proba
preds = [0.1] * 400 + [0.8] * 400 + [0.6] * 400 + [0.4] * 400 + [0.5] * 400
pred_proba = pd.DataFrame({0: [1 - v for v in preds], 1: preds})
mock_pred_proba.return_value = pred_proba
# calculate the expected output results
returned_result = find_confusion_matrix_per_thresholds(
dummy_binary_pipeline, X, y, n_bins, top_k=top_k
)
assert isinstance(returned_result, tuple)
if n_bins is not None:
assert len(returned_result[0]) == n_bins
assert returned_result[0].columns.tolist() == [
"true_pos_count",
"true_neg_count",
"true_positives",
"true_negatives",
"false_positives",
"false_negatives",
"data_in_bins",
]
assert sum(returned_result[0]["true_pos_count"]) == 800
assert sum(returned_result[0]["true_neg_count"]) == 1200
assert all([len(v) <= top_k for v in returned_result[0]["data_in_bins"]])
assert isinstance(returned_result[1], dict)
assert set(returned_result[1].keys()) == {
"accuracy",
"balanced_accuracy",
"precision",
"f1",
}
@patch("evalml.pipelines.BinaryClassificationPipeline.fit")
@patch("evalml.pipelines.BinaryClassificationPipeline.predict_proba")
@pytest.mark.parametrize("top_k", [-1, 4])
@pytest.mark.parametrize("n_bins", [100, None])
def test_find_confusion_matrix_per_threshold_k_(
mock_pred_proba, mock_fit, n_bins, top_k, dummy_binary_pipeline
):
X = pd.DataFrame()
y = | pd.Series([0] * 1200 + [1] * 800) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 25 16:14:12 2019
@author: <NAME>
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#import graphviz
import os
import seaborn as sns
from scipy.stats import chi2_contingency
os.chdir("E:\PYTHON NOTES\projects\cab fare prediction")
dataset_train=pd.read_csv("train_cab.csv")
dataset_test=pd.read_csv("test.csv")
dataset_train.describe()
# dimension of data
dataset_train.shape
# Number of rows
dataset_train.shape[0]
# number of columns
dataset_train.shape[1]
# name of columns
list(dataset_train)
# data details
dataset_train.info()
dataset_train.isnull().sum()
dataset_test.isnull().sum()
sns.heatmap(dataset_train.isnull(),yticklabels=False,cbar=False, cmap='coolwarm')
#convert pickup_datetime into the required datetime format
data=[dataset_train,dataset_test]
for i in data:
i["pickup_datetime"]=pd.to_datetime(i["pickup_datetime"],errors="coerce")
dataset_train.info()
dataset_test.info()
dataset_train.isnull().sum()
dataset_test.isna().sum()
dataset_train=dataset_train.dropna(subset=["pickup_datetime"],how="all")
dataset_train["fare_amount"]=dataset_train["fare_amount"].astype(float)
np.where(dataset_train["fare_amount"]=="430-")
dataset_train["fare_amount"].loc[1123]=430
dataset_train["fare_amount"]=dataset_train["fare_amount"].astype(float)
#we will convert passenger_count into a categorical variable, because passenger count is not a continuous variable
dataset_obj=["passenger_count"]
dataset_int=["fare_amount","pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]
# data visualization
import seaborn as sns
import matplotlib.pyplot as plt
#setting up the seaborn style for plots
sns.set(style="darkgrid",palette="Set1")
#some histogram plots from the seaborn library
plt.figure(figsize=(20,20))
plt.subplot(321)
_=sns.distplot(dataset_train["fare_amount"],bins=50)
plt.subplot(322)
_=sns.distplot(dataset_train["pickup_longitude"],bins=50)
plt.subplot(323)
_=sns.distplot(dataset_train["pickup_latitude"],bins=50)
plt.subplot(324)
_ = sns.distplot(dataset_train['dropoff_longitude'],bins=50)
plt.subplot(325)
_ = sns.distplot(dataset_train['dropoff_latitude'],bins=50)
plt.show()
plt.savefig('hist.png')
import scipy.stats as stats
#Some Bee Swarmplots
# plt.title('Cab Fare w.r.t passenger_count')
plt.figure(figsize=(25,25))
#_=sns.swarmplot(x="passenger_count",y="fare_amount",data=dataset_train)
#Jointplots for Bivariate Analysis.
#Here the scatter plot has a regression line between the 2 variables, along with separate histograms of both variables.
#Also it's annotated with the Pearson correlation coefficient and p-value.
_=sns.jointplot(x="fare_amount",y="pickup_longitude",data=dataset_train,kind="reg")
_.annotate(stats.pearsonr)
#plt.savefig("jointfplo.png")
plt.show()
_=sns.jointplot(x="fare_amount",y="pickup_latitude",data=dataset_train,kind="reg")
_.annotate(stats.pearsonr)
_=sns.jointplot(x="fare_amount",y="dropoff_longitude",data=dataset_train,kind="reg")
_.annotate(stats.pearsonr)
_=sns.jointplot(x="fare_amount",y="dropoff_latitude",data=dataset_train,kind="reg")
_.annotate(stats.pearsonr)
# some violin plots to see the spread of each variable
plt.figure(figsize=(20,20))
plt.subplot(321)
_=sns.violinplot(y="fare_amount",data=dataset_train)
plt.subplot(322)
_=sns.violinplot(y="pickup_longitude",data=dataset_train)
plt.subplot(323)
_ = sns.violinplot(y='pickup_latitude',data=dataset_train)
plt.subplot(324)
_ = sns.violinplot(y='dropoff_longitude',data=dataset_train)
plt.subplot(325)
_ = sns.violinplot(y='dropoff_latitude',data=dataset_train)
plt.savefig("violine.png")
plt.show()
#pairplot for all numeric variables
_=sns.pairplot(dataset_train.loc[:,dataset_int],kind="scatter",dropna=True)
_.fig.suptitle("pairwise plot of all numeric variables")
#plt.savefig("pairwise.png")
plt.show()
#removing values which are not within the desired range (outliers), based on a basic understanding of the dataset
#1. Fare amount has negative values, which doesn't make sense. A fare cannot be negative and also cannot be 0. So we will remove these rows.
sum(dataset_train["fare_amount"]<1)
dataset_train[dataset_train["fare_amount"]<1]
dataset_train=dataset_train.drop(dataset_train[dataset_train["fare_amount"]<1].index,axis=0)
#dataset_train.loc[dataset_train["fare_amount"]<1,"fare_amount"]=np.nan
#2. passenger_count variable: passenger count cannot be more than 6
sum(dataset_train["passenger_count"]>6)
for i in range(4, 11):
    print("passenger_count_above" + str(i) + "={}".format(sum(dataset_train["passenger_count"]>i)))
# so 20 observations of passenger_count are consistently above the 6, 7, 8, 9, 10 thresholds; let's check them.
dataset_train[dataset_train["passenger_count"]>6]
#Also we need to see if there are any passenger_count<1
dataset_train[dataset_train["passenger_count"]<1]
len(dataset_train[dataset_train["passenger_count"]<1])
dataset_test["passenger_count"].unique()
# We will remove the 20 observations which are above 6 because a cab cannot hold that many passengers.
dataset_train=dataset_train.drop(dataset_train[dataset_train["passenger_count"]<1].index,axis=0)
dataset_train=dataset_train.drop(dataset_train[dataset_train["passenger_count"]>6].index,axis=0)
#dataset_train.loc[dataset_train["passenger_count"]<1,"passenger_count"]=np.nan
#dataset_train.loc[dataset_train["passenger_count"]>6,"passenger_count"]=np.nan
sum(dataset_train["passenger_count"]<1)
#3.Latitudes range from -90 to 90.Longitudes range from -180 to 180. Removing which does not satisfy these ranges
print("pickup_longitude above 180 ={}".format(sum(dataset_train["pickup_longitude"]>180)))
print("pickup_longitude above -180 = {}".format(sum(dataset_train["pickup_longitude"]<-180)))
print("pickup_latitude above 90 ={}".format(sum(dataset_train["pickup_latitude"]>90)))
print("pickup_latitude above -90 ={}".format(sum(dataset_train["pickup_latitude"]<-90)))
print('dropoff_longitude above 180={}'.format(sum(dataset_train['dropoff_longitude']>180)))
print('dropoff_longitude below -180={}'.format(sum(dataset_train['dropoff_longitude']<-180)))
print('dropoff_latitude below -90={}'.format(sum(dataset_train['dropoff_latitude']<-90)))
print('dropoff_latitude above 90={}'.format(sum(dataset_train['dropoff_latitude']>90)))
#for test data
print("pickup_longitude above 180 ={}".format(sum(dataset_test["pickup_longitude"]>180)))
print("pickup_longitude above -180 = {}".format(sum(dataset_test["pickup_longitude"]<-180)))
print("pickup_latitude above 90 ={}".format(sum(dataset_test["pickup_latitude"]>90)))
print("pickup_latitude above -90 ={}".format(sum(dataset_test["pickup_latitude"]<-90)))
print('dropoff_longitude above 180={}'.format(sum(dataset_test['dropoff_longitude']>180)))
print('dropoff_longitude below -180={}'.format(sum(dataset_test['dropoff_longitude']<-180)))
print('dropoff_latitude below -90={}'.format(sum(dataset_test['dropoff_latitude']<-90)))
print('dropoff_latitude above 90={}'.format(sum(dataset_test['dropoff_latitude']>90)))
#There's only one outlier, which is in the variable pickup_latitude, so we will remove it.
#Also we will see if there are any values equal to 0.
for i in ["pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]:
print(i,"equal to 0={}".format(sum(dataset_train[i]==0)))
#for test data
for i in ["pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]:
print(i,"equal to 0={}".format(sum(dataset_test[i]==0)))
# There's only one outlier, which is in the variable pickup_latitude, so we drop that row
dataset_train=dataset_train.drop(dataset_train[dataset_train["pickup_latitude"]>90].index,axis=0)
#there are coordinate values which are equal to 0; we will remove those rows
for i in ["pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]:
dataset_train=dataset_train.drop(dataset_train[dataset_train[i]==0].index,axis=0)
# for i in ['pickup_longitude','pickup_latitude','dropoff_longitude','dropoff_latitude']:
# train.loc[train[i]==0,i] = np.nan
# train.loc[train['pickup_latitude']>90,'pickup_latitude'] = np.nan
dataset_train.shape
#Missing Value Analysis
missing_value=dataset_train.isnull().sum()
missing_value = missing_value.reset_index()
missing_value = missing_value.rename(columns = {'index': 'Variables', 0: 'Missing_percentage'})
missing_value
#find out the percentage of null values
missing_value['Missing_percentage'] = (missing_value['Missing_percentage']/len(dataset_train))*100
missing_value = missing_value.sort_values('Missing_percentage', ascending = False).reset_index(drop = True)
dataset_train.info()
dataset_train["fare_amount"]=dataset_train["fare_amount"].fillna(dataset_train["fare_amount"].median())
dataset_train["passenger_count"]=dataset_train["passenger_count"].fillna(dataset_train["passenger_count"].mode()[0])
dataset_train.isnull().sum()
dataset_train["passenger_count"]=dataset_train["passenger_count"].round().astype(object)
dataset_train["passenger_count"].unique()
#outlier analysis by box plot
plt.figure(figsize=(20,5))
plt.xlim(0,100)
sns.boxplot(x=dataset_train["fare_amount"],data=dataset_train,orient="h")
# sum(dataset_train['fare_amount']<22.5)/len(dataset_train['fare_amount'])*100
#Bivariate Boxplots: Boxplot for Numerical Variable Vs Categorical Variable.
plt.figure(figsize=(20,10))
plt.xlim(0,100)
_=sns.boxplot(x=dataset_train["fare_amount"],y=dataset_train["passenger_count"],data=dataset_train,orient="h")
def outlier_detect(df):
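    # cap outliers column-wise using Tukey's IQR fences: values below Q1 - 1.5*IQR
    # are floored at the lower fence (ltv) and values above Q3 + 1.5*IQR are capped
    # at the upper fence (utv)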
for i in df.describe().columns:
q1=df.describe().at["25%",i]
q3=df.describe().at["75%",i]
IQR=(q3-q1)
ltv=(q1-1.5*IQR)
utv=(q3+1.5*IQR)
x=np.array(df[i])
p=[]
for j in x:
if j<ltv:
p.append(ltv)
elif j>utv:
p.append(utv)
else:
p.append(j)
df[i]=p
return (df)
dataset_int1=outlier_detect(dataset_train.loc[:,dataset_int])
dataset_test_obj=["passenger_count"]
dataset_test_int=["pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]
dataset_test1=outlier_detect(dataset_test.loc[:,dataset_test_int])
dataset_test1=pd.concat([dataset_test1,dataset_test["passenger_count"]],axis=1)
dataset_test=pd.concat([dataset_test1,dataset_test["pickup_datetime"]],axis=1)
#determine corr
corr=dataset_int1.corr()
f,ax=plt.subplots(figsize=(7,5))
sns.heatmap(corr,mask=np.zeros_like(corr,dtype=np.bool),cmap=sns.diverging_palette(220,10,as_cmap=True),square=True,ax=ax)
# """feature engineering"""
#1.we will derive new features from pickup_datetime variable
#new features will be year,month,day_of_week,hour
dataset_train1=pd.concat([dataset_int1,dataset_train["passenger_count"]],axis=1)
dataset_train2=pd.concat([dataset_train1,dataset_train["pickup_datetime"]],axis=1)
#dataset_train2.isna().sum()
data=[dataset_train2,dataset_test]
for i in data:
i["year"]=i["pickup_datetime"].apply(lambda row:row.year)
i["month"]=i["pickup_datetime"].apply(lambda row:row.month)
i["day_of_week"] = i["pickup_datetime"].apply(lambda row: row.dayofweek)
i["hour"] = i["pickup_datetime"].apply(lambda row: row.hour)
# train2_nodummies=dataset_train2.copy()
# dataset_train2=train2_nodummies.copy()
plt.figure(figsize=(20,10))
sns.countplot(dataset_train2["year"])
# plt.savefig('year.png')
plt.figure(figsize=(20,10))
sns.countplot(dataset_train2['month'])
# plt.savefig('month.png')
plt.figure(figsize=(20,10))
sns.countplot(dataset_train2['day_of_week'])
# plt.savefig('day_of_week.png')
plt.figure(figsize=(20,10))
sns.countplot(dataset_train2['hour'])
# plt.savefig('hour.png')
plt.show
#Now we will use month, day_of_week and hour to derive new features: sessions in a day, seasons in a year, and week (weekday/weekend)
# for sessions in a day using hour columns
def f(x):
if(x>=5) and (x<=11):
return "morning"
elif (x>=12) and (x<=16):
return "afternoon"
elif (x>=17) and (x<=20):
return "evening"
elif (x>=21) and (x<=23):
return "night pm"
elif (x>=0) and (x<=4):
return "night am"
dataset_train2["sessions"]=dataset_train2["hour"].apply(f)
dataset_test['session'] = dataset_test['hour'].apply(f)
#for seasons in a year using month column
def g(x):
if (x>=3) and (x<=5):
return "spring"
elif (x>=6) and (x<=8):
return "summer"
elif (x>=9) and (x<=11):
return "fall"
else :
return "winter"
dataset_train2['seasons'] = dataset_train2['month'].apply(g)
dataset_test['seasons'] = dataset_test['month'].apply(g)
#for week / weekend in a day of week columns
def h(x):
if (x>=0) and (x<=4):
return "weekday"
elif (x>=5) and (x<=6):
return "weekend"
dataset_train2['week'] = dataset_train2['day_of_week'].apply(h)
dataset_test['week'] = dataset_test['day_of_week'].apply(h)
dataset_train2['passenger_count'].describe()
dataset_train2.isnull().sum()
dataset_test.isna().sum()
#creating dummy variables
temp=pd.get_dummies(dataset_train2["passenger_count"],prefix="passenger_count")
dataset_train2=dataset_train2.join(temp)
temp = pd.get_dummies(dataset_test['passenger_count'], prefix = 'passenger_count')
dataset_test = dataset_test.join(temp)
temp = pd.get_dummies(dataset_test['seasons'], prefix = 'seasons')
dataset_test = dataset_test.join(temp)
temp=pd.get_dummies(dataset_train2["seasons"],prefix = "season" )
dataset_train2=pd.concat([dataset_train2,temp],axis=1)
temp = pd.get_dummies(dataset_train2['week'], prefix = 'week')
dataset_train2=pd.concat([dataset_train2,temp],axis=1)
temp = pd.get_dummies(dataset_test['week'], prefix = 'week')
dataset_test = dataset_test.join(temp)
temp = pd.get_dummies(dataset_train2['sessions'], prefix = 'sessions')
dataset_train2=pd.concat([dataset_train2,temp],axis=1)
temp = pd.get_dummies(dataset_test['session'], prefix = 'session')
dataset_test = dataset_test.join(temp)
temp = pd.get_dummies(dataset_train2['year'], prefix = 'year')
dataset_train2=pd.concat([dataset_train2,temp],axis=1)
temp = pd.get_dummies(dataset_test['year'], prefix = 'year')
dataset_test = dataset_test.join(temp)
#we will drop one column from each one-hot-encoded variable to avoid the dummy variable trap
dataset_train2.columns
dataset_test.columns
dataset_train2.info()
dataset_train2=dataset_train2.drop(['passenger_count_1.0','season_fall','week_weekday','sessions_afternoon','year_2009'],axis=1)
dataset_test=dataset_test.drop(['passenger_count_1','seasons_fall','week_weekday','session_afternoon','year_2009'],axis=1)
#3. Feature engineering for the latitude and longitude variables
#As we have latitude and longitude data for pickup and dropoff, we will compute the distance the cab travelled between the pickup and dropoff locations.
#def haversine(coord1,coord2):
# data=[dataset_train2,dataset_test]
# for i in data:
# lon1,lat1=coord1
# lon2,lat2=coord2
# r=6371000 #randius of earth in meters
# phi_1=np.radians(i[lat1])
# phi_2=np.radians(i[lat2])
# delta_phi=np.radians(i[lat2]-i[lat1])
# delta_lambda=np.radians(i[lon2]-i[lon1])
# a=np.sin(delta_phi/2.0)**2+np.cos(phi_1)*np.cos(phi_2)*np.sin(delta_lambda/2.0)**2
# c=2*np.arctan2(np.sqrt(a),np.sqrt(1-a))
# meters=c*r #output distance in meter
# km=meters/1000.0
# miles=round(km,3)/1.609344
# i["distance"]=miles
# print(f"distance:{miles} miles")
# return miles
#
#haversine(['pickup_longitude','pickup_latitude'],['dropoff_longitude','dropoff_latitude'])
#As Vincenty is more accurate than haversine and is preferred for short distances, we use the geodesic distance.
#Therefore we will drop great_circle later, together with the other variables that were used for feature engineering.
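# note: geopy's geodesic distance uses an ellipsoidal earth model, while great_circle
# assumes a spherical earth; both are computed below for comparison, and only the more
# accurate geodesic distance (in miles) is kept as a feature.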
from geopy.distance import geodesic
from geopy.distance import great_circle
#from sklearn.externals import joblib
data=[dataset_train2,dataset_test]
for i in data:
i["great_circle"]=i.apply(lambda x : great_circle((x["pickup_latitude"],x["pickup_longitude"]),(x["dropoff_latitude"],x["dropoff_longitude"])).miles,axis=1)
i["geodesic"]=i.apply(lambda x: geodesic((x["pickup_latitude"],x["pickup_longitude"]),(x["dropoff_latitude"],x["dropoff_longitude"])).miles,axis=1)
#We will remove the variables which were used to engineer the new features
dataset_train2=dataset_train2.drop(['pickup_datetime','pickup_longitude', 'pickup_latitude',
'dropoff_longitude', 'dropoff_latitude', 'passenger_count', 'year',
'month', 'day_of_week', 'hour', 'sessions', 'seasons', 'week','great_circle'],axis=1)
dataset_test=dataset_test.drop(['pickup_datetime','pickup_longitude', 'pickup_latitude',
'dropoff_longitude', 'dropoff_latitude', 'passenger_count', 'year',
'month', 'day_of_week', 'hour', 'session', 'seasons', 'week','great_circle'],axis=1)
plt.figure(figsize=(20,5))
sns.boxplot(x=dataset_train2["geodesic"],data=dataset_train2,orient="h")
plt.figure(figsize=(20,5))
plt.xlim(0,100)
sns.boxplot(x=dataset_train2['geodesic'],data=dataset_train2,orient='h')
plt.title('Boxplot of geodesic ')
# plt.savefig('bp geodesic.png')
plt.show()
dataset_train2.isnull().sum()
#outlier in geodesic
def outlier_detect(df):
for i in df.describe().columns:
q1=df.describe().at["25%",i]
q3=df.describe().at["75%",i]
IQR=(q3-q1)
ltv=(q1-1.5*IQR)
utv=(q3+1.5*IQR)
x=np.array(df[i])
p=[]
for j in x:
if j<ltv:
p.append(ltv)
elif j>utv:
p.append(utv)
else:
p.append(j)
df[i]=p
return (df)
dataset_train11=pd.DataFrame(dataset_train2["geodesic"])
dataset_11=outlier_detect(dataset_train11)
dataset_train2=dataset_train2.drop(["geodesic"],axis=1)
dataset_train2=pd.concat([dataset_train2,dataset_11],axis=1)
dataset_train2.info()
#*****************************************************
#for test data
dataset_test1= | pd.DataFrame(dataset_test["geodesic"]) | pandas.DataFrame |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from meterstick import metrics
from meterstick import operations
import mock
import numpy as np
import pandas as pd
from pandas import testing
import unittest
class MetricTest(unittest.TestCase):
"""Tests general features of Metric."""
df = pd.DataFrame({'X': [0, 1, 2, 3], 'Y': [0, 1, 1, 2]})
def test_precompute(self):
metric = metrics.Metric(
'foo',
precompute=lambda df, split_by: df[split_by],
compute=lambda x: x.sum().values[0])
output = metric.compute_on(self.df, 'Y')
expected = pd.DataFrame({'foo': [0, 2, 2]}, index=range(3))
expected.index.name = 'Y'
testing.assert_frame_equal(output, expected)
def test_compute(self):
metric = metrics.Metric('foo', compute=lambda x: x['X'].sum())
output = metric.compute_on(self.df)
expected = metrics.Sum('X', 'foo').compute_on(self.df)
testing.assert_frame_equal(output, expected)
def test_postcompute(self):
def postcompute(values, split_by):
del split_by
return values / values.sum()
output = metrics.Sum('X', postcompute=postcompute).compute_on(self.df, 'Y')
expected = operations.Distribution('Y',
metrics.Sum('X')).compute_on(self.df)
expected.columns = ['sum(X)']
testing.assert_frame_equal(output.astype(float), expected)
def test_compute_slices(self):
def _sum(df, split_by):
if split_by:
df = df.groupby(split_by)
return df['X'].sum()
metric = metrics.Metric('foo', compute_slices=_sum)
output = metric.compute_on(self.df)
expected = metrics.Sum('X', 'foo').compute_on(self.df)
testing.assert_frame_equal(output, expected)
def test_final_compute(self):
metric = metrics.Metric(
'foo', compute=lambda x: x, final_compute=lambda *_: 2)
output = metric.compute_on(None)
self.assertEqual(output, 2)
def test_pipeline_operator(self):
m = metrics.Count('X')
testing.assert_frame_equal(
m.compute_on(self.df), m | metrics.compute_on(self.df))
class SimpleMetricTest(unittest.TestCase):
df = pd.DataFrame({
'X': [1, 1, 1, 2, 2, 3, 4],
'Y': [3, 1, 1, 4, 4, 3, 5],
'grp': ['A'] * 3 + ['B'] * 4
})
def test_list_where(self):
metric = metrics.Mean('X', where=['grp == "A"'])
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].mean()
self.assertEqual(output, expected)
def test_single_list_where(self):
metric = metrics.Mean('X', where=['grp == "A"', 'Y < 2'])
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A" and Y < 2')['X'].mean()
self.assertEqual(output, expected)
def test_count_not_df(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 7)
def test_count_split_by_not_df(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].count()
expected.name = 'count(X)'
testing.assert_series_equal(output, expected)
def test_count_where(self):
metric = metrics.Count('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 3)
def test_count_with_nan(self):
df = pd.DataFrame({'X': [1, 1, np.nan, 2, 2, 3, 4]})
metric = metrics.Count('X')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 6)
def test_count_unmelted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'count(X)': [7]})
testing.assert_frame_equal(output, expected)
def test_count_melted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [7]}, index=['count(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_count_split_by_unmelted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'count(X)': [3, 4]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_count_split_by_melted(self):
metric = metrics.Count('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [3, 4],
'grp': ['A', 'B']
},
index=['count(X)', 'count(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_count_distinct(self):
df = pd.DataFrame({'X': [1, 1, np.nan, 2, 2, 3]})
metric = metrics.Count('X', distinct=True)
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 3)
def test_sum_not_df(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 14)
def test_sum_split_by_not_df(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].sum()
expected.name = 'sum(X)'
testing.assert_series_equal(output, expected)
def test_sum_where(self):
metric = metrics.Sum('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].sum()
self.assertEqual(output, expected)
def test_sum_unmelted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'sum(X)': [14]})
testing.assert_frame_equal(output, expected)
def test_sum_melted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [14]}, index=['sum(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_sum_split_by_unmelted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'sum(X)': [3, 11]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_sum_split_by_melted(self):
metric = metrics.Sum('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [3, 11],
'grp': ['A', 'B']
},
index=['sum(X)', 'sum(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_dot_not_df(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, sum(self.df.X * self.df.Y))
def test_dot_split_by_not_df(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
self.df['X * Y'] = self.df.X * self.df.Y
expected = self.df.groupby('grp')['X * Y'].sum()
expected.name = 'sum(X * Y)'
testing.assert_series_equal(output, expected)
def test_dot_where(self):
metric = metrics.Dot('X', 'Y', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
d = self.df.query('grp == "A"')
self.assertEqual(output, sum(d.X * d.Y))
def test_dot_unmelted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'sum(X * Y)': [sum(self.df.X * self.df.Y)]})
testing.assert_frame_equal(output, expected)
def test_dot_normalized(self):
metric = metrics.Dot('X', 'Y', True)
output = metric.compute_on(self.df)
expected = pd.DataFrame({'mean(X * Y)': [(self.df.X * self.df.Y).mean()]})
testing.assert_frame_equal(output, expected)
def test_dot_melted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [sum(self.df.X * self.df.Y)]},
index=['sum(X * Y)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_dot_split_by_unmelted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'sum(X * Y)': [5, 45]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_dot_split_by_melted(self):
metric = metrics.Dot('X', 'Y')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [5, 45],
'grp': ['A', 'B']
},
index=['sum(X * Y)', 'sum(X * Y)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_mean_not_df(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2)
def test_mean_split_by_not_df(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].mean()
expected.name = 'mean(X)'
testing.assert_series_equal(output, expected)
def test_mean_where(self):
metric = metrics.Mean('X', where='grp == "A"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "A"')['X'].mean()
self.assertEqual(output, expected)
def test_mean_unmelted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'mean(X)': [2.]})
testing.assert_frame_equal(output, expected)
def test_mean_melted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [2.]}, index=['mean(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_mean_split_by_unmelted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'mean(X)': [1, 2.75]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_mean_split_by_melted(self):
metric = metrics.Mean('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [1, 2.75],
'grp': ['A', 'B']
},
index=['mean(X)', 'mean(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_max(self):
metric = metrics.Max('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'max(X)': [4]})
testing.assert_frame_equal(output, expected)
def test_min(self):
metric = metrics.Min('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'min(X)': [1]})
testing.assert_frame_equal(output, expected)
def test_weighted_mean_not_df(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 1.25)
def test_weighted_mean_split_by_not_df(self):
df = pd.DataFrame({
'X': [1, 2, 1, 3],
'Y': [3, 1, 0, 1],
'grp': ['A', 'A', 'B', 'B']
})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, 'grp', return_dataframe=False)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.Series((1.25, 3.), index=['A', 'B'])
expected.index.name = 'grp'
expected.name = 'Y-weighted mean(X)'
testing.assert_series_equal(output, expected)
def test_weighted_mean_unmelted(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df)
expected = pd.DataFrame({'Y-weighted mean(X)': [1.25]})
testing.assert_frame_equal(output, expected)
def test_weighted_mean_melted(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, melted=True)
expected = pd.DataFrame({'Value': [1.25]}, index=['Y-weighted mean(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_weighted_mean_split_by_unmelted(self):
df = pd.DataFrame({
'X': [1, 2, 1, 3],
'Y': [3, 1, 0, 1],
'grp': ['A', 'A', 'B', 'B']
})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({'Y-weighted mean(X)': [1.25, 3.]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_weighted_mean_split_by_melted(self):
df = pd.DataFrame({
'X': [1, 2, 1, 3],
'Y': [3, 1, 0, 1],
'grp': ['A', 'A', 'B', 'B']
})
metric = metrics.Mean('X', 'Y')
output = metric.compute_on(df, 'grp', melted=True)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({
'Value': [1.25, 3.],
'grp': ['A', 'B']
},
index=['Y-weighted mean(X)', 'Y-weighted mean(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_quantile_raise(self):
with self.assertRaises(ValueError) as cm:
metrics.Quantile('X', 2)
self.assertEqual(str(cm.exception), 'quantiles must be in [0, 1].')
def test_quantile_multiple_quantiles_raise(self):
with self.assertRaises(ValueError) as cm:
metrics.Quantile('X', [0.1, 2])
self.assertEqual(str(cm.exception), 'quantiles must be in [0, 1].')
def test_quantile_not_df(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2)
def test_quantile_where(self):
metric = metrics.Quantile('X', where='grp == "B"')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2.5)
def test_quantile_interpolation(self):
metric = metrics.Quantile('X', 0.5, interpolation='lower')
output = metric.compute_on(
pd.DataFrame({'X': [1, 2]}), return_dataframe=False)
self.assertEqual(output, 1)
def test_quantile_split_by_not_df(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].quantile(0.5)
expected.name = 'quantile(X, 0.5)'
testing.assert_series_equal(output, expected)
def test_quantile_unmelted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df)
expected = pd.DataFrame({'quantile(X, 0.5)': [2.]})
testing.assert_frame_equal(output, expected)
def test_quantile_melted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [2.]}, index=['quantile(X, 0.5)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_quantile_split_by_unmelted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'quantile(X, 0.5)': [1, 2.5]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_quantile_split_by_melted(self):
metric = metrics.Quantile('X', 0.5)
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [1, 2.5],
'grp': ['A', 'B']
},
index=['quantile(X, 0.5)'] * 2)
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_quantile_multiple_quantiles(self):
df = pd.DataFrame({'X': [0, 1]})
metric = metrics.MetricList(
[metrics.Quantile('X', [0.1, 0.5]),
metrics.Count('X')])
output = metric.compute_on(df)
expected = pd.DataFrame(
[[0.1, 0.5, 2]],
columns=['quantile(X, 0.1)', 'quantile(X, 0.5)', 'count(X)'])
testing.assert_frame_equal(output, expected)
def test_quantile_multiple_quantiles_melted(self):
df = pd.DataFrame({'X': [0, 1]})
metric = metrics.MetricList(
[metrics.Quantile('X', [0.1, 0.5]),
metrics.Count('X')])
output = metric.compute_on(df, melted=True)
expected = pd.DataFrame(
{'Value': [0.1, 0.5, 2]},
index=['quantile(X, 0.1)', 'quantile(X, 0.5)', 'count(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_weighted_quantile_not_df(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Quantile('X', weight='Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 1.25)
def test_weighted_quantile_df(self):
df = pd.DataFrame({'X': [1, 2], 'Y': [3, 1]})
metric = metrics.Quantile('X', weight='Y')
output = metric.compute_on(df)
expected = pd.DataFrame({'Y-weighted quantile(X, 0.5)': [1.25]})
testing.assert_frame_equal(output, expected)
def test_weighted_quantile_multiple_quantiles_split_by(self):
df = pd.DataFrame({
'X': [0, 1, 2, 1, 2, 3],
'Y': [1, 2, 2, 1, 1, 1],
'grp': ['B'] * 3 + ['A'] * 3
})
metric = metrics.MetricList(
[metrics.Quantile('X', [0.25, 0.5], weight='Y'),
metrics.Sum('X')])
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame(
{
'Y-weighted quantile(X, 0.25)': [1.25, 0.5],
'Y-weighted quantile(X, 0.5)': [2., 1.25],
'sum(X)': [6, 3]
},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_weighted_quantile_multiple_quantiles_split_by_melted(self):
df = pd.DataFrame({
'X': [0, 1, 2, 1, 2, 3],
'Y': [1, 2, 2, 1, 1, 1],
'grp': ['B'] * 3 + ['A'] * 3
})
metric = metrics.MetricList(
[metrics.Quantile('X', [0.25, 0.5], weight='Y'),
metrics.Sum('X')])
output = metric.compute_on(df, 'grp', melted=True)
output.sort_index(level=['Metric', 'grp'], inplace=True) # For Py2
expected = pd.DataFrame({'Value': [1.25, 0.5, 2., 1.25, 6., 3.]},
index=pd.MultiIndex.from_product(
([
'Y-weighted quantile(X, 0.25)',
'Y-weighted quantile(X, 0.5)', 'sum(X)'
], ['A', 'B']),
names=['Metric', 'grp']))
testing.assert_frame_equal(output, expected)
def test_variance_not_df(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.var())
def test_variance_biased(self):
metric = metrics.Variance('X', False)
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.var(ddof=0))
def test_variance_split_by_not_df(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].var()
expected.name = 'var(X)'
testing.assert_series_equal(output, expected)
def test_variance_where(self):
metric = metrics.Variance('X', where='grp == "B"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "B"')['X'].var()
self.assertEqual(output, expected)
def test_variance_unmelted(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'var(X)': [self.df.X.var()]})
testing.assert_frame_equal(output, expected)
def test_variance_melted(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [self.df.X.var()]}, index=['var(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_variance_split_by_unmelted(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'var(X)': self.df.groupby('grp')['X'].var()},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_variance_split_by_melted(self):
metric = metrics.Variance('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame(
{
'Value': self.df.groupby('grp')['X'].var().values,
'grp': ['A', 'B']
},
index=['var(X)', 'var(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_weighted_variance_not_df(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 1)
def test_weighted_variance_not_df_biased(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.Variance('X', False, 'Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 0.75)
def test_weighted_variance_split_by_not_df(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, 'grp', return_dataframe=False)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.Series((2., 1), index=['A', 'B'])
expected.index.name = 'grp'
expected.name = 'Y-weighted var(X)'
testing.assert_series_equal(output, expected)
def test_weighted_variance_unmelted(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df)
expected = pd.DataFrame({'Y-weighted var(X)': [1.]})
testing.assert_frame_equal(output, expected)
def test_weighted_variance_melted(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, melted=True)
expected = pd.DataFrame({'Value': [1.]}, index=['Y-weighted var(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_weighted_variance_split_by_unmelted(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({'Y-weighted var(X)': [2., 1]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_weighted_variance_split_by_melted(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.Variance('X', weight='Y')
output = metric.compute_on(df, 'grp', melted=True)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({
'Value': [2., 1],
'grp': ['A', 'B']
},
index=['Y-weighted var(X)', 'Y-weighted var(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_standard_deviation_not_df(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.std())
def test_standard_deviation_biased(self):
metric = metrics.StandardDeviation('X', False)
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.std(ddof=0))
def test_standard_deviation_split_by_not_df(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].std()
expected.name = 'sd(X)'
testing.assert_series_equal(output, expected)
def test_standard_deviation_where(self):
metric = metrics.StandardDeviation('X', where='grp == "B"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "B"')['X'].std()
self.assertEqual(output, expected)
def test_standard_deviation_unmelted(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'sd(X)': [self.df.X.std()]})
testing.assert_frame_equal(output, expected)
def test_standard_deviation_melted(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [self.df.X.std()]}, index=['sd(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_standard_deviation_split_by_unmelted(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'sd(X)': self.df.groupby('grp')['X'].std()},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_standard_deviation_split_by_melted(self):
metric = metrics.StandardDeviation('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame(
{
'Value': self.df.groupby('grp')['X'].std().values,
'grp': ['A', 'B']
},
index=['sd(X)', 'sd(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_weighted_standard_deviation_not_df(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.StandardDeviation('X', weight='Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, 1)
def test_weighted_standard_deviation_not_df_biased(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.StandardDeviation('X', False, 'Y')
output = metric.compute_on(df, return_dataframe=False)
self.assertEqual(output, np.sqrt(0.75))
def test_weighted_standard_deviation_split_by_not_df(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.StandardDeviation('X', weight='Y')
output = metric.compute_on(df, 'grp', return_dataframe=False)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.Series((np.sqrt(2), 1), index=['A', 'B'])
expected.index.name = 'grp'
expected.name = 'Y-weighted sd(X)'
testing.assert_series_equal(output, expected)
def test_weighted_standard_deviation_unmelted(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.StandardDeviation('X', weight='Y')
output = metric.compute_on(df)
expected = pd.DataFrame({'Y-weighted sd(X)': [1.]})
testing.assert_frame_equal(output, expected)
def test_weighted_standard_deviation_melted(self):
df = pd.DataFrame({'X': [0, 2], 'Y': [1, 3]})
metric = metrics.StandardDeviation('X', weight='Y')
output = metric.compute_on(df, melted=True)
expected = pd.DataFrame({'Value': [1.]}, index=['Y-weighted sd(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_weighted_standard_deviation_split_by_unmelted(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.StandardDeviation('X', weight='Y')
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({'Y-weighted sd(X)': [np.sqrt(2), 1]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_weighted_standard_deviation_split_by_melted(self):
df = pd.DataFrame({
'X': [0, 2, 1, 3],
'Y': [1, 3, 1, 1],
'grp': ['B', 'B', 'A', 'A']
})
metric = metrics.StandardDeviation('X', weight='Y')
output = metric.compute_on(df, 'grp', melted=True)
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame({
'Value': [np.sqrt(2), 1],
'grp': ['A', 'B']
},
index=['Y-weighted sd(X)', 'Y-weighted sd(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_cv_not_df(self):
metric = metrics.CV('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, np.sqrt(1 / 3.))
def test_cv_biased(self):
metric = metrics.CV('X', False)
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.std(ddof=0) / np.mean(self.df.X))
def test_cv_split_by_not_df(self):
metric = metrics.CV('X')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
expected = self.df.groupby('grp')['X'].std() / [1, 2.75]
expected.name = 'cv(X)'
testing.assert_series_equal(output, expected)
def test_cv_where(self):
metric = metrics.CV('X', where='grp == "B"')
output = metric.compute_on(self.df, return_dataframe=False)
expected = self.df.query('grp == "B"')['X'].std() / 2.75
self.assertEqual(output, expected)
def test_cv_unmelted(self):
metric = metrics.CV('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'cv(X)': [np.sqrt(1 / 3.)]})
testing.assert_frame_equal(output, expected)
def test_cv_melted(self):
metric = metrics.CV('X')
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({'Value': [np.sqrt(1 / 3.)]}, index=['cv(X)'])
expected.index.name = 'Metric'
testing.assert_frame_equal(output, expected)
def test_cv_split_by_unmelted(self):
metric = metrics.CV('X')
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame({'cv(X)': [0, np.sqrt(1 / 8.25)]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_cv_split_by_melted(self):
metric = metrics.CV('X')
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame(
data={
'Value': [0, np.sqrt(1 / 8.25)],
'grp': ['A', 'B']
},
index=['cv(X)', 'cv(X)'])
expected.index.name = 'Metric'
expected.set_index('grp', append=True, inplace=True)
testing.assert_frame_equal(output, expected)
def test_correlation(self):
metric = metrics.Correlation('X', 'Y')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, np.corrcoef(self.df.X, self.df.Y)[0, 1])
self.assertEqual(output, self.df.X.corr(self.df.Y))
def test_weighted_correlation(self):
metric = metrics.Correlation('X', 'Y', weight='Y')
output = metric.compute_on(self.df)
cov = np.cov(self.df.X, self.df.Y, aweights=self.df.Y)
expected = pd.DataFrame(
{'Y-weighted corr(X, Y)': [cov[0, 1] / np.sqrt(cov[0, 0] * cov[1, 1])]})
testing.assert_frame_equal(output, expected)
def test_correlation_method(self):
metric = metrics.Correlation('X', 'Y', method='kendall')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, self.df.X.corr(self.df.Y, method='kendall'))
def test_correlation_kwargs(self):
metric = metrics.Correlation('X', 'Y', min_periods=10)
output = metric.compute_on(self.df, return_dataframe=False)
self.assertTrue(pd.isnull(output))
def test_correlation_split_by_not_df(self):
metric = metrics.Correlation('X', 'Y')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
corr_a = metric.compute_on(
self.df[self.df.grp == 'A'], return_dataframe=False)
corr_b = metric.compute_on(
self.df[self.df.grp == 'B'], return_dataframe=False)
expected = pd.Series([corr_a, corr_b], index=['A', 'B'])
expected.index.name = 'grp'
expected.name = 'corr(X, Y)'
testing.assert_series_equal(output, expected)
def test_correlation_where(self):
metric = metrics.Correlation('X', 'Y', where='grp == "B"')
metric_no_filter = metrics.Correlation('X', 'Y')
output = metric.compute_on(self.df)
expected = metric_no_filter.compute_on(self.df[self.df.grp == 'B'])
testing.assert_frame_equal(output, expected)
def test_correlation_df(self):
metric = metrics.Correlation('X', 'Y')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'corr(X, Y)': [self.df.X.corr(self.df.Y)]})
testing.assert_frame_equal(output, expected)
def test_correlation_split_by_df(self):
df = pd.DataFrame({
'X': [1, 1, 1, 2, 2, 2, 3, 4],
'Y': [3, 1, 1, 3, 4, 4, 3, 5],
'grp': ['A'] * 4 + ['B'] * 4
})
metric = metrics.Correlation('X', 'Y')
output = metric.compute_on(df, 'grp')
corr_a = metric.compute_on(df[df.grp == 'A'], return_dataframe=False)
corr_b = metric.compute_on(df[df.grp == 'B'], return_dataframe=False)
expected = pd.DataFrame({'corr(X, Y)': [corr_a, corr_b]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_cov(self):
metric = metrics.Cov('X', 'Y')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, np.cov(self.df.X, self.df.Y)[0, 1])
self.assertEqual(output, self.df.X.cov(self.df.Y))
def test_cov_bias(self):
metric = metrics.Cov('X', 'Y', bias=True)
output = metric.compute_on(self.df, return_dataframe=False)
expected = np.mean(
(self.df.X - self.df.X.mean()) * (self.df.Y - self.df.Y.mean()))
self.assertEqual(output, expected)
def test_cov_ddof(self):
metric = metrics.Cov('X', 'Y', ddof=0)
output = metric.compute_on(self.df, return_dataframe=False)
expected = np.mean(
(self.df.X - self.df.X.mean()) * (self.df.Y - self.df.Y.mean()))
self.assertEqual(output, expected)
def test_cov_kwargs(self):
metric = metrics.Cov('X', 'Y', fweights=self.df.Y)
output = metric.compute_on(self.df)
expected = np.cov(self.df.X, self.df.Y, fweights=self.df.Y)[0, 1]
expected = pd.DataFrame({'cov(X, Y)': [expected]})
testing.assert_frame_equal(output, expected)
def test_weighted_cov(self):
metric = metrics.Cov('X', 'Y', weight='Y')
output = metric.compute_on(self.df)
expected = np.cov(self.df.X, self.df.Y, aweights=self.df.Y)[0, 1]
expected = pd.DataFrame({'Y-weighted cov(X, Y)': [expected]})
testing.assert_frame_equal(output, expected)
def test_cov_split_by_not_df(self):
metric = metrics.Cov('X', 'Y')
output = metric.compute_on(self.df, 'grp', return_dataframe=False)
output.sort_index(level='grp', inplace=True) # For Py2
cov_a = metric.compute_on(
self.df[self.df.grp == 'A'], return_dataframe=False)
cov_b = metric.compute_on(
self.df[self.df.grp == 'B'], return_dataframe=False)
expected = pd.Series([cov_a, cov_b], index=['A', 'B'])
expected.index.name = 'grp'
expected.name = 'cov(X, Y)'
testing.assert_series_equal(output, expected)
def test_cov_where(self):
metric = metrics.Cov('X', 'Y', where='grp == "B"')
metric_no_filter = metrics.Cov('X', 'Y')
output = metric.compute_on(self.df)
expected = metric_no_filter.compute_on(self.df[self.df.grp == 'B'])
testing.assert_frame_equal(output, expected)
def test_cov_df(self):
metric = metrics.Cov('X', 'Y')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'cov(X, Y)': [self.df.X.cov(self.df.Y)]})
testing.assert_frame_equal(output, expected)
def test_cov_split_by_df(self):
df = pd.DataFrame({
'X': [1, 1, 1, 2, 2, 2, 3, 4],
'Y': [3, 1, 1, 3, 4, 4, 3, 5],
'grp': ['A'] * 4 + ['B'] * 4
})
metric = metrics.Cov('X', 'Y')
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
cov_a = metric.compute_on(df[df.grp == 'A'], return_dataframe=False)
cov_b = metric.compute_on(df[df.grp == 'B'], return_dataframe=False)
expected = pd.DataFrame({'cov(X, Y)': [cov_a, cov_b]}, index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
class CompositeMetric(unittest.TestCase):
"""Tests for composition of two metrics."""
df = pd.DataFrame({'X': [0, 1, 2, 3], 'Y': [0, 1, 1, 2]})
def test_add(self):
df = pd.DataFrame({'X': [1, 2, 3], 'Y': ['a', 'b', 'c']})
sumx = metrics.Sum('X')
metric = sumx + sumx
output = metric.compute_on(df, 'Y', return_dataframe=False)
expected = pd.Series([2, 4, 6], index=['a', 'b', 'c'])
expected.name = 'sum(X) + sum(X)'
expected.index.name = 'Y'
testing.assert_series_equal(output, expected)
def test_sub(self):
sumx = metrics.Sum('X')
sumy = metrics.Sum('Y')
metric = sumx - sumy
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2)
def test_mul(self):
metric = 2. * metrics.Sum('X') * metrics.Sum('Y')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 48)
self.assertEqual(metric.name, '2.0 * sum(X) * sum(Y)')
def test_div(self):
metric = 6. / metrics.Sum('X') / metrics.Sum('Y')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 0.25)
self.assertEqual(metric.name, '6.0 / sum(X) / sum(Y)')
def test_neg(self):
base = metrics.MetricList((metrics.Sum('X'), metrics.Sum('Y')))
metric = -base
output = metric.compute_on(self.df)
expected = -base.compute_on(self.df)
expected.columns = ['-sum(X)', '-sum(Y)']
testing.assert_frame_equal(output, expected)
def test_pow(self):
metric = metrics.Sum('X')**metrics.Sum('Y')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 1296)
self.assertEqual(metric.name, 'sum(X) ^ sum(Y)')
def test_pow_with_scalar(self):
metric = metrics.Sum('X')**2
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 36)
self.assertEqual(metric.name, 'sum(X) ^ 2')
def test_sqrt(self):
metric = metrics.Sum('Y')**0.5
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 2)
self.assertEqual(metric.name, 'sqrt(sum(Y))')
def test_rpow(self):
metric = 2**metrics.Sum('X')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 64)
self.assertEqual(metric.name, '2 ^ sum(X)')
def test_ratio(self):
metric = metrics.Ratio('X', 'Y')
output = metric.compute_on(self.df)
expected = metrics.Sum('X') / metrics.Sum('Y')
expected = expected.compute_on(self.df)
testing.assert_frame_equal(output, expected)
def test_to_dataframe(self):
metric = 5 + metrics.Sum('X')
output = metric.compute_on(self.df)
expected = pd.DataFrame({'5 + sum(X)': [11]})
testing.assert_frame_equal(output, expected)
def test_where(self):
metric = metrics.Count('X', 'f', 'Y == 1') * metrics.Sum('X', 'b', 'Y == 2')
output = metric.compute_on(self.df, return_dataframe=False)
self.assertEqual(output, 6)
def test_between_operations(self):
df = pd.DataFrame({
'X': [1, 2, 3, 4, 5, 6],
'Condition': [0, 0, 0, 1, 1, 1],
'grp': ['A', 'A', 'B', 'A', 'B', 'C']
})
suma = metrics.Sum('X', where='grp == "A"')
sumb = metrics.Sum('X', where='grp == "B"')
pct = operations.PercentChange('Condition', 0)
output = (pct(suma) - pct(sumb)).compute_on(df)
expected = pct(suma).compute_on(df) - pct(sumb).compute_on(df)
expected.columns = ['%s - %s' % (c, c) for c in expected.columns]
testing.assert_frame_equal(output, expected)
def test_between_operations_where(self):
df = pd.DataFrame({
'X': [1, 2, 3, 4, 5, 6],
'Condition': [0, 0, 0, 1, 1, 1],
'grp': ['A', 'A', 'B', 'A', 'B', 'C']
})
sumx = metrics.Sum('X')
pcta = operations.PercentChange('Condition', 0, sumx, where='grp == "A"')
pctb = operations.PercentChange('Condition', 0, sumx, where='grp == "B"')
output = (pcta - pctb).compute_on(df)
expected = pcta.compute_on(df) - pctb.compute_on(df)
expected.columns = ['%s - %s' % (c, c) for c in expected.columns]
testing.assert_frame_equal(output, expected)
def test_between_stderr_operations_where(self):
df = pd.DataFrame({
'X': [1, 2, 3, 4, 5, 6] * 2,
'Condition': [0, 0, 0, 1, 1, 1] * 2,
'grp': ['A', 'A', 'B', 'A', 'B', 'C'] * 2,
'cookie': [1, 2, 3] * 4
})
np.random.seed(42)
sumx = metrics.Sum('X')
pcta = operations.PercentChange('Condition', 0, sumx, where='grp == "A"')
pctb = operations.PercentChange('Condition', 0, sumx)
jk = operations.Jackknife('cookie', pcta)
bst = operations.Bootstrap(None, pctb, 20, where='grp != "C"')
m = (jk / bst).rename_columns(
pd.MultiIndex.from_product((('sum(X)',), ('Value', 'SE'))))
output = m.compute_on(df)
np.random.seed(42)
expected = jk.compute_on(df).values / bst.compute_on(df).values
expected = pd.DataFrame(
expected, index=output.index, columns=output.columns)
testing.assert_frame_equal(output, expected)
def test_rename_columns(self):
df = | pd.DataFrame({'X': [1, 2], 'Y': [3, 1]}) | pandas.DataFrame |
from sklearn.decomposition import PCA
import tensorflow as tf
import pandas as pd
import matplotlib.pyplot as plt
def pad_sentences(sentences, max_len):
"""
Pad sentences to the maximal length
@sentences: 2d array of sentence pairs
@max_len: Maximal length of a sentence in all datasets
@return: 2d array of padded sentence pairs
"""
for i in [0, 1]:
for j in range(len(sentences[i])):
s = sentences[i][j]
diff = max_len - len(s)
offset_left = int(diff / 2)
if diff % 2 == 0:
offset_right = offset_left
else:
offset_right = offset_left + 1
s = [""]* offset_left + s + [""]*offset_right
sentences[i][j] = s
return sentences
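# Illustrative sketch of pad_sentences (the token lists below are made up and
# not part of the original pipeline): both sides of every pair are
# centre-padded with "" up to max_len.
def _pad_sentences_demo():
    pairs = [[["a", "b"], ["c", "d", "e"]], [["f"], ["g", "h"]]]
    padded = pad_sentences(pairs, max_len=4)
    # e.g. ["a", "b"] -> ["", "a", "b", ""] and ["c", "d", "e"] -> ["c", "d", "e", ""]
    assert all(len(s) == 4 for side in padded for s in side)
    return padded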
def transform(vocab_in_word2vec, train_unknown_words,
dev_and_test_unknown_words, embds):
"""
Transform embeddings to a dataframe with labels
@vocab_in_word2vec: All words that are representable by Google
word2vec
@train_unknown_words: Words in training set that are not
representable by Google word2vec
@dev_and_test_unknown_words: Words in development or test set that
are not representable by Google word2vec
@embds: Embeddings of all words
@return: Transformed embeddings with labels
"""
init = [tf.global_variables_initializer(), tf.tables_initializer()]
with tf.Session() as session:
session.run(init)
y = []
for i in range(len(vocab_in_word2vec)):
y.append(0)
for i in range(len(train_unknown_words)):
y.append(1)
for i in range(len(dev_and_test_unknown_words)):
y.append(2)
for i in range(1):
y.append(3)
X = embds.eval()
pca = PCA(n_components=2)
transformed = pd.DataFrame(pca.fit_transform(X))
transformed['y'] = pd.Series(y, index=transformed.index)
return transformed
"""Plotting functions for decoders."""
import copy
import matplotlib.animation as animation
import matplotlib.lines as mlines
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import numpy as np
import os
import pandas as pd
import pickle
from behavenet import make_dir_if_not_exists
from behavenet.fitting.eval import get_reconstruction
from behavenet.fitting.utils import get_best_model_and_data
from behavenet.data.utils import get_region_list
from behavenet.fitting.utils import get_expt_dir
from behavenet.fitting.utils import get_session_dir
from behavenet.fitting.utils import get_subdirs
from behavenet.plotting import concat, save_movie
# to ignore imports for sphix-autoapidoc
__all__ = [
'get_r2s_by_trial', 'get_best_models', 'get_r2s_across_trials',
'make_neural_reconstruction_movie_wrapper', 'make_neural_reconstruction_movie',
'plot_neural_reconstruction_traces_wrapper', 'plot_neural_reconstruction_traces']
def _get_dataset_str(hparams):
return os.path.join(hparams['expt'], hparams['animal'], hparams['session'])
def get_r2s_by_trial(hparams, model_types):
"""For a given session, load R^2 metrics from all decoders defined by hparams.
Parameters
----------
hparams : :obj:`dict`
needs to contain enough information to specify decoders
model_types : :obj:`list` of :obj:`strs`
'mlp' | 'mlp-mv' | 'lstm'
Returns
-------
:obj:`pd.DataFrame`
pandas dataframe of decoder validation metrics
"""
dataset = _get_dataset_str(hparams)
region_names = get_region_list(hparams)
metrics = []
model_idx = 0
model_counter = 0
for region in region_names:
hparams['region'] = region
for model_type in model_types:
hparams['session_dir'], _ = get_session_dir(
hparams, session_source=hparams.get('all_source', 'save'))
expt_dir = get_expt_dir(
hparams,
model_type=model_type,
model_class=hparams['model_class'],
expt_name=hparams['experiment_name'])
# gather all versions
try:
versions = get_subdirs(expt_dir)
except Exception:
print('No models in %s; skipping' % expt_dir)
continue
# load csv files with model metrics (saved out from test tube)
for i, version in enumerate(versions):
# read metrics csv file
model_dir = os.path.join(expt_dir, version)
try:
metric = pd.read_csv(os.path.join(model_dir, 'metrics.csv'))
model_counter += 1
except FileNotFoundError:
continue
with open(os.path.join(model_dir, 'meta_tags.pkl'), 'rb') as f:
hparams = pickle.load(f)
# append model info to metrics ()
version_num = version[8:]
metric['version'] = str('version_%i' % model_idx + version_num)
metric['region'] = region
metric['dataset'] = dataset
metric['model_type'] = model_type
for key, val in hparams.items():
if isinstance(val, (str, int, float)):
metric[key] = val
metrics.append(metric)
model_idx += 10000 # assumes no more than 10k model versions/expt
# put everything in pandas dataframe
metrics_df = pd.concat(metrics, sort=False)
return metrics_df
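# Hedged usage sketch (the keys and values below are placeholders, not taken
# from a real config):
#   hparams = {'expt': 'expt0', 'animal': 'mouse0', 'session': 'session-00',
#              'model_class': 'neural-ae', 'experiment_name': 'grid_search', ...}
#   metrics_df = get_r2s_by_trial(hparams, model_types=['mlp', 'lstm'])
#   best_df = get_best_models(metrics_df)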
def get_best_models(metrics_df):
"""Find best decoder over l2 regularization and learning rate.
Returns a dataframe with test R^2s for each batch, for the best decoder in each category
(defined by dataset, region, n_lags, and n_hid_layers).
Parameters
----------
metrics_df : :obj:`pd.DataFrame`
output of :func:`get_r2s_by_trial`
Returns
-------
:obj:`pd.DataFrame`
test R^2s for each batch
"""
# for each version, only keep rows where test_loss is not nan
data_queried = metrics_df[pd.notna(metrics_df.test_loss)]
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, Biota Technology.
# www.biota.com
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import division
import numpy as np
from skbio.stats import subsample_counts
import pandas as pd
from functools import partial
def validate_gibbs_input(sources, sinks=None):
'''Validate `gibbs` inputs and coerce/round to type `np.int32`.
Summary
-------
Checks if data contains `nan` or `null` values, and returns data as
type `np.int32`. If both `sources` and `sinks` are passed, columns must
match exactly (including order).
Parameters
----------
sources : pd.DataFrame
A dataframe containing count data. Must be castable to `np.int32`.
sinks : optional, pd.DataFrame or None
If not `None` a dataframe containing count data that is castable to
`np.int32`.
Returns
-------
pd.Dataframe(s)
Raises
------
ValueError
If `nan` or `null` values found in inputs.
ValueError
If any values are smaller than 0.
ValueError
If any columns of an input dataframe are non-numeric.
ValueError
If `sources` and `sinks` passed and columns are not identical.
'''
if sinks is not None:
dfs = [sources, sinks]
else:
dfs = [sources]
for df in dfs:
# Because of this bug (https://github.com/numpy/numpy/issues/6114)
# we can't use e.g. np.isreal(df.dtypes).all(). Instead we use
# applymap. Based on:
# http://stackoverflow.com/questions/21771133/finding-non-numeric-rows-in-dataframe-in-pandas
if not df.applymap(np.isreal).values.all():
raise ValueError('A dataframe contains one or more values which '
'are not numeric. Data must be exclusively '
'positive integers.')
if np.isnan(df.values).any():
raise ValueError('A dataframe has `nan` or `null` values. Data '
'must be exclusively positive integers.')
if (df.values < 0).any():
raise ValueError('A dataframe has a negative count. Data '
'must be exclusively positive integers.')
if sinks is not None:
if not (sinks.columns == sources.columns).all():
raise ValueError('Dataframes do not contain identical (and '
'identically ordered) columns. Columns must '
'match exactly.')
return (sources.astype(np.int32, copy=False),
sinks.astype(np.int32, copy=False))
else:
return sources.astype(np.int32, copy=False)
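# Minimal sketch with made-up counts: valid data comes back as int32 frames,
# while negative, NaN, or non-numeric entries raise ValueError.
def _validate_gibbs_input_demo():
    sources = pd.DataFrame([[10, 0, 3], [2, 7, 5]],
                           index=['source1', 'source2'],
                           columns=['f1', 'f2', 'f3'])
    sinks = pd.DataFrame([[4, 4, 4]], index=['sink1'],
                         columns=['f1', 'f2', 'f3'])
    coerced_sources, coerced_sinks = validate_gibbs_input(sources, sinks)
    assert coerced_sources.values.dtype == np.int32
    return coerced_sources, coerced_sinks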
def validate_gibbs_parameters(alpha1, alpha2, beta, restarts,
draws_per_restart, burnin, delay):
'''Return `True` if params numerically acceptable. See `gibbs` for docs.'''
real_vals = [alpha1, alpha2, beta]
int_vals = [restarts, draws_per_restart, burnin, delay]
# Check everything is real.
if all(np.isreal(val) for val in real_vals + int_vals):
# Check that integer values are some type of int.
int_check = all(isinstance(val, (int, np.int32, np.int64)) for val in
int_vals)
# All integer values must be > 0.
pos_int = all(val > 0 for val in int_vals)
# All real values must be non-negative.
non_neg = all(val >= 0 for val in real_vals)
return int_check and pos_int and non_neg
else:  # Failed to be all numeric values.
return False
def intersect_and_sort_samples(sample_metadata, feature_table):
'''Return input tables retaining only shared samples, row order equivalent.
Parameters
----------
sample_metadata : pd.DataFrame
Contingency table with rows, columns = samples, metadata.
feature_table : pd.DataFrame
Contingency table with rows, columns = samples, features.
Returns
-------
sample_metadata, feature_table : pd.DataFrame, pd.DataFrame
Input tables with unshared samples removed and ordered equivalently.
Raises
------
ValueError
If no shared samples are found.
'''
shared_samples = np.intersect1d(sample_metadata.index, feature_table.index)
if shared_samples.size == 0:
raise ValueError('There are no shared samples between the feature '
'table and the sample metadata. Ensure that you have '
'passed the correct files.')
elif (shared_samples.size == sample_metadata.shape[0] ==
feature_table.shape[0]):
s_metadata = sample_metadata.copy()
s_features = feature_table.copy()
else:
s_metadata = sample_metadata.loc[np.in1d(sample_metadata.index,
shared_samples), :].copy()
s_features = feature_table.loc[np.in1d(feature_table.index,
shared_samples), :].copy()
return s_metadata, s_features.loc[s_metadata.index, :]
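# Small sketch with toy tables: only the shared sample 's2' is kept, and both
# outputs come back in the same row order.
def _intersect_and_sort_samples_demo():
    metadata = pd.DataFrame({'Env': ['gut', 'soil']}, index=['s1', 's2'])
    features = pd.DataFrame([[1, 2], [3, 4]], index=['s2', 's3'],
                            columns=['f1', 'f2'])
    meta_out, feat_out = intersect_and_sort_samples(metadata, features)
    assert list(meta_out.index) == list(feat_out.index) == ['s2']
    return meta_out, feat_out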
def get_samples(sample_metadata, col, value):
'''Return samples which have `value` under `col`.'''
return sample_metadata.index[sample_metadata[col] == value].copy()
def collapse_source_data(sample_metadata, feature_table, source_samples,
category, method):
'''Collapse each set of source samples into an aggregate source.
Parameters
----------
sample_metadata : pd.DataFrame
Contingency table where rows are features and columns are metadata.
feature_table : pd.DataFrame
Contingency table where rows are features and columns are samples.
source_samples : iterable
Samples which should be considered for collapsing (i.e. are sources).
category : str
Column in `sample_metadata` which should be used to group samples.
method : str
One of the available aggregation methods in pd.DataFrame.agg (mean,
median, prod, sum, std, var).
Returns
-------
pd.DataFrame
Collapsed sample data.
Notes
-----
This function calls `validate_gibbs_input` before returning the collapsed
source table to ensure aggregation has not introduced non-integer values.
The order of the collapsed sources is determined by the sort order of their
names. For instance, in the example below, .4 comes before 3.0 so the
collapsed sources will have the 0th row as .4.
Examples
--------
>>> samples = ['sample1', 'sample2', 'sample3', 'sample4']
>>> category = 'pH'
>>> values = [3.0, 0.4, 3.0, 3.0]
>>> stable = pd.DataFrame(values, index=samples, columns = [category])
>>> stable
pH
sample1 3.0
sample2 0.4
sample3 3.0
sample4 3.0
>>> fdata = np.array([[ 10, 50, 10, 70],
[ 0, 25, 10, 5],
[ 0, 25, 10, 5],
[100, 0, 10, 5]])
>>> ftable = pd.DataFrame(fdata, index = stable.index)
>>> ftable
0 1 2 3
sample1 10 50 10 70
sample2 0 25 10 5
sample3 0 25 10 5
sample4 100 0 10 5
>>> source_samples = ['sample1', 'sample2', 'sample3']
>>> method = 'sum'
>>> csources = collapse_source_data(stable, ftable, source_samples,
category, method)
>>> csources
0 1 2 3
collapse_col
0.4 0 25 10 5
3.0 10 75 20 75
'''
sources = sample_metadata.loc[source_samples, :]
table = feature_table.loc[sources.index, :].copy()
table['collapse_col'] = sources[category]
return validate_gibbs_input(table.groupby('collapse_col').agg(method))
def subsample_dataframe(df, depth, replace=False):
'''Subsample (rarify) input dataframe without replacement.
Parameters
----------
df : pd.DataFrame
Feature table where rows are features and columns are samples.
depth : int
Number of sequences to choose per sample.
replace : bool, optional
If ``True``, subsample with replacement. If ``False`` (the default),
subsample without replacement.
Returns
-------
pd.DataFrame
Subsampled dataframe.
'''
def subsample(x):
return pd.Series(subsample_counts(x.values, n=depth, replace=replace),
index=x.index)
return df.apply(subsample, axis=1)
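# Quick sketch with toy counts: every row (sample) is rarefied to the same
# depth, so all row sums equal `depth` afterwards.
def _subsample_dataframe_demo():
    table = pd.DataFrame([[10, 5, 0], [3, 3, 4]], index=['s1', 's2'],
                         columns=['f1', 'f2', 'f3'])
    rarefied = subsample_dataframe(table, depth=5)
    assert (rarefied.sum(axis=1) == 5).all()
    return rarefied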
def generate_environment_assignments(n, num_sources):
'''Randomly assign `n` counts to one of `num_sources` environments.
Parameters
----------
n : int
Number of environment assignments to generate.
num_sources : int
Number of possible environment states (this includes the 'Unknown').
Returns
-------
seq_env_assignments : np.array
1D vector of length `n`. The ith entry is the environment assignment of
the ith feature.
envcounts : np.array
1D vector of length `num_sources`. The ith entry is the total number of
entries in `seq_env_assignments` which are equal to i.
'''
seq_env_assignments = np.random.choice(np.arange(num_sources), size=n,
replace=True)
envcounts = np.bincount(seq_env_assignments, minlength=num_sources)
return seq_env_assignments, envcounts
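# Tiny sketch: spread 8 sink sequences uniformly over 3 environments
# (2 known sources plus the Unknown).
def _generate_environment_assignments_demo():
    assignments, env_counts = generate_environment_assignments(8, 3)
    assert assignments.shape == (8,) and env_counts.sum() == 8
    return assignments, env_counts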
class ConditionalProbability(object):
def __init__(self, alpha1, alpha2, beta, source_data):
r"""Set properties used for calculating the conditional probability.
Parameters
----------
alpha1 : float
Prior counts of each feature in the training environments.
alpha2 : float
Prior counts of each feature in the Unknown environment. Higher
values make the Unknown environment smoother and less prone to
overfitting given a training sample.
beta : float
Number of prior counts of test sequences from each feature in each
environment
source_data : np.array
Columns are features, rows are collapsed samples. The [i,j]
entry is the sum of the counts of features j in all samples which
were considered part of source i.
Attributes
----------
m_xivs : np.array
This is an exact copy of the source_data passed when the function
is initialized. It is referenced as m_xivs because m_xiv is the
[v, xi] entry of the source data. In other words, the count of the
xith feature in the vth environment.
m_vs : np.array
The row sums of self.m_xivs. This is referenced as m_v in [1]_.
V : int
Number of environments (includes both known sources and the
'unknown' source).
tau : int
Number of features.
joint_probability : np.array
The joint conditional distribution. Until the `precalculate` method
is called, this will be uniformly zero.
n : int
Number of sequences in the sink.
known_p_tv : np.array
An array giving the precomputable parts of the probability of
finding the xith taxon in the vth environment given the known
sources, aka p_tv in the R implementation. Rows are (known)
sources, columns are features, shape is (V-1, tau).
denominator_p_v : float
The denominator of the calculation for finding the probability of
a sequence being in the vth environment given the training data
(source data).
known_source_cp : np.array
All precomputable portions of the conditional probability array.
Dimensions are the same as self.known_p_tv.
Notes
-----
This class exists to calculate the conditional probability given in
reference [1]_ (with modifications based on communications with the
author). Since the calculation of the conditional probability must
occur during each pass of the Gibbs sampler, reducing the number of
computations is of paramount importance. This class precomputes
everything that is static throughout a run of the sampler to reduce the
innermost for-loop computations.
The formula used to calculate the conditional joint probability is
described in the project readme file.
The variables are named in the class, as well as its methods, in
accordance with the variable names used in [1]_.
Examples
--------
The class is written so that it will be created before being passed to
the function which handles the loops of the Gibbs sampling.
>>> cp = ConditionalProbability(alpha1 = .5, alpha2 = .001, beta = 10,
... np.array([[0, 0, 0, 100, 100, 100],
... [100, 100, 100, 0, 0, 0]]))
Once it is passed to the Gibbs sampling function, the number of
sequences in the sink becomes known, and we can update the object with
this information to allow final precomputation.
>>> cp.set_n(367)
>>> cp.precompute()
Now we can compute the 'slice' of the conditional probability depending
on the current state of the test sequences (the ones randomly assigned
and then iteratively reassigned) and which feature (the slice) the
sequence we have removed was from.
>>> xi = 2
Count of the training sequences (that are feature xi) currently
assigned to the unknown environment.
>>> m_xiV = 38
Sum of the training sequences currently assigned to the unknown
environment (over all features).
>>> m_V = 158
Counts of the test sequences in each environment at the current
iteration of the sampler.
>>> n_vnoti = np.array([10, 500, 6])
Calculating the probability slice.
>>> cp.calculate_cp_slice(xi, m_xiV, m_V, n_vnoti)
array([8.55007781e-05, 4.38234238e-01, 9.92823532e-03])
References
----------
.. [1] Knights et al. "Bayesian community-wide culture-independent
source tracking", Nature Methods 2011.
"""
self.alpha1 = alpha1
self.alpha2 = alpha2
self.beta = beta
self.m_xivs = source_data.astype(np.float64)
self.m_vs = np.expand_dims(source_data.sum(1),
axis=1).astype(np.float64)
self.V = source_data.shape[0] + 1
self.tau = source_data.shape[1]
# Create the joint probability vector which will be overwritten each
# time self.calculate_cp_slice is called.
self.joint_probability = np.zeros(self.V, dtype=np.float64)
def set_n(self, n):
"""Set the sum of the sink."""
self.n = n
def precalculate(self):
"""Precompute all static quantities of the probability matrix."""
# Known source.
self.known_p_tv = (self.m_xivs + self.alpha1) / \
(self.m_vs + self.tau * self.alpha1)
self.denominator_p_v = self.n - 1 + (self.beta * self.V)
# We are going to be accessing columns of this array in the innermost
# loop of the Gibbs sampler. By forcing this array into 'F' order -
# 'Fortran-contiguous' - we've set it so that accessing column slices
# is faster. Tests indicate about 2X speed up in this operation from
# 'F' order as opposed to the default 'C' order.
self.known_source_cp = np.array(self.known_p_tv / self.denominator_p_v,
order='F', dtype=np.float64)
self.alpha2_n = self.alpha2 * self.n
self.alpha2_n_tau = self.alpha2_n * self.tau
def calculate_cp_slice(self, xi, m_xiV, m_V, n_vnoti):
"""Calculate slice of the conditional probability matrix.
Parameters
----------
xi : int
Index of the column (taxon) of the conditional probability matrix
that should be calculated.
m_xiV : float
Count of the training sequences (that are taxon xi) currently
assigned to the unknown environment.
m_V : float
Sum of the training sequences currently assigned to the unknown
environment (over all taxa).
n_vnoti : np.array
Counts of the test sequences in each environment at the current
iteration of the sampler.
Returns
-------
self.joint_probability : np.array
The joint conditional probability distribution for the current
taxon based on the current state of the sampler.
"""
# Components for known sources, i.e. indices {0,1...V-2}.
self.joint_probability[:-1] = \
self.known_source_cp[:, xi] * (n_vnoti[:-1] + self.beta)
# Component for unknown source, i.e. index V-1.
self.joint_probability[-1] = \
((m_xiV + self.alpha2_n) * (n_vnoti[-1] + self.beta)) / \
((m_V + self.alpha2_n_tau) * self.denominator_p_v)
return self.joint_probability
def gibbs_sampler(sink, cp, restarts, draws_per_restart, burnin, delay):
"""Run Gibbs Sampler to estimate feature contributions from a sink sample.
Parameters
----------
sink : np.array
A one-dimensional array containing counts of features whose sources are
to be estimated.
cp : ConditionalProbability object
Instantiation of the class handling probability calculations.
restarts : int
Number of independent Markov chains to grow. `draws_per_restart` *
`restarts` gives the number of samplings of the mixing proportions that
will be generated.
draws_per_restart : int
Number of times to sample the state of the Markov chain for each
independent chain grown.
burnin : int
Number of passes (withdrawal and reassignment of every sequence in the
sink) that will be made before a sample (draw) will be taken. Higher
values allow more convergence towards the true distribution before
draws are taken.
delay : int >= 1
Number of passes between each sampling (draw) of the Markov chain. Once
the burnin passes have been made, a sample will be taken, and
additional samples will be drawn every `delay` number of passes. This
is also known as 'thinning'. Thinning helps reduce the impact of
correlation between adjacent states of the Markov chain.
Returns
-------
final_envcounts : np.array
2D array of ints. Rows are draws, columns are sources. The [i, j] entry
is the number of sequences from draw i that where assigned to have come
from environment j.
final_env_assignments : np.array
2D array of ints. Rows are draws, columns are the sink sequences in a
conserved but arbitrary ordering. The [i, j] entry is the environment
that the jth sequence was assigned to in draw i.
final_taxon_assignments : np.array
2D array of ints. Rows are draws, columns are the sink sequences (same
ordering as `final_env_assignments`). The [i, j] entry is the feature
(taxon) index of the jth sequence; this ordering is identical for every
draw.
"""
# Basic bookkeeping information we will use throughout the function.
num_sources = cp.V
num_features = cp.tau
sink = sink.astype(np.int32)
sink_sum = sink.sum()
# Calculate the number of passes that need to be conducted.
total_draws = restarts * draws_per_restart
total_passes = burnin + (draws_per_restart - 1) * delay + 1
# Results containers.
final_envcounts = np.zeros((total_draws, num_sources), dtype=np.int32)
final_env_assignments = np.zeros((total_draws, sink_sum), dtype=np.int32)
final_taxon_assignments = np.zeros((total_draws, sink_sum), dtype=np.int32)
# Sequences from the sink will be randomly assigned a source environment
# and then reassigned based on an increasingly accurate set of
# probabilities. The order in which the sequences are selected for
# reassignment must be random to avoid a systematic bias where the
# sequences occurring later in the taxon_sequence book-keeping vector
# receive more accurate reassignments by virtue of more updates to the
# probability model. 'order' will be shuffled each pass, but can be
# instantiated here to avoid unnecessary duplication.
order = np.arange(sink_sum, dtype=np.int32)
# Create a bookkeeping vector that keeps track of each sequence in the
# sink. Each one will be randomly assigned an environment, and then
# reassigned based on the increasingly accurate distribution. sink[i]
# copies of index i will be placed in the `taxon_sequence` vector to allow
# each individual count to be removed and reassigned.
taxon_sequence = np.repeat(np.arange(num_features), sink).astype(np.int32)
# Update the conditional probability class now that we have the sink sum.
cp.set_n(sink_sum)
cp.precalculate()
# Several bookkeeping variables that are used within the for loops.
drawcount = 0
unknown_idx = num_sources - 1
for restart in range(restarts):
# Generate random source assignments for each sequence in the sink
# using a uniform distribution.
seq_env_assignments, envcounts = \
generate_environment_assignments(sink_sum, num_sources)
# Initially, the count of each taxon in the 'unknown' source should be
# 0.
unknown_vector = np.zeros(num_features, dtype=np.int32)
unknown_sum = 0
# If a sequence's random environmental assignment is to the 'unknown'
# environment we alter the training data to include those sequences
# in the 'unknown' source.
for e, t in zip(seq_env_assignments, taxon_sequence):
if e == unknown_idx:
unknown_vector[t] += 1
unknown_sum += 1
for rep in range(1, total_passes + 1):
# Iterate through sequences in a random order so that no
# systematic bias is introduced based on position in the taxon
# vector (i.e. taxa appearing at the end of the vector getting
# better estimates of the probability).
np.random.shuffle(order)
for seq_index in order:
e = seq_env_assignments[seq_index]
t = taxon_sequence[seq_index]
# Remove the ith sequence and update the probability
# associated with that environment.
envcounts[e] -= 1
if e == unknown_idx:
unknown_vector[t] -= 1
unknown_sum -= 1
# Calculate the new joint probability vector based on the
# removal of the ith sequence. Reassign the sequence to a new
# source environment and update counts for each environment and
# the unknown source if necessary.
# This is the fastest way I've currently found to draw from
# `jp`. By stacking (cumsum) the probability of `jp`, we can
# draw x from uniform variable in [0, total sum), and then find
# which interval that value lies in with searchsorted. Visual
# representation below
# e1 e2 e3 e4 e5 unk
# jp: | | | | | | |
# x: x
# new_e_idx == 4 (zero indexed)
# This is in contrast to the more intuitive, but much slower
# call it replaced:
# np.random.choice(num_sources, jp/jp.sum())
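# A tiny worked example with hypothetical numbers:
# jp = [0.1, 0.3, 0.6] -> cs = [0.1, 0.4, 1.0]; a uniform draw of
# x = 0.35 lands in the second interval, so searchsorted returns
# index 1 and the sequence is reassigned to environment 1.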
jp = cp.calculate_cp_slice(t, unknown_vector[t], unknown_sum,
envcounts)
cs = jp.cumsum()
new_e_idx = np.searchsorted(cs, np.random.uniform(0, cs[-1]))
seq_env_assignments[seq_index] = new_e_idx
envcounts[new_e_idx] += 1
if new_e_idx == unknown_idx:
unknown_vector[t] += 1
unknown_sum += 1
if rep > burnin and ((rep - (burnin + 1)) % delay) == 0:
# Update envcounts array with the assigned envs.
final_envcounts[drawcount] = envcounts
# Assign vectors necessary for feature table reconstruction.
final_env_assignments[drawcount] = seq_env_assignments
final_taxon_assignments[drawcount] = taxon_sequence
# We've made a draw, update this index so that the next
# iteration will be placed in the correct index of results.
drawcount += 1
return (final_envcounts, final_env_assignments, final_taxon_assignments)
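# Hedged sketch of calling the sampler directly with toy data (the `gibbs`
# wrapper below is the normal entry point):
def _gibbs_sampler_demo():
    source_data = np.array([[0, 10, 10], [10, 10, 0]])
    cp = ConditionalProbability(alpha1=.001, alpha2=.1, beta=10,
                                source_data=source_data)
    sink = np.array([5, 5, 5])
    envcounts, env_assignments, taxon_assignments = gibbs_sampler(
        sink, cp, restarts=2, draws_per_restart=1, burnin=2, delay=1)
    # one row per draw, one column per environment (2 sources + Unknown)
    assert envcounts.shape == (2, 3)
    return envcounts, env_assignments, taxon_assignments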
def gibbs(sources, sinks=None, alpha1=.001, alpha2=.1, beta=10, restarts=10,
draws_per_restart=1, burnin=100, delay=1, cluster=None,
create_feature_tables=True):
'''Gibbs sampling API.
Notes
-----
This function exists to allow API calls to source/sink prediction and
leave-one-out (LOO) source prediction.
Input validation is done on the sources and sinks (if not None). They must
be dataframes with integer data (or castable to such). If both
sources and sinks are provided, their columns must agree exactly.
Input validation is done on the Gibbs parameters, to make sure they are
numerically acceptable (all must be non-negative, some must be positive
integers - see below).
Warnings
--------
This function does _not_ perform rarefaction, the user should perform
rarefaction prior to calling this function.
This function does not collapse sources or sinks, it expects each row of
the `sources` dataframe to represent a unique source, and each row of the
`sinks` dataframe to represent a unique sink.
Parameters
----------
sources : DataFrame
A dataframe containing source data (rows are sources, columns are
features). The index must be the names of the sources.
sinks : DataFrame or None
A dataframe containing sink data (rows are sinks, columns are
features). The index must be the names of the sinks. If `None`,
leave-one-out (LOO) prediction will be done.
alpha1 : float
Prior counts of each feature in the training environments. Higher
values decrease the trust in the training environments, and make
the source environment distributions over taxa smoother. A value of
0.001 indicates reasonably high trust in all source environments, even
those with few training sequences. A more conservative value would be
0.01.
alpha2 : float
Prior counts of each feature in the Unknown environment. Higher
values make the Unknown environment smoother and less prone to
overfitting given a training sample.
beta : int
Number of prior counts of test sequences from each feature in each
environment.
restarts : int
Number of independent Markov chains to grow. `draws_per_restart` *
`restarts` gives the number of samplings of the mixing proportions that
will be generated.
draws_per_restart : int
Number of times to sample the state of the Markov chain for each
independent chain grown.
burnin : int
Number of passes (withdrawal and reassignment of every sequence in the
sink) that will be made before a sample (draw) will be taken. Higher
values allow more convergence towards the true distribution before draws
are taken.
delay : int >= 1
Number of passes between each sampling (draw) of the Markov chain. Once
the burnin passes have been made, a sample will be taken, and
additional samples will be drawn every `delay` number of passes. This
is also known as 'thinning'. Thinning helps reduce the impact of
correlation between adjacent states of the Markov chain.
cluster : ipyparallel.client.client.Client or None
An ipyparallel Client object, e.g. a started cluster.
create_feature_tables : boolean
If `True` create a feature table for each sink. The feature table
records the average count of each feature from each source for this
sink. This option can consume large amounts of memory if there are many
source, sinks, and features. If `False`, feature tables are not
created.
Returns
-------
mpm : DataFrame
Mixing proportion means. A dataframe containing the mixing proportions
(rows are sinks, columns are sources).
mps : DataFrame
Mixing proportion standard deviations. A dataframe containing the
mixing proportions standard deviations (rows are sinks, columns are
sources).
fas : list
ith item is a pd.DataFrame of the average feature assignments from each
source for the ith sink (in the same order as rows of `mpm` and `mps`).
Examples
--------
# An example of using the normal prediction.
>>> import pandas as pd
>>> import numpy as np
>>> from ipyparallel import Client
>>> import subprocess
>>> import time
>>> from sourcetracker import gibbs
# Prepare some source data.
>>> otus = np.array(['o%s' % i for i in range(50)])
>>> source1 = np.random.randint(0, 1000, size=50)
>>> source2 = np.random.randint(0, 1000, size=50)
>>> source3 = np.random.randint(0, 1000, size=50)
>>> source_df = pd.DataFrame([source1, source2, source3],
index=['source1', 'source2', 'source3'],
columns=otus, dtype=np.int32)
# Prepare some sink data.
>>> sink1 = np.ceil(.5*source1+.5*source2)
>>> sink2 = np.ceil(.5*source2+.5*source3)
>>> sink3 = np.ceil(.5*source1+.5*source3)
>>> sink4 = source1
>>> sink5 = source2
>>> sink6 = np.random.randint(0, 1000, size=50)
>>> sink_df = pd.DataFrame([sink1, sink2, sink3, sink4, sink5, sink6],
index=np.array(['sink%s' % i for i in
range(1,7)]),
columns=otus, dtype=np.int32)
# Set parameters
>>> alpha1 = .01
>>> alpha2 = .001
>>> beta = 10
>>> restarts = 5
>>> draws_per_restart = 1
>>> burnin = 2
>>> delay = 2
# Call without a cluster
>>> mpm, mps, fas = gibbs(source_df, sink_df, alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay,
cluster=None, create_feature_tables=True)
# Start a cluster and call the function
>>> jobs = 4
>>> subprocess.Popen('ipcluster start -n %s --quiet' % jobs, shell=True)
>>> time.sleep(25)
>>> c = Client()
>>> mpm, mps, fas = gibbs(source_df, sink_df, alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay,
cluster=c, create_feature_tables=True)
# LOO prediction.
>>> mpm, mps, fas = gibbs(source_df, None, alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay,
cluster=c, create_feature_tables=True)
'''
if not validate_gibbs_parameters(alpha1, alpha2, beta, restarts,
draws_per_restart, burnin, delay):
raise ValueError('The supplied Gibbs parameters are not acceptable. '
'Please review the `gibbs` doc string or call the '
'help function in the CLI.')
# Validate the input source and sink data. Error if the data do not meet
# the critical assumptions or cannot be cast to the proper type.
if sinks is not None:
sources, sinks = validate_gibbs_input(sources, sinks)
else:
sources = validate_gibbs_input(sources)
# Run LOO predictions on `sources`.
if sinks is None:
def f(cp_and_sink):
# The import is here to ensure that the engines of the cluster can
# access the gibbs_sampler function.
from sourcetracker._sourcetracker import gibbs_sampler
return gibbs_sampler(cp_and_sink[1], cp_and_sink[0], restarts,
draws_per_restart, burnin, delay)
cps_and_sinks = []
for source in sources.index:
_sources = sources.select(lambda x: x != source)
cp = ConditionalProbability(alpha1, alpha2, beta, _sources.values)
sink = sources.loc[source, :].values
cps_and_sinks.append((cp, sink))
if cluster is not None:
results = cluster[:].map(f, cps_and_sinks, block=True)
else:
results = list(map(f, cps_and_sinks))
mpm, mps, fas = collate_gibbs_results([i[0] for i in results],
[i[1] for i in results],
[i[2] for i in results],
sources.index, sources.index,
sources.columns,
create_feature_tables, loo=True)
return mpm, mps, fas
# Run normal prediction on `sinks`.
else:
cp = ConditionalProbability(alpha1, alpha2, beta, sources.values)
f = partial(gibbs_sampler, cp=cp, restarts=restarts,
draws_per_restart=draws_per_restart, burnin=burnin,
delay=delay)
if cluster is not None:
results = cluster[:].map(f, sinks.values, block=True)
else:
results = list(map(f, sinks.values))
mpm, mps, fas = collate_gibbs_results([i[0] for i in results],
[i[1] for i in results],
[i[2] for i in results],
sinks.index, sources.index,
sources.columns,
create_feature_tables, loo=False)
return mpm, mps, fas
def cumulative_proportions(all_envcounts, sink_ids, source_ids):
'''Calculate contributions of each source for each sink in `sink_ids`.
Parameters
----------
all_envcounts : list
Each entry is 2D array of ints. The ith entry must correspond to the
ith sink ID. The [j, k] entry of the ith table is the count of
sequences assigned to the sink from kth environment during the jth
draw.
sink_ids : np.array
ID's of the sinks. Must be in the same order as data in
`all_envcounts`.
source_ids : np.array
ID's of the sources. Must be in the same order as the columns of the
tables in `all_envcounts`.
Returns
-------
proportions : pd.DataFrame
A dataframe of floats, containing the mixing proportions of each source
in each sink. The [i, j] entry is the contribution from the jth source
to the ith sink.
proportions_std : pd.DataFrame
A dataframe of floats, identical to `proportions` except the entries
are the standard deviation of each entry in `proportions`.
Notes
-----
This script is designed to be used by `collate_gibbs_results` after
completion of multiple `gibbs_sampler` calls (for different sinks). This
function does _not_ check that the assumptions of ordering described above
are met. It is the user's responsibility to check these if using this
function independently.
'''
num_sinks = len(sink_ids)
num_sources = len(source_ids) + 1
proportions = np.zeros((num_sinks, num_sources), dtype=np.float64)
proportions_std = np.zeros((num_sinks, num_sources), dtype=np.float64)
for i, envcounts in enumerate(all_envcounts):
proportions[i] = envcounts.sum(0) / envcounts.sum()
proportions_std[i] = (envcounts / envcounts.sum()).std(0)
cols = list(source_ids) + ['Unknown']
return (pd.DataFrame(proportions, index=sink_ids, columns=cols),
pd.DataFrame(proportions_std, index=sink_ids, columns=cols))
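# Small sketch with toy draws: two draws for a single sink over one known
# source plus the Unknown environment.
def _cumulative_proportions_demo():
    envcounts = np.array([[8, 2], [6, 4]])  # rows are draws, columns are environments
    props, props_std = cumulative_proportions(
        [envcounts], sink_ids=np.array(['sink1']), source_ids=np.array(['source1']))
    # 14 of the 20 total assignments went to source1 -> 0.7
    assert abs(props.loc['sink1', 'source1'] - 0.7) < 1e-9
    return props, props_std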
from django.shortcuts import render
from django.http import HttpResponse
from datetime import datetime
import psycopg2
import math
import pandas as pd
from openpyxl import Workbook
import csv
import random
def psql_pdc(query):
# PostgreSQL production credentials
connP_P = {
'host' : '10.150.1.74',
'port' : '5432',
'user':'postgres',
'password':'<PASSWORD>',
'database' : 'postgres'}
# connect to production PostgreSQL
conexionP_P = psycopg2.connect(**connP_P)
#print('\nConexión con el servidor PostgreSQL produccion establecida!')
cursorP_P = conexionP_P.cursor ()
# run the phones query against PostgreSQL
cursorP_P.execute(query)
anwr = cursorP_P.fetchall()
cursorP_P.close()
conexionP_P.close()
return anwr
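# Hedged usage sketch (the query text is a placeholder): the helper returns a
# list of tuples, which the views below wrap in a DataFrame, e.g.
#   rows = psql_pdc("SELECT deudor_id, telefono FROM contactos LIMIT 10")
#   df = pd.DataFrame(rows)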
def to_horiz(anwr_P,name,_id):
# pivot from long (vertical) to wide (horizontal) layout
anwr_P1 = anwr_P.pivot(index=0,columns=1)
anwr_P1[_id] = anwr_P1.index
col1 = []
i=0
for i in range(anwr_P1.shape[1]-1):
col1.append(name+str(i+1))
col1.append(_id)
anwr_P1.columns = col1
return anwr_P1
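# Illustrative sketch with made-up rows: long (id, ordinal, value) triples are
# pivoted into one row per id with numbered columns phone1, phone2, ...
def _to_horiz_demo():
    long_df = pd.DataFrame([['d1', 1, '300111'], ['d1', 2, '300222'],
                            ['d2', 1, '300333']])
    wide = to_horiz(long_df, 'phone', 'deudor_id')
    # columns become ['phone1', 'phone2', 'deudor_id']; d2 gets NaN for phone2
    return wide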
def csv_o(fn,name):
response = HttpResponse(content_type = "text/csv")
content = "attachment; filename = %s"%name
response["Content-Disposition"] = content
# for j in range(fn.shape[1]):
# try:
# fn.iloc[:,j] = fn.iloc[:,j].str.decode(encoding='utf-8-sig')
# fn.iloc[:,j] = fn.iloc[:,j].str.encode(encoding='utf_16_le')
# except:
# pass
fn2 = [tuple(x) for x in fn.values]
writer = csv.writer(response,delimiter ='|')
writer.writerow(fn.columns)
writer.writerows(fn2)
return response
def excel(fn,name):
wb = Workbook()
ws = wb.active
k = 0
a = pd.DataFrame(fn.columns)
for k in range(a.shape[0]):
ws.cell(row = 1, column = k+1).value = a.iloc[k,0]
i=0
j=0
for i in range(fn.shape[0]):
for j in range(0,fn.shape[1]):
try:
ws.cell(row = i+2, column = j+1).value = fn.iloc[i,j]
except:
pass
response = HttpResponse(content_type = "application/ms-excel")
content = "attachment; filename = %s"%name
response["Content-Disposition"] = content
wb.save(response)
return response
def excel_CV_COL(request):
today = datetime.now()
tablename = "CV_Col"+today.strftime("%Y%m%d%H") + ".xlsx"
with open("./hello/Plantillas/Colp/QueryTel_COL.txt","r") as f1:
queryP_PT = f1.read()
with open("./hello/Plantillas/Colp/QueryCor_COL.txt","r") as f2:
queryP_PC = f2.read()
with open("./hello/Plantillas/Colp/QueryDir_COL.txt","r") as f3:
queryP_PD = f3.read()
with open("./hello/Plantillas/Colp/QueryCV_COL.txt","r") as f4:
queryP_cons = f4.read()
with open("./hello/Plantillas/Colp/QueryCiu_COL.txt","r") as f6:
queryP_Ciu = f6.read()
anwr = psql_pdc(queryP_PT)
anwrC = psql_pdc(queryP_PC)
anwrD = psql_pdc(queryP_PD)
anwrCi = psql_pdc(queryP_Ciu)
yanwr = psql_pdc(queryP_cons)
anwr_P = pd.DataFrame(anwr)
anwr_C = pd.DataFrame(anwrC)
anwr_D = pd.DataFrame(anwrD)
anwr_Ci = pd.DataFrame(anwrCi)
df = pd.DataFrame(yanwr)
inf = to_horiz(anwr_P,'phone',"deudor_id")
infC = to_horiz(anwr_C,'mail',"deudor_id")
infD = to_horiz(anwr_D,'address',"deudor_id")
infCi = to_horiz(anwr_Ci,'town',"deudor_id")
df = df.rename(columns={0:'rownumber',
1:'obligacion_id',
2:'deudor_id',
3:'unico',
4:'estado',
5:'tipo_cliente',
6:'nombre',
7:'producto',
8:'initial_bucket',
9:'ciudad',
10:'sucursal',
11:'tipo_prod',
12:'dias_mora_inicial',
13:'dias_mora_actual',
14:'rango_mora_inicial',
15:'rango_mora_final',
16:'rango',
17:'suma_pareto',
18:'rango_pareto',
19:'fcast',
20:'fdesem',
21:'vrdesem',
22:'saldo_total_inicial',
23:'saldo_total_actual',
24:'saldo_capital_inicial',
25:'saldo_capital_actual',
26:'saldo_vencido_inicial',
27:'saldo_vencido_actual',
28:'pagomin',
29:'fultpago',
30:'vrultpago',
31:'agencia',
32:'tasainter',
33:'feultref',
34:'ultcond',
35:'fasigna',
36:'eqasicampana',
37:'diferencia_pago',
38:'pago_preliminar',
39:'pago_cliente',
40:'min',
41:'tarifa',
42:'honorarios',
43:'perfil_mes_4',
44:'perfil_mes_3',
45:'perfil_mes_2',
46:'perfil_mes_1',
47:'fecha_primer_gestion',
48:'fecha_ultima_gestion',
49:'perfil_mes_actual',
50:'contactabilidad',
51:'ultimo_alo',
52:'descod1',
53:'descod2',
54:'asesor',
55:'fecha_gestion',
56:'telefono_mejor_gestion',
57:'mejorgestionhoy',
58:'asesor_indicador_hoy',
59:'repeticion',
60:'llamadas',
61:'sms',
62:'correos',
63:'gescall',
64:'visitas',
65:'whatsapp',
66:'no_contacto',
67:'total_gestiones',
68:'telefono_positivo',
69:'marcaciones_telefono_positivo',
70:'ultima_marcacion_telefono_positivo',
71:'fec_creacion_ult_compromiso',
72:'fec_pactada_ult_compromiso',
73:'valor_acordado_ult_compromiso',
74:'asesor_ult_compromiso',
75:'cantidad_acuerdos_mes',
76:'estado_acuerdo',})
fn = pd.merge(df,inf,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infC,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infD,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infCi,on = ["deudor_id"]\
,how = "left",indicator = False)
return excel(fn,tablename)
def csv_CV_Claro(request):
today = datetime.now()
tablename = "CV_Claro" + today.strftime("%Y%m%d%H") + ".csv"
with open("./hello/Plantillas/Claro/QueryTel_Claro.txt","r") as f1:
queryP_PT = f1.read()
with open("./hello/Plantillas/Claro/QueryCor_Claro.txt","r") as f2:
queryP_PC = f2.read()
with open("./hello/Plantillas/Claro/QueryCV_Claro.txt","r") as f4:
queryP_cons = f4.read()
anwr = psql_pdc(queryP_PT)
anwrC = psql_pdc(queryP_PC)
yanwr = psql_pdc(queryP_cons)
#dataframes
anwr_P = pd.DataFrame(anwr)
anwr_C = pd.DataFrame(anwrC)
df = pd.DataFrame(yanwr)
anwr_P1 = to_horiz(anwr_P,'phone','deudor_id')
# rename email columns
anwr_C = anwr_C.rename(columns={
0:'deudor_id',
1:'mail0',
2:'mail1'})
anwr_C1 = anwr_C.drop_duplicates(subset=['deudor_id'])
# rename CV columns
df = df.rename(columns={0:'rownumber',
1:'deudor_id',
2:'obligacion_id',
3:'nombredelcliente',
4:'estado',
5:'tipo_cliente',
6:'unico',
7:'crmorigen',
8:'potencialmark',
9:'prepotencialmark',
10:'writeoffmark',
11:'dias_mora',
12:'segmento_bpo',
13:'rango_bpo',
14:'tipo',
15:'fecha_de_vencimiento',
16:'min_cliente',
17:'valorscoring',
18:'numeroreferenciadepago',
19:'monto_inicial',
20:'monto_ini_cuenta',
21:'porcentaje_descuento',
22:'valor_descuento',
23:'valor_a_pagar',
24:'deuda_real',
25:'valor_pago',
26:'saldo_pendiente',
27:'fecha_pago',
28:'fecha_compromiso',
29:'fecha_pago_compromiso',
30:'valor_compromiso',
31:'estado_acuerdo',
32:'ind_m4',
33:'ind_m3',
34:'ind_m2',
35:'ind_m1',
36:'fecha_primer_gestion',
37:'fecha_ultima_gestion',
38:'indicador',
39:'phone',
40:'asesor',
41:'fecha_gestion',
42:'contactabilidad',
43:'indicador_hoy',
44:'repeticion',
45:'llamadas',
46:'sms',
47:'correos',
48:'gescall',
49:'whatsapp',
50:'visitas',
51:'no_contacto',
52:'total_gestiones',
53:'telefono_positivo',
54:'fec_ultima_marcacion'})
#a = fn[fn.obligacion_id == '9876510000211227']
#i=0
#lin = ['no_contacto_mes_actual','gescall_mes_actual','tel_mes_actual','tel_positivo']
#for i in lin:
# df[i].fillna(0,inplace=True)
# df[i] = df[i].apply(lambda x: round(x))
# df[i] = df[i].astype('str')
fn = pd.merge(df,anwr_P1,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,anwr_C1,on = ["deudor_id"]\
,how = "left",indicator = False)
return csv_o(fn,tablename)
def csv_CV_CarP(request):
today = datetime.now()
tablename = "CV_CarP" + today.strftime("%Y%m%d%H") + ".csv"
with open("./hello/Plantillas/CarP/QueryTel_CarP.txt","r") as f1:
queryP_PT = f1.read()
with open("./hello/Plantillas/CarP/QueryCor_CarP.txt","r") as f2:
queryP_PC = f2.read()
with open("./hello/Plantillas/CarP/QueryDir_CarP.txt","r") as f3:
queryP_PD = f3.read()
with open("./hello/Plantillas/CarP/QueryCV_CarP.txt","r") as f4:
queryP_cons = f4.read()
with open("./hello/Plantillas/CarP/QueryCiu_CarP.txt","r") as f5:
queryP_Ciu = f5.read()
anwr = psql_pdc(queryP_PT)
anwrC = psql_pdc(queryP_PC)
anwrD = psql_pdc(queryP_PD)
anwrCi = psql_pdc(queryP_Ciu)
yanwr = psql_pdc(queryP_cons)
anwr_P = pd.DataFrame(anwr)
anwr_C = pd.DataFrame(anwrC)
anwr_D = pd.DataFrame(anwrD)
anwr_Ci = pd.DataFrame(anwrCi)
df = pd.DataFrame(yanwr)
inf = to_horiz(anwr_P,'phone',"deudor_id")
infC = to_horiz(anwr_C,'mail',"deudor_id")
infD = to_horiz(anwr_D,'address',"deudor_id")
infCi = to_horiz(anwr_Ci,'town',"deudor_id")
# rename CV columns
df = df.rename(columns={0:'deudor_id',
1:'unico',
2:'nombre',
3:'obligacion',
4:'obligacion_17',
5:'tipo_cliente',
6:'sucursal_final',
7:'zona',
8:'ano_castigo',
9:'saldo_k_pareto_mes_vigente',
10:'intereses',
11:'honorarios_20',
12:'saldo_total_mes_vigente',
13:'saldo_total_pareto_mes_vigente_',
14:'saldokpareto',
15:'rango_k_pareto',
16:'interesespareto',
17:'honorariospareto',
18:'porcentaje_k_del_total',
19:'porcentaje_intereses_del_total',
20:'porcentaje_honorarios_del_total',
21:'rango_k_porcentaje',
22:'capital_20_porciento',
23:'dias_mora_acumulado',
24:'marca_juridica_cliente',
25:'focos',
26:'valor_pago',
27:'ultima_fecha_pago',
28:'estado_cliente_mes_anterior',
29:'valor_compromiso',
30:'fecha_compromiso',
31:'fecha_pactada_compromiso',
32:'asesor_compromiso',
33:'ind_m4',
34:'ind_m3',
35:'ind_m2',
36:'ind_m1',
37:'fecha_primer_gestion',
38:'fecha_ultima_gestion',
39:'indicador',
40:'telefono_mejor_gestion',
41:'asesor_mejor_gestion',
42:'fecha_gestion',
43:'contactabilidad',
44:'indicador_hoy',
45:'repeticion',
46:'llamadas',
47:'sms',
48:'correos',
49:'gescall',
50:'whatsapp',
51:'visitas',
52:'no_contacto',
53:'total_gestiones',
54:'telefono_positivo',
55:'fec_ultima_marcacion',
56:'investigacion_de_bienes'})
fn = pd.merge(df,inf,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infC,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infD,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infCi,on = ["deudor_id"]\
,how = "left",indicator = False)
return csv_o(fn,tablename)
def csv_CV_FalaJ(request):
today = datetime.now()
tablename = "CV_FalJ"+today.strftime("%Y%m%d%H") + ".csv"
with open("./hello/Plantillas/Fala/QueryTel_Fal.txt","r") as f1:
queryP_PT = f1.read()
with open("./hello/Plantillas/Fala/QueryCor_Fal.txt","r") as f2:
queryP_PC = f2.read()
with open("./hello/Plantillas/Fala/QueryDir_Fal.txt","r") as f3:
queryP_PD = f3.read()
with open("./hello/Plantillas/Fala/QueryCV_FalJ.txt","r") as f4:
queryP_cons = f4.read()
with open("./hello/Plantillas/Fala/QueryPa_Fal.txt","r") as f5:
queryP_Pa = f5.read()
with open("./hello/Plantillas/Fala/QueryRe_Fal.txt","r") as f6:
queryP_PR = f6.read()
with open("./hello/Plantillas/Fala/QueryCiu_Fal.txt","r") as f7:
queryP_Ciu = f7.read()
anwr = psql_pdc(queryP_PT)
anwrC = psql_pdc(queryP_PC)
anwrD = psql_pdc(queryP_PD)
anwrPa = psql_pdc(queryP_Pa)
anwrR = psql_pdc(queryP_PR)
anwrCi = psql_pdc(queryP_Ciu)
yanwr = psql_pdc(queryP_cons)
anwr_P = pd.DataFrame(anwr)
anwr_C = pd.DataFrame(anwrC)
anwr_D = pd.DataFrame(anwrD)
anwr_Pa = pd.DataFrame(anwrPa)
anwr_R = pd.DataFrame(anwrR)
anwr_Ci = pd.DataFrame(anwrCi)
df = pd.DataFrame(yanwr)
inf = to_horiz(anwr_P,'phone',"deudor_id")
infC = to_horiz(anwr_C,'mail',"deudor_id")
infD = to_horiz(anwr_D,'address',"deudor_id")
infR = to_horiz(anwr_R,'referencia',"deudor_id")
infCi = to_horiz(anwr_Ci,'town',"deudor_id")
try:
infP = to_horiz(anwr_Pa,'pago','obligacion_id')
if infP.shape[1] > 4:
tipos = infP.dtypes.to_frame()
tipos['index'] = range(len(tipos))
tipos = tipos.set_index('index')
su = []
for n in range(len(tipos)):
if str(tipos.iloc[n,0]) == 'float64':
su.append(n)
else:
pass
infP1 = infP[['pago1','pago2']]
infP1['pago3'] = infP.iloc[:,2:max(su)+1].sum(axis = 1)
infP1['obligacion_id'] = infP.index
infP = infP1
i=0
lin = ['pago1','pago2','pago3']
for i in lin:
infP[i].fillna(0,inplace=True)
infP[i] = infP[i].apply(lambda x: round(x))
infP[i] = '$' + infP[i].astype('str')
except:
pass
# rename CV columns
df = df.rename(columns={0:'idcbpo',
1:'tipo_producto_asignacion',
2:'grupo',
3:'cartera',
4:'tipo_cliente',
5:'unico',
6:'unico_pro',
7:'obligacion_id',
8:'deudor_id',
9:'nombre',
10:'producto',
11:'saldototal',
12:'saldo_pareto',
13:'segmentacion',
14:'peso',
15:'alturamora_hoy',
16:'rango',
17:'dias_mora',
18:'vencto',
19:'indicador_mejor_gestion',
20:'total_gestiones',
21:'fecha_ultima_gestion',
22:'asesor_mejor_gestion',
23:'fecha_compromiso',
24:'fecha_pago_compromiso',
25:'valor_compromiso',
26:'asesor',
27:'estado_acuerdo',
28:'dias_mora_pagos',
29:'valor_pago',
30:'fecha_pago',
31:'pendiente',
32:'pago_total',
33:'nvo_status',
34:'status_refresque',
35:'nvo_status_refresque',
36:'dias_mora_refresque',
37:'pendiente_mas_gastos',
38:'vencida_mas_gastos',
39:'gastos_mora',
40:'gastos_cv',
41:'porcentaje_gasto',
42:'valor_a_mantener_sin_gxc',
43:'cv8',
44:'cv9',
45:'cv10',
46:'cv11',
47:'cv12',
48:'restructuracion',
49:'valor_restruc',
50:'pagominimo_actual',
51:'pagominimo_anterior',
52:'periodo_actual',
53:'periodo_anterior',
54:'cuota36',
55:'cuota48',
56:'cuota60',
57:'cuota72',
58:'proyectada_cargue',
59:'aplica_ajuste',
60:'fecha',
61:'diferencia',
62:'porcentaje_saldo_total',
63:'x',
64:'valor',
65:'porcentaje_participacion',
66:'ind_m4',
67:'ind_m3',
68:'ind_m2',
69:'ind_m1',
70:'fecha_primer_gestion',
71:'telefono_mejor_gestion',
72:'fecha_gestion',
73:'contactabilidad',
74:'indicador_hoy',
75:'repeticion',
76:'llamadas',
77:'sms',
78:'correos',
79:'gescall',
80:'whatsapp',
81:'visitas',
82:'no_contacto',
83:'telefono_positivo',
84:'fec_ultima_marcacion',
85:'lista_robinson'})
fn = pd.merge(df,inf,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infR,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infC,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infD,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infCi,on = ["deudor_id"]\
,how = "left",indicator = False)
if 'infP' in locals():
# merge payments
fn = pd.merge(fn,infP,on = ["obligacion_id"]\
,how = "left",indicator = False)
# column ordering
lt = fn.columns.tolist()
lt = lt[:29] + lt[(infP.shape[1]-1)*-1:] + lt[29:fn.shape[1]-(infP.shape[1]-1)]
fn = fn[lt]
return csv_o(fn,tablename)
def csv_CV_FalaC(request):
today = datetime.now()
tablename = "CV_FalC"+today.strftime("%Y%m%d%H") + ".csv"
with open("./hello/Plantillas/Fala/QueryTel_Fal.txt","r") as f1:
queryP_PT = f1.read()
with open("./hello/Plantillas/Fala/QueryCor_Fal.txt","r") as f2:
queryP_PC = f2.read()
with open("./hello/Plantillas/Fala/QueryDir_Fal.txt","r") as f3:
queryP_PD = f3.read()
with open("./hello/Plantillas/Fala/QueryCV_FalC.txt","r") as f4:
queryP_cons = f4.read()
with open("./hello/Plantillas/Fala/QueryPa_Fal.txt","r") as f5:
queryP_Pa = f5.read()
with open("./hello/Plantillas/Fala/QueryRe_Fal.txt","r") as f6:
queryP_PR = f6.read()
with open("./hello/Plantillas/Fala/QueryCiu_Fal.txt","r") as f7:
queryP_Ciu = f7.read()
anwr = psql_pdc(queryP_PT)
anwrC = psql_pdc(queryP_PC)
anwrD = psql_pdc(queryP_PD)
anwrPa = psql_pdc(queryP_Pa)
anwrR = psql_pdc(queryP_PR)
anwrCi = psql_pdc(queryP_Ciu)
yanwr = psql_pdc(queryP_cons)
anwr_P = pd.DataFrame(anwr)
anwr_C = pd.DataFrame(anwrC)
anwr_D = pd.DataFrame(anwrD)
anwr_Pa = pd.DataFrame(anwrPa)
anwr_R = pd.DataFrame(anwrR)
anwr_Ci = pd.DataFrame(anwrCi)
df = pd.DataFrame(yanwr)
inf = to_horiz(anwr_P,'phone',"deudor_id")
infC = to_horiz(anwr_C,'mail',"deudor_id")
infD = to_horiz(anwr_D,'address',"deudor_id")
infR = to_horiz(anwr_R,'referencia',"deudor_id")
infCi = to_horiz(anwr_Ci,'town',"deudor_id")
try:
infP = to_horiz(anwr_Pa,'pago','obligacion_id')
if infP.shape[1] > 4:
tipos = infP.dtypes.to_frame()
tipos['index'] = range(len(tipos))
tipos = tipos.set_index('index')
su = []
for n in range(len(tipos)):
if str(tipos.iloc[n,0]) == 'float64':
su.append(n)
else:
pass
infP1 = infP[['pago1','pago2']]
infP1['pago3'] = infP.iloc[:,2:max(su)+1].sum(axis = 1)
infP1['obligacion_id'] = infP.index
infP = infP1
i=0
lin = ['pago1','pago2','pago3']
for i in lin:
infP[i].fillna(0,inplace=True)
infP[i] = infP[i].apply(lambda x: round(x))
infP[i] = '$' + infP[i].astype('str')
except:
pass
# rename CV columns
df = df.rename(columns={0:'idcbpo',
1:'tipo_producto_asignacion',
2:'grupo',
3:'cartera',
4:'tipo_cliente',
5:'unico',
6:'unico_pro',
7:'obligacion_id',
8:'deudor_id',
9:'nombre',
10:'producto',
11:'saldototal',
12:'saldo_pareto',
13:'segmentacion',
14:'peso',
15:'alturamora_hoy',
16:'rango',
17:'dias_mora',
18:'vencto',
19:'indicador_mejor_gestion',
20:'total_gestiones',
21:'fecha_ultima_gestion',
22:'asesor_mejor_gestion',
23:'fecha_compromiso',
24:'fecha_pago_compromiso',
25:'valor_compromiso',
26:'asesor',
27:'estado_acuerdo',
28:'dias_mora_pagos',
29:'valor_pago',
30:'fecha_pago',
31:'pendiente',
32:'pago_total',
33:'nvo_status',
34:'status_refresque',
35:'nvo_status_refresque',
36:'dias_mora_refresque',
37:'pendiente_mas_gastos',
38:'vencida_mas_gastos',
39:'gastos_mora',
40:'gastos_cv',
41:'porcentaje_gasto',
42:'valor_a_mantener_sin_gxc',
43:'cv1',
44:'cv2',
45:'cv3',
46:'cv4',
47:'cv5',
48:'cv6',
49:'cv7',
50:'cv8',
51:'cv9',
52:'cv10',
53:'cv11',
54:'cv12',
55:'restructuracion',
56:'valor_restruc',
57:'pagominimo_actual',
58:'pagominimo_anterior',
59:'periodo_actual',
60:'periodo_anterior',
61:'cuota36',
62:'cuota48',
63:'cuota60',
64:'cuota72',
65:'proyectada_cargue',
66:'aplica_ajuste',
67:'fecha',
68:'diferencia',
69:'porcentaje_saldo_total',
70:'x',
71:'valor',
72:'porcentaje_participacion',
73:'ind_m4',
74:'ind_m3',
75:'ind_m2',
76:'ind_m1',
77:'fecha_primer_gestion',
78:'telefono_mejor_gestion',
79:'fecha_gestion',
80:'contactabilidad',
81:'indicador_hoy',
82:'repeticion',
83:'llamadas',
84:'sms',
85:'correos',
86:'gescall',
87:'whatsapp',
88:'visitas',
89:'no_contacto',
90:'telefono_positivo',
91:'fec_ultima_marcacion',
92:'lista_robinson'})
fn = pd.merge(df,inf,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infR,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infC,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infD,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infCi,on = ["deudor_id"]\
,how = "left",indicator = False)
if 'infP' in locals():
# merge payments
fn = pd.merge(fn,infP,on = ["obligacion_id"]\
,how = "left",indicator = False)
# column ordering
lt = fn.columns.tolist()
lt = lt[:27] + lt[(infP.shape[1]-1)*-1:] + lt[27:fn.shape[1]-(infP.shape[1]-1)]
fn = fn[lt]
return csv_o(fn,tablename)
def csv_CV_Sant(request):
today = datetime.now()
tablename = "CV_San"+today.strftime("%Y%m%d%H") + ".csv"
with open("./hello/Plantillas/Sant/QueryTel_San.txt","r") as f1:
queryP_PT = f1.read()
with open("./hello/Plantillas/Sant/QueryCor_San.txt","r") as f2:
queryP_PC = f2.read()
with open("./hello/Plantillas/Sant/QueryDir_San.txt","r") as f3:
queryP_PD = f3.read()
with open("./hello/Plantillas/Sant/QueryCV_San.txt","r") as f4:
queryP_cons = f4.read()
with open("./hello/Plantillas/Sant/QueryCiu_San.txt","r") as f5:
queryP_Ciu = f5.read()
anwr = psql_pdc(queryP_PT)
anwrC = psql_pdc(queryP_PC)
anwrD = psql_pdc(queryP_PD)
anwrCi = psql_pdc(queryP_Ciu)
yanwr = psql_pdc(queryP_cons)
anwr_P = pd.DataFrame(anwr)
anwr_C = pd.DataFrame(anwrC)
anwr_D = pd.DataFrame(anwrD)
anwr_Ci = pd.DataFrame(anwrCi)
df = pd.DataFrame(yanwr)
inf = to_horiz(anwr_P,'phone',"deudor_id")
infC = to_horiz(anwr_C,'mail',"deudor_id")
infD = to_horiz(anwr_D,'address',"deudor_id")
infCi = to_horiz(anwr_Ci,'town',"deudor_id")
# rename CV columns
df = df.rename(columns={0:'gestor',
1:'entidad',
2:'abogado',
3:'fecha_desembolso',
4:'fecha_corte',
5:'solicitud',
6:'obligacion_id',
7:'deudor_id',
8:'unico',
9:'nombre',
10:'capital',
11:'dias_mora',
12:'rango_mora',
13:'saldo_capital_pareto',
14:'rango_pareto',
15:'tomo_encuesta',
16:'aplica_alivio',
17:'fecha_proximo_pago_alivio',
18:'debitos',
19:'rango_cierre_m4',
20:'rango_cierre_m3',
21:'rango_cierre_m2',
22:'rango_cierre_m1',
23:'repeticion',
24:'llamadas',
25:'sms',
26:'correos',
27:'gescall',
28:'whatsapp',
29:'visitas',
30:'no_contacto',
31:'total_gestiones',
32:'fecha_primer_gestion',
33:'fecha_ultima_gestion',
34:'ultimo_alo',
35:'ind_m4',
36:'ind_m3',
37:'ind_m2',
38:'ind_m1',
39:'ind_mes_actual',
40:'fec_ind_mes_actual',
41:'tel_ind_mes_actual',
42:'asesor_ind_mes_actual',
43:'contactabilidad',
44:'asesor_ind_hoy',
45:'ind_hoy',
46:'ultima_marcacion_tel_pos',
47:'telefono_positivo',
48:'fecha_compromiso',
49:'fecha_pago_compromiso',
50:'valor_compromiso',
51:'calificacion',
52:'fechas_probables_pago',
53:'id_protocolo',
54:'canal_protocolo',
55:'texto_protocolo'})
fn = pd.merge(df,inf,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infC,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infD,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infCi,on = ["deudor_id"]\
,how = "left",indicator = False)
lt = fn.columns.tolist()
lt = lt[:53] + lt[-(inf.shape[1] + infC.shape[1] + infD.shape[1] -1):] + lt[53:56]
fn = fn[lt]
return csv_o(fn,tablename)
def csv_CV_Pop(request):
today = datetime.now()
tablename = "CV_Pop" + today.strftime("%Y%m%d%H") + '.csv'
with open("./hello/Plantillas/Pop/QueryTel_Pop.txt","r") as f1:
queryP_PT = f1.read()
with open("./hello/Plantillas/Pop/QueryCor_Pop.txt","r") as f2:
queryP_PC = f2.read()
with open("./hello/Plantillas/Pop/QueryDir_Pop.txt","r") as f3:
queryP_PD = f3.read()
with open("./hello/Plantillas/Pop/QueryCV_Pop.txt","r") as f4:
queryP_cons = f4.read()
with open("./hello/Plantillas/Pop/QueryPa_Pop.txt","r") as f5:
queryP_Pa = f5.read()
with open("./hello/Plantillas/Pop/QueryCiu_Pop.txt","r") as f6:
queryP_Ciu = f6.read()
anwr = psql_pdc(queryP_PT)
anwrC = psql_pdc(queryP_PC)
anwrD = psql_pdc(queryP_PD)
anwrPa = psql_pdc(queryP_Pa)
anwrCi = psql_pdc(queryP_Ciu)
yanwr = psql_pdc(queryP_cons)
anwr_P = pd.DataFrame(anwr)
anwr_C = pd.DataFrame(anwrC)
anwr_D = pd.DataFrame(anwrD)
anwr_Pa = pd.DataFrame(anwrPa)
anwr_Ci = pd.DataFrame(anwrCi)
df = pd.DataFrame(yanwr)
inf = to_horiz(anwr_P,'phone',"deudor_id")
infC = to_horiz(anwr_C,'mail',"deudor_id")
infD = to_horiz(anwr_D,'address',"deudor_id")
infCi = to_horiz(anwr_Ci,'town',"deudor_id")
try:
infP = to_horiz(anwr_Pa,'pago','llave')
except:
pass
# rename CV columns
df = df.rename(columns={0:'tipo_bpo',
1:'llave',
2:'entidad',
3:'unico',
4:'nombre_prod',
5:'lin_descripcion',
6:'deudor_id',
7:'nombre_cliente',
8:'tipo_cliente',
9:'estado_actual_cartera',
10:'obligacion_id',
11:'inicio_corte',
12:'fecha_desembolso',
13:'ciclo',
14:'dias_en_mora_inicial',
15:'dias_en_mora_final',
16:'valor_compromiso',
17:'fecha_creacion_compromiso',
18:'fecha_pago_compromiso',
19:'asesor_compromiso',
20:'pagos_acumulados',
21:'cantidad_pagos',
22:'rango_mora_inicial',
23:'rango_mora_final',
24:'estado',
25:'valmora',
26:'valmora_pareto',
27:'capital_inicial',
28:'saltotalpareto',
29:'pago_minimo',
30:'saldo_capital_vencido',
31:'seguros',
32:'intereses_mora',
34:'rango_saltotal',
35:'asignacion_inicial',
36:'wasis_banco',
37:'fecha_de_retiro',
38:'tipo_cliente',
39:'ind_m4',
40:'ind_m3',
41:'ind_m2',
42:'ind_m1',
43:'ind_mejor_gestion',
44:'fec_mejor_gestion',
45:'tel_mejor_gestion',
46:'asesor_mejor_gestion',
47:'contactability',
48:'ind_mejor_gestion_hoy',
49:'asesor_mejor_gestion_hoy',
50:'fecha_primer_gestion',
51:'fecha_ultima_gestion',
52:'repeticion',
53:'llamadas',
54:'sms',
55:'correos',
56:'gescall',
57:'whatsapp',
58:'visitas',
59:'no_contacto',
60:'total_gestiones',
61:'primer_alo',
62:'ultimo_alo',
63:'fec_ult_marc_tel_pos',
64:'tel_positivo',
65:'casa_inicial',
66:'casa_actual',
67:'fecha_retiro_casa'})
# inf["deudor_id"] = "1" + inf["deudor_id"]
# infC["deudor_id"] = "1" + infC["deudor_id"]
# infD["deudor_id"] = "1" + infD["deudor_id"]
# infCi["deudor_id"] = "1" + infCi["deudor_id"]
fn = pd.merge(df,inf,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infC,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infD,on = ["deudor_id"]\
,how = "left",indicator = False)
fn = pd.merge(fn,infCi,on = ["deudor_id"]\
,how = "left",indicator = False)
if 'infP' in locals():
# merge payments
fn = pd.merge(fn,infP,on = ["llave"]\
,how = "left",indicator = False)
# reorder columns: place the merged payment columns right after the first 20 fields
lt = fn.columns.tolist()
lt = lt[:20] + lt[(infP.shape[1]-1)*-1:] + lt[20:fn.shape[1]-(infP.shape[1]-1)]
fn = fn[lt]
return csv_o(fn,tablename)
def csv_CV_Dav(request):
today = datetime.now()
tablename = "CV_Dav"+today.strftime("%Y%m%d%H") + ".csv"
with open("./hello/Plantillas/Davi/QueryTel_Dav.txt","r") as f1:
queryP_PT = f1.read()
with open("./hello/Plantillas/Davi/QueryCor_Dav.txt","r") as f2:
queryP_PC = f2.read()
with open("./hello/Plantillas/Davi/QueryDir_Dav.txt","r") as f3:
queryP_PD = f3.read()
with open("./hello/Plantillas/Davi/QueryCV_Dav.txt","r") as f4:
queryP_cons = f4.read()
with open("./hello/Plantillas/Davi/QueryCiu_Dav.txt","r") as f5:
queryP_Ciu = f5.read()
anwr = psql_pdc(queryP_PT)
anwrC = psql_pdc(queryP_PC)
anwrD = psql_pdc(queryP_PD)
anwrCi = psql_pdc(queryP_Ciu)
yanwr = psql_pdc(queryP_cons)
anwr_P = pd.DataFrame(anwr)
anwr_C = pd.DataFrame(anwrC)
anwr_D = pd.DataFrame(anwrD)
anwr_Ci = pd.DataFrame(anwrCi)
df = | pd.DataFrame(yanwr) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import long
from pandas.core import ops
from pandas.errors import NullFrequencyError, PerformanceWarning
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
timedelta_range,
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=lambda x: type(x).__name__)
def delta(request):
"""
Several ways of representing two hours
"""
return request.param
@pytest.fixture(params=[timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()],
ids=lambda x: type(x).__name__)
def scalar_td(request):
"""
Several variants of Timedelta scalars representing 5 minutes and 4 seconds
"""
return request.param
@pytest.fixture(params=[pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def box(request):
"""
Several array-like containers that should have effectively identical
behavior with respect to arithmetic operations.
"""
return request.param
@pytest.fixture(params=[pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(strict=True))],
ids=lambda x: x.__name__)
def box_df_fail(request):
"""
Fixture equivalent to `box` fixture but xfailing the DataFrame case.
"""
return request.param
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
ids=lambda x: x.__name__)
def test_td64arr_add_sub_float(self, box, op, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdi = tm.box_expected(tdi, box)
if box is pd.DataFrame and op in [operator.add, operator.sub]:
pytest.xfail(reason="Tries to align incorrectly, "
"raises ValueError")
with pytest.raises(TypeError):
op(tdi, other)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to cast df to "
"Period",
strict=True,
raises=IncompatibleFrequency))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
raises=ValueError,
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box)
msg = "cannot subtract a datelike from|Could not operate"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx + Timestamp('2011-01-01')
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64_radd_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
# TODO: parametrize over scalar datetime types?
result = | Timestamp('2011-01-01') | pandas.Timestamp |
import os
from glob import glob
import pandas as pd
import numpy as np
import requests as req
import matplotlib.pyplot as plt
from bs4 import BeautifulSoup as bs
import matplotlib
matplotlib.rcParams.update({'font.size': 30})
def convert_process(x):
"""
Converts process column in wikipedia transistor count table to numeric.
"""
# NANs are floats
if isinstance(x, float):
return x
else:
return x.replace('nm', '').replace(',', '').strip()
def convert_area(x):
"""
Converts area column in wikipedia transistor count table to numeric.
"""
# NANs are floats
if isinstance(x, float):
x = x
else:
x = x.replace('mm²', '').replace(',', '').replace('\xa0mm2','').strip()
return x
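# A minimal usage sketch for the two converters above, assuming the scraped
# table exposes a 'Process' column and one or more columns with 'Area' in the
# header (these column names are assumptions, not guaranteed by read_html).
def _demo_apply_converters(df):
    """Return a copy of df with process/area columns coerced to numbers."""
    df = df.copy()
    if 'Process' in df.columns:
        # strip 'nm' suffixes and thousands separators, then coerce to float
        df['Process'] = pd.to_numeric(df['Process'].apply(convert_process), errors='coerce')
    for col in [c for c in df.columns if 'Area' in str(c)]:
        # strip 'mm²' suffixes, then coerce to float
        df[col] = pd.to_numeric(df[col].apply(convert_area), errors='coerce')
    return df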
def update_transistor_data():
"""
Retrieves GPU transistor count table from wikipedia and writes to disk
as a csv.
"""
res = req.get('https://en.wikipedia.org/wiki/Transistor_count')
soup = bs(res.content, 'lxml')
tables = soup.find_all('table', {'class': 'wikitable'})
# find the table with GPU data
for t in tables:
if 'ARTC HD63484' in t.text:
break
# bs4 object has to be converted to a string
# read_html returns a list of DataFrames, so take the first one
df = pd.read_html(str(t))[0]
# some data cleaning/type conversions
df['Date of introduction'] = | pd.to_datetime(df['Date of introduction']) | pandas.to_datetime |
import os
import sys
import random
import pandas as pd
from tqdm import tqdm
import pydicom
import numpy as np
import copy
from pneumonia import InferenceConfig, InferenceConfig2
from functions import get_image_fps, box_locations, iou, create_submission, testing_augment
DATA_DIR = '../input/'
TRAIN_DIR = os.path.join(DATA_DIR, 'stage_1_train_images')
TEST_DIR = os.path.join(DATA_DIR, 'stage_1_test_images')
MODEL_DIR = '../model/Mask_RCNN'
ORIG_SIZE = 1024
# Import Mask RCNN
sys.path.append(os.path.join(MODEL_DIR)) # To find local version of the library
import mrcnn.model as modellib
from mrcnn import utils
from mrcnn import visualize
from mrcnn.model import log
image_fps_val = pd.read_csv('image_fps_val.csv').image_fps_val.tolist()
# Phase 1 config
inference_config = InferenceConfig()
inference_config.display()
assert inference_config.NUM_CLASSES == 2
# Select phase 1 model
model_path = '../model/Mask_RCNN/pneumonia/model_weight.h5'
# Load phase 1 trained model
model = modellib.MaskRCNN(mode='inference',
config=inference_config,
model_dir=MODEL_DIR)
assert model_path != "", "Provide path to trained weights"
print("Loading weights from ", model_path)
model.load_weights(model_path, by_name=True)
# Phase 1
def predict(image_fps, min_conf=0.95, augment=False):
RESIZE_FACTOR = ORIG_SIZE / inference_config.IMAGE_SHAPE[0]
prediction={}
for image_id in tqdm(image_fps):
ds = pydicom.read_file(image_id)
image = ds.pixel_array
# If grayscale, convert to RGB for consistency.
if len(image.shape) != 3 or image.shape[2] != 3:
image = np.stack((image,) * 3, -1)
image, window, scale, padding, crop = utils.resize_image(
image,
min_dim=inference_config.IMAGE_MIN_DIM,
min_scale=inference_config.IMAGE_MIN_SCALE,
max_dim=inference_config.IMAGE_MAX_DIM,
mode=inference_config.IMAGE_RESIZE_MODE)
patient_id = os.path.splitext(os.path.basename(image_id))[0]
r = model.detect([image])
r = r[0]
if augment:
r2 = model.detect([np.fliplr(image)])
r2 = r2[0]
r = testing_augment(r, r2, min_conf, inference_config)
if len(r['rois'])==0:
prediction[patient_id]=[]
else:
prediction[patient_id]=[]
for i in range(len(r['rois'])):
if r['scores'][i] > min_conf:
score = r['scores'][i]
x = r['rois'][i][1]
y = r['rois'][i][0]
if x>0 and y>0:
width = r['rois'][i][3] - x
height = r['rois'][i][2] - y
x*=RESIZE_FACTOR
y*=RESIZE_FACTOR
width*=RESIZE_FACTOR
height*=RESIZE_FACTOR
prediction[patient_id].append([score, x, y, width, height])
return prediction
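# Note: predict() returns a dict mapping patient_id to a list of
# [confidence, x, y, width, height] boxes rescaled by RESIZE_FACTOR back to the
# original 1024x1024 DICOM resolution; an empty list means nothing scored above
# min_conf for that patient.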
truth = box_locations()
prediction = predict(image_fps_val, min_conf=0.96, augment=True)
iou_all_mean,tp,fp,tn,fn = iou(truth, prediction)
print(iou_all_mean,tp,fp,tn,fn)
# Predict on all training data for training phase 2 model
if False:
image_fps_train = get_image_fps(TRAIN_DIR)
prediction = predict(image_fps_train, min_conf=0.96, augment=True)
# Convert prediction to training labels
train_labels_2 = pd.DataFrame(columns=['patientId', 'x', 'y', 'width', 'height', 'Target', 'class'])
i=0
for patient_id in list(prediction.keys()):
if len(truth[patient_id])>0:
for box in truth[patient_id]:
train_labels_2.loc[i] = [patient_id, int(box[0]), int(box[1]), int(box[2]), int(box[3]), 1, 1]
i+=1
else:
if len(prediction[patient_id])>0:
for box in prediction[patient_id]:
train_labels_2.loc[i] = [patient_id, int(box[1]), int(box[2]), int(box[3]), int(box[4]), 0, 2]
i+=1
else:
train_labels_2.loc[i] = [patient_id, np.nan, np.nan, np.nan, np.nan, 0, 0]
i+=1
train_labels_2.sort_values(by='patientId', inplace=True)
train_labels_2.to_csv(os.path.join(DATA_DIR, 'train_labels_2.csv'), index=False)
print(len(train_labels_2))
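# Note on the block above: it builds the phase-2 training labels from the
# phase-1 output. Studies with ground-truth boxes keep them as class 1, studies
# where phase 1 predicted boxes but no truth exists become class 2
# (false-positive examples), and studies with neither get class 0.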
# Phase 2
inference_config_2 = InferenceConfig2()
inference_config_2.display()
assert inference_config_2.NUM_CLASSES == 3
# Select phase 2 model
model_2_path = '../model/Mask_RCNN/pneumonia/model_weight.h5'
model_2 = modellib.MaskRCNN(mode='inference',
config=inference_config_2,
model_dir=MODEL_DIR)
assert model_2_path != "", "Provide path to trained weights"
print("Loading weights from ", model_2_path)
model_2.load_weights(model_2_path, by_name=True)
# Phase 2
def predict2(image_fps, min_conf=0.90, augment=False):
RESIZE_FACTOR = ORIG_SIZE / inference_config_2.IMAGE_SHAPE[0]
prediction={}
for image_id in tqdm(image_fps):
ds = pydicom.read_file(image_id)
image = ds.pixel_array
# If grayscale, convert to RGB for consistency.
if len(image.shape) != 3 or image.shape[2] != 3:
image = np.stack((image,) * 3, -1)
image, window, scale, padding, crop = utils.resize_image(
image,
min_dim=inference_config.IMAGE_MIN_DIM,
min_scale=inference_config.IMAGE_MIN_SCALE,
max_dim=inference_config.IMAGE_MAX_DIM,
mode=inference_config.IMAGE_RESIZE_MODE)
patient_id = os.path.splitext(os.path.basename(image_id))[0]
r = model_2.detect([image])
r = r[0]
if augment:
r2 = model_2.detect([np.fliplr(image)])
r2 = r2[0]
r = testing_augment(r, r2, min_conf, inference_config)
if len(r['rois'])==0:
prediction[patient_id]=[]
else:
prediction[patient_id]=[]
for i in range(len(r['rois'])):
if r['class_ids'][i]==2 and r['scores'][i] > min_conf:
score = r['scores'][i]
x = r['rois'][i][1]
y = r['rois'][i][0]
if x>0 and y>0:
width = r['rois'][i][3] - x
height = r['rois'][i][2] - y
x*=RESIZE_FACTOR
y*=RESIZE_FACTOR
width*=RESIZE_FACTOR
height*=RESIZE_FACTOR
prediction[patient_id].append([score, x, y, width, height])
return prediction
prediction_2 = predict2(image_fps_val, min_conf=0.92, augment=False)
# Merge phase-1 and phase-2 predictions
def merge_predictions(prediction, prediction_2):
prediction_3 = copy.deepcopy(prediction)
for patient_id in list(prediction_2.keys()):
if len(prediction_2[patient_id])>0:
prediction_3[patient_id] = []
return prediction_3
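# Note: merge_predictions keeps the phase-1 boxes only for patients where the
# phase-2 model found nothing; any class-2 detection (a region resembling the
# phase-1 false positives it was trained on) empties that patient's box list.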
prediction_3 = merge_predictions(prediction, prediction_2)
iou_all_mean,tp,fp,tn,fn = iou(truth, prediction_3)
# Predict on testing data
if True:
image_fps_test = get_image_fps(TEST_DIR)
image_fps_test.sort()
prediction_test = predict(image_fps_test, min_conf=0.96, augment=True)
prediction_test_2 = predict2(image_fps_test, min_conf=0.92, augment=False)
prediction_test_3 = merge_predictions(prediction_test, prediction_test_2)
create_submission(prediction_test_3)
#submission
submission = | pd.read_csv('submission.csv') | pandas.read_csv |
import os
import joblib
import numpy as np
import pandas as pd
from joblib import Parallel
from joblib import delayed
from Fuzzy_clustering.version2.common_utils.logging import create_logger
from Fuzzy_clustering.version2.dataset_manager.common_utils import check_empty_nwp
from Fuzzy_clustering.version2.dataset_manager.common_utils import rescale_mean
from Fuzzy_clustering.version2.dataset_manager.common_utils import stack_2d_dense
from Fuzzy_clustering.version2.dataset_manager.common_utils import stack_3d
class DatasetCreatorDense:
def __init__(self, projects_group, projects, data, path_nwp, nwp_model, nwp_resolution, data_variables, njobs=1,
test=False, dates=None):
self.projects = projects
self.is_for_test = test
self.projects_group = projects_group
self.data = data
self.path_nwp = path_nwp
self.nwp_model = nwp_model
self.nwp_resolution = nwp_resolution
self.compress = True if self.nwp_resolution == 0.05 else False
self.n_jobs = njobs
self.variables = data_variables
self.logger = create_logger(logger_name=__name__, abs_path=self.path_nwp,
logger_path=f'log_{self.projects_group}.log', write_type='a')
if self.data is not None:
    self.dates = self.check_dates()
elif dates is not None:
    self.dates = dates
def check_dates(self):
start_date = pd.to_datetime(self.data.index[0].strftime('%d%m%y'), format='%d%m%y')
end_date = pd.to_datetime(self.data.index[-1].strftime('%d%m%y'), format='%d%m%y')
dates = pd.date_range(start_date, end_date)
data_dates = pd.to_datetime(np.unique(self.data.index.strftime('%d%m%y')), format='%d%m%y')
dates = [d for d in dates if d in data_dates]
self.logger.info('Dates are checked. Number of time samples is %s', str(len(dates)))
return pd.DatetimeIndex(dates)
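# Note: check_dates keeps only the calendar days that actually occur in the
# observation index, so NWP files are later opened only for days that have
# measurements.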
def correct_nwps(self, nwp, variables):
if nwp['lat'].shape[0] == 0:
area_group = self.projects[0]['static_data']['area_group']
resolution = self.projects[0]['static_data']['NWP_resolution']
nwp['lat'] = np.arange(area_group[0][0], area_group[1][0] + resolution / 2,
resolution).reshape(-1, 1)
nwp['long'] = np.arange(area_group[0][1], area_group[1][1] + resolution / 2,
resolution).reshape(-1, 1).T
for var in nwp.keys():
if not var in {'lat', 'long'}:
if nwp['lat'].shape[0] != nwp[var].shape[0]:
nwp[var] = nwp[var].T
if 'WS' in variables and not 'WS' in nwp.keys():
if 'Uwind' in nwp.keys() and 'Vwind' in nwp.keys():
if nwp['Uwind'].shape[0] > 0 and nwp['Vwind'].shape[0] > 0:
r2d = 45.0 / np.arctan(1.0)
wspeed = np.sqrt(np.square(nwp['Uwind']) + np.square(nwp['Vwind']))
wdir = np.arctan2(nwp['Uwind'], nwp['Vwind']) * r2d + 180
nwp['WS'] = wspeed
nwp['WD'] = wdir
if 'Temp' in nwp.keys():
nwp['Temperature'] = nwp['Temp']
del nwp['Temp']
return nwp
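# Note: the wind derivation above follows the usual meteorological formulas
# WS = sqrt(Uwind**2 + Vwind**2) and WD = atan2(Uwind, Vwind) * 180/pi + 180,
# so WD ends up in degrees in [0, 360). For example, Uwind = 3 and Vwind = 4
# gives WS = 5 and WD ≈ 216.87 degrees.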
def stack_by_sample(self, t, data, lats, longs, path_nwp, nwp_model, projects, variables, predictions):
timestep = 60
x = dict()
y = dict()
x_3d = dict()
file_name = os.path.join(path_nwp, f"{nwp_model}_{t.strftime('%d%m%y')}.pickle")
if os.path.exists(file_name):
nwps = joblib.load(file_name)
for project in projects:
preds = predictions[project['_id']]
hor = preds.columns[-1] + timestep
p_dates = [t + pd.DateOffset(minutes=hor)]
preds = preds.loc[t].to_frame().T
dates_pred = [t + pd.DateOffset(minutes=h) for h in preds.columns]
pred = pd.DataFrame(preds.values.ravel(), index=dates_pred, columns=[project['_id']])
data_temp = pd.concat([data[project['_id']].iloc[np.where(data.index < t)].to_frame(), pred])
project_id = project['_id'] # It's the project name, the park's name
x[project_id] = pd.DataFrame()
y[project_id] = pd.DataFrame()
x_3d[project_id] = np.array([])
areas = project['static_data']['areas']
if isinstance(areas, list):
for date in p_dates:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
try:
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample(date, nwp, nwp_prev, nwp_next, lats[project_id],
longs[project_id], project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data_temp.loc[(date - pd.DateOffset(hours=1))].values
inp['Obs_lag2'] = data_temp.loc[(date - pd.DateOffset(hours=2))].values
if not inp.isnull().any(axis=1).values and not np.isnan(data.loc[date, project_id]):
x[project_id] = pd.concat([x[project_id], inp])
x_3d[project_id] = stack_2d_dense(x_3d[project_id], inp_cnn, False)
y[project_id] = pd.concat([y[project_id], pd.DataFrame(data.loc[date, project_id],
columns=['target'],
index=[date])])
except Exception:
continue
else:
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_country(date, nwp, nwp_prev, nwp_next,
lats[project['_id']],
longs[project['_id']],
project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data_temp.loc[(date - pd.DateOffset(hours=1)), project_id].values
inp['Obs_lag2'] = data_temp.loc[(date - pd.DateOffset(hours=2)), project_id].values
if not inp.isnull().any(axis=1).values and not np.isnan(data.loc[date, project_id]):
x[project['_id']] = pd.concat([x[project['_id']], inp])
x_3d[project['_id']] = stack_2d_dense(x_3d[project['_id']], inp_cnn, False)
y[project['_id']] = pd.concat(
[y[project['_id']], pd.DataFrame(data.loc[date, project['_id']],
columns=['target'], index=[date])])
except Exception:
continue
print(t.strftime('%d%m%y%H%M'), ' extracted')
for project in projects:
if len(x_3d[project['_id']].shape) == 3:
x_3d[project['_id']] = x_3d[project['_id']][np.newaxis, :, :, :]
return x, y, x_3d, t.strftime('%d%m%y%H%M')
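# Note: stack_by_sample builds one evaluation sample per project for timestamp
# t. The project's stored short-term predictions are appended to the
# observations before t (data_temp), so the lagged features Obs_lag1/Obs_lag2
# can be taken beyond the last measured point, and the NWP grid for the
# forecast hour is stacked into x_3d.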
def stack_daily_nwps(self, t, data, lats, longs, path_nwp, nwp_model, projects, variables):
x = dict()
y = dict()
x_3d = dict()
file_name = os.path.join(path_nwp, f"{nwp_model}_{t.strftime('%d%m%y')}.pickle")
if os.path.exists(file_name):
nwps = joblib.load(file_name)
for project in projects:
if project['static_data']['horizon'] == 'day_ahead':
p_dates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H')
else:
p_dates = pd.date_range(t + pd.DateOffset(hours=1), t + pd.DateOffset(hours=24), freq='H')
project_id = project['_id'] # It's the project name, the park's name
x[project_id] = pd.DataFrame()
y[project_id] = pd.DataFrame()
x_3d[project_id] = np.array([])
areas = project['static_data']['areas']
if isinstance(areas, list):
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample(date, nwp, nwp_prev, nwp_next, lats[project_id],
longs[project_id], project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data.loc[(date - pd.DateOffset(hours=1)), project_id]
inp['Obs_lag2'] = data.loc[(date - pd.DateOffset(hours=2)), project_id]
if not self.is_for_test:
inp['Obs_lag1'] = inp['Obs_lag1'] + np.random.normal(0, 0.05) * inp['Obs_lag1']
inp['Obs_lag2'] = inp['Obs_lag2'] + np.random.normal(0, 0.05) * inp['Obs_lag2']
if not inp.isnull().any(axis=1).values and not np.isnan(data.loc[date, project_id]):
x[project_id] = pd.concat([x[project_id], inp])
x_3d[project_id] = stack_2d_dense(x_3d[project_id], inp_cnn, False)
y[project_id] = pd.concat([y[project_id], pd.DataFrame(data.loc[date, project_id],
columns=['target'],
index=[date])])
except Exception:
continue
else:
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_country(date, nwp, nwp_prev, nwp_next,
lats[project['_id']],
longs[project['_id']],
project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data.loc[(date - pd.DateOffset(hours=1)), project_id]
inp['Obs_lag2'] = data.loc[(date - pd.DateOffset(hours=2)), project_id]
if not inp.isnull().any(axis=1).values and not np.isnan(data.loc[date, project_id]):
x[project['_id']] = pd.concat([x[project['_id']], inp])
x_3d[project['_id']] = stack_2d_dense(x_3d[project['_id']], inp_cnn, False)
y[project['_id']] = pd.concat(
[y[project['_id']], pd.DataFrame(data.loc[date, project['_id']],
columns=['target'], index=[date])])
except Exception:
continue
print(t.strftime('%d%m%y%H%M'), ' extracted')
return x, y, x_3d, t.strftime('%d%m%y%H%M')
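# Note: stack_daily_nwps builds one day of training samples per project.
# Day-ahead projects use forecast hours +24..+47 of day t, short-term projects
# use +1..+24, and in the per-park branch the Obs_lag1/Obs_lag2 features are
# jittered with ~5% multiplicative Gaussian noise when is_for_test is False.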
def stack_daily_nwps_rabbitmq(self, t, path_nwp, nwp_model, project, variables):
x = dict()
x_3d = dict()
nwps = project['nwp']
p_dates = pd.date_range(t, t + pd.DateOffset(days=3) - pd.DateOffset(hours=1), freq='H')
project_id = project['_id'] # It's the project name, the park's name
x[project_id] = pd.DataFrame()
x_3d[project_id] = np.array([])
areas = project['static_data']['areas']
if isinstance(areas, list):
for date in p_dates:
try:
date_nwp = date.strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_rabbitmq(date, nwp, nwp_prev, nwp_next, project['static_data']['type'])
x[project_id] = pd.concat([x[project_id], inp])
x_3d[project_id] = stack_2d_dense(x_3d[project_id], inp_cnn, False)
except Exception:
continue
else:
for date in p_dates:
try:
date_nwp = date.strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_country(date, nwp, nwp_prev, nwp_next,
lats[project['_id']],
longs[project['_id']],
project['static_data']['type'])
x[project['_id']] = pd.concat([x[project['_id']], inp])
x_3d[project['_id']] = stack_2d_dense(x_3d[project['_id']], inp_cnn, False)
except Exception:
continue
print(t.strftime('%d%m%y%H%M'), ' extracted')
return x, x_3d, t.strftime('%d%m%y%H%M')
def stack_daily_nwps_online(self, t, data, lats, longs, path_nwp, nwp_model, projects, variables):
x = dict()
x_3d = dict()
file_name = os.path.join(path_nwp, f"{nwp_model}_{t.strftime('%d%m%y')}.pickle")
if os.path.exists(file_name):
nwps = joblib.load(file_name)
for project in projects:
if project['static_data']['horizon'] == 'day_ahead':
p_dates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H')
else:
p_dates = pd.date_range(t + pd.DateOffset(hours=1), t + pd.DateOffset(hours=24), freq='15min')
project_id = project['_id'] # It's the project name, the park's name
x[project_id] = pd.DataFrame()
x_3d[project_id] = np.array([])
areas = project['static_data']['areas']
if isinstance(areas, list):
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample(date, nwp, nwp_prev, nwp_next, lats[project_id],
longs[project_id], project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data.loc[(date - pd.DateOffset(hours=1)), project_id]
inp['Obs_lag2'] = data.loc[(date - pd.DateOffset(hours=2)), project_id]
x[project_id] = pd.concat([x[project_id], inp])
x_3d[project_id] = stack_2d_dense(x_3d[project_id], inp_cnn, False)
except Exception:
continue
else:
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_country(date, nwp, nwp_prev, nwp_next,
lats[project['_id']],
longs[project['_id']],
project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data.loc[(date - pd.DateOffset(hours=1)), project_id]
inp['Obs_lag2'] = data.loc[(date - pd.DateOffset(hours=2)), project_id]
x[project['_id']] = pd.concat([x[project['_id']], inp])
x_3d[project['_id']] = stack_2d_dense(x_3d[project['_id']], inp_cnn, False)
except Exception:
continue
print(t.strftime('%d%m%y%H%M'), ' extracted')
return x, x_3d, t.strftime('%d%m%y%H%M')
def get_lats_longs(self):
lats = dict()
longs = dict()
nwp_found = False
for t in self.dates:  # find at least one NWP file that exists and contains a usable forecast
file_name = os.path.join(self.path_nwp, f"{self.nwp_model}_{t.strftime('%d%m%y')}.pickle")
p_dates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=48), freq='H').strftime(
'%d%m%y%H%M')
if os.path.exists(file_name):
nwps = joblib.load(file_name)
for date in p_dates:
if date in nwps:
nwp = nwps[date]
nwp_found = True
break
if nwp_found:
break
print(nwp_found)
if len(nwp['lat'].shape) == 1:
nwp['lat'] = nwp['lat'][:, np.newaxis]
if len(nwp['long'].shape) == 1:
nwp['long'] = nwp['long'][np.newaxis, :]
if nwp['lat'].shape[0] == 0:
area_group = self.projects[0]['static_data']['area_group']
resolution = self.projects[0]['static_data']['NWP_resolution']
nwp['lat'] = np.arange(area_group[0][0], area_group[1][0] + resolution / 2, resolution).reshape(-1, 1)
nwp['long'] = np.arange(area_group[0][1], area_group[1][1] + resolution / 2, resolution).reshape(-1, 1).T
for project in self.projects:
areas = project['static_data']['areas'] # The final area is a 5x5 grid
project_id = project['_id']
lat, long = nwp['lat'], nwp['long']
if isinstance(areas, list):
# Is this guaranteed to be 5x5 ? I think yes, because of the resolution. TODO: VERIFY
lats[project_id] = np.where((lat[:, 0] >= areas[0][0]) & (lat[:, 0] <= areas[1][0]))[0]
longs[project_id] = np.where((long[0, :] >= areas[0][1]) & (long[0, :] <= areas[1][1]))[0]
else:
lats[project_id] = dict()
longs[project_id] = dict()
for area in sorted(areas.keys()):
    lats[project_id][area] = np.where((lat[:, 0] >= areas[area][0][0]) & (lat[:, 0] <= areas[area][1][0]))[0]
    longs[project_id][area] = np.where((long[0, :] >= areas[area][0][1]) & (long[0, :] <= areas[area][1][1]))[0]
return lats, longs
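# For intuition, a small example of the masks above (a 0.25-degree grid is used
# here only because those values are exactly representable as floats; the real
# resolution is typically 0.05):
#
#   lat = np.arange(35.0, 36.0 + 0.125, 0.25).reshape(-1, 1)
#   # lat[:, 0] -> [35.0, 35.25, 35.5, 35.75, 36.0]
#   idx = np.where((lat[:, 0] >= 35.25) & (lat[:, 0] <= 35.75))[0]
#   # idx -> array([1, 2, 3]); these row indices select the area's grid rows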
def make_dataset_res_short_term(self):
lats, longs = self.get_lats_longs()
predictions = dict()
for project in self.projects:
predictions[project['_id']] = joblib.load(os.path.join(project['static_data']['path_data']
, 'predictions_short_term.pickle'))
nwp = self.stack_by_sample(self.data.index[20], self.data, lats, longs, self.path_nwp, self.nwp_model, self.projects,
self.variables, predictions)
nwp_samples = Parallel(n_jobs=self.n_jobs)(
delayed(self.stack_by_sample)(t, self.data, lats, longs, self.path_nwp, self.nwp_model, self.projects,
self.variables, predictions) for t in self.data.index[20:])
x = dict()
y = dict()
x_3d = dict()
for project in self.projects:
x[project['_id']] = pd.DataFrame()
y[project['_id']] = pd.DataFrame()
x_3d[project['_id']] = np.array([])
for nwp in nwp_samples:
for project in self.projects:
if project['_id'] in nwp[2].keys():
if nwp[2][project['_id']].shape[0] != 0:
x[project['_id']] = pd.concat([x[project['_id']], nwp[0][project['_id']]])
y[project['_id']] = pd.concat([y[project['_id']], nwp[1][project['_id']]])
x_3d[project['_id']] = stack_3d(x_3d[project['_id']], nwp[2][project['_id']])
self.logger.info('All Inputs stacked')
dataset_x_csv = 'dataset_X_test.csv'
dataset_y_csv = 'dataset_y_test.csv'
dataset_cnn_pickle = 'dataset_cnn_test.pickle'
for project in self.projects:
project_id = project['_id']
data_path = project['static_data']['path_data']
dataset_x = x[project_id]
dataset_y = y[project_id]
if dataset_y.isna().any().values[0]:
    dataset_x = dataset_x.drop(dataset_y.index[np.where(dataset_y.isna())[0]])
    if len(x_3d[project_id].shape) > 1:
        x_3d[project_id] = np.delete(x_3d[project_id], np.where(dataset_y.isna())[0], axis=0)
    dataset_y = dataset_y.drop(dataset_y.index[np.where(dataset_y.isna())[0]])
if dataset_x.isna().any().values[0]:
    dataset_y = dataset_y.drop(dataset_x.index[np.where(dataset_x.isna())[0]])
    if len(x_3d[project_id].shape) > 1:
        x_3d[project_id] = np.delete(x_3d[project_id], np.where(dataset_x.isna())[0], axis=0)
    dataset_x = dataset_x.drop(dataset_x.index[np.where(dataset_x.isna())[0]])
index = [d for d in dataset_x.index if d in dataset_y.index]
dataset_x = dataset_x.loc[index]
dataset_y = dataset_y.loc[index]
ind = joblib.load(os.path.join(data_path, 'dataset_columns_order.pickle'))
columns = dataset_x.columns[ind]
dataset_x = dataset_x[columns]
dataset_x.to_csv(os.path.join(data_path, dataset_x_csv))
dataset_y.to_csv(os.path.join(data_path, dataset_y_csv))
joblib.dump(x_3d[project_id], os.path.join(data_path, dataset_cnn_pickle))
self.logger.info('Datasets saved for project %s', project['_id'])
def make_dataset_res_rabbitmq(self):
project = self.projects[0]
nwp_daily = self.stack_daily_nwps_rabbitmq(self.dates[0], self.path_nwp, self.nwp_model, project,
self.variables)
x = nwp_daily[0][project['_id']]
x_3d = nwp_daily[1][project['_id']]
project_id = project['_id']
data_path = project['static_data']['path_data']
dataset_x = x
if os.path.exists(os.path.join(data_path, 'dataset_columns_order.pickle')):
ind = joblib.load(os.path.join(data_path, 'dataset_columns_order.pickle'))
columns = dataset_x.columns[ind]
dataset_x = dataset_x[columns]
return dataset_x, x_3d
def make_dataset_res_online(self):
project = self.projects[0]
lats, longs = self.get_lats_longs()
nwp_daily = Parallel(n_jobs=self.n_jobs)(
delayed(self.stack_daily_nwps_online)(t, self.data, lats, longs, self.path_nwp, self.nwp_model, self.projects,
self.variables) for t in self.dates)
x = pd.DataFrame()
y = pd.DataFrame()
x_3d = np.array([])
for nwp in nwp_daily:
if nwp[1][project['_id']].shape[0] != 0:
x = pd.concat([x, nwp[0][project['_id']]])
x_3d = stack_3d(x_3d, nwp[2][project['_id']])
project_id = project['_id']
data_path = project['static_data']['path_data']
dataset_x = x
ind = joblib.load(os.path.join(data_path, 'dataset_columns_order.pickle'))
columns = dataset_x.columns[ind]
dataset_x = dataset_x[columns]
return dataset_x, x_3d
def make_dataset_res(self):
lats, longs = self.get_lats_longs()
nwp = self.stack_daily_nwps(self.dates[4], self.data, lats, longs, self.path_nwp, self.nwp_model, self.projects,
self.variables)
nwp_daily = Parallel(n_jobs=self.n_jobs)(
delayed(self.stack_daily_nwps)(t, self.data, lats, longs, self.path_nwp, self.nwp_model, self.projects,
self.variables) for t in self.dates)
x = dict()
y = dict()
x_3d = dict()
for project in self.projects:
x[project['_id']] = pd.DataFrame()
y[project['_id']] = pd.DataFrame()
x_3d[project['_id']] = np.array([])
for nwp in nwp_daily:
for project in self.projects:
if project['_id'] in nwp[2].keys():
if nwp[2][project['_id']].shape[0] != 0:
x[project['_id']] = | pd.concat([x[project['_id']], nwp[0][project['_id']]]) | pandas.concat |
import unittest
import numpy as np
import pandas as pd
from minotor.data_managers.data_types import DataType
class TestDataType(unittest.TestCase):
def test_numpy_float2data_type(self):
arr = np.array([[1.0, 1.2]])
dtype = arr.dtype
self.assertEqual(DataType.type2value(dtype), DataType.FLOAT)
def test_numpy_int2data_type(self):
arr = np.array([[1, 1]])
dtype = arr.dtype
self.assertEqual(DataType.type2value(dtype), DataType.INT)
def test_numpy_bool2data_type(self):
arr = np.array([[True, False]])
dtype = arr.dtype
self.assertEqual(DataType.type2value(dtype), DataType.BOOL)
def test_pandas_int2data_type(self):
s = pd.Series([1, 2])
self.assertEqual(DataType.type2value(s.dtype), DataType.INT)
def test_pandas_float2data_type(self):
s = pd.Series([1.0, 2.0])
self.assertEqual(DataType.type2value(s.dtype), DataType.FLOAT)
def test_pandas_cat2data_type(self):
s = pd.Series(["cat1", "cat2"], dtype="category")
self.assertEqual(DataType.type2value(s.dtype), DataType.CATEGORY)
def test_pandas_bool2data_type(self):
s = | pd.Series([True, False]) | pandas.Series |
import pandas as pd
from business_rules.operators import (DataframeType, StringType,
NumericType, BooleanType, SelectType,
SelectMultipleType, GenericType)
from . import TestCase
from decimal import Decimal
import sys
import pandas
class StringOperatorTests(TestCase):
def test_operator_decorator(self):
self.assertTrue(StringType("foo").equal_to.is_operator)
def test_string_equal_to(self):
self.assertTrue(StringType("foo").equal_to("foo"))
self.assertFalse(StringType("foo").equal_to("Foo"))
def test_string_not_equal_to(self):
self.assertTrue(StringType("foo").not_equal_to("Foo"))
self.assertTrue(StringType("foo").not_equal_to("boo"))
self.assertFalse(StringType("foo").not_equal_to("foo"))
def test_string_equal_to_case_insensitive(self):
self.assertTrue(StringType("foo").equal_to_case_insensitive("FOo"))
self.assertTrue(StringType("foo").equal_to_case_insensitive("foo"))
self.assertFalse(StringType("foo").equal_to_case_insensitive("blah"))
def test_string_starts_with(self):
self.assertTrue(StringType("hello").starts_with("he"))
self.assertFalse(StringType("hello").starts_with("hey"))
self.assertFalse(StringType("hello").starts_with("He"))
def test_string_ends_with(self):
self.assertTrue(StringType("hello").ends_with("lo"))
self.assertFalse(StringType("hello").ends_with("boom"))
self.assertFalse(StringType("hello").ends_with("Lo"))
def test_string_contains(self):
self.assertTrue(StringType("hello").contains("ell"))
self.assertTrue(StringType("hello").contains("he"))
self.assertTrue(StringType("hello").contains("lo"))
self.assertFalse(StringType("hello").contains("asdf"))
self.assertFalse(StringType("hello").contains("ElL"))
def test_string_matches_regex(self):
self.assertTrue(StringType("hello").matches_regex(r"^h"))
self.assertFalse(StringType("hello").matches_regex(r"^sh"))
def test_non_empty(self):
self.assertTrue(StringType("hello").non_empty())
self.assertFalse(StringType("").non_empty())
self.assertFalse(StringType(None).non_empty())
class NumericOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, err_string):
NumericType("foo")
def test_numeric_type_validates_and_casts_decimal(self):
ten_dec = Decimal(10)
ten_int = 10
ten_float = 10.0
if sys.version_info[0] == 2:
ten_long = long(10)
else:
ten_long = int(10) # long and int are same in python3
ten_var_dec = NumericType(ten_dec) # this should not throw an exception
ten_var_int = NumericType(ten_int)
ten_var_float = NumericType(ten_float)
ten_var_long = NumericType(ten_long)
self.assertTrue(isinstance(ten_var_dec.value, Decimal))
self.assertTrue(isinstance(ten_var_int.value, Decimal))
self.assertTrue(isinstance(ten_var_float.value, Decimal))
self.assertTrue(isinstance(ten_var_long.value, Decimal))
def test_numeric_equal_to(self):
self.assertTrue(NumericType(10).equal_to(10))
self.assertTrue(NumericType(10).equal_to(10.0))
self.assertTrue(NumericType(10).equal_to(10.000001))
self.assertTrue(NumericType(10.000001).equal_to(10))
self.assertTrue(NumericType(Decimal('10.0')).equal_to(10))
self.assertTrue(NumericType(10).equal_to(Decimal('10.0')))
self.assertFalse(NumericType(10).equal_to(10.00001))
self.assertFalse(NumericType(10).equal_to(11))
def test_numeric_not_equal_to(self):
self.assertTrue(NumericType(10).not_equal_to(10.00001))
self.assertTrue(NumericType(10).not_equal_to(11))
self.assertTrue(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.1')))
self.assertFalse(NumericType(10).not_equal_to(10))
self.assertFalse(NumericType(10).not_equal_to(10.0))
self.assertFalse(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.0')))
def test_other_value_not_numeric(self):
error_string = "10 is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, error_string):
NumericType(10).equal_to("10")
def test_numeric_greater_than(self):
self.assertTrue(NumericType(10).greater_than(1))
self.assertFalse(NumericType(10).greater_than(11))
self.assertTrue(NumericType(10.1).greater_than(10))
self.assertFalse(NumericType(10.000001).greater_than(10))
self.assertTrue(NumericType(10.000002).greater_than(10))
def test_numeric_greater_than_or_equal_to(self):
self.assertTrue(NumericType(10).greater_than_or_equal_to(1))
self.assertFalse(NumericType(10).greater_than_or_equal_to(11))
self.assertTrue(NumericType(10.1).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000001).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000002).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10).greater_than_or_equal_to(10))
def test_numeric_less_than(self):
self.assertTrue(NumericType(1).less_than(10))
self.assertFalse(NumericType(11).less_than(10))
self.assertTrue(NumericType(10).less_than(10.1))
self.assertFalse(NumericType(10).less_than(10.000001))
self.assertTrue(NumericType(10).less_than(10.000002))
def test_numeric_less_than_or_equal_to(self):
self.assertTrue(NumericType(1).less_than_or_equal_to(10))
self.assertFalse(NumericType(11).less_than_or_equal_to(10))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.1))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000001))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000002))
self.assertTrue(NumericType(10).less_than_or_equal_to(10))
class BooleanOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType("foo")
err_string = "None is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType(None)
def test_boolean_is_true_and_is_false(self):
self.assertTrue(BooleanType(True).is_true())
self.assertFalse(BooleanType(True).is_false())
self.assertFalse(BooleanType(False).is_true())
self.assertTrue(BooleanType(False).is_false())
class SelectOperatorTests(TestCase):
def test_contains(self):
self.assertTrue(SelectType([1, 2]).contains(2))
self.assertFalse(SelectType([1, 2]).contains(3))
self.assertTrue(SelectType([1, 2, "a"]).contains("A"))
def test_does_not_contain(self):
self.assertTrue(SelectType([1, 2]).does_not_contain(3))
self.assertFalse(SelectType([1, 2]).does_not_contain(2))
self.assertFalse(SelectType([1, 2, "a"]).does_not_contain("A"))
class SelectMultipleOperatorTests(TestCase):
def test_contains_all(self):
self.assertTrue(SelectMultipleType([1, 2]).
contains_all([2, 1]))
self.assertFalse(SelectMultipleType([1, 2]).
contains_all([2, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
contains_all([2, 1, "A"]))
def test_is_contained_by(self):
self.assertTrue(SelectMultipleType([1, 2]).
is_contained_by([2, 1, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
is_contained_by([2, 3, 4]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
is_contained_by([2, 1, "A"]))
def test_shares_at_least_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_at_least_one_element_with([4, "A"]))
def test_shares_exactly_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_exactly_one_element_with([4, "A"]))
self.assertFalse(SelectMultipleType([1, 2, 3]).
shares_exactly_one_element_with([2, 3, "a"]))
def test_shares_no_elements_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_no_elements_with([4, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_no_elements_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2, "a"]).
shares_no_elements_with([4, "A"]))
class DataframeOperatorTests(TestCase):
def test_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ],
})
result: pd.Series = DataframeType({"value": df}).exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_not_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ]
})
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, "", 7, ],
"var2": [3, 5, 6, "", 2, ],
"var3": [1, 3, 8, "", 7, ],
"var4": ["test", "issue", "one", "", "two", ]
})
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to({
"target": "--r1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 20
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var4",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False, False, ])))
def test_not_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": 20
}).equals(pandas.Series([True, True, True])))
def test_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "", "new", "val"],
"var2": ["WORD", "", "test", "VAL"],
"var3": ["LET", "", "GO", "read"]
})
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "NEW"
}).equals(pandas.Series([False, False, True, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to_case_insensitive({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False])))
def test_not_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "new", "val"],
"var2": ["WORD", "test", "VAL"],
"var3": ["LET", "GO", "read"],
"var4": ["WORD", "NEW", "VAL"]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
def test_less_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than({
"target": "--r1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": 3
}).equals(pandas.Series([True, True, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_less_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than_or_equal_to({
"target": "--r1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var1"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([False, False, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 5, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than_or_equal_to({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_greater_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": 5000
}).equals(pandas.Series([False, False, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_greater_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than_or_equal_to({
"target": "var1",
"comparator": "--r4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([True, True, False])))
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 3, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than_or_equal_to({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_contains(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"string_var": ["hj", "word", "c"],
"var5": [[1,3,5],[1,3,5], [1,3,5]]
})
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "string_var",
"comparator": "string_var"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "string_var",
"comparator": "string_var",
"value_is_literal": True
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var5",
"comparator": "var1"
}).equals(pandas.Series([True, False, False])))
def test_does_not_contain(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"string_var": ["hj", "word", "c"],
"var5": [[1,3,5],[1,3,5], [1,3,5]]
})
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": 5
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).does_not_contain({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "string_var",
"comparator": "string_var",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "string_var",
"comparator": "string_var"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var5",
"comparator": "var1"
}).equals(pandas.Series([False, True, True])))
def test_contains_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["pikachu", "charmander", "squirtle"],
"var2": ["PIKACHU", "CHARIZARD", "BULBASAUR"],
"var3": ["POKEMON", "CHARIZARD", "BULBASAUR"],
"var4": [
["pikachu", "charizard", "bulbasaur"],
["chikorita", "cyndaquil", "totodile"],
["chikorita", "cyndaquil", "totodile"]
]
})
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "PIKACHU"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains_case_insensitive({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var3",
"comparator": "var3"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var3",
"comparator": "var3",
"value_is_literal": True
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var4",
"comparator": "var2"
}).equals(pandas.Series([True, False, False])))
def test_does_not_contain_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["pikachu", "charmander", "squirtle"],
"var2": ["PIKACHU", "CHARIZARD", "BULBASAUR"],
"var3": ["pikachu", "charizard", "bulbasaur"],
"var4": [
["pikachu", "charizard", "bulbasaur"],
["chikorita", "cyndaquil", "totodile"],
["chikorita", "cyndaquil", "totodile"]
]
})
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var1",
"comparator": "IVYSAUR"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var3",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var4",
"comparator": "var2"
}).equals(pandas.Series([False, True, True])))
def test_is_contained_by(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"var5": [[1,2,3], [1,2], [17]]
})
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": [4,5,6]
}).equals(pandas.Series([False, False, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_contained_by({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": [9, 10, 11]
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var5"
}).equals(pandas.Series([True, True, False])))
def test_is_not_contained_by(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"var5": [[1,2,3], [1,2], [17]]
})
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_not_contained_by({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": [9, 10, 11]
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var1"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var5"
}).equals(pandas.Series([False, False, True])))
def test_is_contained_by_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"],
"var4": [set(["word"]), set(["test"])]
})
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": ["word", "TEST"]
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var1"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var3",
"comparator": "var4"
}).equals(pandas.Series([False, False])))
def test_is_not_contained_by_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"],
"var4": [set(["word"]), set(["test"])]
})
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": ["word", "TEST"]
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var3",
"comparator": "var4"
}).equals(pandas.Series([True, True])))
def test_prefix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).prefix_matches_regex({
"target": "--r2",
"comparator": "w.*",
"prefix": 2
}).equals(pandas.Series([True, False])))
self.assertTrue(DataframeType({"value": df}).prefix_matches_regex({
"target": "var2",
"comparator": "[0-9].*",
"prefix": 2
}).equals(pandas.Series([False, False])))
def test_suffix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).suffix_matches_regex({
"target": "--r1",
"comparator": "es.*",
"suffix": 3
}).equals(pandas.Series([False, True])))
self.assertTrue(DataframeType({"value": df}).suffix_matches_regex({
"target": "var1",
"comparator": "[0-9].*",
"suffix": 3
}).equals(pandas.Series([False, False])))
    def test_not_prefix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_prefix_matches_regex({
"target": "--r1",
"comparator": ".*",
"prefix": 2
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).not_prefix_matches_regex({
"target": "var2",
"comparator": "[0-9].*",
"prefix": 2
}).equals(pandas.Series([True, True])))
def test_not_suffix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df}).not_suffix_matches_regex({
"target": "var1",
"comparator": ".*",
"suffix": 3
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_suffix_matches_regex({
"target": "--r1",
"comparator": "[0-9].*",
"suffix": 3
}).equals(pandas.Series([True, True])))
    def test_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).matches_regex({
"target": "--r1",
"comparator": ".*",
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).matches_regex({
"target": "var2",
"comparator": "[0-9].*",
}).equals(pandas.Series([False, False])))
def test_not_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df}).not_matches_regex({
"target": "var1",
"comparator": ".*",
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_matches_regex({
"target": "--r1",
"comparator": "[0-9].*",
}).equals(pandas.Series([True, True])))
def test_starts_with(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).starts_with({
"target": "--r1",
"comparator": "WO",
}).equals(pandas.Series([True, False])))
self.assertTrue(DataframeType({"value": df}).starts_with({
"target": "var2",
"comparator": "ABC",
}).equals(pandas.Series([False, False])))
def test_ends_with(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).ends_with({
"target": "--r1",
"comparator": "abc",
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).ends_with({
"target": "var1",
"comparator": "est",
}).equals(pandas.Series([False, True])))
def test_has_equal_length(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'value']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
result = df_operator.has_equal_length({"target": "--r_1", "comparator": 4})
self.assertTrue(result.equals(pandas.Series([True, False])))
def test_has_not_equal_length(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'value']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
result = df_operator.has_not_equal_length({"target": "--r_1", "comparator": 4})
self.assertTrue(result.equals(pandas.Series([False, True])))
def test_longer_than(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'value']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.longer_than({"target": "--r_1", "comparator": 3}).equals(pandas.Series([True, True])))
def test_longer_than_or_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'alex']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.longer_than_or_equal_to({"target": "--r_1", "comparator": 3}).equals(pandas.Series([True, True])))
self.assertTrue(df_operator.longer_than_or_equal_to({"target": "var_1", "comparator": 4}).equals(pandas.Series([True, True])))
def test_shorter_than(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'val']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.shorter_than({"target": "--r_1", "comparator": 5}).equals(pandas.Series([True, True])))
def test_shorter_than_or_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var_1": ['test', 'alex']
}
)
df_operator = DataframeType({"value": df, "column_prefix_map": {"--": "va"}})
self.assertTrue(df_operator.shorter_than_or_equal_to({"target": "--r_1", "comparator": 5}).equals(pandas.Series([True, True])))
self.assertTrue(df_operator.shorter_than_or_equal_to({"target": "var_1", "comparator": 4}).equals(pandas.Series([True, True])))
def test_contains_all(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['test', 'value', 'word'],
"var2": ["test", "value", "test"]
}
)
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var1",
"comparator": "var2",
}))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains_all({
"target": "--r1",
"comparator": "--r2",
}))
self.assertFalse(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": "var1",
}))
self.assertTrue(DataframeType({"value": df}).contains_all({
"target": "var2",
"comparator": ["test", "value"],
}))
def test_not_contains_all(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['test', 'value', 'word'],
"var2": ["test", "value", "test"]
}
)
        # Assumes DataframeType exposes a `not_contains_all` operator mirroring `contains_all`.
        self.assertFalse(DataframeType({"value": df}).not_contains_all({
            "target": "var1",
            "comparator": "var2",
        }))
        self.assertTrue(DataframeType({"value": df}).not_contains_all({
            "target": "var2",
            "comparator": "var1",
        }))
        self.assertFalse(DataframeType({"value": df}).not_contains_all({
            "target": "var2",
            "comparator": ["test", "value"],
        }))
def test_invalid_date(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2099'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
}
)
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).invalid_date({"target": "--r1"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).invalid_date({"target": "var3"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).invalid_date({"target": "var2"})
.equals(pandas.Series([False, False, False, True, True])))
def test_date_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var1", "comparator": '2021'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "1997-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([False, False, True, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).date_equal_to({"target": "--r3", "comparator": "--r4", "date_component": "year"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "hour"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "minute"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "second"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "microsecond"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "year"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var5", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var6", "date_component": "year"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var6", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_equal_to({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_not_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var1", "comparator": '2022'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "1998-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var6", "date_component": "hour"})
.equals(pandas.Series([False, False, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_not_equal_to({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_less_than(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var1", "comparator": '2022'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "1998-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "var6", "date_component": "hour"})
.equals(pandas.Series([False, False, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_greater_than(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var1", "comparator": '2020'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var3", "comparator": "1996-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var6", "comparator": "var3", "date_component": "hour"})
.equals(pandas.Series([False, False, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_greater_than({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_greater_than_or_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var1", "comparator": '2020'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var1", "comparator": '2023'})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var3", "comparator": "1996-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var6", "comparator": "var3", "date_component": "hour"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_greater_than_or_equal_to({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_date_less_than_or_equal_to(self):
df = pandas.DataFrame.from_dict(
{
"var1": ['2021', '2021', '2021', '2021', '2021'],
"var2": ["2099", "2022", "2034", "90999", "20999"],
"var3": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var4": ["1997-07", "1997-07-16", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
"var5": ["1997-08", "1997-08-16", "1997-08-16T19:20:30.45+01:00", "1997-08-16T19:20:30+01:00", "1997-08-16T19:20+01:00"],
"var6": ["1998-08", "1998-08-11", "1998-08-17T20:21:31.46+01:00", "1998-08-17T20:21:31+01:00", "1998-08-17T20:21+01:00"],
"var7": ["", None, "", "", ""]
}
)
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var1", "comparator": '2022'})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var1", "comparator": '2020'})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var3", "comparator": "1998-07-16T19:20:30.45+01:00"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var3", "comparator": "var4"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var3", "comparator": "var4", "date_component": "year"})
.equals(pandas.Series([True, True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var6", "comparator": "var3", "date_component": "hour"})
.equals(pandas.Series([True, True, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var3", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).date_less_than_or_equal_to({"target": "var7", "comparator": "var7", "date_component": "month"})
.equals(pandas.Series([False, False, False, False, False])))
def test_is_incomplete_date(self):
df = pandas.DataFrame.from_dict(
{
"var1": [ '2021', '2021', '2099'],
"var2": [ "1997-07-16", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
}
)
self.assertTrue(DataframeType({"value": df}).is_incomplete_date({"target" : "var1"})
.equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).is_incomplete_date({"target" : "var2"})
.equals(pandas.Series([False, False, False])))
def test_is_complete_date(self):
df = pandas.DataFrame.from_dict(
{
"var1": ["2021", "2021", "2099"],
"var2": ["1997-07-16", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20+01:00"],
}
)
self.assertTrue(DataframeType({"value": df}).is_complete_date({"target": "var1"})
.equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_complete_date({"target": "var2"})
.equals(pandas.Series([True, True, True])))
def test_is_unique_set(self):
df = pandas.DataFrame.from_dict( {"ARM": ["PLACEBO", "PLACEBO", "A", "A"], "TAE": [1,1,1,2], "LAE": [1,2,1,2], "ARF": [1,2,3,4]})
self.assertTrue(DataframeType({"value": df}).is_unique_set({"target" : "ARM", "comparator": "LAE"})
.equals(pandas.Series([True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).is_unique_set({"target" : "ARM", "comparator": ["LAE"]})
.equals(pandas.Series([True, True, True, True])))
self.assertTrue(DataframeType({"value": df}).is_unique_set({"target" : "ARM", "comparator": ["TAE"]})
.equals(pandas.Series([False, False, True, True])))
self.assertTrue(DataframeType({"value": df}).is_unique_set({"target" : "ARM", "comparator": "TAE"})
.equals(pandas.Series([False, False, True, True])))
self.assertTrue(DataframeType({"value":df, "column_prefix_map": {"--": "AR"}}).is_unique_set({"target" : "--M", "comparator": "--F"})
.equals(pandas.Series([True, True, True, True])))
self.assertTrue(DataframeType({"value":df, "column_prefix_map": {"--": "AR"}}).is_unique_set({"target" : "--M", "comparator": ["--F"]})
.equals(pandas.Series([True, True, True, True])))
def test_is_not_unique_set(self):
df = pandas.DataFrame.from_dict( {"ARM": ["PLACEBO", "PLACEBO", "A", "A"], "TAE": [1,1,1,2], "LAE": [1,2,1,2], "ARF": [1,2,3,4]})
self.assertTrue(DataframeType({"value": df}).is_not_unique_set({"target" : "ARM", "comparator": "LAE"})
.equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_unique_set({"target" : "ARM", "comparator": ["LAE"]})
.equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_unique_set({"target" : "ARM", "comparator": ["TAE"]})
.equals(pandas.Series([True, True, False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_unique_set({"target" : "ARM", "comparator": "TAE"})
.equals(pandas.Series([True, True, False, False])))
self.assertTrue(DataframeType({"value":df, "column_prefix_map": {"--": "AR"}}).is_not_unique_set({"target" : "--M", "comparator": "--F"})
.equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value":df, "column_prefix_map": {"--": "AR"}}).is_not_unique_set({"target" : "--M", "comparator": ["--F"]})
.equals(pandas.Series([False, False, False, False])))
def test_is_ordered_set(self):
df = pandas.DataFrame.from_dict( {"USUBJID": [1,2,1,2], "SESEQ": [1,1,2,2] })
self.assertTrue(DataframeType({"value": df}).is_ordered_set({"target" : "SESEQ", "comparator": "USUBJID"}))
self.assertTrue(DataframeType({"value":df, "column_prefix_map": {"--": "SE"}}).is_ordered_set({"target" : "--SEQ", "comparator": "USUBJID"}))
df2 = pandas.DataFrame.from_dict( {"USUBJID": [1,2,1,2], "SESEQ": [3,1,2,2] })
self.assertFalse(DataframeType({"value": df2}).is_ordered_set({"target" : "SESEQ", "comparator": "USUBJID"}))
self.assertFalse(DataframeType({"value":df2, "column_prefix_map": {"--": "SE"}}).is_ordered_set({"target" : "--SEQ", "comparator": "USUBJID"}))
def test_is_not_ordered_set(self):
df = pandas.DataFrame.from_dict( {"USUBJID": [1,2,1,2], "SESEQ": [3,1,2,2] })
self.assertTrue(DataframeType({"value": df}).is_not_ordered_set({"target" : "SESEQ", "comparator": "USUBJID"}))
self.assertTrue(DataframeType({"value":df, "column_prefix_map": {"--": "SE"}}).is_not_ordered_set({"target" : "--SEQ", "comparator": "USUBJID"}))
df2 = pandas.DataFrame.from_dict( {"USUBJID": [1,2,1,2], "SESEQ": [1,1,2,2] })
self.assertFalse(DataframeType({"value": df2}).is_not_ordered_set({"target" : "SESEQ", "comparator": "USUBJID"}))
self.assertFalse(DataframeType({"value":df2, "column_prefix_map": {"--": "SE"}}).is_not_ordered_set({"target" : "--SEQ", "comparator": "USUBJID"}))
def test_is_unique_relationship(self):
"""
        Test validates a one-to-one relationship against a dataset.
        One-to-one means that a pair of values may appear on multiple rows,
        but the mapping between the two columns must not be violated.
"""
one_to_one_related_df = pandas.DataFrame.from_dict(
{
"STUDYID": [1, 2, 3, 1, 2],
"USUBJID": ["TEST", "TEST-1", "TEST-2", "TEST-3", "TEST-4", ],
"STUDYDESC": ["Russia", "USA", "China", "Russia", "USA", ],
}
)
self.assertTrue(
DataframeType({"value": one_to_one_related_df}).is_unique_relationship(
{"target": "STUDYID", "comparator": "STUDYDESC"}
).equals(pandas.Series([True, True, True, True, True]))
)
self.assertTrue(
DataframeType({"value": one_to_one_related_df}).is_unique_relationship(
{"target": "STUDYDESC", "comparator": "STUDYID"}
).equals(pandas.Series([True, True, True, True, True]))
)
self.assertTrue(
DataframeType({"value": one_to_one_related_df, "column_prefix_map":{"--": "STUDY"}}).is_unique_relationship(
{"target": "--ID", "comparator": "--DESC"}
).equals(pandas.Series([True, True, True, True, True]))
)
self.assertTrue(
DataframeType({"value": one_to_one_related_df, "column_prefix_map":{"--": "STUDY"}}).is_unique_relationship(
{"target": "--DESC", "comparator": "--ID"}
).equals(pandas.Series([True, True, True, True, True]))
)
df_violates_one_to_one = pandas.DataFrame.from_dict(
{
"STUDYID": ["TEST", "TEST-1", "TEST-2", "TEST-3", ],
"TESTID": [1, 2, 1, 3],
"TESTNAME": ["Functional", "Stress", "Functional", "Stress", ],
}
)
self.assertTrue(DataframeType({"value": df_violates_one_to_one}).is_unique_relationship(
{"target": "TESTID", "comparator": "TESTNAME"}).equals(pandas.Series([True, False, True, False]))
)
self.assertTrue(DataframeType({"value": df_violates_one_to_one}).is_unique_relationship(
{"target": "TESTNAME", "comparator": "TESTID"}).equals(pandas.Series([True, False, True, False]))
)
def test_is_not_unique_relationship(self):
"""
        Test for the negated operator: rows that violate the one-to-one
        relationship between the target and comparator columns are flagged True.
"""
valid_df = pandas.DataFrame.from_dict(
{
"STUDYID": ["TEST", "TEST-1", "TEST-2", "TEST-3", ],
"VISITNUM": [1, 2, 1, 3],
"VISIT": ["Consulting", "Surgery", "Consulting", "Treatment", ],
}
)
self.assertTrue(DataframeType({"value": valid_df}).is_not_unique_relationship(
{"target": "VISITNUM", "comparator": "VISIT"}).equals(pandas.Series([False, False, False, False]))
)
self.assertTrue(DataframeType({"value": valid_df}).is_not_unique_relationship(
{"target": "VISIT", "comparator": "VISITNUM"}).equals(pandas.Series([False, False, False, False]))
)
valid_df_1 = pandas.DataFrame.from_dict(
{
"STUDYID": ["TEST", "TEST-1", "TEST-2", "TEST-3", ],
"VISIT": ["Consulting", "Surgery", "Consulting", "Treatment", ],
"VISITDESC": [
"Doctor Consultation", "Heart Surgery", "Doctor Consultation", "Long Lasting Treatment",
],
}
)
self.assertTrue(DataframeType({"value": valid_df_1}).is_not_unique_relationship(
{"target": "VISIT", "comparator": "VISITDESC"}).equals(pandas.Series([False, False, False, False]))
)
self.assertTrue(DataframeType({"value": valid_df_1}).is_not_unique_relationship(
{"target": "VISITDESC", "comparator": "VISIT"}).equals(pandas.Series([False, False, False, False]))
)
df_violates_one_to_one = pandas.DataFrame.from_dict(
{
"STUDYID": ["TEST", "TEST-1", "TEST-2", "TEST-3", ],
"VISITNUM": [1, 2, 1, 3],
"VISIT": ["Consulting", "Surgery", "Consulting", "Consulting", ],
}
)
self.assertTrue(DataframeType({"value": df_violates_one_to_one}).is_not_unique_relationship(
{"target": "VISITNUM", "comparator": "VISIT"}).equals(pandas.Series([True, False, True, True]))
)
self.assertTrue(DataframeType({"value": df_violates_one_to_one}).is_not_unique_relationship(
{"target": "VISIT", "comparator": "VISITNUM"}).equals(pandas.Series([True, False, True, True]))
)
df_violates_one_to_one_1 = pandas.DataFrame.from_dict(
{
"STUDYID": ["TEST", "TEST-1", "TEST-2", "TEST-3", "TEST-4", ],
"VISIT": ["Consulting", "Consulting", "Surgery", "Consulting", "Treatment", ],
"VISITDESC": ["Doctor Consultation", "Doctor Consultation", "Heart Surgery", "Heart Surgery", "Long Lasting Treatment", ],
}
)
self.assertTrue(DataframeType({"value": df_violates_one_to_one_1}).is_not_unique_relationship(
{"target": "VISIT", "comparator": "VISITDESC"}).equals(pandas.Series([True, True, True, True, False]))
)
self.assertTrue(DataframeType({"value": df_violates_one_to_one_1}).is_not_unique_relationship(
{"target": "VISITDESC", "comparator": "VISIT"}).equals(pandas.Series([True, True, True, True, False]))
)
self.assertTrue(DataframeType({"value": df_violates_one_to_one_1, "column_prefix_map": {"--": "VI"}}).is_not_unique_relationship(
{"target": "--SIT", "comparator": "--SITDESC"}).equals(pandas.Series([True, True, True, True, False]))
)
self.assertTrue(DataframeType({"value": df_violates_one_to_one_1, "column_prefix_map": {"--": "VI"}}).is_not_unique_relationship(
{"target": "--SITDESC", "comparator": "--SIT"}).equals(pandas.Series([True, True, True, True, False]))
)
def test_empty_within_except_last_row(self):
df = pandas.DataFrame.from_dict(
{
"USUBJID": [1, 1, 1, 2, 2, 2],
"valid": ["2020-10-10", "2020-10-10", "2020-10-10", "2021", "2021", "2021", ],
"invalid": ["2020-10-10", None, None, "2020", "2020", None, ],
}
)
self.assertFalse(
DataframeType({"value": df}).empty_within_except_last_row({"target": "valid", "comparator": "USUBJID"})
)
self.assertTrue(
DataframeType({"value": df}).empty_within_except_last_row({"target": "invalid", "comparator": "USUBJID"})
)
def test_non_empty_within_except_last_row(self):
df = pandas.DataFrame.from_dict(
{
"USUBJID": [1, 1, 1, 2, 2, 2],
"valid": ["2020-10-10", "2020-10-10", "2020-10-10", "2021", "2021", "2021", ],
"invalid": ["2020-10-10", None, None, "2020", "2020", None, ],
}
)
self.assertTrue(
DataframeType({"value": df}).non_empty_within_except_last_row({"target": "valid", "comparator": "USUBJID"})
)
self.assertFalse(
DataframeType({"value": df}).non_empty_within_except_last_row({"target": "invalid", "comparator": "USUBJID"})
)
def test_is_valid_reference(self):
reference_data = {
"LB": {
"TEST": [],
"DATA": [1,2,3]
},
"AE": {
"AETERM": [1,2,3]
}
}
df = pandas.DataFrame.from_dict(
{
"RDOMAIN": ["LB", "LB", "AE"],
"IDVAR1": ["TEST", "DATA", "AETERM"],
"IDVAR2": ["TEST", "AETERM", "AETERM"]
}
)
self.assertTrue(
DataframeType({"value": df, "relationship_data": reference_data}).is_valid_reference({"target": "IDVAR1", "context": "RDOMAIN"})
.equals(pandas.Series([True, True, True]))
)
self.assertTrue(
DataframeType({"value": df, "relationship_data": reference_data}).is_valid_reference({"target": "IDVAR2", "context": "RDOMAIN"})
.equals(pandas.Series([True, False, True]))
)
def test_not_valid_reference(self):
reference_data = {
"LB": {
"TEST": [],
"DATA": [1,2,3]
},
"AE": {
"AETERM": [1,2,3]
}
}
df = pandas.DataFrame.from_dict(
{
"RDOMAIN": ["LB", "LB", "AE"],
"IDVAR1": ["TEST", "DATA", "AETERM"],
"IDVAR2": ["TEST", "AETERM", "AETERM"]
}
)
self.assertTrue(
DataframeType({"value": df, "relationship_data": reference_data}).is_not_valid_reference({"target": "IDVAR1", "context": "RDOMAIN"})
.equals(pandas.Series([False, False, False]))
)
self.assertTrue(
DataframeType({"value": df, "relationship_data": reference_data}).is_not_valid_reference({"target": "IDVAR2", "context": "RDOMAIN"})
.equals(pandas.Series([False, True, False]))
)
def test_is_valid_relationship(self):
reference_data = {
"LB": {
"TEST": pandas.Series([4,5,6]).values,
"DATA": pandas.Series([1,2,3]).values
},
"AE": {
"AETERM": pandas.Series([31, 323, 33]).values
}
}
df = pandas.DataFrame.from_dict(
{
"RDOMAIN": ["LB", "LB", "AE"],
"IDVAR1": ["TEST", "DATA", "AETERM"],
"IDVAR2": ["TEST", "DATA", "AETERM"],
"IDVARVAL1": [4, 1, 31],
"IDVARVAL2": [5, 1, 35]
}
)
self.assertTrue(
DataframeType({"value": df, "relationship_data": reference_data}).is_valid_relationship({"target": "IDVAR1", "comparator": "IDVARVAL1", "context": "RDOMAIN"})
.equals(pandas.Series([True, True, True]))
)
self.assertTrue(
DataframeType({"value": df, "relationship_data": reference_data}).is_valid_relationship({"target": "IDVAR2", "comparator": "IDVARVAL2", "context": "RDOMAIN"})
.equals(pandas.Series([True, True, False]))
)
def test_not_valid_relationship(self):
reference_data = {
"LB": {
"TEST": pandas.Series([4,5,6]).values,
"DATA": pandas.Series([1,2,3]).values
},
"AE": {
"AETERM": pandas.Series([31, 323, 33]).values
}
}
df = pandas.DataFrame.from_dict(
{
"RDOMAIN": ["LB", "LB", "AE"],
"IDVAR1": ["TEST", "DATA", "AETERM"],
"IDVAR2": ["TEST", "DATA", "AETERM"],
"IDVARVAL1": [4, 1, 31],
"IDVARVAL2": [5, 1, 35]
}
)
self.assertTrue(
DataframeType({"value": df, "relationship_data": reference_data}).is_not_valid_relationship({"target": "IDVAR1", "comparator": "IDVARVAL1", "context": "RDOMAIN"})
.equals(pandas.Series([False, False, False]))
)
self.assertTrue(
DataframeType({"value": df, "relationship_data": reference_data}).is_not_valid_relationship({"target": "IDVAR2", "comparator": "IDVARVAL2", "context": "RDOMAIN"})
.equals(pandas.Series([False, False, True]))
)
def test_non_conformant_value_length(self):
def filter_func(row):
return row["IDVAR1"] == "TEST"
def length_check(row):
return len(row["IDVAR2"]) <= 4
df = pandas.DataFrame.from_dict(
{
"RDOMAIN": ["LB", "LB", "AE"],
"IDVAR1": ["TEST", "TEST", "AETERM"],
"IDVAR2": ["TEST", "TOOLONG", "AETERM"],
}
)
vlm = [
{
"filter": filter_func,
"length_check": length_check
}
]
result = DataframeType({"value": df, "value_level_metadata": vlm }).non_conformant_value_length({})
self.assertTrue(result.equals(pandas.Series([False, True, False])))
def test_non_conformant_value_data_type(self):
def filter_func(row):
return row["IDVAR1"] == "TEST"
def type_check(row):
return isinstance(row["IDVAR2"], str)
df = pandas.DataFrame.from_dict(
{
"RDOMAIN": ["LB", "LB", "AE"],
"IDVAR1": ["TEST", "TEST", "AETERM"],
"IDVAR2": ["TEST", 1, "AETERM"],
}
)
vlm = [
{
"filter": filter_func,
"type_check": type_check
}
]
result = DataframeType({"value": df, "value_level_metadata": vlm }).non_conformant_value_data_type({})
self.assertTrue(result.equals(pandas.Series([False, True, False])))
def test_conformant_value_length(self):
def filter_func(row):
return row["IDVAR1"] == "TEST"
def length_check(row):
return len(row["IDVAR2"]) <= 4
df = pandas.DataFrame.from_dict(
{
"RDOMAIN": ["LB", "LB", "AE"],
"IDVAR1": ["TEST", "TEST", "AETERM"],
"IDVAR2": ["TEST", "TOOLONG", "AETERM"],
}
)
vlm = [
{
"filter": filter_func,
"length_check": length_check
}
]
result = DataframeType({"value": df, "value_level_metadata": vlm }).conformant_value_length({})
self.assertTrue(result.equals(pandas.Series([True, False, False])))
def test_conformant_value_data_type(self):
def filter_func(row):
return row["IDVAR1"] == "TEST"
def type_check(row):
return isinstance(row["IDVAR2"], str)
df = pandas.DataFrame.from_dict(
{
"RDOMAIN": ["LB", "LB", "AE"],
"IDVAR1": ["TEST", "TEST", "AETERM"],
"IDVAR2": ["TEST", 1, "AETERM"],
}
)
vlm = [
{
"filter": filter_func,
"type_check": type_check
}
]
result = DataframeType({"value": df, "value_level_metadata": vlm }).conformant_value_data_type({})
self.assertTrue(result.equals(pandas.Series([True, False, False])))
def test_has_next_corresponding_record(self):
"""
Test for has_next_corresponding_record operator.
"""
valid_df = pandas.DataFrame.from_dict(
{
"USUBJID": [789, 789, 789, 789, 790, 790, 790, 790, ],
"SESEQ": [1, 2, 3, 4, 5, 6, 7, 8, ],
"SEENDTC": ["2006-06-03T10:32", "2006-06-10T09:47", "2006-06-17", "2006-06-17", "2006-06-03T10:14", "2006-06-10T10:32", "2006-06-17", "2006-06-17"],
"SESTDTC": ["2006-06-01", "2006-06-03T10:32", "2006-06-10T09:47", "2006-06-17", "2006-06-01", "2006-06-03T10:14", "2006-06-10T10:32", "2006-06-17"],
}
)
other_value: dict = {"target": "SEENDTC", "comparator": "SESTDTC", "within": "USUBJID", "ordering": "SESEQ"}
result = DataframeType({"value": valid_df}).has_next_corresponding_record(other_value)
self.assertTrue(result.equals(pandas.Series([True, True, True, pandas.NA, True, True, True, pandas.NA])))
invalid_df = pandas.DataFrame.from_dict(
{
"USUBJID": [789, 789, 789, 789, 790, 790, 790, 790, ],
"SESEQ": [1, 2, 3, 4, 5, 6, 7, 8, ],
"SEENDTC": ["2006-06-03T10:32", "2006-06-10T09:47", "2006-06-17", "2006-06-17", "2006-06-03T10:14", "2006-06-10T10:32", "2006-06-17", "2006-06-17"],
"SESTDTC": ["2006-06-01", "2010-08-03", "2008-08", "2006-06-17T10:20", "2006-06-01", "2006-06-03T10:14", "2006-06-10T10:32", "2006-06-17"],
}
)
other_value: dict = {"target": "SEENDTC", "comparator": "SESTDTC", "within": "USUBJID", "ordering": "SESEQ"}
result = DataframeType({"value": invalid_df}).has_next_corresponding_record(other_value)
self.assertTrue(result.equals(pandas.Series([False, False, False, pandas.NA, True, True, True, pandas.NA])))
def test_does_not_have_next_corresponding_record(self):
"""
Test for does_not_have_next_corresponding_record operator.
"""
valid_df = pandas.DataFrame.from_dict(
{
"USUBJID": [789, 789, 789, 789, 790, 790, 790, 790, ],
"SESEQ": [1, 2, 3, 4, 5, 6, 7, 8, ],
"SEENDTC": ["2006-06-03T10:32", "2006-06-10T09:47", "2006-06-17", "2006-06-17", "2006-06-03T10:14", "2006-06-10T10:32", "2006-06-17", "2006-06-17"],
"SESTDTC": ["2006-06-01", "2006-06-03T10:32", "2006-06-10T09:47", "2006-06-17", "2006-06-01", "2006-06-03T10:14", "2006-06-10T10:32", "2006-06-17"],
}
)
other_value: dict = {"target": "SEENDTC", "comparator": "SESTDTC", "within": "USUBJID", "ordering": "SESEQ"}
result = DataframeType({"value": valid_df}).does_not_have_next_corresponding_record(other_value)
self.assertTrue(result.equals(pandas.Series([False, False, False, pandas.NA, False, False, False, pandas.NA])))
invalid_df = pandas.DataFrame.from_dict(
{
"USUBJID": [789, 789, 789, 789, 790, 790, 790, 790, ],
"SESEQ": [1, 2, 3, 4, 5, 6, 7, 8, ],
"SEENDTC": ["2006-06-03T10:32", "2006-06-10T09:47", "2006-06-17", "2006-06-17", "2006-06-03T10:14", "2006-06-10T10:32", "2006-06-17", "2006-06-17"],
"SESTDTC": ["2006-06-01", "2010-08-03", "2008-08", "2006-06-17T10:20", "2006-06-01", "2006-06-03T10:14", "2006-06-10T10:32", "2006-06-17"],
}
)
other_value: dict = {"target": "SEENDTC", "comparator": "SESTDTC", "within": "USUBJID", "ordering": "SESEQ"}
result = DataframeType({"value": invalid_df}).does_not_have_next_corresponding_record(other_value)
self.assertTrue(result.equals(pandas.Series([True, True, True, pandas.NA, False, False, False, pandas.NA])))
def test_present_on_multiple_rows_within(self):
"""
Unit test for present_on_multiple_rows_within operator.
"""
valid_df = pandas.DataFrame.from_dict(
{
"USUBJID": [1, 1, 1, 2, 2, 2, ],
"SEQ": [1, 2, 3, 4, 5, 6],
"RELID": ["AEHOSP1", "AEHOSP1", "AEHOSP1", "AEHOSP2", "AEHOSP2", "AEHOSP2"]
}
)
result = DataframeType({"value": valid_df}).present_on_multiple_rows_within(
{"target": "RELID", "within": "USUBJID", "comparator": 1}
)
self.assertTrue(result.equals(pandas.Series([True, True, True, True, True, True])))
valid_df_1 = pandas.DataFrame.from_dict(
{
"USUBJID": [5, 5, 5, 7, 7, 7, ],
"SEQ": [1, 2, 3, 4, 5, 6],
"RELID": ["AEHOSP1", "AEHOSP1", "AEHOSP1", "AEHOSP2", "AEHOSP2", "AEHOSP2"]
}
)
result = DataframeType({"value": valid_df_1}).present_on_multiple_rows_within(
{"target": "RELID", "within": "USUBJID", "comparator": 2}
)
self.assertTrue(result.equals(pandas.Series([True, True, True, True, True, True])))
invalid_df = pandas.DataFrame.from_dict(
{
"USUBJID": [1, 1, 1, 2, 2, 2, 3],
"SEQ": [1, 2, 3, 4, 5, 6, 7],
"RELID": ["AEHOSP1", "AEHOSP1", "AEHOSP1", "AEHOSP2", "AEHOSP2", "AEHOSP2", "AEHOSP3"]
}
)
result = DataframeType({"value": invalid_df}).present_on_multiple_rows_within(
{"target": "RELID", "within": "USUBJID", "comparator": 1}
)
self.assertTrue(result.equals(pandas.Series([True, True, True, True, True, True, False])))
def test_not_present_on_multiple_rows_within(self):
"""
Unit test for not_present_on_multiple_rows_within operator.
"""
valid_df = pandas.DataFrame.from_dict(
{
"USUBJID": [1, 1, 1, 2, 2, 2, ],
"SEQ": [1, 2, 3, 4, 5, 6],
"RELID": ["AEHOSP1", "AEHOSP1", "AEHOSP1", "AEHOSP2", "AEHOSP2", "AEHOSP2"]
}
)
result = DataframeType({"value": valid_df}).not_present_on_multiple_rows_within(
{"target": "RELID", "within": "USUBJID", "comparator": 1}
)
self.assertTrue(result.equals(pandas.Series([False, False, False, False, False, False])))
invalid_df = pandas.DataFrame.from_dict(
{
"USUBJID": [1, 1, 1, 2, 2, 2, 3],
"SEQ": [1, 2, 3, 4, 5, 6, 7],
"RELID": ["AEHOSP1", "AEHOSP1", "AEHOSP1", "AEHOSP2", "AEHOSP2", "AEHOSP2", "AEHOSP3"]
}
)
result = DataframeType({"value": invalid_df}).not_present_on_multiple_rows_within(
{"target": "RELID", "within": "USUBJID", "comparator": 1}
)
self.assertTrue(result.equals(pandas.Series([False, False, False, False, False, False, True])))
def test_additional_columns_empty(self):
"""
Unit test for additional_columns_empty operator.
"""
valid_df = pandas.DataFrame.from_dict(
{
"USUBJID": [1, 1, 1, 1, ],
"TSVAL": [None, None, "another value", None], # original column may be empty
"TSVAL1": ["value", "value", "value", None], # valid since TSVAL2 is also null in the same row
"TSVAL2": [None, "value 2", "value 2", None],
}
)
result = DataframeType({"value": valid_df, }).additional_columns_empty({"target": "TSVAL", })
self.assertTrue(result.equals(pandas.Series([False, False, False, False, ])))
invalid_df = pandas.DataFrame.from_dict(
{
"USUBJID": [1, 1, 1, 1, ],
"TSVAL": ["value", None, "another value", None], # original column may be empty
"TSVAL1": ["value", None, "value", "value"], # invalid column
"TSVAL2": ["value 2", "value 2", "value 2", None],
"TSVAL3": ["value 3", "value 3", None, "value 3"],
}
)
result = DataframeType({"value": invalid_df, }).additional_columns_empty({"target": "TSVAL", })
self.assertTrue(result.equals(pandas.Series([False, True, False, True, ])))
def test_additional_columns_not_empty(self):
"""
Unit test for additional_columns_not_empty operator.
"""
df_with_empty_rows = pandas.DataFrame.from_dict(
{
"USUBJID": [1, 1, 1, 1, ],
"TSVAL": ["value", None, "another value", None], # original column may be empty
"TSVAL1": ["value", None, "value", "value"],
"TSVAL2": ["value 2", "value 2", "value 2", "value 2"],
}
)
result = DataframeType({"value": df_with_empty_rows, }).additional_columns_not_empty({"target": "TSVAL", })
self.assertTrue(result.equals(pandas.Series([True, False, True, True, ])))
df_without_empty_rows = pandas.DataFrame.from_dict(
{
"USUBJID": [1, 1, 1, 1, ],
"TSVAL": ["value", None, "another value", None], # original column may be empty
"TSVAL1": ["value", "value", "value", "value"],
"TSVAL2": ["value 2", "value 2", "value 2", "value 2"],
}
)
result = DataframeType({"value": df_without_empty_rows, }).additional_columns_not_empty({"target": "TSVAL", })
self.assertTrue(result.equals(pandas.Series([True, True, True, True, ])))
def test_references_valid_codelist(self):
df = pandas.DataFrame.from_dict(
{
"define_variable_name": ["TEST", "COOLVAR", "ANOTHERVAR" ],
"define_variable_controlled_terms": ["C123", "C456", "C789"],
"define_variable_invalid_terms": ["C123", "C456", "C786"]
}
)
column_codelist_map = {
"TEST": ["C123", "C456"],
"COOLVAR": ["C123", "C456"],
"ANOTHERVAR": ["C789"]
}
dft = DataframeType({
"value": df,
"column_codelist_map": column_codelist_map
})
result = dft.references_correct_codelist({"target": "define_variable_name", "comparator": "define_variable_controlled_terms"})
self.assertTrue(result.equals(pandas.Series([True, True, True ])))
bad_result = dft.references_correct_codelist({"target": "define_variable_name", "comparator": "define_variable_invalid_terms"})
self.assertTrue(bad_result.equals(pandas.Series([True, True, False])))
def test_does_not_reference_valid_codelist(self):
df = pandas.DataFrame.from_dict(
{
"define_variable_name": ["TEST", "COOLVAR", "ANOTHERVAR" ],
"define_variable_controlled_terms": ["C123", "C456", "C789"],
"define_variable_invalid_terms": ["C123", "C456", "C786"]
}
)
column_codelist_map = {
"TEST": ["C123", "C456"],
"--OLVAR": ["C123", "C456"],
"ANOTHERVAR": ["C789"]
}
dft = DataframeType({
"value": df,
"column_codelist_map": column_codelist_map,
"column_prefix_map": {
"--": "CO"
}
})
result = dft.does_not_reference_correct_codelist({"target": "define_variable_name", "comparator": "define_variable_controlled_terms"})
self.assertTrue(result.equals(pandas.Series([False, False, False ])))
bad_result = dft.does_not_reference_correct_codelist({"target": "define_variable_name", "comparator": "define_variable_invalid_terms"})
        self.assertTrue(bad_result.equals(pandas.Series([False, False, True])))
# -*- coding: utf-8 -*-
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.core import ops
from pandas.errors import NullFrequencyError
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = pd.timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
import os
import zipfile as zp
import pandas as pd
import numpy as np
import core
import requests
class Labels:
init_cols = [
'station_id', 'station_name', 'riv_or_lake', 'hydroy', 'hydrom', 'day',
'lvl', 'flow', 'temp', 'month']
trans_cols = [
'date', 'year', 'month', 'day', 'hydroy', 'hydrom', 'station_id', 'station_name',
'riv_or_lake', 'riv_or_lake_id', 'lvl', 'flow', 'temp']
def transform(trans_df):
trans_df = trans_df.reset_index().drop('index', axis=1)
dfc = trans_df.copy()
lstrip = 'AĄBCĆDEĘFGHIJKLŁMNŃOÓPQRSŚTUVWXYZŹŻaąbcćdeęfghijklłmnńoópqrsśtuvwxyzźż( '
rivlakeid = dfc['riv_or_lake'].map(lambda x: x.lstrip(lstrip).rstrip(')'))
trans_df['riv_or_lake'] = trans_df['riv_or_lake'].map(lambda x: x.rstrip(' ()1234567890 '))
trans_df['riv_or_lake_id'] = rivlakeid
trans_df['month'] = trans_df['month'].fillna(method='ffill').astype(int)
trans_df['day'] = trans_df['day'].fillna(method='ffill').astype(int)
trans_df['year'] = trans_df['hydroy']
trans_df.loc[(trans_df['month'] == 11) | (trans_df['month'] == 12), 'year'] = trans_df['year'].astype(int) - 1
trans_df['date'] = pd.to_datetime(trans_df[['year', 'month', 'day']])
trans_df = trans_df[Labels.trans_cols]
trans_df.loc[trans_df['lvl'] == 9999, 'lvl'] = np.nan
trans_df.loc[trans_df['flow'] == 99999.999, 'flow'] = np.nan
trans_df.loc[trans_df['temp'] == 99.9, 'temp'] = np.nan
return trans_df
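# Sketch of applying transform() to one raw monthly file. The file name is
# hypothetical, the raw CSV is assumed to carry the columns in Labels.init_cols,
# and transform() is assumed to be available at module level as defined above.
def _example_transform_usage():
    """Illustrative sketch only."""
    raw = pd.read_csv('codz_2019_01.csv', encoding='windows-1250', header=None)
    raw.columns = Labels.init_cols
    daily = transform(raw)
    return daily[['date', 'station_name', 'lvl', 'flow', 'temp']]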
def getframe(year: int, month: int, stationid=None, station=None):
core.makedir(dirname='temp')
zipname = f'codz_{year}_{core.strnumb(month)}.zip'
csvname = f'codz_{year}_{core.strnumb(month)}.csv'
url = f'https://danepubliczne.imgw.pl/data/dane_pomiarowo_obserwacyjne/dane_hydrologiczne/dobowe/{year}/{zipname}'
r = requests.get(url)
with open(f'temp/{zipname}', 'wb') as file:
file.write(r.content)
with zp.ZipFile(f'temp/{zipname}', 'r') as zip_ref:
zip_ref.extractall(path='temp')
df = pd.read_csv(f'temp/{csvname}', encoding='windows-1250', header=None)
"""
Purpose: Import the federal funds futures data and the effective federal funds
rate, and adjust the current month for the day of the month on which the
meeting took place. Then, it allows for plotting the federal funds futures
curve around the meeting days.
Status: Final -- 06/26/2019
@author: olivergiesecke
"""
###############################################################################
### Import packages
import pandas as pd
import re
import os
import numpy as np
import matplotlib.pyplot as plt
from calendar import monthrange
###############################################################################
### Read the effective federal funds future data
# =============================================================================
# # this is only needed for the raw data from the Fed NY
# ffr_raw=pd.read_excel("../data/NYF_effective_federal_funds_data.xls",skiprows=4)
# ffr=ffr_raw[['EFFR\n(PERCENT)','DATE']]
# ffr.rename(columns={'EFFR\n(PERCENT)':'effr','DATE':'date'},inplace=True )
# ffr=ffr[ffr.index<4648]
#
# def tranform_date(date):
# return re.search('(\d{4}\-\d{2}\-\d{2})([\[r\]]*)',date,re.IGNORECASE).group(1)
# ffr.loc[:,'date']=ffr.loc[:,'date'].apply(tranform_date)
# =============================================================================
def main():
data=construct_dataset()
data=define_adjusted_future(data)
rates=reshape_data(data)
rates.to_csv("../output/federal_funds_futures.csv",index=False)
def construct_dataset():
ffr=pd.read_excel("../../../collection/python/data/FRED_DFF.xls",skiprows=10)
ffr.rename(columns={"observation_date":"date","DFF":"effr"},inplace=True)
ffr['date'] = pd.to_datetime(ffr['date'])
### Merge with the futures data
data=pd.read_excel("../../../collection/python/data/FFF_1m3m_extract.xlsx")
data['date'] = pd.to_datetime(data['date'])
import itertools
import json
import logging
from collections import OrderedDict, defaultdict
from typing import Iterable
import pandas as pd
from celery.result import AsyncResult
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.models import User, Group
from django.core.exceptions import PermissionDenied, ImproperlyConfigured
from django.db.utils import IntegrityError
from django.forms.models import inlineformset_factory, ALL_FIELDS
from django.forms.widgets import TextInput
from django.http.response import HttpResponse, HttpResponseRedirect, HttpResponseServerError, JsonResponse
from django.shortcuts import get_object_or_404, render, redirect
from django.urls.base import reverse
from django.views.decorators.cache import cache_page
from django.views.decorators.http import require_POST
from django.views.decorators.vary import vary_on_cookie
from django_messages.models import Message
from global_login_required import login_not_required
from guardian.shortcuts import get_objects_for_group, get_objects_for_user
from termsandconditions.decorators import terms_required
from analysis.analysis_templates import get_sample_analysis
from analysis.forms import AnalysisOutputNodeChoiceForm
from analysis.models import AnalysisTemplate, SampleStats, SampleStatsPassingFilter
from annotation.forms import GeneCountTypeChoiceForm
from annotation.manual_variant_entry import create_manual_variants, can_create_variants
from annotation.models import AnnotationVersion
from annotation.models.models import ManualVariantEntryCollection, VariantAnnotationVersion
from annotation.models.models_gene_counts import GeneValueCountCollection, \
GeneCountType, SampleAnnotationVersionVariantSource, CohortGeneCounts
from classification.classification_stats import get_grouped_classification_counts
from classification.models.clinvar_export_sync import clinvar_export_sync
from classification.views.classification_datatables import ClassificationColumns
from genes.custom_text_gene_list import create_custom_text_gene_list
from genes.forms import CustomGeneListForm, UserGeneListForm, GeneAndTranscriptForm
from genes.models import GeneListCategory, CustomTextGeneList, GeneList
from library.constants import WEEK_SECS, HOUR_SECS
from library.django_utils import add_save_message, get_model_fields, set_form_read_only
from library.guardian_utils import assign_permission_to_user_and_groups, DjangoPermission
from library.keycloak import Keycloak
from library.utils import full_class_name, import_class, rgb_invert
from ontology.models import OntologyTerm
from patients.forms import PatientForm
from patients.models import Patient, Clinician
from patients.views import get_patient_upload_csv
from snpdb import forms
from snpdb.bam_file_path import get_example_replacements
from snpdb.forms import SampleChoiceForm, VCFChoiceForm, \
UserSettingsOverrideForm, UserForm, UserContactForm, SampleForm, TagForm, SettingsInitialGroupPermissionForm, \
OrganizationForm, LabForm, LabUserSettingsOverrideForm, OrganizationUserSettingsOverrideForm
from snpdb.graphs import graphcache
from snpdb.graphs.allele_frequency_graph import AlleleFrequencyHistogramGraph
from snpdb.graphs.chromosome_density_graph import SampleChromosomeDensityGraph
from snpdb.graphs.chromosome_intervals_graph import ChromosomeIntervalsGraph
from snpdb.graphs.homozygosity_percent_graph import HomozygosityPercentGraph
from snpdb.import_status import set_vcf_and_samples_import_status
from snpdb.models import CachedGeneratedFile, VariantGridColumn, UserSettings, \
VCF, UserTagColors, CustomColumnsCollection, CustomColumn, Cohort, \
CohortSample, GenomicIntervalsCollection, Sample, UserDataPrefix, UserGridConfig, \
get_igv_data, SampleLocusCount, UserContact, Tag, Wiki, Organization, GenomeBuild, \
Trio, AbstractNodeCountSettings, CohortGenotypeCollection, UserSettingsOverride, NodeCountSettingsCollection, Lab, \
LabUserSettingsOverride, OrganizationUserSettingsOverride, LabHead, SomalierRelatePairs, \
VariantZygosityCountCollection, VariantZygosityCountForVCF, ClinVarKey, AvatarDetails
from snpdb.models.models_enums import ProcessingStatus, ImportStatus, BuiltInFilters
from snpdb.tasks.soft_delete_tasks import soft_delete_vcfs
from snpdb.utils import LabNotificationBuilder
from upload.uploaded_file_type import retry_upload_pipeline
@terms_required
def index(request):
if Clinician.user_is_clinician(request.user):
return redirect('clinician_login')
return render(request, 'index.html')
def data(request):
return render(request, 'snpdb/data/data.html')
def maps(request):
return render(request, 'maps.html')
def get_writable_class_object(user, class_name, primary_key):
klass = import_class(class_name)
name = klass.__name__
obj = klass.objects.get(pk=primary_key)
if not obj.can_write(user):
write_perm = DjangoPermission.perm(obj, DjangoPermission.WRITE)
msg = f"You do not have permission {write_perm} needed to modify {name}"
raise PermissionDenied(msg)
return obj, name
def get_writable_class_objects(user, class_name):
klass = import_class(class_name)
name = klass.__name__
write_perm = DjangoPermission.perm(klass, DjangoPermission.WRITE)
qs = get_objects_for_user(user, write_perm, klass=klass, accept_global_perms=False)
return qs, name
def group_permissions(request, class_name, primary_key):
obj, name = get_writable_class_object(request.user, class_name, primary_key)
try:
# If object has "get_permission_object" it can delegate it.
permission_obj = obj.get_permission_object()
perm_obj_name = permission_obj.__class__.__name__
except AttributeError:
# Default is use itself
permission_obj = obj
perm_obj_name = name
permission_forms = get_group_permission_forms(request, permission_obj)
if request.method == 'POST':
valid = all([pf.is_valid() for pf in permission_forms])
if valid:
for pf in permission_forms:
pf.save()
add_save_message(request, valid, f"{perm_obj_name} group permissions")
get_listing_url = getattr(obj, "get_listing_url", None)
if get_listing_url:
delete_redirect_url = get_listing_url()
else:
delete_redirect_url = "/"
context = {'permission_forms': permission_forms,
'class_name': class_name,
'name': name,
'perm_obj_name': perm_obj_name,
'permission_obj': permission_obj,
'instance': obj,
'delete_redirect_url': delete_redirect_url}
return render(request, 'snpdb/data/group_permissions.html', context)
@require_POST
def group_permissions_object_delete(request, class_name, primary_key):
if class_name == 'snpdb.models.VCF': # TODO: Hack? Make some class object?
soft_delete_vcfs(request.user, primary_key)
else:
obj, _ = get_writable_class_object(request.user, class_name, primary_key)
try:
obj.delete()
except IntegrityError as ie:
pks = ", ".join(str(o.pk) for o in ie.args[1])
error_message = f"{ie.args[0]}: {pks}"
return HttpResponseServerError(content=error_message)
return HttpResponse()
def bulk_group_permissions(request, class_name):
qs, name = get_writable_class_objects(request.user, class_name)
groups = list(request.user.groups.all().order_by("name"))
objects_and_forms = []
for obj in qs:
permission_forms = get_group_permission_forms(request, obj, groups=groups)
objects_and_forms.append((obj, permission_forms))
if request.method == 'POST':
all_forms = []
for _, permission_forms in objects_and_forms:
all_forms.extend(permission_forms)
valid = all([pf.is_valid() for pf in all_forms])
if valid:
for pf in all_forms:
pf.save()
add_save_message(request, valid, f"{name} group permissions")
context = {"name": name,
"groups": groups,
"objects_and_forms": objects_and_forms}
return render(request, 'snpdb/data/bulk_group_permissions.html', context)
def _get_vcf_sample_stats(vcf, klass):
""" Count is het + hom """
ss_fields = ("sample_id", "sample__name", "variant_count", "ref_count", "het_count", "hom_count", "unk_count")
ss_values_qs = klass.objects.filter(sample__vcf=vcf).order_by("sample").values(*ss_fields)
sample_stats_het_hom_count = {}
sample_names = []
sample_zygosities = defaultdict(list)
for value_dict in ss_values_qs:
sample_id = value_dict.pop("sample_id")
sample_names.append(value_dict.pop("sample__name"))
value_dict.pop("variant_count")
sample_stats_het_hom_count[sample_id] = value_dict["het_count"] + value_dict["hom_count"]
for k, v in value_dict.items():
sample_zygosities[k].append(v)
return sample_stats_het_hom_count, sample_names, tuple(sample_zygosities.items())
def view_vcf(request, vcf_id):
vcf = VCF.get_for_user(request.user, vcf_id)
# I couldn't get prefetch_related_objects([vcf], "sample_set__samplestats") to work - so storing in a dict
sample_stats_het_hom_count, sample_names, sample_zygosities = _get_vcf_sample_stats(vcf, SampleStats)
sample_stats_pass_het_hom_count, _, sample_zygosities_pass = _get_vcf_sample_stats(vcf, SampleStatsPassingFilter)
VCFSampleFormSet = inlineformset_factory(VCF, Sample, extra=0, can_delete=False,
fields=["vcf_sample_name", "name", "patient", "specimen", "bam_file_path"],
widgets=SampleForm.Meta.widgets)
post = request.POST or None
vcf_form = forms.VCFForm(post, instance=vcf)
samples_form = VCFSampleFormSet(post, instance=vcf)
for form in samples_form.forms:
form.fields["vcf_sample_name"].disabled = True
requires_user_input = vcf.import_status == ImportStatus.REQUIRES_USER_INPUT
reload_vcf = False
if request.method == 'POST':
valid = all(f.is_valid() for f in [vcf_form, samples_form])
if valid:
vcf = vcf_form.save()
reload_vcf = requires_user_input and vcf.genome_build
samples_form.save()
add_save_message(request, valid, "VCF")
try:
# Some legacy data was too hard to fix and relies on being re-imported
_ = vcf.cohort
_ = vcf.cohort.cohort_genotype_collection
except (Cohort.DoesNotExist, CohortGenotypeCollection.DoesNotExist):
messages.add_message(request, messages.ERROR, "This legacy VCF is missing data and needs to be reloaded.")
if reload_vcf:
set_vcf_and_samples_import_status(vcf, ImportStatus.IMPORTING)
retry_upload_pipeline(vcf.uploadedvcf.uploaded_file.uploadpipeline)
vcf_form = forms.VCFForm(post, instance=vcf) # Reload as import status has changed
messages.add_message(request, messages.INFO, "Reloading VCF")
for warning, _ in vcf.get_warnings():
messages.add_message(request, messages.WARNING, warning, extra_tags='import-message')
has_write_permission = vcf.can_write(request.user)
if not has_write_permission:
messages.add_message(request, messages.WARNING, "You can view but not modify this data.")
variant_zygosity_count_collections = {}
for vzcc in VariantZygosityCountCollection.objects.all():
vzc_vcf = VariantZygosityCountForVCF.objects.filter(vcf=vcf, collection=vzcc).first()
variant_zygosity_count_collections[vzcc] = vzc_vcf
context = {
'vcf': vcf,
'sample_stats_het_hom_count': sample_stats_het_hom_count,
'sample_stats_pass_het_hom_count': sample_stats_pass_het_hom_count,
'sample_names': sample_names,
'sample_zygosities': sample_zygosities,
'vcf_form': vcf_form,
'samples_form': samples_form,
'patient_form': PatientForm(user=request.user), # blank
'has_write_permission': has_write_permission,
'can_download_vcf': (not settings.VCF_DOWNLOAD_ADMIN_ONLY) or request.user.is_superuser,
"variant_zygosity_count_collections": variant_zygosity_count_collections,
}
return render(request, 'snpdb/data/view_vcf.html', context)
def get_patient_upload_csv_for_vcf(request, pk):
vcf = VCF.get_for_user(request.user, pk)
sample_qs = vcf.sample_set.all()
filename = f"vcf_{pk}_patient_upload"
return get_patient_upload_csv(filename, sample_qs)
def view_sample(request, sample_id):
sample = Sample.get_for_user(request.user, sample_id)
has_write_permission = sample.can_write(request.user)
form = forms.SampleForm(request.POST or None, instance=sample)
if not has_write_permission:
set_form_read_only(form)
messages.add_message(request, messages.WARNING, "You can view but not modify this data.")
if request.method == 'POST':
if not has_write_permission:
raise PermissionDenied("Can't modify public data")
valid = form.is_valid()
if valid:
form.save()
add_save_message(request, valid, "Sample")
sample_locus_count = list(SampleLocusCount.objects.filter(sample=sample).order_by("locus_count"))
igv_data = get_igv_data(request.user, genome_build=sample.genome_build)
patient_form = PatientForm(user=request.user) # blank
related_samples = None
if settings.SOMALIER.get("enabled"):
related_samples = SomalierRelatePairs.get_for_sample(sample).order_by("relate")
cohorts = Cohort.objects.filter(cohortsample__sample=sample).distinct()  # cohorts containing this sample, used in the context below
context = {'sample': sample,
'samples': [sample],
'sample_locus_count': sample_locus_count,
'form': form,
'patient_form': patient_form,
'cohorts': cohorts,
'has_write_permission': has_write_permission,
'igv_data': igv_data,
"related_samples": related_samples}
return render(request, 'snpdb/data/view_sample.html', context)
def sample_variants_tab(request, sample_id):
sample = Sample.get_for_user(request.user, sample_id)
analysis = None
error_message = None
if settings.ANALYSIS_TEMPLATES_AUTO_SAMPLE:
try:
analysis_template = AnalysisTemplate.objects.get(name=settings.ANALYSIS_TEMPLATES_AUTO_SAMPLE)
analysis = get_sample_analysis(sample, analysis_template)
except AnalysisTemplate.DoesNotExist:
error_message = f"Analysis Template '{settings.ANALYSIS_TEMPLATES_AUTO_SAMPLE}' does not exist!"
else:
error_message = "settings.ANALYSIS_TEMPLATES_AUTO_SAMPLE not set. Talk to your administrator"
if error_message:
messages.add_message(request, messages.ERROR, error_message)
context = {
'sample': sample,
"analysis": analysis,
'output_node_form': AnalysisOutputNodeChoiceForm(analysis=analysis)
}
return render(request, 'snpdb/data/sample_variants_tab.html', context)
def sample_variants_gene_detail(request, sample_id, gene_symbol):
sample = Sample.get_for_user(request.user, sample_id)
context = {'sample': sample,
'sample_ids': [sample.pk],
'gene_symbol': gene_symbol,
"datatable_config": ClassificationColumns(request)}
return render(request, 'snpdb/data/sample_variants_gene_detail.html', context)
def sample_graphs_tab(request, sample_id):
sample = Sample.get_for_user(request.user, sample_id)
context = {'sample': sample}
return render(request, 'snpdb/data/sample_graphs_tab.html', context)
def get_group_permission_forms(request, obj, groups=None):
if groups is None:
groups = request.user.groups.all().order_by("name")
return [forms.GroupPermissionForm(request.POST or None, obj=obj, group=group) for group in groups]
def sample_permissions_tab(request, sample_id):
sample = Sample.get_for_user(request.user, sample_id)
context = {'sample': sample,
'class_name': full_class_name(Sample)}
return render(request, 'snpdb/data/sample_permissions_tab.html', context)
def view_genomic_intervals(request, genomic_intervals_collection_id):
gic = get_object_or_404(GenomicIntervalsCollection, pk=genomic_intervals_collection_id)
if not request.user.has_perm('view_genomicintervalscollection', gic):
raise PermissionDenied()
form = forms.GenomicIntervalsCollectionForm(request.POST or None, instance=gic)
if request.method == "POST":
valid = form.is_valid()
if valid:
gic = form.save()
add_save_message(request, valid, "Genomic Intervals")
if gic.genome_build is None:
msg = "Unable to automatically set build, please select manually."
messages.add_message(request, messages.WARNING, msg, extra_tags='import-message')
context = {'gic': gic,
'form': form,
"has_write_permission": gic.can_write(request.user)}
return render(request, 'snpdb/data/view_genomic_intervals.html', context)
@require_POST
def cached_generated_file_delete(request):
cgf_id = request.POST["cgf_id"]
cgf = get_object_or_404(CachedGeneratedFile, pk=cgf_id)
cgf.delete()
return HttpResponse()
def vcfs(request):
context = {
"form": VCFChoiceForm(),
}
return render(request, 'snpdb/data/vcfs.html', context=context)
def samples(request):
groups = request.user.groups.values_list("name", flat=True)
groups_str = ', '.join(groups)
num_groups = len(groups)
if num_groups > 1:
group_info = f"(or owned by one of your groups: {groups_str})"
elif num_groups:
group_info = f"(or owned by your group: {groups_str})"
else:
group_info = ''
context = {
"form": SampleChoiceForm(),
"group_info": group_info,
}
return render(request, 'snpdb/data/samples.html', context=context)
def bed_files(request):
return render(request, 'snpdb/data/bed_files.html')
@require_POST
def messages_bulk_delete(request):
messages_str = request.POST['message_ids']
message_ids = json.loads(messages_str)
user_messages_qs = Message.objects.filter(recipient=request.user)
user_messages_qs.filter(pk__in=message_ids).delete()
return HttpResponse()
def manual_variant_entry(request):
if can_create_variants(request.user):
form = forms.ManualVariantEntryForm(request.POST or None, user=request.user)
if request.method == 'POST':
valid = form.is_valid()
if valid:
variants_text = form.cleaned_data['variants_text']
genome_build_pk = form.cleaned_data['genome_build']
genome_build = GenomeBuild.objects.get(pk=genome_build_pk)
create_manual_variants(request.user, genome_build, variants_text)
form = forms.ManualVariantEntryForm(None, user=request.user) # Reset form
add_save_message(request, valid, "Manually entered variants")
else:
form = None
messages.add_message(request, messages.INFO, "Manual variant entry has been disabled by an admin.")
mvec_qs = ManualVariantEntryCollection.objects.order_by("-id")
context = {"form": form,
"mvec_qs": mvec_qs}
return render(request, 'snpdb/data/manual_variant_entry.html', context=context)
@require_POST
def set_user_row_config(request):
""" This is set from jqgrid.html setRowChangeCallbacks when changing grid rows """
grid_name = request.POST["grid_name"]
grid_rows = int(request.POST["grid_rows"])
UserGridConfig.objects.update_or_create(user=request.user, grid_name=grid_name, defaults={"rows": grid_rows})
return HttpResponse()
@require_POST
def set_user_data_grid_config(request):
""" This is set from user_data_grid_filter.html, should contain either filter_level+checked or filter_name """
grid_name = request.POST["grid_name"]
user_grid_config = UserGridConfig.get(request.user, grid_name)
filter_level = request.POST.get("filter_level")
if filter_level:
checked = json.loads(request.POST["checked"])
if filter_level == 'groups':
user_grid_config.show_group_data = checked
elif filter_level == 'incomplete':
user_grid_config.show_incomplete_data = checked
elif filter_level == 'hidden':
user_grid_config.show_hidden_data = checked
else:
msg = f"Unknown value for filter_level: '{filter_level}'"
raise ValueError(msg)
else:
user_grid_config.filter_name = request.POST["filter_name"]
user_grid_config.save()
return HttpResponse()
def view_user_settings(request):
user = request.user
user_contact = UserContact.get_for_user(user)
action = request.POST.get('action') if request.POST else None
post = request.POST or None if not action else None
user_form = UserForm(post, instance=user)
user_contact_form = UserContactForm(post, instance=user_contact)
user_settings = UserSettings.get_for_user(user)
override_source, override_values = user_settings.get_override_source_and_values_before_user()
user_settings_override = UserSettingsOverride.objects.get(user=user)
user_settings_override_form = UserSettingsOverrideForm(post, instance=user_settings_override)
labs_by_group_name = {l.group_name: l for l in Lab.valid_labs_qs(user)}
group_initial_perm_forms = {}
if settings.USER_SETTINGS_SHOW_GROUPS:
read_groups, write_groups = user_settings.initial_perm_read_and_write_groups
for group in user.groups.all().order_by('name'):
initial = {"read": group in read_groups, "write": group in write_groups}
group_initial_perm_forms[group] = SettingsInitialGroupPermissionForm(request.POST or None, initial=initial,
settings_override=user_settings_override,
group=group)
if request.method == "POST":
all_valid = True
action = request.POST.get('action')
if action == 'password-reset':
keycloak = Keycloak()
keycloak.change_password(user)
messages.add_message(request, level=messages.INFO, message='Password reset email sent',
extra_tags='save-message')
else:
if not settings.USE_OIDC:
if user_form.is_valid():
user = user_form.save()
else:
all_valid = False
for form in itertools.chain([user_contact_form, user_settings_override_form],
group_initial_perm_forms.values()):
if form.is_valid():
form.save()
else:
all_valid = False
add_save_message(request, all_valid, "User Settings")
context = {
'user': user,
'user_form': user_form,
'user_contact_form': user_contact_form,
'user_settings_form': user_settings_override_form,
'group_initial_perm_forms': group_initial_perm_forms,
'accounts_email': settings.ACCOUNTS_EMAIL,
'account_manage_url': settings.OIDC_USER_SERVICES,
'override_source': override_source,
'override_values': override_values,
'labs_by_group_name': labs_by_group_name,
'avatar_details': AvatarDetails.avatar_for(user)
}
return render(request, 'snpdb/settings/view_user_settings.html', context)
def user_settings_node_counts_tab(request):
user_settings_override = UserSettingsOverride.objects.get_or_create(user=request.user)[0]
return _settings_override_node_counts_tab(request, user_settings_override)
def lab_settings_node_counts_tab(request, pk):
lab = get_object_or_404(Lab, pk=pk)
has_write_permission = lab.can_write(request.user)
if has_write_permission is False:
_add_read_only_settings_message(request, [lab])
lab_settings_override = LabUserSettingsOverride.objects.get_or_create(lab=lab)[0]
return _settings_override_node_counts_tab(request, lab_settings_override, has_write_permission=has_write_permission)
def organization_settings_node_counts_tab(request, pk):
organization = get_object_or_404(Organization, pk=pk)
has_write_permission = organization.can_write(request.user)
if has_write_permission is False:
_add_read_only_settings_message(request, organization.lab_set.all())
org_settings_override = OrganizationUserSettingsOverride.objects.get_or_create(organization=organization)[0]
return _settings_override_node_counts_tab(request, org_settings_override, has_write_permission=has_write_permission)
def _settings_override_node_counts_tab(request, settings_override, has_write_permission=True):
# This calls _analysis_settings_node_counts_tab with a FakeAnalysis object that
# handles loading/saving a global one against User settings objects instead of analysis
class FakeAnalysis:
def set_node_count_types(self, node_counts_array):
collection, _ = NodeCountSettingsCollection.objects.get_or_create(settings=settings_override)
AbstractNodeCountSettings.save_count_configs_from_array(collection.nodecountsettings_set, node_counts_array)
def get_node_count_types(self):
try:
node_count_config = settings_override.nodecountsettingscollection
node_count_filters = node_count_config.get_node_count_filters()
except:
node_count_filters = BuiltInFilters.DEFAULT_NODE_COUNT_FILTERS
return AbstractNodeCountSettings.get_types_from_labels(node_count_filters)
fake_analysis = FakeAnalysis()
from analysis.views.views import _analysis_settings_node_counts_tab # Circular import
return _analysis_settings_node_counts_tab(request, fake_analysis,
pass_analysis_settings=False, has_write_permission=has_write_permission)
def view_user(request, pk):
user = get_object_or_404(User, pk=pk)
user_contact = UserContact.get_for_user(user)
context = {"user": user,
'user_contact': user_contact}
return render(request, 'snpdb/settings/view_user.html', context)
def _add_read_only_settings_message(request, lab_list: Iterable[Lab]):
""" lab_list: labs where lab heads can modify settings """
lab_heads_qs = LabHead.objects.filter(lab__in=lab_list).distinct()
lab_head_names = ", ".join([str(lh.user) for lh in lab_heads_qs])
if lab_head_names:
lab_head_msg = f" or lab heads: {lab_head_names}"
else:
lab_head_msg = ""
read_only_message = f"Only administrators{lab_head_msg} can modify these settings"
messages.add_message(request, messages.INFO, read_only_message)
def view_lab(request, pk):
lab = get_object_or_404(Lab, pk=pk)
lab_form = LabForm(request.POST or None, instance=lab)
lab_settings_override = LabUserSettingsOverride.objects.get_or_create(lab=lab)[0]
override_fields = set(get_model_fields(LabUserSettingsOverride)) - {"id", "settingsoverride_ptr", "lab"}
parent_overrides = UserSettings.get_settings_overrides(organization=lab.organization)
override_source, override_values = UserSettings.get_override_source_and_values(override_fields, parent_overrides)
settings_overrides = parent_overrides + [lab_settings_override]
read_groups, write_groups = UserSettings.get_initial_perm_read_and_write_groups([lab.group], settings_overrides)
initial = {"read": lab.group in read_groups, "write": lab.group in write_groups}
group_initial_perm_form = None
if settings.USER_SETTINGS_SHOW_GROUPS:
group_initial_perm_form = SettingsInitialGroupPermissionForm(request.POST or None, initial=initial,
settings_override=lab_settings_override,
group=lab.group)
lab_settings_override_form = LabUserSettingsOverrideForm(request.POST or None, instance=lab_settings_override)
has_write_permission = lab.can_write(request.user)
all_forms = [form for form in [lab_form, group_initial_perm_form, lab_settings_override_form] if form]
if request.method == "POST":
lab.check_can_write(request.user)
if debug_method := request.POST.get("debug_method"):
if "Test Slack" == debug_method:
if not lab.slack_webhook:
messages.add_message(request, messages.ERROR, "Slack URL not configured correctly")
else:
#try:
notification_builder = LabNotificationBuilder(lab=lab, message="Testing Slack Integration", notification_type=LabNotificationBuilder.NotificationType.SLACK_ONLY)
notification_builder.add_header(f"{settings.SITE_NAME} -> Slack Integration Test")
notification_builder.add_markdown("If you can see this, then integration has worked! :smile:")
notification_builder.send()
messages.add_message(request, messages.SUCCESS, "Message sent, check your Slack to confirm")
#except:
# report_exc_info()
# messages.add_message(request, messages.ERROR, "Unable to send test notification")
return redirect(reverse('view_lab', kwargs={"pk":pk}))
else:
raise ValueError(f"Un-supported debug method {debug_method}")
else:
all_valid = True
for form in all_forms:
if form.is_valid():
form.save()
else:
all_valid = False
add_save_message(request, all_valid, "Lab Settings")
if has_write_permission is False:
for form in all_forms:
set_form_read_only(form)
# we just hide the form now
# _add_read_only_settings_message(request, [lab])
if settings.VARIANT_CLASSIFICATION_STATS_USE_SHARED:
visibility = "Shared"
else:
visibility = "Created"
context = {
"lab": lab,
"visibility": visibility,
"is_member": lab.is_member(request.user) or request.user.is_superuser,
"lab_form": lab_form,
'settings_override_form': lab_settings_override_form,
'group_initial_perm_form': group_initial_perm_form,
'override_source': override_source,
'override_values': override_values,
'has_write_permission': has_write_permission,
'clinvar_export_enabled': clinvar_export_sync.is_enabled
}
return render(request, 'snpdb/settings/view_lab.html', context)
def view_clinvar_key(request, pk: str):
clinvar_key = get_object_or_404(ClinVarKey, pk=pk)
clinvar_key.check_user_can_access(request.user)
return render(request, 'snpdb/settings/clinvar_key.html', {
'clinvar_key': clinvar_key,
'labs': Lab.objects.filter(clinvar_key=clinvar_key).order_by('name')
})
def view_organization(request, pk):
organization = get_object_or_404(Organization, pk=pk)
organization_form = OrganizationForm(request.POST or None, instance=organization)
org_settings_override = OrganizationUserSettingsOverride.objects.get_or_create(organization=organization)[0]
override_fields = set(get_model_fields(OrganizationUserSettingsOverride)) - {"id", "settingsoverride_ptr", "organization"}
parent_overrides = UserSettings.get_settings_overrides()
override_source, override_values = UserSettings.get_override_source_and_values(override_fields, parent_overrides)
org_settings_override_form = OrganizationUserSettingsOverrideForm(request.POST or None, instance=org_settings_override)
all_forms = [organization_form, org_settings_override_form]
if request.method == "POST":
organization.check_can_write(request.user)
all_valid = True
for form in all_forms:
if form.is_valid():
form.save()
else:
all_valid = False
add_save_message(request, all_valid, "Organization Settings")
has_write_permission = organization.can_write(request.user)
if has_write_permission is False:
for form in all_forms:
set_form_read_only(form)
# put on individual tabs now
# _add_read_only_settings_message(request, organization.lab_set.all())
context = {
"organization": organization,
"is_member": organization.is_member(request.user) or request.user.is_superuser,
"organization_form": organization_form,
'settings_override_form': org_settings_override_form,
'override_source': override_source,
'override_values': override_values,
'has_write_permission': has_write_permission,
}
return render(request, 'snpdb/settings/view_organization.html', context)
def custom_columns(request):
context = {}
form = forms.CustomColumnsCollectionForm(request.POST or None, user=request.user)
if request.method == "POST":
if form.is_valid():
ccc = form.save()
return HttpResponseRedirect(reverse("view_custom_columns", kwargs={"custom_columns_collection_id": ccc.pk}))
add_save_message(request, False, "Columns", created=True)
context["form"] = form
return render(request, 'snpdb/settings/custom_columns.html', context)
# Based on code from http://j-syk.com/weblog/2012/10/18/jquery-sortables-ajax-django/
def view_custom_columns(request, custom_columns_collection_id):
ccc = CustomColumnsCollection.get_for_user(request.user, custom_columns_collection_id)
custom_columns_qs = VariantGridColumn.objects.filter(customcolumn__custom_columns_collection=ccc)
my_columns = list(custom_columns_qs.order_by("customcolumn__sort_order"))
available_columns = list(VariantGridColumn.objects.exclude(grid_column_name__in=my_columns))
variant_grid_columns = {}
for vgc in VariantGridColumn.objects.all():
variant_grid_columns[vgc.pk] = vgc
has_write_permission = ccc.can_write(request.user)
if not has_write_permission:
msg = "You do not have permission to edit these columns. " \
"If you wish to customise them, click 'clone' and modify the copy"
messages.add_message(request, messages.WARNING, msg)
if request.method == "POST":
ccc.check_can_write(request.user)
if name := request.POST.get("name"):
ccc.name = name
ccc.save()
elif my_columns_str := request.POST.get("columns"):
def update_user_columns(id_list, active):
for i, col in enumerate(id_list):
column = variant_grid_columns[col]
CustomColumn.objects.update_or_create(custom_columns_collection=ccc, column=column,
defaults={"sort_order": i})
# Delete any not in id_list
CustomColumn.objects.filter(custom_columns_collection=ccc).exclude(column__in=id_list).delete()
my_columns_list = my_columns_str.split(',') if my_columns_str else []
active = 'my_columns' in request.POST
update_user_columns(my_columns_list, active)
return HttpResponse() # Nobody ever looks at this
context_dict = {
'available_columns_list': available_columns,
'my_columns_list': my_columns,
'custom_columns': ccc,
'has_write_permission': has_write_permission,
}
return render(request, 'snpdb/settings/view_custom_columns.html', context_dict)
def tag_settings(request):
form = forms.CreateTagForm(request.POST or None)
if request.method == "POST":
valid = form.is_valid()
if valid:
tag_name = form.cleaned_data['tag']
name = f"Tag {tag_name}"
try:
Tag.objects.create(pk=tag_name)
except:
valid = False
else:
name = "Tag"
add_save_message(request, valid, name, created=True)
user_tag_styles, user_tag_colors = UserTagColors.get_tag_styles_and_colors(request.user)
context_dict = {'form': form,
'user_tag_styles': user_tag_styles,
'user_tag_colors': user_tag_colors}
return render(request, 'snpdb/settings/tag_settings.html', context_dict)
@require_POST
def set_user_tag_color(request):
tag = request.POST['tag']
rgb = request.POST['rgb']
(utc, _) = UserTagColors.objects.get_or_create(user=request.user, tag_id=tag)
utc.rgb = rgb
utc.save()
logging.info("saved %s", utc)
return HttpResponse()
def igv_integration(request):
widgets = {"prefix": TextInput(attrs={'placeholder': 'from...'}),
"replacement": TextInput(attrs={'placeholder': 'to...'})}
UserDataPrefixFormSet = inlineformset_factory(User,
UserDataPrefix,
can_delete=True,
fields=ALL_FIELDS,
widgets=widgets,
max_num=10,
extra=3, )
formset = UserDataPrefixFormSet(request.POST or None, instance=request.user)
if request.method == "POST":
valid = formset.is_valid()
if valid:
formset.save()
add_save_message(request, valid, "IGV Integration")
context_dict = {'user': request.user,
'formset': formset,
'example_replacements': get_example_replacements(request.user)}
return render(request, 'snpdb/settings/igv_integration.html', context_dict)
def cohorts(request):
user_settings = UserSettings.get_for_user(request.user)
initial = {'genome_build': user_settings.default_genome_build}
form = forms.CreateCohortForm(request.POST or None, initial=initial)
if request.method == "POST":
valid = form.is_valid()
if valid:
cohort = form.save()
assign_permission_to_user_and_groups(request.user, cohort)
return HttpResponseRedirect(reverse('view_cohort', kwargs={'cohort_id': cohort.pk}))
else:
add_save_message(request, valid, "Cohort", created=True)
context = {"form": form}
return render(request, 'snpdb/patients/cohorts.html', context)
def view_cohort_details_tab(request, cohort_id):
cohort = Cohort.get_for_user(request.user, cohort_id)
context = {"cohort": cohort,
"has_write_permission": cohort.can_write(request.user)}
return render(request, 'snpdb/patients/view_cohort_details_tab.html', context)
def view_cohort(request, cohort_id):
cohort = Cohort.get_for_user(request.user, cohort_id)
if cohort.vcf:
return redirect('view_vcf', vcf_id=cohort.vcf.pk)
try:
cohort_genotype_collection = cohort.cohort_genotype_collection
except CohortGenotypeCollection.DoesNotExist:
cohort_genotype_collection = None
form = forms.CohortForm(request.POST or None, instance=cohort)
if request.method == "POST":
if valid := form.is_valid():
cohort = form.save()
add_save_message(request, valid, "Cohort")
sample_form = SampleChoiceForm(genome_build=cohort.genome_build)
sample_form.fields['sample'].required = False
context = {"form": form,
"sample_form": sample_form,
"cohort": cohort,
"cohort_genotype_collection": cohort_genotype_collection,
"has_write_permission": cohort.can_write(request.user)}
return render(request, 'snpdb/patients/view_cohort.html', context)
def cohort_sample_edit(request, cohort_id):
cohort = Cohort.get_for_user(request.user, cohort_id)
if request.method == "POST":
cohort_op = request.POST['cohort_op']
sample_ids_str = request.POST['sample_ids']
sample_ids = json.loads(sample_ids_str)
if cohort_op == 'add':
for sample_id in sample_ids:
cohort.add_sample(sample_id)
elif cohort_op == 'remove':
for sample_id in sample_ids:
try:
cohort_sample = CohortSample.objects.get(cohort=cohort, sample_id=sample_id)
cohort_sample.delete()
logging.info("Removed: %s", sample_id)
except CohortSample.DoesNotExist:
pass
else:
raise ValueError(f"Unknown cohort_op '{cohort_op}'")
return HttpResponse()
def cohort_hotspot(request, cohort_id):
cohort = Cohort.get_for_user(request.user, cohort_id)
form = GeneAndTranscriptForm(genome_build=cohort.genome_build)
try:
cohort_genotype_collection = cohort.cohort_genotype_collection
except Exception as e:
cohort_genotype_collection = None
logging.error(e)
context = {"cohort": cohort,
"cohort_genotype_collection": cohort_genotype_collection,
"form": form}
return render(request, 'snpdb/patients/cohort_hotspot.html', context)
def cohort_gene_counts(request, cohort_id):
cohort = Cohort.get_for_user(request.user, cohort_id)
COHORT_CUSTOM_GENE_LIST = f"__QC_COVERAGE_CUSTOM_GENE_LIST__{request.user}"
# We only want to keep 1 per user
custom_text_gene_list, _ = CustomTextGeneList.objects.get_or_create(name=COHORT_CUSTOM_GENE_LIST)
custom_gene_list_form = CustomGeneListForm(request.POST or None,
initial={"custom_gene_list_text": custom_text_gene_list.text})
if custom_gene_list_form.is_valid():
custom_text_gene_list.text = custom_gene_list_form.cleaned_data['custom_gene_list_text']
custom_text_gene_list.save()
create_custom_text_gene_list(custom_text_gene_list, request.user, GeneListCategory.QC_COVERAGE_CUSTOM_TEXT,
hidden=True)
gene_list_id = custom_text_gene_list.gene_list.pk
else:
gene_list_id = None
context = {"cohort": cohort,
'gene_list_id': gene_list_id,
'gene_list_form': UserGeneListForm(),
'custom_gene_list_form': custom_gene_list_form,
'gene_count_type_choice_form': GeneCountTypeChoiceForm()}
return render(request, 'snpdb/patients/cohort_gene_counts.html', context)
def cohort_gene_counts_matrix(request, cohort_id, gene_count_type_id, gene_list_id):
cohort = Cohort.get_for_user(request.user, cohort_id)
gene_count_type = GeneCountType.objects.get(pk=gene_count_type_id)
gene_list = GeneList.get_for_user(request.user, gene_list_id)
samples = list(cohort.get_samples())
annotation_version = AnnotationVersion.latest(cohort.genome_build)
variant_annotation_version = annotation_version.variant_annotation_version
cgc, created = CohortGeneCounts.objects.get_or_create(variant_annotation_version=variant_annotation_version,
gene_count_type=gene_count_type,
cohort=cohort,
cohort_version=cohort.version)
graph_kwargs = {"cohort_id": cohort_id,
"gene_count_type_id": gene_count_type_id,
"gene_list_id": gene_list_id}
redirect_url = reverse("cohort_gene_counts_matrix", kwargs=graph_kwargs)
if created or (cgc.processing_status not in ProcessingStatus.FINISHED_STATES):
celery_task = cgc.launch_task()
wait_for_task_kwargs = {"celery_task": celery_task, "sleep_ms": 2000, "redirect_url": redirect_url}
wait_url = reverse("wait_for_task", kwargs=wait_for_task_kwargs)
return HttpResponseRedirect(wait_url)
else:
if cgc.processing_status == ProcessingStatus.SUCCESS:
return sample_gene_matrix(request, variant_annotation_version, samples, gene_list, gene_count_type)
else:
raise ValueError(f"{cgc} had ProcessingStatus: {cgc.processing_status}")
def trios(request):
context = {}
return render(request, 'snpdb/patients/trios.html', context)
def view_trio(request, pk):
trio = Trio.get_for_user(request.user, pk)
context = {"trio": trio,
"has_write_permission": trio.cohort.can_write(request.user)}
return render(request, 'snpdb/patients/view_trio.html', context)
@login_not_required
def sample_gene_matrix(request, variant_annotation_version, samples, gene_list,
gene_count_type, highlight_gene_symbols=None):
""" highlight_gene_symbols - put these genes 1st """
# 19/07/18 - Plotly can't display a categorical color map. See: https://github.com/plotly/plotly.js/issues/1747
# So just doing as HTML table
if gene_list:
genes = gene_list.get_genes(variant_annotation_version.gene_annotation_release)
gene_symbols = set(gene_list.get_gene_names())
else:
# This was originally designed around a gene list, but now we need to support no gene list (only when uses
# variant classifications)
genes = []
gene_symbols = []
qs = gene_count_type.get_variant_queryset(variant_annotation_version)
GS_PATH = "variantannotation__transcript_version__gene_version__gene_symbol"
qs = qs.filter(**{GS_PATH + "__isnull": False})
for gene, gene_symbol in qs.values_list("variantannotation__gene", GS_PATH).distinct():
genes.append(gene)
gene_symbols.append(gene_symbol)
gene_values = list(gene_count_type.genevalue_set.all().order_by("id"))
default_color = "#d9d9d9"
default_text = ""
empty_gene_value = list(filter(lambda x: x.use_as_empty_value, gene_values))
if len(empty_gene_value) == 1:
default_color = empty_gene_value[0].rgb
phenotypes = ["Age", "HPO", "OMIM"]
highlight_gene_labels = []
other_gene_labels = []
gene_links_lookup = OrderedDict()
for gene_symbol in sorted(gene_symbols):
gene_classes_list = ["gene-label", gene_symbol]
highlight = highlight_gene_symbols and gene_symbol in highlight_gene_symbols
if highlight:
gene_classes_list.append("highlight-gene")
gene_classes = ' '.join(gene_classes_list)
if request.user.is_authenticated: # Only display links to logged in users
url = reverse('view_gene_symbol', kwargs={"gene_symbol": gene_symbol})
gene_symbol_text = f'<a class="{gene_classes}" href="{url}">{gene_symbol}</a>'
else:
gene_symbol_text = f"<span class='{gene_classes}'>{gene_symbol}</span>"
if highlight:
highlight_gene_labels.append(gene_symbol_text)
else:
other_gene_labels.append(gene_symbol_text)
gene_links_lookup[gene_symbol] = gene_symbol_text
matrix_rows = phenotypes + highlight_gene_labels + other_gene_labels
color_df = pd.DataFrame(index=matrix_rows, dtype='O')
#!/usr/bin/env python
'''
<NAME> October 2018
Scripts for looking at and evaluating input data files for dvmdostem.
Generally data has been prepared by M. Lindgren of SNAP for the IEM project and
consists of directories of well-labeled .tif images, with one image for each
timestep.
This script has (or will have) a variety of routines for summarizing the data
and displaying plots that will let us look for problems, missing data, or
anomalies.
'''
import os
import sys
import subprocess
import glob
import pickle
import multiprocessing
import datetime as dt
from osgeo import gdal
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.ticker as ticker
TMP_DATA = 'climatology-intermediate-data'
def timeseries_summary_stats_and_plots(base_path, secondary_path_list):
'''
'''
# Decades for projected, truncated first
fx_periods = [
(2006,2010),(2010,2020),(2020,2030),(2030,2040),(2040,2050),
(2050,2060),(2060,2070),(2070,2080),(2080,2090),(2090,2100)
]
# Decades for historic, truncated at end
hist_periods = [
(1901,1911),(1911,1921),(1921,1931),(1931,1941),(1941,1951),
(1951,1961),(1961,1971),(1971,1981),(1981,1991),(1991,2001),
(2001,2011),(2011,2015)
]
procs = []
for i in secondary_path_list:
if 'pr_total' in i.lower():
units = 'mm month-1'
elif 'tas_mean' in i.lower():
units = 'degrees C'
elif 'vap_mean' in i.lower():
units = 'hPa'
elif 'rsds_mean' in i.lower():
units = 'MJ-m2-d1'
elif 'hurs_mean' in i.lower():
units = 'percent'
else:
print("ERROR! hmmm can't find variable in {}".format(i))
if '_cru' in i.lower():
periods = hist_periods
elif '_mri' in i.lower():
periods = fx_periods
elif '_ncar' in i.lower():
periods = fx_periods
secondary_path = i
print("MAIN PROCESS! [{}] Starting worker...".format(os.getpid()))
p = multiprocessing.Process(target=worker_func, args=(base_path, secondary_path, units, periods))
procs.append(p)
p.start()
print("Done starting processes. Looping to set join on each process...")
for p in procs:
p.join()
print("DONE! Plots should be saved...")
def worker_func(base_path, secondary_path, units, periods):
'''
'''
print("worker function! pid:{}".format(os.getpid()))
print(" [{}] {}".format(os.getpid(), base_path))
print(" [{}] {}".format(os.getpid(), secondary_path))
print(" [{}] {}".format(os.getpid(), units))
monthlies_figure = get_monthlies_figure(
base_path, secondary_path,
title='\n'.join((base_path, secondary_path)),
units=units,
src='fresh',
save_intermediates=False,
madata=None
)
overveiw_figure, period_averages = get_overview_figure(
periods,
base_path, secondary_path,
title='\n'.join((base_path, secondary_path)),
units=units,
src='fresh', # can be: fresh, pickle, or passed
save_intermediates=False,
padata=None
)
individual_figs, _ = get_period_avg_figures(
periods,
base_path, secondary_path,
title=os.path.dirname(secondary_path),
units=units,
src='passed',
padata=period_averages
)
# Create multi-page pdf document
import matplotlib.backends.backend_pdf
ofname = "climatology_{}.pdf".format(secondary_path.split("/")[0])
print("Building PDF with many images: {}".format(ofname))
pdf = matplotlib.backends.backend_pdf.PdfPages(ofname)
pdf.savefig(monthlies_figure)
pdf.savefig(overveiw_figure)
for f in individual_figs:
pdf.savefig(f)
pdf.close()
print("Done saving pdf: {}".format(ofname))
def create_vrt(filelist, ofname):
'''
Creates a GDAL vrt (virtual file format) for a series of input files.
Expects the each of the files in the filelist to be a single band GeoTiff.
The files will be combined into a single .vrt file with one Band for each
of the input files. The single VRT file may then be further manipulated with
GDAL (i.e take the average over all the bands).
Parameters
----------
filelist : list of strings (paths) to files that will be combined
ofname : string for a filename that will be written
Returns
-------
None
Use Cases, Examples
-------------------
- Create a monthly or decadal summary file for a set of images representing
a timeseries (e.g. tifs that will be pre-processed and turned to netcdf files
for dvmdostem runs).
'''
basename = os.path.basename(ofname)
basename_noext, ext = os.path.splitext(basename)
temporary_filelist_file = os.path.join("/tmp/", "filelist-pid-{}-{}.txt".format(os.getpid(), basename_noext))
with open(temporary_filelist_file, 'w') as f:
f.write("\n".join(filelist))
result = subprocess.check_call([
'gdalbuildvrt',
'-overwrite',
'-separate',
ofname,
'-input_file_list', temporary_filelist_file
])
os.remove(temporary_filelist_file)
def average_over_bands(ifname, bands='all'):
'''
Given an input file (`ifname`), this function computes the average over all
the bands and returns the result. Assumes the bands are named Band1, Band2,
etc.
Parameters
----------
ifname : str
A multi-band file that can be opened and read with GDAL. Expects that all
bands have data and are the same spatial extents. Ignored data less
than -9999.
bands : str
One of 'all', 'first10', or 'first3'. Selects a subset of bands for faster
processing for testing and development.
Returns
-------
avg : numpy masked array
Returned array is the same shape as an individual band in the input file,
and with each pixel being the average of the pixel values in all of the
input file's bands.
'''
ds = gdal.Open(ifname)
print(" [ DESCRIPTION ]: ", ds.GetDescription())
print(" [ RASTER BAND COUNT ]: ", ds.RasterCount)
print(" [ RASTER Y SIZE ]: ", ds.RasterYSize)
print(" [ RASTER X SIZE ]: ", ds.RasterXSize)
if bands == 'all':
band_range = list(range(1, ds.RasterCount+1))
elif bands == 'first10':
band_range = list(range(1, 10+1))
elif bands == 'first3':
band_range = list(range(1, 3+1))
print(" [ AVERAGE OVER BANDS ]: {}".format(len(band_range)))
print(" [ START BAND ]: {}".format(band_range[0]))
print(" [ END BAND ]: {}".format(band_range[-1]))
# allocate a storage location
running_sum = np.ma.masked_less_equal(np.zeros((ds.RasterYSize, ds.RasterXSize)), -9999)
for band in band_range:
dsb = ds.GetRasterBand(band)
if dsb is None:
print("huh??")
# continue (? as per example here: https://pcjericks.github.io/py-gdalogr-cookbook/raster_layers.html)
masked_data = np.ma.masked_less_equal(dsb.ReadAsArray(), -9999)
running_sum += masked_data
print("adding band: {} band min/max: {}/{} running_sum min/max: {}/{}".format(
band,
masked_data.min(), masked_data.max(),
running_sum.min(), running_sum.max()
))
# Compute average over the number of bands summed
avg = running_sum / float(len(band_range))
# Close gdal file
ds = None
return avg
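# Sketch of how create_vrt() and average_over_bands() compose. The file glob
# and output path are hypothetical.
def _example_average_one_year(year=2050):
    """Illustrative sketch only: stack twelve monthly GeoTiffs into a VRT and
    average over the bands."""
    files = sorted(glob.glob('/data/tas_mean_C_ar5_MRI-CGCM3_rcp85_*_{}.tif'.format(year)))
    vrt_path = '/tmp/tas_{}.vrt'.format(year)
    create_vrt(files, vrt_path)
    return average_over_bands(vrt_path, bands='all')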
def read_period_averages(periods):
'''
Reads pickled period average data from the TMP_DATA directory. Expects files
to be in a further sub-directory, period-averages, and have names
like: "pa-{start}-{end}.pickle".
Parameters
----------
periods : list of tuples
Each tuple should have values (start, end) that are used to define the
period.
Returns
-------
period_averages : list
A list of (masked) numpy arrays that have been un-pickled from the TMP_DATA
directory. The pickles are expected to be the period averages built using
other routines in this script.
'''
print("Reading period average pickles into list...")
period_averages = []
for i, (start, end) in enumerate(periods):
path = os.path.join(TMP_DATA, 'period-averages-pid{}'.format(os.getpid()), 'pa-{}-{}.pickle'.format(start, end))
pa = pickle.load(open(path, 'rb'))
period_averages.append(pa)
print("Done reading period average pickles into list.")
return period_averages
def read_monthly_pickles(months=list(range(1,13))):
print("reading monthly pickle files for months {}...".format(months))
mavgs = []
for m in months:
path = os.path.join(
TMP_DATA,
'month-averages-pid{}'.format(os.getpid()),
'month-{:02d}.pickle'.format(m)
)
ma = pickle.load(open(path, 'rb'))
mavgs.append(ma)
print("Returning monthly averages list..")
return mavgs
def calculate_period_averages(periods, base_path, secondary_path, save_intermediates=False):
'''Given a stack of tif files, one file for each month, this routine will
calculate the averages for the supplied periods. Periods are expected to be
selections of years, i.e. 1901 to 1911.
Parameters
----------
periods : list of tuples
each tuple has a start and end year for the period
base_path : str
path on the file system where files are located
secondary_path : str
remainder of path on file system where files will be found. The secondary
path string is expected to be somethign like this:
"ar5_MRI-CGCM3_rcp85_{month:}_{year:}.tif"
with the one set of braces for the month one set of braces for the year.
This function will fill the braces to match any month and the years
specified in the periods tuples
save_intermediates : bool
when true, period average array will be pickled for each period. Will be
saved like so 'climatology/period-averages/pa-{}-{}.pickle'
Returns
-------
list of 2D masked numpy arrays
'''
# Ensure there is a place to put the vrt files
path = os.path.join(TMP_DATA, 'period-averages-pid{}'.format(os.getpid()))
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
# Make the VRTs for the periods
for i, (start, end) in enumerate(periods):
print("[ period {} ] Making vrt for period {} to {} (range {})".format(i, start, end, list(range(start, end))))
filelist = []
for year in range(start, end):
final_secondary_path = secondary_path.format(month="*", year="{:04d}")
#print os.path.join(base_path, final_secondary_path.format(year))
single_year_filelist = sorted(glob.glob(os.path.join(base_path, final_secondary_path.format(year))))
#print "Length of single year filelist {}".format(len(single_year_filelist))
filelist += single_year_filelist
print("Length of full filelist: {} ".format(len(filelist)))
vrtp = os.path.join(TMP_DATA, 'period-averages-pid{}'.format(os.getpid()), "period-{}-{}.vrt".format(start, end))
create_vrt(filelist, vrtp)
# Calculate the period averages from the VRT files
period_averages = []
for i, (start, end) in enumerate(periods):
# Find the average over the selected range
vrtp = os.path.join(TMP_DATA, 'period-averages-pid{}'.format(os.getpid()), "period-{}-{}.vrt".format(start, end))
pa = average_over_bands(vrtp, bands='all')
period_averages.append(pa)
if save_intermediates:
# Make sure there is a place to put our pickles
path = os.path.join(TMP_DATA, 'period-averages-pid{}'.format(os.getpid()))
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
print("Dumping pickle for period {} to {}".format(start, end))
pickle.dump(pa, open(os.path.join(path, "pa-{}-{}.pickle".format(start, end)), 'wb'))
# Clean up any intermediate files.
if not save_intermediates:
papath = os.path.join(TMP_DATA, 'period-averages-pid{}'.format(os.getpid()))
for f in os.listdir(papath):
os.remove(os.path.join(papath, f))
os.rmdir(papath)
print("Returning period averages list...")
return period_averages
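# Illustrative usage sketch (not part of the original pipeline); the periods,
# base path and template below are assumptions chosen to match the docstring of
# calculate_period_averages.
def _example_calculate_period_averages():
    periods = [(1901, 1911), (2006, 2016), (2090, 2100)]
    base_path = '/workspace/climate-tifs'  # assumed location of the monthly tifs
    secondary_path = 'ar5_MRI-CGCM3_rcp85_{month:}_{year:}.tif'
    return calculate_period_averages(periods, base_path, secondary_path,
                                     save_intermediates=True)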
def calculate_monthly_averages(months, base_path, secondary_path, save_intermediates=False):
    '''Calculate, for each requested month, the average of that month's values
    across all years found under base_path/secondary_path.
    '''
# Make sure there is a place to put our pickles and vrt files
intermediates_path = os.path.join(TMP_DATA, 'month-averages-pid{}'.format(os.getpid()))
try:
os.makedirs(intermediates_path)
except OSError:
if not os.path.isdir(intermediates_path):
raise
# Build the vrt files
print("Creating monthly VRT files...")
for im, MONTH in enumerate(months[:]):
final_secondary_path = secondary_path.format(month="{:02d}", year="*").format(im+1)
filelist = sorted(glob.glob(os.path.join(base_path, final_secondary_path)))
if len(filelist) < 1:
print("ERROR! No files found in {}".format( os.path.join(base_path, final_secondary_path) ))
vrt_path = os.path.join(intermediates_path,"month-{:02d}.vrt".format(im+1))
create_vrt(filelist, vrt_path)
print("Computing monthly averages from monthly VRT files...")
# make list of expected input vrt paths
ivp_list = [os.path.join(intermediates_path,"month-{:02d}.vrt".format(im)) for im in range(1, len(months)+1)]
monthly_averages = [average_over_bands(ivp, bands='all') for ivp in ivp_list]
if save_intermediates:
print("Saving pickles...")
for im, ma in enumerate(monthly_averages):
pp = os.path.join(intermediates_path, "month-{:02d}.pickle".format(im+1))
            pickle.dump(ma, open(pp, 'wb'))
print("Done saving pickles...")
# Clean up any intermediate files.
if not save_intermediates:
mapath = os.path.join(TMP_DATA, 'month-averages-pid{}'.format(os.getpid()))
for f in os.listdir(mapath):
os.remove(os.path.join(mapath, f))
os.rmdir(mapath)
print("Returning monthly_averages list...")
return monthly_averages
def get_monthlies_figure(base_path, secondary_path, title, units,
src='fresh', save_intermediates=True, madata=None ):
'''
Creates a single figure with 12 subplots, each showing the average for that
month across the timeseries.
'''
months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
if src == 'fresh':
monthly_averages = calculate_monthly_averages(months, base_path, secondary_path, save_intermediates=save_intermediates)
elif src == 'pickle':
monthly_averages = read_monthly_pickles(months=list(range(1,13)))
elif src == 'passed':
monthly_averages = madata
else:
print("Invalid argument for src! '{}'".format(src))
vmax = np.max([avg.max() for avg in monthly_averages])
vmin = np.min([avg.min() for avg in monthly_averages])
print("vmax: {} vmin: {}".format(vmax, vmin))
print("Creating monthlies figure...")
fig, axes = plt.subplots(figsize=(11,8.5), nrows=3, ncols=4, sharex=True, sharey=True)
imgs = []
for ax, avg, month in zip(axes.flat, monthly_averages, months):
im = ax.imshow(avg, vmin=vmin, vmax=vmax, cmap='gist_ncar')
imgs.append(im)
ax.set_title(month)
cbar = fig.colorbar(imgs[0], ax=axes.ravel().tolist())
cbar.set_label(units)
fig.suptitle(title)
print("Done creating monthlies figure.")
return fig
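# Illustrative call (assumed paths and labels): src='fresh' recomputes the monthly
# averages, src='pickle' re-reads averages dumped by an earlier run, and
# src='passed' reuses an in-memory list supplied through madata.
def _example_get_monthlies_figure():
    return get_monthlies_figure('/workspace/climate-tifs',
                                'ar5_MRI-CGCM3_rcp85_{month:}_{year:}.tif',
                                title='Monthly mean air temperature',
                                units='degrees C',
                                src='fresh', save_intermediates=True)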
def get_overview_figure(periods, base_path, secondary_path, title='',
units='', src='fresh', save_intermediates=True, padata=None):
'''
    Creates and returns a matplotlib figure with one subplot per period average,
    all subplots sharing a common color scale.
Parameters
----------
Returns
-------
fig : matplotlib figure instance
'''
if src == 'fresh':
period_averages = calculate_period_averages(periods, base_path, secondary_path, save_intermediates=save_intermediates)
elif src == 'pickle':
period_averages = read_period_averages(periods)
elif src == 'passed':
period_averages = padata
else:
print("Invalid argument for src! '{}'".format(src))
print("Converting to stacked masked array...")
pa2 = np.ma.stack(period_averages)
vmax = pa2.max()
vmin = pa2.min()
print("vmax: {} vmin: {}".format(vmax, vmin))
NCOLS = 4 # fixed number of cols, may add more rows
    NROWS = len(period_averages) // NCOLS  # integer division so subplots() gets an int
if (len(period_averages) % NCOLS) > 0:
NROWS += 1
if len(period_averages) < NCOLS:
NCOLS = len(period_averages)
NROWS = 1
overview_fig, axes = plt.subplots(nrows=NROWS, ncols=NCOLS, sharex=True, sharey=True)
overview_fig.set_size_inches((11, 8.5), forward=True)
imgs = [] # in case we need to manipulate the images all at once
for ax, avg, period in zip(axes.flat, period_averages, periods):
print("plotting image for period:", period)
# Setting vmax and vmin normalized the colorbars across all images
im = ax.imshow(avg, vmin=vmin, vmax=vmax, cmap='gist_ncar')
ax.set_title('{} to {}'.format(period[0], period[1]))
imgs.append(im)
# set a colorbar on the first axes
cbar = overview_fig.colorbar(imgs[0], ax=axes.ravel().tolist())
cbar.set_label(units)
overview_fig.suptitle(title)
return overview_fig, period_averages
def get_period_avg_figures(periods, base_path, secondary_path,
title='', units='', src='fresh', save_intermediates=True, padata=None):
    '''
    Creates one standalone figure per period, each showing that period's average
    on a color scale shared across all periods.
    Parameters
    ----------
    Same arguments as get_overview_figure.
    Returns
    -------
    (list of matplotlib figures, the period averages that were plotted)
    '''
if src == 'fresh':
period_averages = calculate_period_averages(periods, base_path, secondary_path, save_intermediates=save_intermediates)
elif src == 'pickle':
period_averages = read_period_averages(periods)
elif src == 'passed':
period_averages = padata
else:
print("Invalid argument for src! '{}'".format(src))
print("Converting to stacked masked array...")
pa2 = np.ma.stack(period_averages)
vmax = pa2.max()
vmin = pa2.min()
print("vmax: {} vmin: {}".format(vmax, vmin))
ind_figures = []
for i, ((start,end), periodavg) in enumerate(zip(periods, pa2)):
fig = plt.figure()
fig.suptitle(title) #fontsize=8
im = plt.imshow(periodavg, vmin=vmin, vmax=vmax, cmap='gist_ncar')
ax = fig.axes[0]
ax.set_title('Average, {} to {}'.format(start, end))
cbar = plt.colorbar()
cbar.set_label(units)
ind_figures.append(fig)
    return ind_figures, period_averages
def worker_func2(f):
if f == 'file3':
time.sleep(1)
if f == 'file7':
time.sleep(5)
print("will open, read, average {}".format(f))
return f
def worker_func3(in_file_path):
    '''Open one monthly GeoTIFF, mask the -9999 nodata values, and return a dict
    of statewide summary statistics (mean/min/max/std) for that file.
    '''
# Deduce month and year from file name
bname = os.path.basename(in_file_path)
n, ext = os.path.splitext(bname)
parts = n.split('_')
month, year = [int(p) for p in parts[-2:]]
date = dt.date(year=year, month=month, day=1)
# Open the file, get some stats
ds = gdal.Open(in_file_path)
ds_array = ds.ReadAsArray()
ds_m = np.ma.masked_less_equal(ds_array, -9999)
data_dict = dict(
fname=bname,
date=date,
statewide_mean=ds_m.mean(),
statewide_min=ds_m.min(),
statewide_max=ds_m.max(),
statewide_std=ds_m.std()
)
return data_dict
def generate_spatial_summary_stats(base_path, secondary_path_list):
    '''For each secondary path template in secondary_path_list, compute statewide
    summary statistics for every monthly file and write one
    SPATIAL_SUMMARY_STATS_*.csv per template.
    '''
# This produces a bunch of csv files with statewide averages
for sec_path in secondary_path_list[0:]:
files = sorted(glob.glob(os.path.join(base_path, sec_path.format(month='*', year='*'))))
p = multiprocessing.Pool()
results = p.map(worker_func3, files[0:])
p.close()
p.join()
s_results = sorted(results, key=lambda k: k['date'])
stats_path = "SPATIAL_SUMMARY_STATS_{}.csv".format(sec_path.split('/')[0])
import pandas as pd
df = pd.DataFrame(s_results)
df.to_csv(stats_path)
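# Illustrative call (assumed template): each entry is a secondary path template
# with {month} and {year} placeholders; one summary csv is written per template.
def _example_generate_spatial_summary_stats():
    templates = ['tas_mean_C_iem_cru_TS40_1901_2015/tas_mean_C_CRU-TS40_historical_{month}_{year}.tif']
    generate_spatial_summary_stats('/workspace/climate-tifs', templates)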
def plot_timeseries_of_spatial_summary_stats():
    '''Plot time series of the statewide summary statistics (read from the
    SPATIAL_SUMMARY_STATS_*.csv files) into a multi-page PDF, one variable at a time.
    '''
# Build this automatically:
# - look for SPATIAL_SUMMARY_STATS_*
ss_file_list = [
'SPATIAL_SUMMARY_STATS_hurs_mean_pct_ar5_MRI-CGCM3_rcp85_2006_2100_fix.csv',
'SPATIAL_SUMMARY_STATS_hurs_mean_pct_ar5_NCAR-CCSM4_rcp85_2006_2100_fix.csv',
'SPATIAL_SUMMARY_STATS_hurs_mean_pct_iem_CRU-TS40_historical_1901_2015_fix.csv',
'SPATIAL_SUMMARY_STATS_pr_total_mm_ar5_MRI-CGCM3_rcp85_2006_2100.csv',
'SPATIAL_SUMMARY_STATS_pr_total_mm_ar5_NCAR-CCSM4_rcp85_2006_2100.csv',
'SPATIAL_SUMMARY_STATS_pr_total_mm_iem_cru_TS40_1901_2015.csv',
'SPATIAL_SUMMARY_STATS_rsds_mean_MJ-m2-d1_ar5_MRI-CGCM3_rcp85_2006_2100_fix.csv',
'SPATIAL_SUMMARY_STATS_rsds_mean_MJ-m2-d1_ar5_NCAR-CCSM4_rcp85_2006_2100_fix.csv',
'SPATIAL_SUMMARY_STATS_rsds_mean_MJ-m2-d1_iem_CRU-TS40_historical_1901_2015_fix.csv',
'SPATIAL_SUMMARY_STATS_tas_mean_C_ar5_MRI-CGCM3_rcp85_2006_2100.csv',
'SPATIAL_SUMMARY_STATS_tas_mean_C_ar5_NCAR-CCSM4_rcp85_2006_2100.csv',
'SPATIAL_SUMMARY_STATS_tas_mean_C_iem_cru_TS40_1901_2015.csv',
'SPATIAL_SUMMARY_STATS_vap_mean_hPa_ar5_MRI-CGCM3_rcp85_2006_2100_fix.csv',
'SPATIAL_SUMMARY_STATS_vap_mean_hPa_ar5_NCAR-CCSM4_rcp85_2006_2100_fix.csv',
'SPATIAL_SUMMARY_STATS_vap_mean_hPa_iem_CRU-TS40_historical_1901_2015_fix.csv',
]
# Create multi-page pdf document
import matplotlib.backends.backend_pdf
    ofname = "climatology_statewide_averages.pdf"
print("Saving PDF: {}".format(ofname))
pdf = matplotlib.backends.backend_pdf.PdfPages(ofname)
var_list = ['tas_mean','pr_total','rsds_mean','vap_mean','hurs_mean']
unit_list = ['celsius', 'mm month-1', 'MJ-m2-d1','hPa', 'percent']
for var, units in zip(var_list, unit_list):
# Figure out the right files to work on
var_files = [x for x in ss_file_list if var in x.lower()]
print(var_files)
print()
h_file = [x for x in var_files if 'cru' in x.lower()]
pmri_file = [x for x in var_files if 'mri' in x.lower()]
pncar_file = [x for x in var_files if 'ncar' in x.lower()]
# Filtering above should result in single item lists, unpack for convenience.
h_file = h_file[0]
pmri_file = pmri_file[0]
pncar_file = pncar_file[0]
print("var: ", var)
print("hfile: ", h_file)
print("pmri_file: ", pmri_file)
print("pncar_file: ", pncar_file)
print()
# Read data into DataFrames
hdf = pd.read_csv( h_file )
hdf.set_index( pd.to_datetime(hdf['date']), inplace=True )
pmri_df = pd.read_csv( pmri_file )
pmri_df.set_index( | pd.to_datetime(pmri_df['date']) | pandas.to_datetime |
###################################################################################
## Description: Text analytics of the labelled mse data ###
## Status: WIP ###
###################################################################################
import json, time, datetime, sys
from nltk.corpus import stopwords
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import operator, string, re
# path to data folders
pathIn = '../data/build'
pathOut = '../data/results'
## choose weighting factor: default or tf-idf
# default: term frequency adjusted for document length
# tf-idf: statistical measure of how important a word is to a document in a
#         collection; the importance increases with the number of times the word
#         appears in the document but is offset by how frequent the word is in the corpus
mode = 'tf-idf'
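# Minimal sketch of the two weighting schemes described above (generic
# illustration only; the actual computation used further down may differ).
def _tf(term_count, doc_length):
    # term frequency adjusted for document length
    return term_count / float(doc_length)
def _tf_idf(term_count, doc_length, n_docs, n_docs_with_term):
    # term frequency weighted down by how common the term is across the corpus
    idf = np.log(n_docs / (1.0 + n_docs_with_term))
    return _tf(term_count, doc_length) * idf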
#========== reading data
with open(pathIn+'/build_test.json', 'r') as read_file:
df = | pd.read_json(read_file) | pandas.read_json |
import pandas as pd
import torch
from corai_error import Error_type_setter
from corai_estimator import Estimator
from corai_util.tools.src.decorator import decorator_delayed_keyboard_interrupt
from corai_util.tools.src.function_iterable import is_iterable
from corai_util.tools.src.function_json import is_jsonable
class Estim_history(Estimator):
CORE_COL = {'fold', 'epoch'}
def __init__(self, metric_names, validation, df=None, hyper_params={}.copy()):
        # we require the df to be either empty, or to have columns named after metric_names, in the format from
        # `_generate_all_column_names()`: lossName_training or lossName_validation.
        # It is possible to automatically create the right column names from a df with:
        # `Estim_history.deconstruct_column_names(df.columns)`
# metric names contain all ["L1","L4"...] but not the loss used for back prop.
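        # Illustrative example (assumed naming): with metric_names=["L1"] and
        # validation=True, _generate_all_column_names() would presumably yield
        # ['fold', 'epoch', 'loss_training', 'loss_validation',
        #  'L1_training', 'L1_validation'].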
self.metric_names = metric_names
self.validation = validation
self.list_best_epoch = [] # list each entry corresponds to a fold
self.list_train_times = [] # list of times it takes to train each fold
self.hyper_params = hyper_params # dict with serializable objects to be saved into a json.
self.best_fold = -1 # negative strictly number means no best_fold found yet. Will be set in
# train_kfold_a_fold_after_split
if df is not None:
super().__init__(df=df)
else:
df_column_names = self._generate_all_column_names()
super().__init__(df= | pd.DataFrame(columns=df_column_names) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import pandas as pd
import os
import random
import warnings
from keras.preprocessing.sequence import pad_sequences
import tensorflow as tf
import keras
from keras.models import Sequential
from keras.layers import Embedding, Dense, LSTM
from keras.layers import Bidirectional
import matplotlib.pyplot as plt
from keras.layers import Input, Dense, BatchNormalization, Dropout, GaussianNoise, GaussianDropout
from keras.models import Model
from keras.utils import np_utils
from keras.callbacks import CSVLogger, History
import keras.backend as backend
from tensorflow.python.keras.utils.vis_utils import plot_model
from datetime import datetime
from tensorflow.keras.callbacks import Callback, TensorBoard
from tensorflow.keras.metrics import Precision, Recall
from tensorflow.keras import layers
from keras_multi_head import MultiHead
import datetime
import pickle
# input file
c1_train = "{class I MHC train data set path}"
c1_val ="{class I MHC validation data set path}"
c2_train = "{class II MHC train data set path}"
c2_val = "{class II MHC validation data set path}"
# pkl file are available from https://github.com/rikenbit/MTL4MHC2/tree/main/dict
with open("{Path_to_pkl_file}/monovec.pkl","rb") as f:
monovec = pickle.load(f)
with open("{Path_to_pkl_file}/trigram_to_idx_MHC.pkl","rb") as f:
trigram_to_idx_MHC = pickle.load(f)
with open("{Path_to_pkl_file}/monogram_to_idx.pkl","rb") as f:
monogram_to_idx = pickle.load(f)
with open("{Path_to_pkl_file}/trivec1_MHC.pkl","rb") as f:
trivec1_MHC = pickle.load(f)
# function
# normalization
def replace(raw_seq_0):
B_aa = 'DN'
J_aa = 'IL'
Z_aa = 'EQ'
X_aa = 'ACDEFGHIKLMNPQRSTVWY'
seq = raw_seq_0.str.replace('B', random.choice(B_aa))
seq = seq.str.replace('J', random.choice(J_aa))
seq = seq.str.replace('Z', random.choice(Z_aa))
seq = seq.str.replace('X', random.choice(X_aa))
raw_seq_0 = seq
return raw_seq_0
# monogram
def monogram(raw_seq_0):
feature_0 = []
for i in range(0, len(raw_seq_0)):
strain_embedding = []
for j in range(0, len(raw_seq_0[i])):
monogram = raw_seq_0[i][j]
mono_embedding = monogram_to_idx["".join(monogram)]
strain_embedding.append(mono_embedding)
feature_0.append(strain_embedding)
return feature_0
# trigram
def trigram(raw_seq_0):
feature_0 = []
for i in range(0, len(raw_seq_0)):
strain_embedding = []
for j in range(0, len(raw_seq_0[i]) - 2):
trigram = raw_seq_0[i][j:j + 3]
tri_embedding = trigram_to_idx_MHC["".join(trigram)]
strain_embedding.append(tri_embedding)
feature_0.append(strain_embedding)
return feature_0
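# Example (illustrative): a peptide such as "ACDEF" yields the overlapping
# trigrams "ACD", "CDE", "DEF", each looked up in trigram_to_idx_MHC to build the
# integer sequence consumed by the embedding layer of the model below.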
# model
def multimodal_bilstm(out_dim, dropoutrate, out_dim2):
pep_input = Input(shape=(None,))
mhc_input = Input(shape=(None,))
pep_emb = Embedding(47, 100, weights=[monovec], trainable=False)(pep_input)
mhc_emb = Embedding(9419, 100, weights=[trivec1_MHC], trainable=False)(mhc_input)
# peptide
pep_output1 = Bidirectional(LSTM(out_dim,dropout=dropoutrate), merge_mode='concat')(pep_emb)
pep_output2 = Dense(64, activation='relu')(pep_output1)
# mhc
mhc_output1 = Bidirectional(LSTM(out_dim2,dropout=dropoutrate), merge_mode='concat')(mhc_emb)
mhc_output2 = Dense(64, activation='relu')(mhc_output1)
conc = layers.concatenate([pep_output2, mhc_output2], axis=-1)
out = Dense(2, activation='softmax')(conc)
model = Model([pep_input, mhc_input], out)
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=['accuracy'])
return model
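# Minimal training sketch (assumed hyper-parameters; the study's real training
# loop may differ): pad the monogram peptide and trigram MHC encodings to fixed
# lengths and feed both inputs to the multimodal model.
def _example_fit(feature_pep, feature_mhc, labels):
    x_pep = pad_sequences(feature_pep, padding='post')
    x_mhc = pad_sequences(feature_mhc, padding='post')
    model = multimodal_bilstm(out_dim=64, dropoutrate=0.4, out_dim2=64)
    return model.fit([x_pep, x_mhc], labels,
                     epochs=10, batch_size=128, validation_split=0.1)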
# pretreatment
d = [0]*100
p = np.array(['-'])
lp = np.array(d, dtype=float)
lps = np.append(p, lp)
lpsd = pd.DataFrame(lps).T
# class II
raw_seq_al_2 = pd.read_csv(c2_train)
raw_seq_al_2 = raw_seq_al_2.sample(frac=1).reset_index(drop=True)
raw_seq_2 = raw_seq_al_2["peptide"]
raw_seq_2MHC = raw_seq_al_2["mhc_amino_acid"]
raw_seq_al_2v = pd.read_csv(c2_val)
raw_seq_al_2v = raw_seq_al_2v.sample(frac=1).reset_index(drop=True)
raw_seq_2v = raw_seq_al_2v["peptide"]
raw_seq_2MHCv = raw_seq_al_2v["mhc_amino_acid"]
# Normalization
raw_seq_2 = replace(raw_seq_2)
raw_seq_2v = replace(raw_seq_2v)
raw_seq_2MHC = replace(raw_seq_2MHC)
raw_seq_2MHCv = replace(raw_seq_2MHCv)
feature_2 = monogram(raw_seq_2)
feature_2v = monogram(raw_seq_2v)
feature_2MHC = trigram(raw_seq_2MHC)
feature_2MHCv = trigram(raw_seq_2MHCv)
label_2 = raw_seq_al_2["bind"]
label_2v = raw_seq_al_2v["bind"]
label_2 = pd.get_dummies(label_2, sparse=True)
label_2v = | pd.get_dummies(label_2v, sparse=True) | pandas.get_dummies |
'''
MIT License
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
"""
The products that come out of the daily report are:
3
4
5
7
8
9
10
11
12
13
14
17
20
23
24
26
27
30
36
"""
import pandas as pd
from utils import *
from shutil import copyfile
from os import listdir
from os.path import isfile, join
from datetime import datetime
import numpy as np
def prod4(fte, producto):
print('Generando producto 4')
now = datetime.now()
today = now.strftime("%Y-%m-%d")
output = producto + today + '-CasosConfirmados-totalRegional.csv'
df = pd.read_csv(fte, quotechar='"', sep=',', thousands=r'.', decimal=",")
df.rename(columns={'Unnamed: 0': 'Region'}, inplace=True)
if 'Unnamed: 7' in df.columns:
df.drop(columns=['Unnamed: 7'], inplace=True)
df_obj = df.select_dtypes(['object'])
df[df_obj.columns] = df_obj.apply(lambda x: x.str.strip())
regionName(df)
df.at[16, 'Region'] = 'Total'
    # Textract recognizes 0 as the letter o
df.replace({'O': 0}, inplace=True)
numeric_columns = [x for x in df.columns if x != 'Region']
for i in numeric_columns:
df[i] = df[i].astype(str)
#df[i] = df[i].replace({r'\.': ''}, regex=True)
df[i] = df[i].replace({r'\,': '.'}, regex=True)
df.to_csv(output, index=False)
def prod5(fte, producto):
print('Generando producto 5')
    # we need the following national-level series by date:
# Casos nuevos con sintomas
# Casos totales
    # Casos recuperados  # no longer reported
# Fallecidos
# Casos activos
# Casos nuevos sin sintomas
now = datetime.now()
timestamp = now.strftime("%Y-%m-%d")
timestamp_dia_primero = now.strftime("%d-%m-%Y")
a = pd.read_csv(fte + 'CasosConfirmados.csv')
a['Fecha'] = timestamp
a = a[a['Region'] == 'Total']
print(a.to_string())
    # the columns are:
# Casos totales acumulados Casos nuevos totales Casos nuevos con sintomas Casos nuevos sin sintomas* Fallecidos totales % Total Fecha
a.rename(columns={'Casos totales acumulados': 'Casos totales',
'Casos nuevos totales': 'Casos nuevos totales',
'Casos nuevos con sintomas': 'Casos nuevos con sintomas',
'Casos nuevos sin sintomas*': 'Casos nuevos sin sintomas',
'Fallecidos totales': 'Fallecidos'}, inplace=True)
    # Casos activos is missing: product 5 now lives in the daily report, this should be migrated there
casos_confirmados_totales = pd.read_csv('../input/ReporteDiario/CasosConfirmadosTotales.csv')
today_row = (casos_confirmados_totales[casos_confirmados_totales['Fecha'] == timestamp_dia_primero])
a['Casos activos'] = today_row['Casos activos'].values
    ## this part is standard
totales = pd.read_csv(producto)
#print(totales.columns[1:])
# add Casos nuevos totales = Casos nuevos con sintomas + Casos nuevos sin sintomas
for eachColumn in totales.columns[1:]:
print('Checking if Casos nuevos totales is fine on ' + eachColumn)
#print(totales.index[totales['Fecha'] == 'Casos nuevos con sintomas'].values[0])
#print(totales.at[totales.index[totales['Fecha'] == 'Casos nuevos con sintomas'].values[0], eachColumn])
rowConSintomas = totales.index[totales['Fecha'] == 'Casos nuevos con sintomas'].values[0]
rowSinSintomas = totales.index[totales['Fecha'] == 'Casos nuevos sin sintomas'].values[0]
rowCasosNuevosTotales = totales.index[totales['Fecha'] == 'Casos nuevos totales'].values[0]
#print('row con ' + str(rowConSintomas))
#print('row sin ' + str(rowSinSintomas))
#print('expected is ' + str(totales.at[rowConSintomas, eachColumn]) + ' + ' + str(totales.at[rowSinSintomas, eachColumn]))
#check for NaN
if not np.isnan(totales.at[rowConSintomas, eachColumn]) and not np.isnan(totales.at[rowSinSintomas, eachColumn]):
expectedTotal = totales.at[rowConSintomas, eachColumn] + totales.at[rowSinSintomas, eachColumn]
elif not np.isnan(totales.at[rowConSintomas, eachColumn]) and np.isnan(totales.at[rowSinSintomas, eachColumn]):
expectedTotal = totales.at[rowConSintomas, eachColumn]
elif np.isnan(totales.at[rowConSintomas, eachColumn]) and not np.isnan(totales.at[rowSinSintomas, eachColumn]):
expectedTotal = totales.at[rowSinSintomas, eachColumn]
registeredTotal = totales.at[rowCasosNuevosTotales, eachColumn]
if registeredTotal != expectedTotal:
print('Casos nuevos totales debería ser ' + str(expectedTotal) + ' pero es ' + str(registeredTotal))
#print(totales.at[rowCasosNuevosTotales, eachColumn])
totales.at[rowCasosNuevosTotales, eachColumn] = expectedTotal
#print(totales.at[rowCasosNuevosTotales, eachColumn])
#print(totales.to_string())
    # normalize headers
#expectedHeaders=['Casos nuevos con sintomas', 'Casos totales', 'Casos recuperados', 'Fallecidos',
# 'Casos activos', 'Casos nuevos sin sintomas', 'Casos totales acumulados', 'Casos nuevos totales']
emptyrow = [] * len(totales.columns)
if 'Casos nuevos con sintomas' not in totales['Fecha'].values:
totales['Fecha'][0] = 'Casos nuevos con sintomas'
if 'Casos nuevos sin sintomas' not in totales['Fecha'].values:
ax = ['Casos nuevos sin sintomas']
bx = [''] * (len(totales.columns) - 1)
ax.extend(bx)
row = pd.DataFrame([ax], columns=totales.columns)
aux = pd.concat([totales, row], ignore_index=True)
totales = aux
#totales['Fecha'][len(totales['Fecha']) + 1] = 'Casos nuevos sin sintomas'
if 'Casos totales' not in totales['Fecha'].values:
print('Casos totales not found')
ax = ['Casos totales']
bx = [''] * (len(totales.columns) - 1)
ax.extend(bx)
row = pd.DataFrame([ax], columns=totales.columns)
aux = pd.concat([totales, row], ignore_index=True)
totales = aux
if 'Casos nuevos totales' not in totales['Fecha'].values:
ax = ['Casos nuevos totales']
bx = [''] * (len(totales.columns) - 1)
ax.extend(bx)
row = pd.DataFrame([ax], columns=totales.columns)
aux = pd.concat([totales, row], ignore_index=True)
totales = aux
#print(totales)
#print(totales['Fecha'])
#print(str(a['Fecha'].values[0]) + ' is in ' + str(totales.columns))
if (a['Fecha'].values[0]) in totales.columns:
print(a['Fecha'] + ' ya esta en el dataframe. No actualizamos')
return
else:
#print(totales.iloc[:, 0])
newColumn=[]
#Need to add new rows to totales:
for eachValue in totales.iloc[:, 0]:
print('each values is ' + eachValue)
if eachValue in a.columns:
print((a[eachValue].values))
newColumn.append(str(a[eachValue].values[0]))
else:
#print('appending ""')
newColumn.append('')
print(newColumn)
totales[timestamp] = newColumn
totales.to_csv(producto, index=False)
totales_t = totales.transpose()
totales_t.to_csv(producto.replace('.csv', '_T.csv'), header=False)
#print(totales.to_string())
totales.rename(columns={'Fecha': 'Dato'}, inplace=True)
identifiers = ['Dato']
variables = [x for x in totales.columns if x not in identifiers]
df_std = pd.melt(totales, id_vars=identifiers, value_vars=variables, var_name='Fecha',
value_name='Total')
df_std.to_csv(producto.replace('.csv', '_std.csv'), index=False)
def prod3_13_14_26_27(fte):
onlyfiles = [f for f in listdir(fte) if isfile(join(fte, f))]
cumulativoCasosNuevos = pd.DataFrame({'Region': [],
'Casos nuevos': []})
cumulativoCasosTotales = pd.DataFrame({'Region': [],
'Casos totales': []})
cumulativoFallecidos = pd.DataFrame({'Region': [],
'Fallecidos': []})
casosNuevosConSintomas = pd.DataFrame({'Region': [],
'Fecha': []})
casosNuevosSinSintomas = pd.DataFrame({'Region': [],
'Fecha': []})
onlyfiles.sort()
onlyfiles.remove('README.md')
for eachfile in onlyfiles:
print('processing ' + eachfile)
date = eachfile.replace("-CasosConfirmados-totalRegional", "").replace(".csv", "")
dataframe = pd.read_csv(fte + eachfile)
# sanitize headers
#print(eachfile)
dataframe.rename(columns={'Región': 'Region'}, inplace=True)
dataframe.rename(columns={'Casos nuevos': 'Casos nuevos'}, inplace=True)
dataframe.rename(columns={' Casos nuevos': 'Casos nuevos'}, inplace=True)
dataframe.rename(columns={'Casos nuevos totales': 'Casos nuevos'}, inplace=True)
dataframe.rename(columns={'Casos nuevos totales ': 'Casos nuevos'}, inplace=True)
dataframe.rename(columns={'Casos nuevos totales': 'Casos nuevos'}, inplace=True)
dataframe.rename(columns={'Casos totales': 'Casos totales'}, inplace=True)
dataframe.rename(columns={' Casos totales': 'Casos totales'}, inplace=True)
dataframe.rename(columns={'Casos totales acumulados': 'Casos totales'}, inplace=True)
dataframe.rename(columns={'Casos totales acumulados ': 'Casos totales'}, inplace=True)
dataframe.rename(columns={'Casos totales acumulados': 'Casos totales'}, inplace=True)
dataframe.rename(columns={' Casos fallecidos': 'Fallecidos'}, inplace=True)
dataframe.rename(columns={'Fallecidos totales ': 'Fallecidos'}, inplace=True)
dataframe.rename(columns={'Fallecidos totales': 'Fallecidos'}, inplace=True)
dataframe.rename(columns={'Casos nuevos con síntomas': 'Casos nuevos con sintomas'}, inplace=True)
dataframe.rename(columns={' Casos nuevos con síntomas': 'Casos nuevos con sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos con síntomas': 'Casos nuevos con sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos con sintomas': 'Casos nuevos con sintomas'}, inplace=True)
dataframe.rename(columns={' Casos nuevos con sintomas': 'Casos nuevos con sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos con sintomas': 'Casos nuevos con sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos con sintomas ': 'Casos nuevos con sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos sin síntomas': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={' Casos nuevos sin síntomas': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos sin síntomas': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos sin síntomas*': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={' Casos nuevos sin síntomas*': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos sin síntomas*': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos sin sintomas': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={' Casos nuevos sin sintomas': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos sin sintomas': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos sin sintomas*': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={' Casos nuevos sin sintomas*': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos sin sintomas*': 'Casos nuevos sin sintomas'}, inplace=True)
dataframe.rename(columns={'Casos nuevos sin sintomas* ': 'Casos nuevos sin sintomas'}, inplace=True)
if cumulativoCasosNuevos['Region'].empty:
cumulativoCasosNuevos[['Region', 'Casos nuevos']] = dataframe[['Region', 'Casos nuevos']]
cumulativoCasosNuevos.rename(columns={'Casos nuevos': date}, inplace=True)
cumulativoCasosTotales[['Region', 'Casos totales']] = dataframe[['Region', 'Casos totales']]
cumulativoCasosTotales.rename(columns={'Casos totales': date}, inplace=True)
else:
print(dataframe.columns)
cumulativoCasosNuevos[date] = dataframe['Casos nuevos']
cumulativoCasosTotales[date] = dataframe['Casos totales']
if 'Fallecidos' in dataframe.columns:
if cumulativoFallecidos['Region'].empty:
cumulativoFallecidos[['Region', 'Fallecidos']] = dataframe[['Region', 'Fallecidos']]
cumulativoFallecidos.rename(columns={'Fallecidos': date}, inplace=True)
else:
cumulativoFallecidos[date] = dataframe['Fallecidos']
if 'Casos nuevos con sintomas' in dataframe.columns:
if casosNuevosConSintomas['Region'].empty:
casosNuevosConSintomas[['Region', 'Fecha']] = dataframe[['Region', 'Casos nuevos con sintomas']]
casosNuevosConSintomas.rename(columns={'Fecha': date}, inplace=True)
else:
casosNuevosConSintomas[date] = dataframe['Casos nuevos con sintomas']
else:
date2 = (pd.to_datetime(date)).strftime('%Y-%m-%d')
if date2 < '2020-04-29':
if casosNuevosConSintomas['Region'].empty:
casosNuevosConSintomas[['Region', 'Fecha']] = dataframe[['Region','Casos nuevos']]
casosNuevosConSintomas.rename(columns={'Fecha': date}, inplace=True)
else:
casosNuevosConSintomas[date] = dataframe['Casos nuevos']
if 'Casos nuevos sin sintomas' in dataframe.columns:
if casosNuevosSinSintomas['Region'].empty:
casosNuevosSinSintomas[['Region', 'Fecha']] = dataframe[['Region', 'Casos nuevos sin sintomas']]
casosNuevosSinSintomas.rename(columns={'Fecha': date}, inplace=True)
else:
casosNuevosSinSintomas[date] = dataframe['Casos nuevos sin sintomas']
    # standardize region names
regionName(cumulativoCasosNuevos)
regionName(cumulativoCasosTotales)
regionName(cumulativoFallecidos)
regionName(casosNuevosConSintomas)
regionName(casosNuevosSinSintomas)
cumulativoCasosNuevos_T = cumulativoCasosNuevos.transpose()
cumulativoCasosTotales_T = cumulativoCasosTotales.transpose()
cumulativoFallecidos_T = cumulativoFallecidos.transpose()
casosNuevosConSintomas_T = casosNuevosConSintomas.transpose()
casosNuevosSinSintomas_T = casosNuevosSinSintomas.transpose()
#### PRODUCTO 3
cumulativoCasosTotales.to_csv('../output/producto3/CasosTotalesCumulativo.csv', index=False)
cumulativoCasosTotales_T.to_csv('../output/producto3/CasosTotalesCumulativo_T.csv', header=False)
identifiers = ['Region']
variables = [x for x in cumulativoCasosTotales.columns if x not in identifiers]
df_std = pd.melt(cumulativoCasosTotales, id_vars=identifiers, value_vars=variables, var_name='Fecha',
value_name='Total')
df_std.to_csv('../output/producto3/CasosTotalesCumulativo_std.csv', index=False)
#### PRODUCTO 13
cumulativoCasosNuevos.to_csv('../output/producto13/CasosNuevosCumulativo.csv', index=False)
cumulativoCasosNuevos_T.to_csv('../output/producto13/CasosNuevosCumulativo_T.csv', header=False)
identifiers = ['Region']
    variables = [x for x in cumulativoCasosNuevos.columns if x not in identifiers]
df_std = pd.melt(cumulativoCasosNuevos, id_vars=identifiers, value_vars=variables, var_name='Fecha',
value_name='Total')
df_std.to_csv('../output/producto13/CasosNuevosCumulativo_std.csv', index=False)
#### PRODUCTO 14
cumulativoFallecidos.to_csv('../output/producto14/FallecidosCumulativo.csv', index=False)
cumulativoFallecidos_T.to_csv('../output/producto14/FallecidosCumulativo_T.csv', header=False)
identifiers = ['Region']
variables = [x for x in cumulativoFallecidos.columns if x not in identifiers]
df_std = pd.melt(cumulativoFallecidos, id_vars=identifiers, value_vars=variables, var_name='Fecha',
value_name='Total')
df_std.to_csv('../output/producto14/FallecidosCumulativo_std.csv', index=False)
#### PRODUCTO 26
casosNuevosConSintomas.to_csv('../output/producto26/CasosNuevosConSintomas.csv', index=False)
casosNuevosConSintomas_T.to_csv('../output/producto26/CasosNuevosConSintomas_T.csv', header=False)
identifiers = ['Region']
variables = [x for x in casosNuevosConSintomas.columns if x not in identifiers]
df_std = pd.melt(casosNuevosConSintomas, id_vars=identifiers, value_vars=variables, var_name='Fecha',
value_name='Casos confirmados')
df_std.to_csv('../output/producto26/CasosNuevosConSintomas_std.csv', index=False)
#### PRODUCTO 27
casosNuevosSinSintomas.to_csv('../output/producto27/CasosNuevosSinSintomas.csv', index=False)
casosNuevosSinSintomas_T.to_csv('../output/producto27/CasosNuevosSinSintomas_T.csv', header=False)
identifiers = ['Region']
variables = [x for x in casosNuevosSinSintomas.columns if x not in identifiers]
df_std = pd.melt(casosNuevosSinSintomas, id_vars=identifiers, value_vars=variables, var_name='Fecha',
value_name='Casos confirmados')
df_std.to_csv('../output/producto27/CasosNuevosSinSintomas_std.csv', index=False)
def prod7_8(fte, producto):
df = pd.read_csv(fte, dtype={'Codigo region': object})
regionName(df)
df = df.replace('-', '', regex=True)
df_t = df.T
df.to_csv(producto + '.csv', index=False)
df_t.to_csv(producto + '_T.csv', header=False)
identifiers = ['Region', 'Codigo region', 'Poblacion']
variables = [x for x in df.columns if x not in identifiers]
df_std = pd.melt(df, id_vars=identifiers, value_vars=variables, var_name='fecha', value_name='numero')
df_std.to_csv(producto + '_std.csv', index=False)
def prod9_10(fte, producto):
copyfile(fte, producto + '.csv')
HospitalizadosUCIEtario_T = transpone_csv(producto + '.csv')
HospitalizadosUCIEtario_T.to_csv(producto + '_T.csv', header=False)
df = pd.read_csv(fte)
identifiers = ['Grupo de edad']
variables = [x for x in df.columns if x not in identifiers]
df_std = pd.melt(df, id_vars=identifiers, value_vars=variables, var_name='fecha', value_name='Casos confirmados')
df_std.to_csv(producto + '_std.csv', index=False)
def prod17(fte, producto):
copyfile(fte, producto + '.csv')
df = pd.read_csv(fte)
df_t = df.T
df_t.to_csv(producto + '_T.csv', header=False)
identifiers = ['Establecimiento', 'Examenes']
variables = [x for x in df.columns if x not in identifiers]
df_std = pd.melt(df, id_vars=identifiers, value_vars=variables, var_name='fecha', value_name='Numero de PCR')
df_std.to_csv(producto + '_std.csv', index=False)
def prod20(fte, producto):
copyfile(fte, producto + '.csv')
df = pd.read_csv(fte)
df_t = df.T
df_t.to_csv(producto + '_T.csv', header=False)
identifiers = ['Ventiladores']
variables = [x for x in df.columns if x not in identifiers]
df_std = pd.melt(df, id_vars=identifiers, value_vars=variables, var_name='fecha', value_name='numero')
df_std.to_csv(producto + '_std.csv', index=False)
def prod23(fte, producto):
copyfile(fte, producto + '.csv')
df = pd.read_csv(fte)
df_t = df.T
df_t.to_csv(producto + '_T.csv', header=False)
identifiers = ['Casos']
variables = [x for x in df.columns if x not in identifiers]
df_std = pd.melt(df, id_vars=identifiers, value_vars=variables, var_name='fecha', value_name='Casos confirmados')
df_std.to_csv(producto + '_std.csv', index=False)
def prod24(fte, producto):
copyfile(fte, producto + '.csv')
df = pd.read_csv(fte)
df_t = df.T
df_t.to_csv(producto + '_T.csv', header=False)
identifiers = ['Tipo de cama']
variables = [x for x in df.columns if x not in identifiers]
df_std = pd.melt(df, id_vars=identifiers, value_vars=variables, var_name='fecha', value_name='Casos confirmados')
df_std.to_csv(producto + '_std.csv', index=False)
def prod30(fte, producto):
copyfile(fte, producto + '.csv')
df = pd.read_csv(fte)
df_t = df.T
df_t.to_csv(producto + '_T.csv', header=False)
identifiers = ['Casos']
variables = [x for x in df.columns if x not in identifiers]
df_std = pd.melt(df, id_vars=identifiers, value_vars=variables, var_name='Fecha', value_name='Casos confirmados')
df_std.to_csv(producto + '_std.csv', index=False)
def prod36(fte, producto):
copyfile(fte, producto + '.csv')
df = | pd.read_csv(fte) | pandas.read_csv |
import json
import pandas as pd
from AnalysisModule.prepare.diagram import BuildingUnit
from AnalysisModule.routines.util import read_jsonfile
"""
4 ways to deal with strange bus
A - exclude bu, keep crystals
A' - merge bu, keep crystals
B - exclude crystals
C - keep all
note 2020/11/24:
- all A are modified to A'
- use bu_0 bu_1 bu_2 to represent bus
- if bu_x does not exist, the field is set to -1
buid: 9 len: 9 C
- H2PO2, reactants include hypophosphorous acid
buid: 18 len: 9 A'
- this is tricky: some of them come from PO4 across pbc, some of them are HPO3 without hydrogens
- HPO3: (BU0)
BEZVIO
BEZVOU
BEZVUA
BEZWAH
CASWIE
TEXSEV
- PO4: (BU1)
CUHCIR
POVMOC
QOBWEJ
buid: 19 len: 21 B
- some of them are MeOH e.g. coordinated to a metal, some of them are MeO with O acting as a bridge e.g. between metals
buid: 21 len: 5 B
- ethylene glycol
buid: 23 len: 8 B
- ethanol
buid: 24 len: 9 A (2020/11/24 -> 1 A', BU1)
- PFO3
- similar to BU25, HPF6 is used as input
buid: 25 len: 2 A (2020/11/24 -> 1 A', BU1)
- PF2O2
- HPF6 involved in both synthesis
buid: 28 len: 1 A (2020/11/24 -> 1 A', BU5)
- octahedral SiO6(2-)
buid: 29 len: 1 B
- O-C(O)-CH2-O
- synthesis uses glycolic acid
buid: 31 len: 7 B
- O2C-CH2-S-CH2-CO2
- uranyl thiodigycolate is used in synthesis
buid: 32 len: 1 A'
- KAPSUR, distorted NO3 (BU10)
buid: 34 len: 1 A'
- SiO3, just broken SiO4 (BU5) by pbc
buid: 36 len: 1 A'
- WAQVOZ, glitched CO3 (BU15)
"""
records = pd.read_csv("3_bulist.csv").to_dict("records")
curated_records = []
curated_bus = []
for ir in range(len(records)):
records[ir]["bus"] = json.loads(records[ir]["bus"])
identifier = records[ir]["identifier"]
# merge A'
if 18 in records[ir]["bus"]:
if identifier in ["BEZVIO", "BEZVOU", "BEZVUA", "BEZWAH", "CASWIE", "TEXSEV", ]:
records[ir]["bus"] = [0 if x == 18 else x for x in records[ir]["bus"]]
elif identifier in ["CUHCIR", "POVMOC", "QOBWEJ", ]:
records[ir]["bus"] = [1 if x == 18 else x for x in records[ir]["bus"]]
else:
raise NotImplementedError("BU 18 not merged due to unknown identifier: {}".format(identifier))
# aprime_dict = {32: 10, 34: 5, 36: 15}
aprime_dict = {32: 10, 34: 5, 36: 15, 24: 1, 25: 1, 28: 5} # 2020/11/24
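    # Keys are the "strange" BU ids discussed in the docstring above; values are
    # the canonical BU ids they are merged into under decision A'
    # (24 and 25 -> BU 1, 28 and 34 -> BU 5, 32 -> BU 10, 36 -> BU 15).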
for buid_aprime in aprime_dict.keys():
if buid_aprime in records[ir]["bus"]:
records[ir]["bus"] = [aprime_dict[buid_aprime] if x == buid_aprime else x for x in records[ir]["bus"]]
# exclude crystals with B
if set(records[ir]["bus"]).intersection({19, 21, 23, 29, 31}):
continue
# very few crystal has more than 2 bus
if len(records[ir]["bus"]) > 2:
print("bus len > 2: ", len(records[ir]["bus"]), records[ir]["identifier"])
curated_bus += records[ir]["bus"]
curated_records.append(records[ir])
df = | pd.DataFrame.from_records(curated_records) | pandas.DataFrame.from_records |
# coding: utf-8
# # Online Retail
#
# - http://archive.ics.uci.edu/ml/datasets/online+retail#
#
#
# ## Data Set Information:
#
# This is a transnational data set which contains all the transactions occurring between 01/12/2010 and 09/12/2011 for a UK-based and registered non-store online retail. The company mainly sells unique all-occasion gifts. Many customers of the company are wholesalers.
#
#
# ## Attribute Information:
#
# - InvoiceNo: Invoice number. Nominal, a 6-digit integral number uniquely assigned to each transaction. If this code starts with letter 'c', it indicates a cancellation.
# - StockCode: Product (item) code. Nominal, a 5-digit integral number uniquely assigned to each distinct product.
# - Description: Product (item) name. Nominal.
# - Quantity: The quantities of each product (item) per transaction. Numeric.
# - InvoiceDate: Invoice date and time. Numeric, the day and time when each transaction was generated.
# - UnitPrice: Unit price. Numeric, Product price per unit in sterling.
# - CustomerID: Customer number. Nominal, a 5-digit integral number uniquely assigned to each customer.
# - Country: Country name. Nominal, the name of the country where each customer resides.
#
#
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import warnings
import itertools
import operator
import statsmodels.api as sm
# In[2]:
online_retail = | pd.read_excel('data/Online Retail.xlsx') | pandas.read_excel |
# encoding: utf-8
import itertools
import random
from datetime import date
from typing import List, Tuple
import pandas as pd
class DataFrameMock:
@staticmethod
def df_generic(sample_size):
"""
        Create a generic DataFrame with ``sample_size`` samples and 5 columns.
        The returned DataFrame has two metadata columns (one numerical, one
        string) and three exam columns (two numerical, one string).
Parameters
----------
sample_size:
Number of samples in the returned DataFrame.
Returns
-------
pd.DataFrame
            Pandas DataFrame instance with ``sample_size`` samples and 5 columns:
            two metadata columns and three exam columns, mixing numerical and
            string values.
"""
return pd.DataFrame(
{
"metadata_num_col": list(range(sample_size)),
"metadata_str_col": [f"value_{i}" for i in range(sample_size)],
"exam_num_col_0": list(range(sample_size)),
"exam_num_col_1": list(range(sample_size)),
"exam_str_col_0": [f"value_{i}" for i in range(sample_size)],
}
)
@staticmethod
def df_many_nans(nan_ratio: float, n_columns: int) -> pd.DataFrame:
"""
Create pandas DataFrame with ``n_columns`` containing ``nan_ratio`` ratio of
NaNs.
DataFrame has 100 rows and ``n_columns``+5 columns. The additional 5 columns
contain less than ``nan_ratio`` ratio of NaNs.
Parameters
----------
nan_ratio : float
Ratio of NaNs that will be present in ``n_columns`` of the DataFrame.
n_columns : int
Number of columns that will contain ``nan_ratio`` ratio of NaNs.
Returns
-------
pd.DataFrame
Pandas DataFrame with ``n_columns`` containing ``nan_ratio`` ratio of NaNs
and 5 columns with a lower ratio of NaNs.
"""
many_nan_dict = {}
sample_count = 100
# Create n_columns columns with NaN
nan_sample_count = int(sample_count * nan_ratio)
for i in range(n_columns):
many_nan_dict[f"nan_{i}"] = [pd.NA] * nan_sample_count + [1] * (
sample_count - nan_sample_count
)
# Create not_nan_columns with less than nan_ratio ratio of NaNs
not_nan_columns = 5
for j in range(not_nan_columns):
nan_ratio_per_column = nan_ratio - 0.01 * (j + 1)
# If nan_ratio_per_column < 0, set 0 samples to NaN (to avoid negative
# sample counts)
if nan_ratio_per_column < 0:
nan_sample_count = 0
else:
nan_sample_count = int(sample_count * nan_ratio_per_column)
many_nan_dict[f"not_nan_{j}"] = [pd.NA] * nan_sample_count + [1] * (
sample_count - nan_sample_count
)
return pd.DataFrame(many_nan_dict)
@staticmethod
def df_nans_filled(columns: List[str]) -> pd.DataFrame:
"""Starting from the df returned by ``.df_many_nans``, set ``columns`` to 1s.
Parameters
----------
columns : List[str]
Name of the columns to set to 1s
Returns
-------
pd.DataFrame
DataFrame with the ``columns`` set to 1s
"""
df = DataFrameMock.df_many_nans(nan_ratio=0.5, n_columns=3)
for column in columns:
            df[column] = pd.Series([1] * 100)
return df
@staticmethod
def df_same_value(n_columns: int) -> pd.DataFrame:
"""
Create pandas DataFrame with ``n_columns`` containing the same repeated value.
DataFrame has 100 rows and ``n_columns``+5 columns. The additional 5 columns
contain different valid values (and a variable count of a repeated value).
Parameters
----------
n_columns : int
Number of columns that will contain the same repeated value.
Returns
-------
pd.DataFrame
Pandas DataFrame with ``n_columns`` containing the same repeated value
and 5 columns with some different values.
"""
random.seed(42)
constant_value_dict = {}
sample_count = 100
# Create n_columns columns with same repeated value
for i in range(n_columns):
constant_value_dict[f"same_{i}"] = [4] * sample_count
# Create not_constant_columns with repeated values and random values
not_constant_columns = 5
for j in range(not_constant_columns):
constant_value_sample_count = int(sample_count * (1 - 0.1 * (j + 1)))
constant_value_dict[f"not_same_{j}"] = [4] * constant_value_sample_count + [
random.random()
for _ in range(sample_count - constant_value_sample_count)
]
return | pd.DataFrame(constant_value_dict) | pandas.DataFrame |
# import spacy
from collections import defaultdict
# nlp = spacy.load('en_core_web_lg')
import pandas as pd
import seaborn as sns
import random
import pickle
import numpy as np
from xgboost import XGBClassifier
import matplotlib.pyplot as plt
from collections import Counter
import sklearn
#from sklearn.pipeline import Pipeline
from sklearn import linear_model
#from sklearn import svm
#from sklearn.ensemble import GradientBoostingClassifier, AdaBoostClassifier
from sklearn.model_selection import KFold #cross_validate, cross_val_score
from sklearn.metrics import classification_report, accuracy_score, precision_recall_fscore_support
from sklearn.metrics import precision_score, f1_score, recall_score
from sklearn import metrics
from sklearn.model_selection import StratifiedKFold
import warnings
warnings.filterwarnings("ignore", category=sklearn.exceptions.UndefinedMetricWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
all_sr = ['bpd', 'cfs','crohnsdisease', 'dementia', 'depression',\
'diabetes', 'dysautonomia', 'gastroparesis','hypothyroidism', 'ibs', \
'interstitialcystitis', 'kidneystones', 'menieres', 'multiplesclerosis',\
'parkinsons', 'psoriasis', 'rheumatoid', 'sleepapnea']
all_dis = {el:i for i, el in enumerate(all_sr)}
disease_values_dict = all_dis
# these will be used to take disease names for each prediction task
disease_names = list(disease_values_dict.keys())
disease_labels = list(disease_values_dict.values())
etype="DL"
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
plt.rcParams.update({'font.size': 16})
features_file = "data/features/{}_embdedded_features.pckl".format(etype)
results_file = "results/{}_multiclasscm.csv".format(etype)
word_emb_len = 300
def sample_all_diseases(df, n=1):
if etype == "DL":
smallest_disease=all_dis['parkinsons']
else:
smallest_disease=all_dis['gastroparesis']
def merge_rows(row):
if n == 1:
return row
res_row = np.zeros(len(row[0]))
for i in range(n):
res_row = res_row+row[i]
return res_row / n
df = df.sample(frac=1).reset_index(drop=True)
dis_size = len(df[df['disease']==smallest_disease])
sample_size = int(dis_size/n)*n
print(dis_size, sample_size)
df_sample= pd.DataFrame()
for disease in all_dis:
df_dis = df[df['disease'] == all_dis[disease]]
df_dis = df_dis.sample(n=sample_size, random_state=11).reset_index()
if n > 1:
df_dis = df_dis.groupby(df_dis.index // n).agg(lambda x: list(x))
df_dis['disease'] = all_dis[disease]
df_sample = pd.concat([df_dis, df_sample])
if n > 1:
df_sample['features'] = df_sample['features'].apply(lambda row: merge_rows(row))
df_sample = df_sample.drop(columns=['index'])
return df_sample
def prepare_training_data_for_multi_disease(features, n=1):
dis_sample = sample_all_diseases(features, n)
print("Subsampled all diseases for ", len(dis_sample), " posts")
training = dis_sample.copy()
training = training.reset_index(drop=True)
return training
def XGBoost_cross_validate():
features = pd.read_pickle(features_file)
features.rename(columns={'vec':'features'}, inplace=True)
features = features.drop(columns=['subreddit', 'entities'])
disease = features['disease']
print ("Post per subreddit ")
print (features.groupby('disease').size())
# print('Distribution before imbalancing: {}'.format(Counter(disease)))
training = prepare_training_data_for_multi_disease(features)
print(training.tail())
training_labels = training["disease"].astype(int)
training_labels.head()
training_features = pd.DataFrame(training["features"].tolist())
training_features.head()
# XGBoost
AUC_results = []
f1_results = []
results = []
cm_all = []
kf = StratifiedKFold(n_splits=10, random_state=11, shuffle=True)
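    # Stratified 10-fold CV keeps the per-disease class proportions (balanced by
    # the subsampling above) identical in every train/test split.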
for train_index, test_index in kf.split(training_features,training_labels):
X_train = training_features.loc[train_index]
y_train = training_labels.loc[train_index]
X_test = training_features.loc[test_index]
y_test = training_labels.loc[test_index]
model = XGBClassifier(n_estimators=100, n_jobs=11, max_depth=4) # 1000 200
model.fit(X_train, y_train.values.ravel())
predictions = model.predict(X_test)
results.append(precision_recall_fscore_support(y_test, predictions))
f1_results.append(f1_score(y_true=y_test, y_pred=predictions, average='weighted'))
cm_cv = sklearn.metrics.confusion_matrix(y_true=y_test, y_pred=predictions, labels=disease_labels)
cm_all.append(cm_cv)
print ("Accuracy : %.4g" % metrics.accuracy_score(y_test, predictions))
f1_results_avg = [ | pd.np.mean(f1_results) | pandas.np.mean |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sys
import librosa
def salvar_csv(x_mean,y_mean,z_mean,x_desv,y_desv,z_desv,nome,tam_frequencia):
dfdict = {}
dfdict["Mean x"] = x_mean
dfdict["Mean y"] = y_mean
dfdict["Mean z"] = z_mean
dfdict["Desv x"] = x_desv
dfdict["Desv y"] = y_desv
dfdict["Desv z"] = z_desv
# dfdict["MFC y"] = mfcc
# dfdict["Classificação 0 desalinhado 1 alinhado"] = classificacao
df = pd.DataFrame(dfdict)
nome = nome.split('.')
nome = nome[0]
print(nome)
df.to_csv((str(nome) + '_tratado_SUP'+str(tam_frequencia)+'.csv'),index = False, header = True)#, columns=["Media x","Media y","Media z","Desv x","Desv y","Desv z","MFC y", "Classificação 0 desalinhado 1 alinhado"])
##
if __name__ == '__main__':
frequencia_de_coleta = 125
velocidade_correia = 0.5 # m/s
    tamanho_correia = 1 # meters
    tamanho_coletas = 0.1 # meters
# tam_frequencia = int(frequencia_de_coleta*tamanho_correia/velocidade_correia)
tam_frequencia = 25
dataset_file = | pd.read_csv(sys.argv[1]) | pandas.read_csv |
# CPTAC Images Join
import pandas as pd
import numpy as np
imglist = pd.read_csv('../CPTAC-LUAD-HEslide-filename-mapping_Jan2019.csv', header=0)
samplelist = pd.read_csv('../CPTAC_LUAD.csv', header=0)
imglist = imglist[['Slide_ID', 'FileName']]
samplelist = samplelist.join(imglist.set_index('Slide_ID'), how='inner', on='Slide_ID')
samplelist = samplelist.dropna(subset=['FileName'])
samplelist = samplelist[['Case_ID', 'Slide_ID', 'FileName']]
Labelfile = | pd.read_csv('../luad-v2.0-sample-annotation.csv', header=0) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 1 18:25:21 2020
@author: vinnie
"""
import tweepy
from collections import defaultdict
import pandas as pd
import argparse
import os
from stats import tweet_analyzer
from wordanalysis import WordsAnalysis
from keys import (
api_key,
api_secret_key,
access_token,
access_token_secret
)
auth = tweepy.OAuthHandler(api_key, api_secret_key)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
class GetTweets:
def __init__(self, api, userid, include_rts = False):
self.userid = userid
self.include_rts = include_rts
print('Fetching tweets of: ', self.userid)
self.tweets = api.user_timeline(screen_name = self.userid, count = 200, include_rts = self.include_rts, tweet_mode = 'extended')
self.tweets_dict = defaultdict(list)
self.acc_info = self.tweets[0].user
def __check_if_retweet(self, obj):
if hasattr(obj, 'retweeted_status'):
return True
return False
def __get_hashtags(self, hashtags):
tags_list = []
for tags in hashtags:
tags_list.append(tags['text'])
return tags_list
def __get_account_info(self):
twt = self.acc_info
print(f'\n \nName:\t {twt.name}')
print(f'Description: {twt.description}' )
print(f'Followers: {twt.followers_count}\t Follwing: {twt.friends_count}' )
print(f'Account created on: {twt.created_at}\t Location: {twt.location}\n')
with open("data/info" + self.userid, "w") as text_file:
text_file.write(f'Name: {twt.name}\n Description: {twt.description}\n \
            Followers: {twt.followers_count}\t Following: {twt.friends_count}\n \
Account created on: {twt.created_at}\t Location: {twt.location}')
def __build_dictionary(self):
for status in self.tweets:
self.tweets_dict['id'].append(status.id_str)
self.tweets_dict['favourite_count'].append(status.favorite_count)
self.tweets_dict['created_at'].append(status.created_at)
self.tweets_dict['retweet_count'].append(status.retweet_count)
self.tweets_dict['tweet'].append(status.full_text)
self.tweets_dict['tags'].append(self.__get_hashtags(status.entities.get('hashtags')))
tweet_url = 'https://twitter.com/twitter/status/' + status.id_str
self.tweets_dict['tweet_url'].append(tweet_url)
if not self.include_rts:
self.tweets_dict['is_retweet'].append(self.__check_if_retweet(status))
def fetch_tweets(self):
oldest_id = self.tweets[-1].id
self.__build_dictionary()
n_tweets = len(self.tweets)
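        # Page backwards through the timeline: each request asks for tweets older
        # than the oldest one seen so far (max_id = oldest_id - 1) until the API
        # returns an empty batch.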
while True:
print('Tweets fetched till now {}'.format(n_tweets))
self.tweets = api.user_timeline(screen_name = self.userid,
count = 200, include_rts = False,
max_id = oldest_id - 1,
tweet_mode = 'extended')
n_tweets += len(self.tweets)
if len(self.tweets) == 0:
break
oldest_id = self.tweets[-1].id
self.__build_dictionary()
self.__get_account_info()
return | pd.DataFrame.from_dict(self.tweets_dict) | pandas.DataFrame.from_dict |
import pandas as pd
import csv
import json
import sys
import numpy
class MyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, numpy.integer):
return int(obj)
elif isinstance(obj, numpy.floating):
return float(obj)
elif isinstance(obj, numpy.ndarray):
return obj.tolist()
else:
return super(MyEncoder, self).default(obj)
df = pd.read_csv(sys.argv[1], encoding='UTF-8', sep=',', quotechar='"')
final_tp = []
def trend_detection(topicid):
selectid = df.loc[df['topicId'] == topicid]
topic = pd.DataFrame(selectid, columns=['created', 'topicId'])
topic.index = range(len(topic))
topic['created'] = | pd.to_datetime(topic['created']) | pandas.to_datetime |
# Utility to scrape NASDAQ IPO lists
import pandas as pd
import datetime
from datetime import timedelta
from collections import OrderedDict
def get_ipo_list(start_date, end_date=datetime.datetime.today().strftime('%Y-%m-%d')):
"""Scrape NASDAQ IPO lists, returns DataFrame
arguments:
start_date -- %Y-%m-%d
end_date -- %Y-%m-%d
"""
# make dates range
date_range = [start_date, end_date]
start, end = [datetime.datetime.strptime(_, "%Y-%m-%d") for _ in date_range]
date_dict = OrderedDict(((start + timedelta(_)).strftime(r"%Y-%m"), None) for _ in range((end - start).days)).keys()
print('date range:', date_dict)
# scrape
df_symbols = | pd.DataFrame() | pandas.DataFrame |
"""
@file process.py
@author <NAME>
Data pre-processing.
"""
from src import config
import numpy as np
import pandas as pd
from scipy import stats
from scipy.stats import skew
class Processor:
def __init__(self):
self.df = {}
self.lmbda = {'austria' : {}, 'belgium': {}, 'germany': {},
'italy' : {},
'netherlands': {}}
self.numerical_features = {'austria': [], 'belgium': [], 'germany': [],
'italy' : [], 'netherlands': []}
self.skewed_features = {'austria': [], 'belgium': [], 'germany': [],
'italy' : [], 'netherlands': []}
self.means = {'austria' : {}, 'belgium': {}, 'germany': {},
'italy' : {},
'netherlands': {}}
self.std_deviations = {'austria' : {}, 'belgium': {}, 'germany': {},
'italy' : {},
'netherlands': {}}
for country in config.COUNTRIES:
# read file
file_path = config.cleaned_data_path / (country + '.csv')
self.df[country] = pd.read_csv(file_path)
def process_vector(self, pageviews, country):
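        # Apply the per-country power transform (presumably Yeo-Johnson, given the
        # name `apply_leo_johnson`) to the skewed features, then z-score the
        # numerical features with the stored means and standard deviations.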
data = pd.DataFrame.from_dict(pageviews)
data, self.lmbda[country] = self.apply_leo_johnson(data,
self.lmbda[country],
self.skewed_features[
country])
self.apply_std_normal(data,
self.numerical_features[country],
self.means[country],
self.std_deviations[country])
return data
def process_data(self):
for country in config.COUNTRIES:
self.hot_encode_weeks(country)
labels = | pd.Series(self.df[country].incidence) | pandas.Series |
import collections
import logging
import os
import subprocess
import sys
from pathlib import Path
from subprocess import check_output, CalledProcessError
import numpy as np
import pandas as pd
from django.db.models import Q
from django.db import close_old_connections
from emgapi import models as emg_models
from emgapianns.management.lib import utils
from emgapianns.management.lib.utils import DownloadFileDatabaseHandler
class StudySummaryGenerator(object):
def __init__(self, accession, pipeline, rootpath, nfs_public_rootpath, database):
self.study_accession = accession
self.pipeline = pipeline
self.rootpath = rootpath
self.nfs_public_rootpath = nfs_public_rootpath
self.emg_db_name = database
self.study = emg_models.Study.objects.using(self.emg_db_name).get(secondary_accession=self.study_accession)
self.study_result_dir = os.path.join(self.rootpath, self.study.result_directory)
self.summary_dir = None
self.MAPSEQ_COLUMN_MAPPER = {'SSU': 'SILVA', 'LSU': 'SILVA', 'unite': 'UNITE', 'itsonedb': 'ITSone'}
def run(self):
if not os.path.exists(self.study_result_dir):
sys.exit(
"Study result directory for {} does not exist:\n{}".format(self.study_accession, self.study_result_dir))
jobs = emg_models.AnalysisJob.objects.using(self.emg_db_name)
jobs = jobs.filter(
Q(study__secondary_accession=self.study_accession) &
Q(analysis_status__analysis_status='completed') &
Q(pipeline__release_version=self.pipeline))
experiment_types = set()
analysis_jobs = {}
for job in jobs:
job_result_directory = os.path.join(self.rootpath, job.result_directory)
job_input_file_name = job.input_file_name
analysis_jobs[job_input_file_name] = job_result_directory
experiment_types.add(job.experiment_type.experiment_type)
self.summary_dir = os.path.join(self.study_result_dir, 'version_{}/project-summary'.format(self.pipeline))
self.create_summary_dir()
for rna_types in self.MAPSEQ_COLUMN_MAPPER.keys():
self.generate_taxonomy_phylum_summary(analysis_jobs, self.pipeline, '{}'.format(rna_types),
'phylum_taxonomy_abundances_{}_v{}.tsv'.format(rna_types,
self.pipeline))
self.generate_taxonomy_summary(analysis_jobs, '{}'.format(rna_types),
'taxonomy_abundances_{}_v{}.tsv'.format(rna_types, self.pipeline))
if len(experiment_types) == 1 and 'amplicon' in experiment_types:
logging.info("AMPLICON datasets only! Skipping the generation of the functional matrix files!")
else:
self.generate_ipr_summary(analysis_jobs, 'IPR_abundances_v{}.tsv'.format(self.pipeline), self.pipeline)
self.generate_go_summary(analysis_jobs, 'slim', self.pipeline)
self.generate_go_summary(analysis_jobs, 'full', self.pipeline)
self.sync_study_summary_files()
logging.info("Program finished successfully.")
def sync_study_summary_files(self):
logging.info("Syncing project summary files over to NFS public...")
_study_result_dir = self.study.result_directory
nfs_prod_dest = os.path.join(self.rootpath, _study_result_dir,
'version_{}/{}'.format(self.pipeline, 'project-summary'))
nfs_public_dest = os.path.join(self.nfs_public_rootpath, _study_result_dir, 'version_{}/'.format(self.pipeline))
logging.info("From: " + nfs_prod_dest)
logging.info("To: " + nfs_public_dest)
rsync_options = ['-rtDzv']
more_rsync_options = ['--no-owner', '--no-perms', '--prune-empty-dirs', '--exclude', '*.lsf',
'--delete-excluded', '--chmod=Do-w,Fu+x,Fg+x,Fo+r']
rsync_cmd = ["sudo", "-H", "-u", "emg_adm", "rsync"] + rsync_options + more_rsync_options + [nfs_prod_dest,
nfs_public_dest]
logging.info(rsync_cmd)
subprocess.check_call(rsync_cmd)
logging.info("Synchronisation is done.")
@staticmethod
def _get_group_type(rna_type):
group = None
if rna_type in ['SSU', 'LSU']:
group = 'Taxonomic analysis {} rRNA'.format(rna_type)
elif rna_type == 'unite':
group = 'Taxonomic analysis UNITE'
elif rna_type == 'itsonedb':
group = 'Taxonomic analysis ITSoneDB'
else:
logging.warning("RNA type {} not supported!".format(rna_type))
return group
@staticmethod
def _get_phylum_file_description(rna_type):
desc = None
if rna_type in ['SSU', 'LSU', 'unite']:
desc = 'Phylum level taxonomies {}'.format(rna_type.upper())
elif rna_type == 'itsonedb':
desc = 'Phylum level taxonomies ITSoneDB'
else:
logging.warning("RNA type {} not supported!".format(rna_type))
return desc
@staticmethod
def _get_abundance_file_description(rna_type):
desc = None
if rna_type in ['SSU', 'LSU', 'unite']:
desc = 'Taxonomic assignments {}'.format(rna_type.upper())
elif rna_type == 'itsonedb':
desc = 'Taxonomic assignments ITSoneDB'
else:
logging.warning("RNA type {} not supported!".format(rna_type))
return desc
def generate_taxonomy_phylum_summary(self, analysis_jobs, version, rna_type, filename):
study_df = None
if version == '4.1':
study_df = self.generate_taxonomy_phylum_summary_v4(analysis_jobs, rna_type)
elif version == '5.0':
study_df = self.generate_taxonomy_phylum_summary_v5(analysis_jobs, rna_type)
else:
logging.warning("Pipeline version {} not supported yet!".format(version))
pass
if not study_df.empty:
self.write_results_file(study_df, filename)
alias = '{}_phylum_taxonomy_abundances_{}_v{}.tsv'.format(self.study_accession, rna_type, self.pipeline)
description = self._get_phylum_file_description(rna_type)
group = self._get_group_type(rna_type)
self.upload_study_file(filename, alias, description, group)
def generate_taxonomy_phylum_summary_v4(self, analysis_result_dirs, su_type):
res_files = self.get_kingdom_counts_files(analysis_result_dirs, su_type)
study_df = self.merge_dfs(res_files,
delimiter='\t',
key=['kingdom', 'phylum'],
raw_cols=['kingdom', 'phylum', 'count', 'ignored'])
return study_df
def generate_taxonomy_phylum_summary_v5(self, analysis_jobs, rna_type):
job_data_frames = dict()
# Iterate over each run
for acc, result_directory in analysis_jobs.items():
# Define results files and for each result file perform necessary operations
if rna_type in ['unite', 'itsonedb']:
sequence_file = self.__get_rna_fasta_file(result_directory, 'ITS_masked.fasta.gz')
else: # for SILVA: LSU and SSU
sequence_file = self.__get_rna_fasta_file(result_directory, '{}.fasta.gz'.format(rna_type))
if not sequence_file:
continue
num_rna_seqs = self.__count_number_of_seqs(sequence_file)
#
mapseq_result_file = self.__get_mapseq_result_file(acc, result_directory, rna_type, '.fasta.mseq.gz')
if not mapseq_result_file:
continue
phylum_count_data = self.__parse_phylum_counts_v5(mapseq_result_file, num_rna_seqs, rna_type)
job_df = self.__build_dataframe(phylum_count_data)
job_data_frames[acc] = job_df
study_df = self.merge_dfs_v5(job_data_frames, key=['superkingdom', 'kingdom', 'phylum'])
return study_df
def generate_taxonomy_summary(self, analysis_result_dirs, rna_type, filename):
res_files = self.get_mapseq_result_files(analysis_result_dirs, rna_type, '.fasta.mseq.tsv')
raw_cols = ['OTU', 'count', 'lineage']
if self.pipeline in ['5.0']:
raw_cols = ['OTU', 'count', 'lineage', 'taxid']
study_df = self.merge_dfs(res_files,
key=['lineage'],
delimiter='\t',
raw_cols=raw_cols,
skip_rows=2)
study_df = study_df.rename(columns={'lineage': '#SampleID'})
if len(study_df.index) > 0:
self.write_results_file(study_df, filename)
alias = '{}_taxonomy_abundances_{}_v{}.tsv'.format(self.study_accession, rna_type, self.pipeline)
description = self._get_abundance_file_description(rna_type)
group = self._get_group_type(rna_type)
self.upload_study_file(filename, alias, description, group)
def get_raw_result_files(self, res_file_re):
paths = list(Path(self.study_result_dir).glob(res_file_re))
return [str(p.resolve()) for p in paths]
def merge_dfs_v5(self, dataframes, key):
study_df = | pd.DataFrame(columns=key) | pandas.DataFrame |
"""
This module contains the class definitions for ``BaseChainLadder``.-
"""
import functools
import pandas as pd
import numpy as np
class BaseChainLadder:
"""
From the Casualty Actuarial Society's "Estimating Unpaid Claims Using
Basic Techniques" Version 3 (Friedland, Jacqueline - 2010), the
development method ('Chain Ladder') consists of seven basic steps:
1. Compile claims data in a development triangle
2. Calculate age-to-age factors
3. Calculate averages of the age-to-age factors
4. Select claim development factors
5. Select tail factor
6. Calculate cumulative claims
7. Project ultimate claims
"""
def __init__(self, cumtri):
"""
Generate point estimates for outstanding claim liabilities at
ultimate for each origin year and in aggregate. The
``BaseChainLadder`` class exposes no functionality to estimate
variability around the point estimates at ultimate.
Parameters
----------
cumtri: triangle._CumTriangle
A cumulative.CumTriangle instance.
"""
self.tri = cumtri
def __call__(self, sel="all-weighted", tail=1.0):
"""
Compile a summary of ultimate and reserve estimates resulting from
the application of the development technique over a triangle instance.
Generated DataFrame is comprised of origin year, maturity of origin
year, loss amount at latest evaluation, cumulative loss development
factors, projected ultimates and the reserve estimate, by origin
year and in aggregate.
Parameters
----------
sel: str
The ldf average to select from ``triangle._CumTriangle.a2a_avgs``.
Defaults to "all-weighted".
tail: float
Tail factor. Defaults to 1.0.
Returns
-------
trikit.chainladder.BaseChainLadderResult
"""
ldfs_ = self._ldfs(sel=sel, tail=tail)
cldfs_ = self._cldfs(ldfs=ldfs_)
ultimates_ = self._ultimates(cldfs=cldfs_)
reserves_ = self._reserves(ultimates=ultimates_)
maturity_ = self.tri.maturity.astype(np.str)
latest_ = self.tri.latest_by_origin
trisqrd_ = self._trisqrd(ldfs=ldfs_)
# Compile chain ladder point estimate summary.
dfmatur_ = maturity_.to_frame().reset_index(drop=False).rename({"index":"origin"}, axis=1)
dfcldfs_ = cldfs_.to_frame().reset_index(drop=False).rename({"index":"maturity"}, axis=1)
dfcldfs_["maturity"] = dfcldfs_["maturity"].astype(np.str)
dfsumm = dfmatur_.merge(dfcldfs_, on=["maturity"], how="left").set_index("origin")
dfsumm.index.name = None
dflatest_ = latest_.to_frame().rename({"latest_by_origin":"latest"}, axis=1)
dfultimates_, dfreserves_ = ultimates_.to_frame(), reserves_.to_frame()
dfsumm = functools.reduce(
lambda df1, df2: df1.join(df2),
(dflatest_, dfultimates_, dfreserves_), dfsumm
)
dfsumm.loc["total"] = dfsumm.sum()
dfsumm.loc["total", "maturity"] = ""
dfsumm.loc["total", "cldf"] = np.NaN
dfsumm = dfsumm.reset_index().rename({"index":"origin"}, axis=1)
kwds = {"sel":sel, "tail":tail}
# Initialize and return _ChainLadderResult instance.
clresult_ = BaseChainLadderResult(
summary=dfsumm, tri=self.tri, ldfs=ldfs_, cldfs=cldfs_,
latest=latest_, maturity=maturity_, ultimates=ultimates_,
reserves=reserves_, trisqrd=trisqrd_, **kwds)
return(clresult_)
def _ldfs(self, sel="all-weighted", tail=1.0):
"""
Lookup loss development factors corresponding to ``sel``.
Parameters
----------
sel: str
The ldf average to select from ``triangle._CumTriangle.a2a_avgs``.
Defaults to "all-weighted".
tail: float
Tail factor. Defaults to 1.0.
Returns
-------
pd.Series
"""
try:
ldfs_ = self.tri.a2a_avgs.loc[sel]
tindx_ = ldfs_.index.max() + 1
ldfs_ = ldfs_.append(pd.Series(data=[tail], index=[tindx_]))
except KeyError:
print("Invalid age-to-age selection: `{}`".format(sel))
ldfs_ = pd.Series(data=ldfs_, index=ldfs_.index, dtype=np.float_, name="ldf")
return(ldfs_.sort_index())
def _cldfs(self, ldfs):
"""
Calculate cumulative loss development factors by successive
multiplication beginning with the tail factor and the oldest
age-to-age factor. The cumulative claim development factor projects
the total growth over the remaining valuations. Cumulative claim
development factors are also known as "Age-to-Ultimate Factors"
or "Claim Development Factors to Ultimate".
Parameters
----------
ldfs: pd.Series
Selected ldfs, typically the output of calling ``self._ldfs``.
Returns
-------
pd.Series
"""
cldfs_indx = ldfs.index.values
cldfs_ = np.cumprod(ldfs.values[::-1])[::-1]
cldfs_ = pd.Series(data=cldfs_, index=ldfs.index.values, name="cldf")
return(cldfs_.astype(np.float_).sort_index())
def _ultimates(self, cldfs):
"""
Ultimate claims are equal to the product of the latest valuation of
losses (the amount along latest diagonal of any ``_CumTriangle``
instance) and the appropriate cldf/age-to-ultimate factor. We
determine the appropriate age-to-ultimate factor based on the age
of each origin year relative to the evaluation date.
Parameters
----------
cldfs: pd.Series
Cumulative loss development factors, conventionally obtained
via BaseChainLadder's ``_cldfs`` method.
Returns
-------
pd.Series
"""
ultimates_ = pd.Series(
data=self.tri.latest_by_origin.values * cldfs.values[::-1],
index=self.tri.index, name="ultimate"
)
return(ultimates_.astype(np.float_).sort_index())
def _reserves(self, ultimates):
"""
Return IBNR/reserve estimates by origin and in aggregate. Represents
the difference between ultimate projections for each origin period
and the latest cumulative value.
Since outstanding claim liabilities can be referred to differently
based on the type of losses represented in the triangle ("ibnr" if
reported/incurred, "unpaid" if paid losses), we use the general term
"reserve" to represent the difference between ultimate projections
and latest cumulative value by origin and in total.
Parameters
----------
ultimates: pd.Series
Estimated ultimate losses, conventionally obtained from
BaseChainLadder's ``_ultimates`` method.
Returns
-------
pd.Series
"""
reserves_ = pd.Series(
data=ultimates - self.tri.latest_by_origin,
index=self.tri.index, name='reserve')
return(reserves_.astype(np.float_).sort_index())
def _trisqrd(self, ldfs):
"""
Project claims growth for each future development period. Returns a
DataFrame of loss projections for each subsequent development period
for each accident year. Populates the triangle's lower-right or
southeast portion (i.e., the result of "squaring the triangle").
Returns
-------
pd.DataFrame
"""
trisqrd_ = self.tri.copy(deep=True)
rposf = self.tri.index.size
clvi = self.tri.clvi["row_offset"]
for i in enumerate(trisqrd_.columns[1:], start=1):
ii , devp = i[0], i[1]
ildf, rposi = ldfs.values[ii - 1], clvi[devp] + 1
trisqrd_.iloc[rposi:rposf, ii] = \
trisqrd_.iloc[rposi:rposf, ii - 1] * ildf
# Multiply right-most column by tail factor.
max_devp = trisqrd_.columns[-1]
trisqrd_["ultimate"] = trisqrd_.loc[:,max_devp].values * ldfs.values[-1]
return(trisqrd_.astype(np.float_).sort_index())
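# Added illustration (made-up factors, not part of the original module): the
# age-to-ultimate step in ``_cldfs`` is simply a reverse cumulative product of
# the selected age-to-age factors, e.g. with ldfs [1.8, 1.3, 1.1] and a 1.05 tail:
#
#   ldfs = np.array([1.8, 1.3, 1.1, 1.05])
#   cldfs = np.cumprod(ldfs[::-1])[::-1]   # -> [2.7027, 1.5015, 1.155, 1.05]
#
# Each origin's latest diagonal value times its cldf gives the ultimate, and
# ultimate minus latest gives the reserve, as in ``_ultimates``/``_reserves``.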
class BaseChainLadderResult:
"""
Summary class consisting of output resulting from invocation of
``BaseChainLadder``'s ``__call__`` method.
"""
def __init__(self, summary, tri, ldfs, cldfs, latest, maturity,
ultimates, reserves, trisqrd, **kwargs):
"""
Container object for ``BaseChainLadder`` output.
Parameters
----------
summary: pd.DataFrame
Chain Ladder summary compilation.
tri: trikit.triangle._CumTriangle
A cumulative triangle instance.
ldfs: pd.Series
Loss development factors.
cldfs: pd.Series
Cumulative loss development factors.
latest: pd.Series
Latest loss amounts by origin.
maturity: pd.Series
            Represents the maturity of each origin relative to development
period.
ultimates: pd.Series
Represents Chain Ladder ultimate projections.
reserves: pd.Series
Represents the projected reserve amount. For each origin period,
this equates to the ultimate loss projection minus the latest
loss amount for the origin period (reserve = ultimate - latest).
kwargs: dict
Additional keyword arguments passed into ``BaseChainLadder``'s
            ``__call__`` method.
"""
self.ultimates = ultimates
self.reserves = reserves
self.summary = summary
self.trisqrd = trisqrd
self.cldfs = cldfs
self.ldfs = ldfs
self.tri = tri
if kwargs is not None:
for key_ in kwargs:
setattr(self, key_, kwargs[key_])
self._summspecs = {
"ultimate":"{:.0f}".format, "reserve":"{:.0f}".format,
"latest":"{:.0f}".format, "cldf":"{:.5f}".format,
}
def _data_transform(self):
"""
        Transform dataset for use in FacetGrid plot by origin exhibiting chain
ladder ultimate & reserve estimates.
Returns
-------
pd.DataFrame
"""
df0 = self.trisqrd.reset_index(drop=False).rename({"index":"origin" }, axis=1)
df0 = pd.melt(df0, id_vars=["origin"], var_name="dev", value_name="value")
df0 = df0[~np.isnan(df0["value"])].reset_index(drop=True)
df1 = self.tri.triind.reset_index(drop=False).rename({"index":"origin"}, axis=1)
df1 = | pd.melt(df1, id_vars=["origin"], var_name="dev", value_name="value") | pandas.melt |
import pandas as pd
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from statsmodels.formula.api import ols
import matplotlib.pyplot as plt
import numpy as np
import pickle
from collections import defaultdict
from opencage.geocoder import OpenCageGeocode
key = 'dd95342554c14f01a470950c1ae84c92'
geocoder = OpenCageGeocode(key)
from math import radians, cos, sin, asin, sqrt
AVG_EARTH_RADIUS = 6371 # in km
c = 299792458  # speed of light, in m/s
def haversine(point1, point2, miles=False):
""" Calculate the great-circle distance between two points on the Earth surface.
:input: two 2-tuples, containing the latitude and longitude of each point
in decimal degrees.
Example: haversine((45.7597, 4.8422), (48.8567, 2.3508))
    :output: Returns the distance between the two points.
The default unit is kilometers. Miles can be returned
if the ``miles`` parameter is set to True.
"""
# unpack latitude/longitude
lat1, lng1 = point1
lat2, lng2 = point2
# convert all latitudes/longitudes from decimal degrees to radians
lat1, lng1, lat2, lng2 = map(radians, (lat1, lng1, lat2, lng2))
# calculate haversine
lat = lat2 - lat1
lng = lng2 - lng1
d = sin(lat * 0.5) ** 2 + cos(lat1) * cos(lat2) * sin(lng * 0.5) ** 2
h = 2 * AVG_EARTH_RADIUS * asin(sqrt(d))
if miles:
return h * 0.621371 # in miles
else:
return h # in kilometers
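# Added note: the docstring example is roughly Lyon -> Paris;
# haversine((45.7597, 4.8422), (48.8567, 2.3508)) evaluates to about 392 km
# (roughly 244 miles with miles=True).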
### This function does some data cleaning / no need to go through it in detail!
def symmetrize(data):
mat = data.values
newmat = np.ndarray
indexes = data.index
columns = data.columns
X, Y = mat.shape
symDict = {}
for key1 in columns:
symDict[key1] = {}
for key2 in columns:
symDict[key1][key2] = np.nan
for i in range(X):
for j in range(Y):
if np.isnan(mat[i, j]):
if not np.isnan(symDict[columns[j]][indexes[i]]):
symDict[indexes[i]][columns[j]] = symDict[columns[j]][indexes[i]]
else:
if np.isnan(symDict[columns[j]][indexes[i]]):
symDict[indexes[i]][columns[j]] = mat[i, j]
symDict[columns[j]][indexes[i]] = mat[i, j]
else:
symDict[indexes[i]][columns[j]] = min(mat[i, j], symDict[columns[j]][indexes[i]])
symDict[columns[j]][indexes[i]] = symDict[indexes[i]][columns[j]]
symData = pd.DataFrame(symDict)
return symData
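# Added illustration (hypothetical data, not from the original script): symmetrize()
# keeps the smaller of the two directional measurements for every city pair, e.g.
#
#   rtt = pd.DataFrame({'A': {'A': 0.0, 'B': 12.0}, 'B': {'A': 10.0, 'B': 0.0}})
#   symmetrize(rtt)   # both off-diagonal entries become 10.0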
#### loading all the observed cities
#
# df = pd.read_csv('/Users/geode/Work/Research/Geometry-Internet/Cloud/google_cloud/gcp_min_rtt_20201202.csv',index_col = 0)
# print(df)
# print(df.index)
# df = df[df.columns[:-1]]
# # print(df[df.columns[:-1]])
# elem = {}
# for i in df.index:
# val = {}
# for s in range(0,len(df.index)):
# print(s,[df.columns[2*s:2*s+2]])
# t = df[df.columns[2*s:2*s+2]][df.index==i].values[0]
# print(t)
# val[df.index[s]] = t[0]
# elem[i] = val
# df_lat = pd.DataFrame(elem)
# print(df_lat)
# print(df_lat.index)
# dico_cloud = dict(zip(df_lat.index,[0]*len(df_lat.index)))
# # dico_cloud = {'Helsinki': '172.16.31.10', 'Frankfurt': '192.168.127.12','Kane': '172.16.31.10','London': '192.168.127.12','Los Angeles': '172.16.31.10',
# # 'Montreal':'192.168.3.11','Mumbai': '192.168.127.12', 'Amsterdam': '172.16.31.10','Ashburn': '192.168.3.11','Sao Paulo': '172.16.58.3',
# # 'Sydney': '172.16.17.32','Tokyo': '172.16.31.10'}
# dico_cloud = {'Belgium': '172.16.17.32','Hong Kong': '192.168.3.11','London': '192.168.127.12','Ashburn': '192.168.3.11','Oregon': '172.16.31.10',
# 'Osaka': '192.168.127.12','Sao Paulo': '172.16.58.3','Singapore': '172.16.17.32','Charleston': '192.168.3.11', 'Sydney': '172.16.17.32','Taiwan': '172.16.58.3',
# 'Zurich': '192.168.3.11'}
# dico_cloud_spe = {}
# for s in dico_cloud.keys():
# dico_cloud_spe['Google_'+s] = s
#
df = pd.read_csv('/Users/Geode/Downloads/azure_minRTT_21-25Dec (1).csv',index_col=0)
print(df[df.columns[:-1]])
elem = {}
for i in df.index:
val = {}
for s in range(0,len(df.index)):
print(s,[df.columns[2*s:2*s+2]])
t = df[df.columns[2*s:2*s+2]][df.index==i].values[0]
print(t)
val[df.index[s]] = min(t)
elem[i] = val
df_lat = | pd.DataFrame(elem) | pandas.DataFrame |
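# Possible next step (an assumption, not in the original script): minimum RTTs are
# commonly converted to an upper bound on geographic distance by assuming signals
# travel at about two thirds of c in fibre; for RTT values in milliseconds:
#
#   max_km = df_lat * 1e-3 / 2 * (c * 2 / 3) / 1000
#
# which can then be compared against haversine() distances between the two sites.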
# encoding: utf-8
"""
Classes defined in the py_expression_eval module are used to parse string expressions
and do corresponding calculations. They are used in DataView. Since expression parsing
is error-prone, we do not recommend directly modifying this module.
"""
# Author: <NAME>. http://axiacore.com
#
# Based on js-expression-eval, by <NAME> (<EMAIL>, http://silentmatt.com/)
# https://github.com/silentmatt/js-expression-eval
#
# Ported to Python and modified by <NAME> (<EMAIL>, http://vero4ka.info/)
#
# You are free to use and modify this code in anyway you find useful. Please leave this comment in the code
# to acknowledge its original source. If you feel like it, I enjoy hearing about projects that use my code,
# but don't feel like you have to let me know or ask permission.
# modified by symbol from quantOS.org
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import pandas as pd
from jaqs.data.align import align
import jaqs.util.numeric as numeric
from jaqs.util import rank_with_mask
TNUMBER = 0
TOP1 = 1
TOP2 = 2
TVAR = 3
TFUNCALL = 4
'''
single quarter / TTM + year on year / quarter on quarter
'''
def cum_to_single_quarter(df, report_date):
df = df.copy()
is_nan = df.isnull()
df = df.fillna(method='ffill').fillna(0.0)
year = report_date // 10000
def cum_to_single_within_year(df_):
first_row = df_.iloc[0, :].copy()
df_ = df_.diff(1, axis=0)
df_.iloc[0, :] = first_row
return df_
single_quarter = df.groupby(by=year).apply(cum_to_single_within_year)
single_quarter[is_nan] = np.nan
return single_quarter
def calc_ttm(df):
return df.rolling(window=4, axis=0).sum()
def calc_year_on_year_return(df):
return df.pct_change(4, axis=0)
def calc_quarter_on_quarter_return(df):
return df.pct_change(1, axis=0)
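# Added illustration (made-up numbers, not part of the original module): the helpers
# above convert cumulative year-to-date figures into single quarters, then into
# trailing-twelve-month sums.
#
#   report_date = np.array([20160331, 20160630, 20160930, 20161231])
#   cum = pd.DataFrame({'sym': [10.0, 25.0, 45.0, 70.0]}, index=report_date)
#   single = cum_to_single_quarter(cum, report_date)   # values 10, 15, 20, 25
#   ttm = calc_ttm(single)                             # rolling 4-quarter sum: NaN, NaN, NaN, 70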
class Expression(object):
def __init__(self, tokens, ops1, ops2, functions):
self.tokens = tokens
self.ops1 = ops1
self.ops2 = ops2
self.functions = functions
self.ann_dts = None
self.trade_dts = None
self.index_member = None
def simplify(self, values):
values = values or {}
nstack = []
newexpression = []
L = len(self.tokens)
for i in range(0, L):
item = self.tokens[i]
type_ = item.type_
if type_ == TNUMBER:
nstack.append(item)
elif type_ == TVAR and item.index_ in values:
item = Token(TNUMBER, 0, 0, values[item.index_])
nstack.append(item)
elif type_ == TOP2 and len(nstack) > 1:
n2 = nstack.pop()
n1 = nstack.pop()
f = self.ops2[item.index_]
item = Token(TNUMBER, 0, 0, f(n1.number_, n2.number_))
nstack.append(item)
elif type_ == TOP1 and nstack:
n1 = nstack.pop()
f = self.ops1[item.index_]
item = Token(TNUMBER, 0, 0, f(n1.number_))
nstack.append(item)
else:
while len(nstack) > 0:
newexpression.append(nstack.pop(0))
newexpression.append(item)
while nstack:
            newexpression.append(nstack.pop(0))
return Expression(newexpression, self.ops1, self.ops2, self.functions)
def substitute(self, variable, expr):
if not isinstance(expr, Expression):
pass # expr = Parser().parse(str(expr))
newexpression = []
L = len(self.tokens)
for i in range(0, L):
item = self.tokens[i]
type_ = item.type_
if type_ == TVAR and item.index_ == variable:
for j in range(0, len(expr.tokens)):
expritem = expr.tokens[j]
replitem = Token(
expritem.type_,
expritem.index_,
expritem.prio_,
expritem.number_,
)
newexpression.append(replitem)
else:
newexpression.append(item)
ret = Expression(newexpression, self.ops1, self.ops2, self.functions)
return ret
def evaluate(self, values, ann_dts=None, trade_dts=None):
self.ann_dts = ann_dts
self.trade_dts = trade_dts
values = values or {}
nstack = []
L = len(self.tokens)
for i in range(0, L):
item = self.tokens[i]
type_ = item.type_
if type_ == TNUMBER:
nstack.append(item.number_)
elif type_ == TOP2:
n2 = nstack.pop()
n1 = nstack.pop()
f = self.ops2[item.index_]
nstack.append(f(n1, n2))
elif type_ == TVAR:
if item.index_ in values:
nstack.append(values[item.index_])
elif item.index_ in self.functions:
nstack.append(self.functions[item.index_])
else:
raise Exception('undefined variable: ' + item.index_)
elif type_ == TOP1:
n1 = nstack.pop()
f = self.ops1[item.index_]
nstack.append(f(n1))
elif type_ == TFUNCALL:
n1 = nstack.pop()
f = nstack.pop()
if f.apply and f.call:
if type(n1) is list:
nstack.append(f.apply(None, n1))
else:
nstack.append(f.call(None, n1))
else:
raise Exception(f + ' is not a function')
else:
raise Exception('invalid Expression')
if len(nstack) > 1:
raise Exception('invalid Expression (parity)')
return nstack[0]
def toString(self, toJS=False):
nstack = []
L = len(self.tokens)
for i in range(0, L):
item = self.tokens[i]
type_ = item.type_
if type_ == TNUMBER:
nstack.append(item.number_)
elif type_ == TOP2:
n2 = nstack.pop()
n1 = nstack.pop()
f = item.index_
if toJS and f == '^':
nstack.append('math.pow(' + n1 + ',' + n2 + ')')
else:
nstack.append('(' + n1 + f + n2 + ')')
elif type_ == TVAR:
nstack.append(item.index_)
elif type_ == TOP1:
n1 = nstack.pop()
f = item.index_
if f == '-':
nstack.append('({0}{1})'.format(f, n1))
else:
nstack.append('{0}({1})'.format(f, n1))
elif type_ == TFUNCALL:
n1 = nstack.pop()
f = nstack.pop()
nstack.append(f + '(' + n1 + ')')
else:
raise Exception('invalid Expression')
if len(nstack) > 1:
raise Exception('invalid Expression (parity)')
return nstack[0]
def variables(self):
vars = []
for i in range(0, len(self.tokens)):
item = self.tokens[i]
if item.type_ == TVAR and \
not item.index_ in vars and \
True : #item.index_ not in self.functions:
vars.append(item.index_)
return vars
class Token(object):
def __init__(self, type_, index_, prio_, number_):
self.type_ = type_
self.index_ = index_ or 0
self.prio_ = prio_ or 0
self.number_ = number_ if number_ != None else 0
def to_str(self):
if self.type_ == TNUMBER:
return self.number_
if self.type_ == TOP1 or self.type_ == TOP2 or self.type_ == TVAR:
return self.index_
elif self.type_ == TFUNCALL:
return 'CALL'
else:
return 'Invalid Token'
class Parser(object):
def __init__(self):
self.success = False
self.errormsg = ''
self.expression = ''
self.pos = 0
self.tokens = None
self.tokennumber = 0
self.tokenprio = 0
self.tokenindex = 0
self.tmpprio = 0
self.PRIMARY = 1
self.OPERATOR = 2
self.FUNCTION = 4
self.LPAREN = 8
self.RPAREN = 16
self.COMMA = 32
self.SIGN = 64
self.CALL = 128
self.NULLARY_CALL = 256
# do not need parenthesis
self.ops1 = {
'Sin': np.sin,
'Cos': np.cos,
'Tan': np.tan,
# 'asin': np.asin,
# 'acos': np.acos,
# 'atan': np.atan,
# 'Mean': np.mean,
'Sqrt': np.sqrt,
'Log': np.log,
'Abs': np.abs,
'Ceil': np.ceil,
'Floor': np.floor,
'Round': np.round,
'-': self.neg,
'!': self.logicalNot,
'Sign': np.sign,
# 'Rank': self.rank,
'exp': np.exp
}
self.ops2 = {
'+': self.add,
'-': self.sub,
'*': self.mul,
'/': self.div,
'%': self.mod,
'^': np.power,
',': self.append,
# '||': self.concat,
"==": self.equal,
"!=": self.notEqual,
">": self.greaterThan,
"<": self.lessThan,
">=": self.greaterThanEqual,
"<=": self.lessThanEqual,
"&&": self.andOperator,
"||": self.orOperator
}
# need parenthesis
self.functions = {
# cross section
'Min': np.minimum,
'Max': np.maximum,
'Percentile': self.percentile,
'GroupPercentile': self.group_percentile,
'Quantile': self.to_quantile,
'GroupQuantile': self.group_quantile,
'Rank': self.rank,
'GroupRank': self.group_rank,
'Mask': self.mask,
'ConditionRank': self.cond_rank,
'ConditionPercentile': self.cond_percentile,
'ConditionQuantile': self.cond_quantile,
'Standardize': self.standardize,
'Cutoff': self.cutoff,
# 'GroupApply': self.group_apply,
# time series
'CumToSingle': self.cum_to_single,
'TTM': self.calc_ttm,
'TTM_jl': self.calc_ttm_jli,
'YOY': calc_year_on_year_return,
'QOQ': calc_quarter_on_quarter_return,
'Ts_Rank': self.ts_rank,
'Ts_Percentile': self.ts_percentile,
'Ts_Quantile': self.ts_quantile,
'Ewma': self.ewma,
'Sma':self.sma,
'Ts_Sum': self.ts_sum,
'Ts_Product': self.ts_product, # rolling product
'CountNans': self.count_nans, # rolling count Nans
'StdDev': self.std_dev,
'Covariance': self.cov,
'Correlation': self.corr,
'Corr': self.corr,
'Delay': self.delay,
'Delta': self.delta,
'Return': self.calc_return,
'Ts_Mean': self.ts_mean,
'Ts_Min': self.ts_min,
'Ts_Max': self.ts_max,
'Ts_Skewness': self.ts_skew,
'Ts_Kurtosis': self.ts_kurt,
'Tail': self.tail,
'Step': self.step,
'Decay_linear': self.decay_linear,
'Decay_exp': self.decay_exp,
# inplace
'Pow': np.power,
'SignedPower': self.signed_power,
'IsNan': self.is_nan,
# others
'If': self.ifFunction,
'FillNan': self.fill_nan,
'Return_Abs': self.calc_return_abs,
'Return_Fwd': self.calc_return_fwd,
# test
}
self.consts = {
'E': math.e,
'PI': math.pi,
}
# no use
self.values = {
'sin': math.sin,
'cos': math.cos,
'tan': math.tan,
'asin': math.asin,
'acos': math.acos,
'atan': math.atan,
'sqrt': math.sqrt,
'log': math.log,
'abs': abs,
'ceil': math.ceil,
'floor': math.floor,
'round': round,
'random': self.random,
'fac': self.fac,
'exp': math.exp,
'min': min,
'max': max,
'pyt': self.pyt,
'pow': math.pow,
'atan2': math.atan2,
'E': math.e,
'PI': math.pi
}
self.ann_dts = None
self.trade_dts = None
# -----------------------------------------------------
# functions
def add(self, a, b):
(a, b) = self._align_bivariate(a, b)
return a + b
def sub(self, a, b):
(a, b) = self._align_bivariate(a, b)
return a - b
def mul(self, a, b):
(a, b) = self._align_bivariate(a, b)
return a * b
def div(self, a, b):
(a, b) = self._align_bivariate(a, b)
res = a / b
if isinstance(res, pd.DataFrame):
res = res.replace([np.inf, -np.inf], np.nan)
return res
def mod(self, a, b):
(a, b) = self._align_bivariate(a, b)
return a % b
def pow(self, a, b):
return np.power(a, b)
def signed_power(self, x, e):
signs = np.sign(x)
return signs * np.power(np.abs(x), e)
def concat(self, a, b, *args):
result = u'{0}{1}'.format(a, b)
for arg in args:
result = u'{0}{1}'.format(result, arg)
return result
@staticmethod
def _to_array(x):
if isinstance(x, (pd.DataFrame, pd.Series)):
return x.values
elif isinstance(x, np.ndarray):
return x
elif isinstance(x, (int, float, bool, np.integer, np.float, np.bool)):
return np.asarray(x)
else:
print(x)
raise ValueError("Cannot convert type {} to numpy array".format(repr(type(x))))
def equal(self, a, b):
(a, b) = self._align_bivariate(a, b)
arr, brr = self._to_array(a), self._to_array(b)
mask = np.logical_or(np.isnan(arr), np.isnan(brr))
res = arr == brr
res = res.astype(float)
res[mask] = np.nan
return pd.DataFrame(index=a.index, columns=a.columns, data=res)
def notEqual(self, a, b):
(a, b) = self._align_bivariate(a, b)
arr, brr = self._to_array(a), self._to_array(b)
mask = np.logical_or(np.isnan(arr), np.isnan(brr))
res = arr != brr
res = res.astype(float)
res[mask] = np.nan
return pd.DataFrame(index=a.index, columns=a.columns, data=res)
def greaterThan(self, a, b):
(a, b) = self._align_bivariate(a, b)
arr, brr = self._to_array(a), self._to_array(b)
mask = np.logical_or(np.isnan(arr), np.isnan(brr))
res = arr > brr
res = res.astype(float)
res[mask] = np.nan
return pd.DataFrame(index=a.index, columns=a.columns, data=res)
def lessThan(self, a, b):
(a, b) = self._align_bivariate(a, b)
arr, brr = self._to_array(a), self._to_array(b)
mask = np.logical_or(np.isnan(arr), np.isnan(brr))
res = arr < brr
res = res.astype(float)
res[mask] = np.nan
return pd.DataFrame(index=a.index, columns=a.columns, data=res)
def greaterThanEqual(self, a, b):
(a, b) = self._align_bivariate(a, b)
arr, brr = self._to_array(a), self._to_array(b)
mask = np.logical_or(np.isnan(arr), np.isnan(brr))
res = arr >= brr
res = res.astype(float)
res[mask] = np.nan
return pd.DataFrame(index=a.index, columns=a.columns, data=res)
def lessThanEqual(self, a, b):
(a, b) = self._align_bivariate(a, b)
arr, brr = self._to_array(a), self._to_array(b)
mask = np.logical_or(np.isnan(arr), np.isnan(brr))
res = arr <= brr
res = res.astype(float)
res[mask] = np.nan
return pd.DataFrame(index=a.index, columns=a.columns, data=res)
def andOperator(self, a, b):
(a, b) = self._align_bivariate(a, b)
arr, brr = self._to_array(a), self._to_array(b)
mask = np.logical_or(np.isnan(arr), np.isnan(brr))
res = np.logical_and(arr, brr)
res = res.astype(float)
res[mask] = np.nan
return pd.DataFrame(index=a.index, columns=a.columns, data=res)
def orOperator(self, a, b):
(a, b) = self._align_bivariate(a, b)
arr, brr = self._to_array(a), self._to_array(b)
mask = np.logical_or(np.isnan(arr), np.isnan(brr))
res = np.logical_or(arr, brr)
res = res.astype(float)
res[mask] = np.nan
return | pd.DataFrame(index=a.index, columns=a.columns, data=res) | pandas.DataFrame |
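    # Added note (illustrative): every binary operator above follows the same
    # pattern -- align the two frames, compare element-wise, cast to float, then
    # restore NaN wherever either input was NaN. For a = [1, NaN] and b = [1, 2],
    # '==' therefore yields [1.0, NaN] rather than [True, False].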
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import pandas as pd
import pandas.util.testing as pdt
import qiime2
from q2_taxa import collapse, filter_table, filter_seqs
class CollapseTests(unittest.TestCase):
def assert_index_equal(self, a, b):
# this method is derived from scikit-bio 0.5.1
pdt.assert_index_equal(a, b,
exact=True,
check_names=True,
check_exact=True)
def assert_data_frame_almost_equal(self, left, right):
# this method is derived from scikit-bio 0.5.1
pdt.assert_frame_equal(left, right,
check_dtype=True,
check_index_type=True,
check_column_type=True,
check_frame_type=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False)
self.assert_index_equal(left.index, right.index)
def test_collapse(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b; c', 'a; b; d'],
index=['feat1', 'feat2'])
actual = collapse(table, taxonomy, 1)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 2)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 3)
expected = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0],
[0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b;c', 'a;b;d'])
self.assert_data_frame_almost_equal(actual, expected)
def test_collapse_missing_level(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b', 'a; b; d'],
index=['feat1', 'feat2'])
actual = collapse(table, taxonomy, 1)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 2)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 3)
expected = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0],
[0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b;__', 'a;b;d'])
self.assert_data_frame_almost_equal(actual, expected)
def test_collapse_bad_level(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b; c', 'a; b; d'],
index=['feat1', 'feat2'])
with self.assertRaisesRegex(ValueError, 'of 42 is larger'):
collapse(table, taxonomy, 42)
with self.assertRaisesRegex(ValueError, 'of 0 is too low'):
collapse(table, taxonomy, 0)
def test_collapse_missing_table_ids_in_taxonomy(self):
table = pd.DataFrame([[2.0, 2.0],
[1.0, 1.0],
[9.0, 8.0],
[0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b; c', 'a; b; d'],
index=['feat1', 'feat3'])
with self.assertRaisesRegex(ValueError, 'missing.*feat2'):
collapse(table, taxonomy, 1)
class FilterTable(unittest.TestCase):
def test_filter_no_filters(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, 'At least one'):
filter_table(table, taxonomy)
def test_alt_delimiter(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# include with delimiter
obs = filter_table(table, taxonomy, include='<EMAIL>',
query_delimiter='@peanut@')
pdt.assert_frame_equal(obs, table, check_like=True)
# exclude with delimiter
obs = filter_table(table, taxonomy, exclude='<EMAIL>',
query_delimiter='@peanut@')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
def test_filter_table_unknown_mode(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, 'Unknown mode'):
filter_table(table, taxonomy, include='bb', mode='not-a-mode')
def test_filter_table_include(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, include='bb')
pdt.assert_frame_equal(obs, table, check_like=True)
obs = filter_table(table, taxonomy, include='cc,ee')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only
obs = filter_table(table, taxonomy, include='cc')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, include='aa; bb; cc')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only
obs = filter_table(table, taxonomy, include='dd')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, include='ee')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, include='dd ee')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, include='aa; bb; dd ee')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy, include='peanut!')
def test_filter_table_include_exact_match(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, include='aa; bb; cc,aa; bb; dd ee',
mode='exact')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only
obs = filter_table(table, taxonomy, include='aa; bb; cc',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only
obs = filter_table(table, taxonomy, include='aa; bb; dd ee',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy, include='bb', mode='exact')
def test_filter_table_exclude(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, exclude='ab')
pdt.assert_frame_equal(obs, table, check_like=True)
obs = filter_table(table, taxonomy, exclude='xx')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only
obs = filter_table(table, taxonomy, exclude='dd')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='dd ee')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='aa; bb; dd ee')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only
obs = filter_table(table, taxonomy, exclude='cc')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='aa; bb; cc')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy, exclude='aa')
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy, exclude='aa; bb')
def test_filter_table_exclude_exact_match(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, exclude='peanut!',
mode='exact')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only
obs = filter_table(table, taxonomy, exclude='aa; bb; dd ee',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='aa; bb; dd ee,aa',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only
obs = filter_table(table, taxonomy, exclude='aa; bb; cc',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='aa; bb; cc,aa',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy,
exclude='aa; bb; cc,aa; bb; dd ee',
mode='exact')
def test_filter_table_include_exclude(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, include='aa', exclude='peanut!')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only - feat2 dropped at exclusion step
obs = filter_table(table, taxonomy, include='aa', exclude='ee')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat1 only - feat2 dropped at inclusion step
obs = filter_table(table, taxonomy, include='cc', exclude='ee')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only - feat1 dropped at exclusion step
obs = filter_table(table, taxonomy, include='aa', exclude='cc')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only - feat1 dropped at inclusion step
obs = filter_table(table, taxonomy, include='ee', exclude='cc')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features - all dropped at exclusion
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy,
include='aa',
exclude='bb',
mode='exact')
# keep no features - one dropped at inclusion, one dropped at exclusion
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy,
include='cc',
exclude='cc',
mode='exact')
# keep no features - all dropped at inclusion
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy,
include='peanut',
exclude='bb',
mode='exact')
def test_filter_table_underscores_escaped(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep feat1 only - underscore not treated as a wild card
obs = filter_table(table, taxonomy, include='cc,d_')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat1 only - underscore in query matches underscore in
# taxonomy annotation
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; c_', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
obs = filter_table(table, taxonomy, include='c_')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
def test_all_features_with_frequency_greater_than_zero_get_filtered(self):
table = pd.DataFrame([[2.0, 0.0], [1.0, 0.0], [9.0, 0.0], [1.0, 0.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# empty - feat2, which is matched by the include term, has a frequency
# of zero in all samples, so all samples end up dropped from the table
with self.assertRaisesRegex(ValueError,
expected_regex='greater than zero'):
filter_table(table, taxonomy, include='dd')
def test_extra_taxon_ignored(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee', 'aa; bb; cc'],
index=pd.Index(['feat1', 'feat2', 'feat3'],
name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, include='bb')
pdt.assert_frame_equal(obs, table, check_like=True)
def test_missing_taxon_errors(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc'],
index=pd.Index(['feat1'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, expected_regex='All.*feat2'):
filter_table(table, taxonomy, include='bb')
class FilterSeqs(unittest.TestCase):
def test_filter_no_filters(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, 'At least one'):
filter_seqs(seqs, taxonomy)
def test_alt_delimiter(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# include with delimiter
obs = filter_seqs(seqs, taxonomy, include='cc<EMAIL>',
query_delimiter='@peanut@')
exp = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# exclude with delimiter
obs = filter_seqs(seqs, taxonomy, exclude='ww<EMAIL>',
query_delimiter='@peanut@')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
def test_filter_seqs_unknown_mode(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, 'Unknown mode'):
filter_seqs(seqs, taxonomy, include='bb', mode='not-a-mode')
def test_filter_seqs_include(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_seqs(seqs, taxonomy, include='bb')
exp = | pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2']) | pandas.Series |
import pandas as pd
import streamlit as st
import numpy as np
import pickle
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import MinMaxScaler
from PIL import Image
# Header
header_image = Image.open('Images/ebooks.jpg')
st.image(header_image)
# Creating sidebar comments
st.sidebar.title('Kindle eBook Recommendations')
st.sidebar.caption('By [<NAME>](https://www.linkedin.com/in/daniel-burdeno-39a298ab/)')
# Load in appropriate DataFrames, user ratings
df_user = pd.read_csv('Data/df_user.csv', index_col=0)
# Meta data for collabortive filtering
df_meta = pd.read_csv('Data/meta5.csv', index_col='asin')
df_meta.drop(columns =['Unnamed: 0'], inplace=True)
# Meta data for content based
df_meta_all = | pd.read_csv('Data/meta_all.csv', index_col='asin') | pandas.read_csv |
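# Added sketch (column names assumed, not from the original app): with the imports
# above, a content-based recommendation typically scales the numeric metadata and
# ranks titles by cosine similarity to a seed book's feature vector, e.g.
#
#   feats = MinMaxScaler().fit_transform(df_meta_all[['feature_1', 'feature_2']])
#   sims = cosine_similarity(feats[seed_idx].reshape(1, -1), feats).ravel()
#   top_asins = df_meta_all.index[np.argsort(sims)[::-1][1:11]]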
from datetime import datetime
from decimal import Decimal
from io import StringIO
import numpy as np
import pytest
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, read_csv
import pandas._testing as tm
from pandas.core.base import SpecificationError
import pandas.core.common as com
def test_repr():
# GH18203
result = repr(pd.Grouper(key="A", level="B"))
expected = "Grouper(key='A', level='B', axis=0, sort=False)"
assert result == expected
@pytest.mark.parametrize("dtype", ["int64", "int32", "float64", "float32"])
def test_basic(dtype):
data = Series(np.arange(9) // 3, index=np.arange(9), dtype=dtype)
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
for k, v in grouped:
assert len(v) == 3
agged = grouped.aggregate(np.mean)
assert agged[1] == 1
tm.assert_series_equal(agged, grouped.agg(np.mean)) # shorthand
tm.assert_series_equal(agged, grouped.mean())
tm.assert_series_equal(grouped.agg(np.sum), grouped.sum())
expected = grouped.apply(lambda x: x * x.sum())
transformed = grouped.transform(lambda x: x * x.sum())
assert transformed[7] == 12
tm.assert_series_equal(transformed, expected)
value_grouped = data.groupby(data)
tm.assert_series_equal(
value_grouped.aggregate(np.mean), agged, check_index_type=False
)
# complex agg
agged = grouped.aggregate([np.mean, np.std])
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped.aggregate({"one": np.mean, "two": np.std})
group_constants = {0: 10, 1: 20, 2: 30}
agged = grouped.agg(lambda x: group_constants[x.name] + x.mean())
assert agged[1] == 21
# corner cases
msg = "Must produce aggregated value"
# exception raised is type Exception
with pytest.raises(Exception, match=msg):
grouped.aggregate(lambda x: x * 2)
def test_groupby_nonobject_dtype(mframe, df_mixed_floats):
key = mframe.index.codes[0]
grouped = mframe.groupby(key)
result = grouped.sum()
expected = mframe.groupby(key.astype("O")).sum()
tm.assert_frame_equal(result, expected)
# GH 3911, mixed frame non-conversion
df = df_mixed_floats.copy()
df["value"] = range(len(df))
def max_value(group):
return group.loc[group["value"].idxmax()]
applied = df.groupby("A").apply(max_value)
result = applied.dtypes
expected = Series(
[np.dtype("object")] * 2 + [np.dtype("float64")] * 2 + [np.dtype("int64")],
index=["A", "B", "C", "D", "value"],
)
tm.assert_series_equal(result, expected)
def test_groupby_return_type():
# GH2893, return a reduced type
df1 = DataFrame(
[
{"val1": 1, "val2": 20},
{"val1": 1, "val2": 19},
{"val1": 2, "val2": 27},
{"val1": 2, "val2": 12},
]
)
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
with tm.assert_produces_warning(FutureWarning):
result = df1.groupby("val1", squeeze=True).apply(func)
assert isinstance(result, Series)
df2 = DataFrame(
[
{"val1": 1, "val2": 20},
{"val1": 1, "val2": 19},
{"val1": 1, "val2": 27},
{"val1": 1, "val2": 12},
]
)
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
with tm.assert_produces_warning(FutureWarning):
result = df2.groupby("val1", squeeze=True).apply(func)
assert isinstance(result, Series)
# GH3596, return a consistent type (regression in 0.11 from 0.10.1)
df = DataFrame([[1, 1], [1, 1]], columns=["X", "Y"])
with tm.assert_produces_warning(FutureWarning):
result = df.groupby("X", squeeze=False).count()
assert isinstance(result, DataFrame)
def test_inconsistent_return_type():
# GH5592
# inconsistent return type
df = DataFrame(
dict(
A=["Tiger", "Tiger", "Tiger", "Lamb", "Lamb", "Pony", "Pony"],
B=Series(np.arange(7), dtype="int64"),
C=date_range("20130101", periods=7),
)
)
def f(grp):
return grp.iloc[0]
expected = df.groupby("A").first()[["B"]]
result = df.groupby("A").apply(f)[["B"]]
tm.assert_frame_equal(result, expected)
def f(grp):
if grp.name == "Tiger":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["B"]]
e = expected.copy()
e.loc["Tiger"] = np.nan
tm.assert_frame_equal(result, e)
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["B"]]
e = expected.copy()
e.loc["Pony"] = np.nan
| tm.assert_frame_equal(result, e) | pandas._testing.assert_frame_equal |
"""
Copyright 2021 Merck & Co., Inc. Kenilworth, NJ, USA.
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import requests
import json
import pandas as pd
import numpy as np
from scipy.stats import variation, kurtosis, skew
from termcolor import colored
from datetime import datetime
from pytz import timezone
import sys
import os
from urllib.parse import quote_plus
def element_fmt(input):
if (str(input).find("/") > 0):
return quote_plus(str(input))
else:
return str(input)
def validate(value, possible_values, value_name):
if value == "" or value is None:
dataset_error = "Dataset not specified"
print(colored(
"ERROR! " + value_name + " name [" + str(value) + "] not specified", "red"))
return False
elif value not in possible_values:
dataset_error = "Dataset name not valid"
print(colored(
"ERROR! " + value_name + " name [" + str(value) + "] not valid", "red"))
return False
return True
def requestResponsePrint(response, total_run_time, verbose):
if str(response) == "<Response [200]>":
if verbose:
print(colored(
"\nDownload successful! Request completed in " + str(total_run_time), "green"))
elif str(response) == "<Response [401]>":
print(colored( "\nERROR! Unauthorized. Your credentials are either invalid or expired.",
"red"))
elif str(response) == "<Response [404]>":
print(colored("\nERROR! You don't have permission to access the resource you're \
trying to. If you believe this is in error, please contact the \
Data Profiler Team.",
"red"))
elif str(response == "<Response [403]>"):
print(colored("\nERROR! The request had an error due to programming errors or \
cluster component downtime. Please try again, and contact the \
Data Profiler Team if the problem persists.",
"red"))
def map_listtodict(listdict):
'''
Takes a list of dictionaries and converts to dictionary
[{'value': 'val1', 'count': 23},{'value': 'val2', 'count': 2}, ..]
-> {'val1': 23, 'val2': 2}
Parameters:
listdict (list): list of dictinaries with keys as value and count only
Returns:
dictionary: dictionary with keys as value and value as count
'''
valcnt_dict = {}
for valcnt in listdict:
valcnt_dict[valcnt['value']] = valcnt['count']
return valcnt_dict
class Column():
def __init__(self, environment, dataset_name, table_name, column_name, filters={}):
self.column_name = column_name
self.table_name = table_name
self.dataset_name = dataset_name
self.env = environment
self.filters = filters
validated = self.validateData()
self.metadata = self.getColumnMetadata()
if self.filters != {}:
validate_filter = self.validateFilters()
if validate_filter==False:
print (colored("ERROR: this is not valid input", "red"))
if validated==False:
print (colored("ERROR: this is not valid input", "red"))
##### data about the column itself #####
def getColumnMetadata(self):
url = self.env.url + '/v1/columns/{}/{}'.format(self.dataset_name, self.table_name)
response = requests.get(url, headers=self.env.header)
return response.json()[self.column_name]
##### setting filters for listing columns counts #####
def setFilters(self, filters):
self.filters = filters
self.validateData()
##### retrieving data stored within the column metadata #####
def getColumnDataType(self):
return self.metadata['data_type']
def getValueCount(self):
return self.metadata['num_values']
def getUniqueValueCount(self):
return self.metadata['num_unique_values']
def getVisibility(self):
return self.metadata['visibility']
def getUserAccessList(self):
url = self.env.url + "/rules_of_use"
post_data = json.dumps({
"query":"{usersWithAttribute(value:\""+self.getVisibility()+"\"){username}}"
})
response = requests.post(url, headers=self.env.header, data=post_data)
if response.status_code == 401 or response.status_code == 403:
print (colored("Unauthorized: You are not authorized to perform this request,\
please contact the Data Profiler team", "red"))
return None
try:
usernames = [x["username"] for x in json.loads(response.text)["data"]["usersWithAttribute"]]
return usernames
except:
print (colored("There was a {} error processing your \
request".format(response.status_code), "red"))
return None
##### lists a dictionary of column counts with the structures as follows #####
##### [{'value':'value1', 'count':'count1'},...] #####
def listColumnCounts(self):
## filters do not work for this endpoint
post_data = json.dumps({
"dataset": self.dataset_name,
"table": self.table_name,
"column": self.column_name,
"limit": 0,
"sort": "CNT_DESC"
})
url = self.env.url + '/v1/colCounts'
response = requests.post(url, headers=self.env.header, data=post_data)
if response.status_code == 401 or response.status_code == 403:
print (colored("Unauthorized: You are not authorized to perform this request," + \
" please contact the Data Profiler team", "red"))
return None
try:
text_data = response.text
text_data = text_data[:-1]
json_data = json.loads(text_data)
return json_data
except:
print (colored(f"There was a {response.status_code}" +
" error processing your request", "red"))
return None
## get a dictionary of the listed column values and their
## counts that are within the provided range of values
## returns empty list if errors or no values exist
## returns list of dicts: [{"value": "val_1", "count":"count_1"},
## {"value": "val_2", "count":"count_2"}]
def getColumnValuesInRange(self, min_val, max_val):
try:
range_len = float(max_val) - float(min_val)
except:
print (colored("Range values must be numbers", "red"))
return []
if float(max_val) <= float(min_val):
print (colored("Max range value must be greater than the min", "red"))
return []
all_value_count = self.listColumnCounts()
values_in_range = []
for value in all_value_count:
try:
if (float(value["value"]) >= float(min_val) and \
float(value["value"]) < float(max_val)):
values_in_range.append(value)
except:
continue
return values_in_range
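# Illustrative result (column values and counts are hypothetical):
#   col.getColumnValuesInRange(10, 20)
#   -> [{"value": "12", "count": 40}, {"value": "19", "count": 7}]
# Non-numeric values and values outside [min_val, max_val) are skipped above.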
def __isint(self, ignore={np.nan, '', ' ', '-', None}, threshold='0.70'):
conversion = {'integer','long'}
dt = self.getColumnDataType()
cnt = 0
icnt = 0
if dt == 'string':
all_value_count = self.listColumnCounts()
for valdict in all_value_count:
if valdict['value'] not in ignore:
cnt += valdict['count']
try :
int(valdict['value'])
icnt += valdict['count']
except:
pass
try:
if icnt/cnt >= float(threshold):
return True
else:
return False
except:
print (colored("Range values must be numbers", "red"))
return None
else:
if dt in conversion:
return True
else:
return False
def __isfloat(self, ignore={np.nan, '', ' ', '-', None}, threshold='0.70'):
conversion = {'integer','float','long'}
dt = self.getColumnDataType()
cnt = 0
fcnt = 0
if dt == 'string':
all_value_count = self.listColumnCounts()
for valdict in all_value_count:
if valdict['value'] not in ignore:
cnt += valdict['count']
try :
float(valdict['value'])
fcnt += valdict['count']
except:
pass
try:
if fcnt/cnt >= float(threshold):
return True
else:
return False
except:
print (colored("Range values must be numbers", "red"))
return None
else:
if dt in conversion:
return True
else:
return False
def __getdatatype(self):
if self.__isint():
return int
elif self.__isfloat():
return float
elif self.getColumnDataType() == 'string':
return str
else:
return self.getColumnDataType()
##### Lists of valid datasets, tables, and columns #####
def validDatasets(self):
return self.env.getDatasetList()
def validTables(self):
url = self.env.url + '/v1/tables/{}'.format(self.dataset_name)
response = requests.get(url, headers=self.env.header)
return list(response.json().keys())
def validColumns(self):
url = self.env.url + '/v1/columns/{}/{}'.format(self.dataset_name, self.table_name)
response = requests.get(url, headers=self.env.header)
return list(response.json().keys())
##### validates the dataset, table, and column specified on initialization #####
def validateData(self):
valid_datasets = self.validDatasets()
dataset_valid = validate(self.dataset_name, valid_datasets, "Dataset")
if dataset_valid:
valid_tables = self.validTables()
table_valid = validate(self.table_name, valid_tables, "Table")
if table_valid:
valid_columns = self.validColumns()
column_valid = validate(self.column_name, self.validColumns(), "Column")
return dataset_valid & table_valid & column_valid
##### validates the filters the user can choose to set #####
def validateFilters(self):
if self.filters != {}:
filter_keys = [x for x in self.filters]
for key in filter_keys:
valid_filter = validate(key, self.validColumns(), "Filter Column")
if valid_filter==False:
return False
return True
# Check for number of missing/blank values in the column
def __getNAscount(self,blank_types = {'',' ','-',None, np.nan}):
'''
Find missing values present in selected column
Parameters:
blank_types (set): what constitutes missing values
Returns:
int: Returns the number of missing values present
'''
ValCount = self.listColumnCounts()
cnt_all = 0
for vc in ValCount:
if vc['value'] in blank_types:
cnt_all += vc['count']
return cnt_all
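# Minimal usage sketch for Column (assumes a configured `environment` object with
# valid credentials; the dataset/table/column names below are placeholders):
#   col = Column(environment, "example_dataset", "example_table", "example_column")
#   col.getColumnDataType()      # e.g. "string"
#   col.listColumnCounts()[:3]   # e.g. [{"value": "A", "count": 10}, ...]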
class Table():
def __init__(self, environment, dataset_name, table_name, filters={}):
self.table_name = table_name
self.dataset_name = dataset_name
self.env = environment
self.filters = filters
validated = self.validateData()
if validated==False:
print (colored("ERROR: The input data is not valid", "red"))
self.table_info = self.getTableInfo()
self.metadata = self.getTableMetadata()
if self.filters != {}:
validated_filters = self.validateFilters()
if validated_filters==False:
print (colored("ERROR: The input data is not valid", "red"))
#### get specific information about the inside of the table #####
def getTableInfo(self):
url = self.env.url + '/v1/columns/{}/{}'.format(self.dataset_name, self.table_name)
response = requests.get(url, headers=self.env.header)
return response.json()
##### get metadata about the table #####
def getTableMetadata(self):
url = self.env.url + '/v1/tables/{}'.format(self.dataset_name)
response = requests.get(url, headers=self.env.header)
return response.json()[self.table_name]
##### set filters for loading table rows #####
def setFilters(self, filters):
self.filters = filters
self.validateFilters()
##### get functions to access the information in the table #####
def getColumnList(self):
return list(self.table_info.keys())
def getColumnCount(self):
return len(self.getColumnList())
##### get functions to access the table metadata #####
def getUploadDate(self):
epoch_time = float(self.metadata['load_time'])/1000
return datetime.fromtimestamp(epoch_time)
def getUpdateDate(self):
epoch_time = float(self.metadata['update_time'])/1000
return datetime.fromtimestamp(epoch_time)
def getVisibility(self):
return self.metadata["visibility"]
def getTableCount(self):
return self.metadata["num_tables"]
def getColumnCount(self):
return self.metadata["num_columns"]
def getValueCount(self):
return self.metadata["num_values"]
def getPullTimestamp(self):
epoch_time = float(self.metadata["timestamp"])/1000
return datetime.fromtimestamp(epoch_time)
def getUserAccessList(self):
url = self.env.url + "/rules_of_use"
post_data = json.dumps({
"query":"{usersWithAttribute(value:\""+self.getVisibility()+"\"){username}}"
})
response = requests.post(url, headers=self.env.header, data=post_data)
if response.status_code == 401 or response.status_code == 403:
print (colored("Unauthorized: You are not authorized to perform this request, \
please contact the Data Profiler team", "red"))
return None
try:
usernames = [x["username"] for x in json.loads(response.text)["data"]["usersWithAttribute"]]
return usernames
except:
print (colored("There was a {} error processing your \
request".format(response.status_code), "red"))
return None
##### format data for post requests #####
## If no sample size is given, then the limit is set to 0 which returns all rows
def getPostData(self, sample_size=0):
post_data = json.dumps({
"dataset": self.dataset_name,
"table": self.table_name,
"sort": "CNT_DESC",
"filters": self.filters,
"limit": sample_size
})
return post_data
##### format data for post requests #####
## If no sample size is given, then the limit is set to 0 which returns all rows
def getRowsPostData(self, start_location=None, page_size = 5000):
if page_size >= 10000:
raise ValueError("Rows Page Size must be less than 10,000")
post_data = {
"dataset": self.dataset_name,
"table": self.table_name,
"filters": self.filters,
"limit": page_size,
"start_location": start_location,
"pageSize": page_size
}
# Remove None values (likely start_location)
return json.dumps({k: v for k, v in post_data.items() if v})
##### load the rows from the table #####
def loadRows(self):
if self.filters == {}:
print("\nDownloading ALL ROWS: {}, {}...".format(self.dataset_name, self.table_name))
else:
print("\nDownloading FILTERED TABLE: {} | {} \nFilter(s) \
applied: {}...".format(self.dataset_name, self.table_name, self.filters))
try:
url = self.env.url + '/v2/rows'
# Time post request
start_time = datetime.now(timezone('US/Eastern'))
# Tracking variables
start_location = None
results = []
# Run the rows endpoint through until we break
while True:
post_data = self.getRowsPostData(start_location)
response = requests.post(
url, headers=self.env.header, data=post_data).json()
results.extend(response['rows'])
# If endLocation is null/None, we have successfully gotten all the rows
# This is also where you'd want to check against the limit (if len(results) > limit)
if response['endLocation'] is None:
break
# Update the start location and loop again
start_location = response['endLocation']
total_run_time = str(datetime.now(
timezone('US/Eastern')) - start_time)
requestResponsePrint(response, total_run_time, self.env.verbose)
if len(results) == 0:
if self.env.verbose:
print(colored("Data request empty!", "red"))
## returns an empty dataframe with the column structure of the table itself
df = pd.DataFrame(columns=self.getColumnList())
else:
df = pd.DataFrame(results)
return df
except ValueError: # includes simplejson.decoder.JSONDecodeError
print(
colored("\nError - check response message or text to json parser.", "red"))
return None
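# Pagination sketch: loadRows() keeps posting to /v2/rows, passing the previous
# endLocation back as start_location, until endLocation comes back as None.
# Illustrative call (names are placeholders):
#   df = Table(environment, "example_dataset", "example_table").loadRows()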
## loads a subset of the table rows
## it will default to 100 rows, but a user can specfiy a number of rows
def loadTableSample(self, sample_size=100):
print((colored("Note: This endpoint is experimental - expect longer load times \
for larger datasets", "cyan")))
if self.filters == {}:
print("\nDownloading {} ROWS: {}, {}...".format(sample_size, self.dataset_name,
self.table_name))
else:
print("\nDownloading {} FILTERED ROWS: {} | {} \nFilter(s) \
applied: {} ...".format(sample_size, self.dataset_name, self.table_name,
self.filters))
try:
url = self.env.url + '/v1/rows'
post_data = self.getPostData(sample_size)
# Time post request
start_time = datetime.now(timezone('US/Eastern'))
response = requests.post(
url, headers=self.env.header, data=post_data)
total_run_time = str(datetime.now(
timezone('US/Eastern')) - start_time)
requestResponsePrint(response, total_run_time, self.env.verbose)
# Convert to text to remove extra "$" character
response.encoding = 'utf-8'
text_data = response.text
text_data = text_data[:-1]
if len(text_data) < 2:
if self.env.verbose:
print(colored("Data request empty!", "red"))
## returns an empty dataframe with the column structure of the table itself
df = pd.DataFrame(columns=self.getColumnList())
else:
json_data = json.loads(text_data)
df = | pd.DataFrame(json_data) | pandas.DataFrame |
import io
from typing import List
import joblib
import matplotlib
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.compose import make_column_selector
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder
from xautoml.models import Candidate, Ensemble
from xautoml.util.datasets import down_sample
from xautoml.util.pipeline_utils import DataFrameImputer, InplaceOrdinalEncoder
class EnsembleInspection:
@staticmethod
def member_predictions(candidates: List[Candidate], X: pd.DataFrame, n_jobs=1):
def _model_predict(candidate: Candidate, X: pd.DataFrame) -> np.ndarray:
return candidate.y_transformer(candidate.model.predict(X.copy()))
all_predictions = joblib.Parallel(n_jobs=n_jobs)(
joblib.delayed(_model_predict)(candidate=candidate, X=X) for candidate in candidates
)
all_predictions = np.array(all_predictions)
return all_predictions
@staticmethod
def ensemble_overview(ensemble: Ensemble, candidates: List[Candidate], X: pd.DataFrame, y_pred: pd.Series,
n_jobs=1):
all_predictions = EnsembleInspection.member_predictions(candidates, X, n_jobs)
mask = np.min(all_predictions, axis=0) == np.max(all_predictions, axis=0)
indices = np.where(~mask)[0]
ensemble_consensus = (np.tile(y_pred, (all_predictions.shape[0], 1)) == all_predictions).sum() / (
all_predictions.shape[0] * all_predictions.shape[1])
metrics = {'Ensemble': {'consensus': float(ensemble_consensus), 'weight': float(np.sum(ensemble.weights))}}
for i in range(all_predictions.shape[0]):
metrics[candidates[i].id] = {
'consensus': float(np.sum(all_predictions[i, :] == y_pred) / len(mask)),
'weight': float(ensemble.weight_map[candidates[i].id])
}
return metrics, indices
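# Shape of the return values (illustrative; the candidate id is hypothetical):
#   metrics -> {'Ensemble': {'consensus': 0.97, 'weight': 1.0},
#               'candidate_00': {'consensus': 0.95, 'weight': 0.25}, ...}
#   indices -> positions of samples on which the ensemble members disagree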
@staticmethod
def plot_decision_surface(ensemble: Ensemble, candidates: List[Candidate], X: pd.DataFrame, y: pd.Series):
# Dimension reduction for plotting
cat_columns = make_column_selector(dtype_exclude=np.number)(X)
pipeline = Pipeline(steps=[
('imputation', DataFrameImputer()),
('encoding', InplaceOrdinalEncoder(cat_columns, X.columns)),
('pca', PCA(n_components=2))
])
X_2d = pipeline.fit_transform(X)
label_encoder = LabelEncoder()
label_encoder.fit(y)
x_min, x_max = X_2d[:, 0].min(), X_2d[:, 0].max()
y_min, y_max = X_2d[:, 1].min(), X_2d[:, 1].max()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 50), np.linspace(y_min, y_max, 50))
grid_2d = np.c_[xx.ravel(), yy.ravel()]
grid = pipeline.inverse_transform(grid_2d)
models = [(ensemble.model, lambda y: y)] + [(c.model, c.y_transformer) for c in candidates]
names = ['Ensemble'] + [c.id for c in candidates]
contours = {}
for (clf, y_trans), cid in zip(models, names):
fig, ax = plt.subplots(1, 1, figsize=(10, 10), dpi=10)
Z = y_trans(clf.predict(grid))
Z = label_encoder.transform(Z)
Z = Z.reshape(xx.shape)
norm = matplotlib.colors.Normalize(vmin=0.0, vmax=label_encoder.classes_.shape[0])
ax.contourf(Z, levels=2, alpha=0.75, norm=norm, cmap='viridis')
ax.axis('off')
ax.set_position([0, 0, 1, 1])
buf = io.BytesIO()
plt.savefig(buf, format='svg')
buf.seek(0)
wrapper = io.TextIOWrapper(buf, encoding='utf-8')
svg = ''.join(wrapper.readlines()[18:-1]).replace('\n', ' ')
contours[cid] = svg
plt.close(fig)
X_2d, y = down_sample( | pd.DataFrame(X_2d, columns=['x', 'y']) | pandas.DataFrame |
# -*-coding:utf-8 -*-
# KMeans clustering algorithm
import pandas as pd
from sklearn.cluster import KMeans # import the KMeans clustering algorithm
outfile = '../subset/classifision_data.xls'
filename = '../subset/most_value_data.xls'
#filename = '../subset/cleaned_data.xls' # data before standardization
data = pd.read_excel(filename)
k = 5 # number of clusters
iteration = 500
kmodel = KMeans(n_clusters=k,max_iter=iteration)
kmodel.fit(data) # train the model
# combine the cluster labels with the original data
r3 = pd.Series(kmodel.labels_,index=range(len(data)))
cluster_data = pd.concat([data,r3],axis=1)
cluster_data.columns = list(data.columns)+[u'聚类类别']
cluster_data.to_excel(outfile)
# combine the standardized data with the cluster labels
r1 = pd.Series(kmodel.labels_).value_counts() # value_counts() counts the number of samples in each cluster
r2 = pd.DataFrame(kmodel.cluster_centers_)
r = | pd.concat([r2,r1],axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
import pandas as pd
news = | pd.read_csv("list.csv") | pandas.read_csv |
import nose
import pandas
from pandas.compat import u
from pandas.util.testing import network
from pandas.util.testing import assert_frame_equal
from numpy.testing.decorators import slow
from pandas.io.wb import search, download, get_countries
import pandas.util.testing as tm
class TestWB(tm.TestCase):
@slow
@network
def test_wdi_search(self):
raise nose.SkipTest
expected = {u('id'): {2634: u('GDPPCKD'),
4649: u('NY.GDP.PCAP.KD'),
4651: u('NY.GDP.PCAP.KN'),
4653: u('NY.GDP.PCAP.PP.KD')},
u('name'): {2634: u('GDP per Capita, constant US$, '
'millions'),
4649: u('GDP per capita (constant 2000 US$)'),
4651: u('GDP per capita (constant LCU)'),
4653: u('GDP per capita, PPP (constant 2005 '
'international $)')}}
result = search('gdp.*capita.*constant').ix[:, :2]
expected = pandas.DataFrame(expected)
expected.index = result.index
assert_frame_equal(result, expected)
@slow
@network
def test_wdi_download(self):
raise nose.SkipTest
expected = {'GDPPCKN': {(u('United States'), u('2003')): u('40800.0735367688'), (u('Canada'), u('2004')): u('37857.1261134552'), (u('United States'), u('2005')): u('42714.8594790102'), (u('Canada'), u('2003')): u('37081.4575704003'), (u('United States'), u('2004')): u('41826.1728310667'), (u('Mexico'), u('2003')): u('72720.0691255285'), (u('Mexico'), u('2004')): u('74751.6003347038'), (u('Mexico'), u('2005')): u('76200.2154469437'), (u('Canada'), u('2005')): u('38617.4563629611')}, 'GDPPCKD': {(u('United States'), u('2003')): u('40800.0735367688'), (u('Canada'), u('2004')): u('34397.055116118'), (u('United States'), u('2005')): u('42714.8594790102'), (u('Canada'), u('2003')): u('33692.2812368928'), (u('United States'), u('2004')): u('41826.1728310667'), ( | u('Mexico') | pandas.compat.u |
import io
import time
import json
from datetime import datetime
import pandas as pd
from pathlib import Path
import requests
drop_cols = [
'3-day average of daily number of positive tests (may count people more than once)',
'daily total tests completed (may count people more than once)',
'3-day average of new people who tested positive (counts first positive lab per person)',
'3-day average of currently hospitalized',
'daily number of vaccine doses administered beyond the primary series '
]
def save_file(df, file_path, current_date):
# save/update file
if not Path(file_path).exists():
df.to_csv(file_path, index=False)
else:
# get prior file date
prior = pd.read_csv(file_path, parse_dates=['date'])
prior_date = pd.to_datetime(prior['date'].max()).date()
if current_date > prior_date:
df.to_csv(file_path, mode='a', header=False, index=False)
return
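# Illustrative behaviour of save_file (path and date are hypothetical):
#   save_file(df, './data/raw/example.csv', date(2020, 5, 1))
#   - creates the csv (with header) on the first call
#   - on later calls, appends rows only when current_date is newer than the
#     latest 'date' already in the file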
def scrape_sheet(sheet_id):
# load previous raw_data and get prior date
raw_general = './data/raw/ri-covid-19.csv'
df = pd.read_csv(raw_general, parse_dates=['date'])
prior_date = df['date'].max().tz_localize('EST').date()
# wait till 5:05 pm, then check every 5 mins for an update
target = datetime.now().replace(hour=17).replace(minute=5)
while datetime.now() < target:
print(f"[status] waiting for 5pm", end='\r')
time.sleep(60)
# load data from RI - DOH spreadsheet
gen_url = f'https://docs.google.com/spreadsheets/d/{sheet_id}264100583'
df = pd.read_csv(gen_url).dropna(axis=1, how='all')
date = list(df)[1].strip()
date = pd.to_datetime(date).tz_localize('EST').date()
if df.shape[0] != 27:
print('[ERROR: summary page format changed]')
while not prior_date < date:
print(f"[status] waiting for update...{time.strftime('%H:%M')}", end='\r')
time.sleep(5 * 60)
df = pd.read_csv(gen_url)
date = list(df)[1].strip()
date = pd.to_datetime(date).tz_localize('EST').date()
else:
print('[status] found new update pausing for 2 mins')
time.sleep(2 * 60)
## transform general sheet
df['date'] = date
df.columns = ['metric', 'count', 'date']
save_file(df, raw_general, date)
## scrape geographic sheet
geo_url = f'https://docs.google.com/spreadsheets/d/{sheet_id}901548302'
geo_df = pd.read_csv(geo_url)
# get grographic date & fix cols
geo_date = geo_df.iloc[-1][1]
geo_date = pd.to_datetime(geo_date)
geo_df['date'] = geo_date
cols = [x for x in list(geo_df) if 'Rate' not in x]
geo_df = geo_df[cols]
geo_df = geo_df.dropna(axis=0)
geo_df.columns = ['city_town', 'count', 'hostpialized', 'deaths', 'fully_vaccinated', 'date']
# save file
raw_geo = './data/raw/geo-ri-covid-19.csv'
save_file(geo_df, raw_geo, geo_date)
## scrape demographics sheet
dem_url = f'https://docs.google.com/spreadsheets/d/{sheet_id}31350783'
dem_df = pd.read_csv(dem_url)
# make sure no columns were added/removed
if not dem_df.shape == (31, 9):
print('[error] demographics format changed')
return
else:
# get demographics updated date
dem_date = dem_df.iloc[-1][1]
dem_date = pd.to_datetime(dem_date).tz_localize('EST').date()
# drop percentage columns & rename
dem_df = dem_df.drop(dem_df.columns[[1, 2, 4, 6, 8]], axis=1)
dem_df.columns = ['metric', 'case_count', 'hosptialized', 'deaths']
# get data
sex = dem_df[1:4]
age = dem_df[5:17]
race = dem_df[18:24]
dem_df = pd.concat([sex, age, race])
dem_df['date'] = dem_date
raw_dem = './data/raw/demographics-covid-19.csv'
save_file(dem_df, raw_dem, dem_date)
def scrape_revised(sheet_id):
# load previous revised_data and get prior date
raw_revised = './data/raw/revised-data.csv'
df = | pd.read_csv(raw_revised, parse_dates=['date']) | pandas.read_csv |
import itertools
import logging
import numpy as np
import tensorflow as tf
import time
import os
import pandas as pd
import subprocess
def check_dir(cur_dir):
if not os.path.exists(cur_dir):
return False
return True
def copy_file(src_dir, tar_dir):
cmd = 'cp %s %s' % (src_dir, tar_dir)
subprocess.check_call(cmd, shell=True)
def find_file(cur_dir, suffix='.ini'):
for file in os.listdir(cur_dir):
if file.endswith(suffix):
return cur_dir + '/' + file
logging.error('Cannot find %s file' % suffix)
return None
def init_dir(base_dir, pathes=['log', 'data', 'model']):
if not os.path.exists(base_dir):
os.mkdir(base_dir)
dirs = {}
for path in pathes:
cur_dir = base_dir + '/%s/' % path
if not os.path.exists(cur_dir):
os.mkdir(cur_dir)
dirs[path] = cur_dir
return dirs
def init_log(log_dir):
logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s',
level=logging.INFO,
handlers=[
logging.FileHandler('%s/%d.log' % (log_dir, time.time())),
logging.StreamHandler()
])
def init_test_flag(test_mode):
if test_mode == 'no_test':
return False, False
if test_mode == 'in_train_test':
return True, False
if test_mode == 'after_train_test':
return False, True
if test_mode == 'all_test':
return True, True
return False, False
def plot_train(data_dirs, labels):
pass
def plot_evaluation(data_dirs, labels):
pass
class Counter:
def __init__(self, total_step, test_step, log_step,save_step):
self.counter = itertools.count(1)
self.cur_step = 0
self.cur_test_step = 0
self.total_step = total_step
self.test_step = test_step
self.log_step = log_step
self.save_step = save_step
self.stop = False
def next(self):
self.cur_step = next(self.counter)
return self.cur_step
def should_test(self):
test = False
if (self.cur_step - self.cur_test_step) >= self.test_step:
test = True
self.cur_test_step = self.cur_step
return test
def should_log(self):
return (self.cur_step % self.log_step == 0)
def should_stop(self):
if self.cur_step >= self.total_step:
return True
return self.stop
def should_save(self):
return (self.cur_step%self.save_step==0)
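# Counter usage sketch (step sizes are made up):
#   counter = Counter(total_step=1000, test_step=100, log_step=10, save_step=200)
#   step = counter.next()      # 1, 2, 3, ...
#   counter.should_log()       # True every log_step steps
#   counter.should_stop()      # True once cur_step >= total_step (or stop is set)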
class Trainer():
def __init__(self, env, model, global_counter, summary_writer, run_test, output_path=None,save_path=None):
self.cur_step = 0
self.global_counter = global_counter
self.env = env
self.agent = self.env.agent
self.model = model
self.sess = self.model.sess
self.n_step = self.model.n_step
self.summary_writer = summary_writer
self.run_test = run_test
assert self.env.T % self.n_step == 0
self.data = []
self.output_path = output_path
self.save_path = save_path
if run_test:
self.test_num = self.env.test_num
logging.info('Testing: total test num: %d' % self.test_num)
self._init_summary()
def _init_summary(self):
self.train_reward = tf.placeholder(tf.float32, [])
self.train_summary = tf.summary.scalar('train_reward', self.train_reward)
self.test_reward = tf.placeholder(tf.float32, [])
self.test_summary = tf.summary.scalar('test_reward', self.test_reward)
def _add_summary(self, reward, global_step, is_train=True):
if is_train:
summ = self.sess.run(self.train_summary, {self.train_reward: reward})
else:
summ = self.sess.run(self.test_summary, {self.test_reward: reward})
self.summary_writer.add_summary(summ, global_step=global_step)
def explore(self, prev_ob,old_obs,last_out_values):
obs = prev_ob
rewards = []
for _ in range(self.n_step):
actions, cur_values = self.model.forward(obs,old_obs,last_out_values, mode='explore')
next_obs, cur_rewards, done = self.env.step(actions)
global_step = self.global_counter.next()
self.cur_step += 1
self.model.add_transition(obs,old_obs,last_out_values,cur_values,actions,cur_rewards,next_obs,done)
if self.global_counter.should_log():
logging.info('''Training: global step %d, episode step %d,
ob: %s, a: %s, r: %.2f, train r: %.2f, done: %r''' %
(global_step, self.cur_step,
str(obs), str(actions), np.sum(cur_rewards), np.mean(cur_rewards), done))
if self.global_counter.should_save():
self.model.save(self.save_path, self.global_counter.cur_step)
rewards.append(np.sum(cur_rewards))
if done:
break
obs,old_obs,last_out_values = next_obs,obs,cur_values
return obs,old_obs,last_out_values, done, rewards
def perform(self, test_ind):
ob = self.env.reset(test_ind=test_ind)
old_ob = ob
last_out_values = np.zeros([25,5])
rewards = []
i = 0
time_a = time.time()
while True:
if i%20 == 0 :
time_b = time.time()
print('steps:',i,'\t\t times:',time_b-time_a)
time_a = time_b
if self.agent == 'greedy':
action = self.model.forward(ob)
elif self.agent.endswith('a2c'):
policy = self.model.forward(ob, False, 'p')
if self.agent == 'ma2c':
self.env.update_fingerprint(policy)
if self.agent == 'a2c':
action = np.argmax(np.array(policy))
else:
action = []
for pi in policy:
action.append(np.argmax(np.array(pi)))
elif self.agent == 'iedqn':
action, cur_values = self.model.forward(ob,old_ob,last_out_values)
else:
action, _ = self.model.forward()
next_ob, reward, done = self.env.step(action)
rewards.append(np.sum(reward))
i = i+1
if done:
break
old_ob,ob,last_out_values = ob,next_ob,cur_values
mean_reward = np.mean(np.array(rewards))
std_reward = np.std(np.array(rewards))
return mean_reward, std_reward
def run(self):
cur_episode=0
while not self.global_counter.should_stop():
# test
if self.run_test and self.global_counter.should_test():
rewards = []
global_step = self.global_counter.cur_step
self.env.train_mode = False
for test_ind in range(self.test_num):
mean_reward, std_reward = self.perform(test_ind)
self.env.terminate()
rewards.append(mean_reward)
log = {'agent': self.agent,
'step': global_step,
'test_id': test_ind,
'avg_reward': mean_reward,
'std_reward': std_reward}
self.data.append(log)
avg_reward = np.mean(np.array(rewards))
self._add_summary(avg_reward, global_step, is_train=False)
logging.info('Testing: global step %d, avg R: %.2f' %
(global_step, avg_reward))
# train
self.env.train_mode = True
obs = self.env.reset()
cur_time = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
print('current time:\t',cur_time,'\tcurrent episode\t',cur_episode)
old_obs = obs
last_out_values = np.zeros([25,5])
done = False
self.cur_step = 0
rewards = []
global_step = self.global_counter.cur_step
while True:
obs, old_obs, last_out_values,done, n_step_rewards = self.explore(obs,old_obs,last_out_values)
rewards += n_step_rewards
global_step = self.global_counter.cur_step
self.model.backward(self.summary_writer, global_step)
if done:
self.env.terminate()
cur_episode +=1
break
rewards = np.array(rewards)
mean_reward = np.mean(rewards)
std_reward = np.std(rewards)
log = {'agent': self.agent,
'step': global_step,
'test_id': -1,
'avg_reward': mean_reward,
'std_reward': std_reward}
self.data.append(log)
self._add_summary(mean_reward, global_step)
self.summary_writer.flush()
df = | pd.DataFrame(self.data) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import numpy as np
import os
import shutil
import openpyxl
import rpy2.robjects as ro
from rpy2.robjects import r
from rpy2.robjects.packages import importr
ro.r['options'](warn=-1)
r('as.POSIXct("2015-01-01 00:00:01")+0 ')
base = importr('base')
cars = importr('car')
mvtnorm = importr('mvtnorm')
broom = importr('broom')
psych = importr('psych')
mhtmult = importr('MHTmult')
def get_rfo_os_num_rows(root, modality):
os.chdir(root)
rfo = pd.read_csv(str(modality) + '_reformatted_output.csv')
ones = pd.read_csv('ONE_SAMPLE.csv')
[num_rows, _] = ones.shape
return [rfo, ones, num_rows]
def run_stats(ones, rfo, row):
group = ones.iloc[row]["GROUP"]
seed = ones.iloc[row]["SEED"]
rois = ones.iloc[row]["ROIS"]
r.assign('rGROUP', group)
r.assign('rSEED', seed)
r.assign('rROIS', rois)
rfo_gs = rfo.loc[rfo['Group'] == group].reset_index(drop=True)
rfo_gs_rs = pd.concat([rfo_gs[["Group", "Seed", "Subject_ID"]], rfo_gs[rois]], axis=1)
rfo_gs_rs_ss = rfo_gs_rs.loc[rfo_gs_rs['Seed'] == seed].reset_index(drop=True)
osd = pd.DataFrame(rfo_gs_rs_ss[rois])
osd.to_csv('osd.csv', index=False)
ro.globalenv['osd'] = "osd.csv"
r('osd<-read.csv(osd)')
r('ttest <- t.test(osd, mu = 0)')
roisdf = pd.DataFrame([rois])
roisdf.columns = ["ROI"]
seeddf = pd.DataFrame([seed])
seeddf.columns = ["Seed"]
groupdf = pd.DataFrame([group])
groupdf.columns = ["Group"]
name = | pd.concat([groupdf, seeddf, roisdf], axis=1) | pandas.concat |
import logging
import os
import re
import pandas as pd
import numpy as np
from pandas import DataFrame
from tqdm import tqdm
from joblib import dump, load
from nltk.tokenize import word_tokenize
from nltk import pos_tag
from ..MyMertrics import *
from pprint import pformat
from util.file_manager import remake_dir
def feature_filter(features_list):
res = []
for feature in features_list:
if len(feature) < 2:
res.append(feature)
return res
def merge_similar_feature(data, features):
column = data[features].sum(axis=1)
df = pd.DataFrame(data=column, columns=[features[0]])
data = data.drop(columns=features)
data.insert(2, features[0], df)
return data
def merge(f_path):
"""
:param f_path: file path
:return: results path
"""
if not os.path.exists("myData"):
remake_dir("myData")
logging.info("[*] Merging %s " % f_path)
data = pd.read_csv(f_path)
# features = feature_filter(data.columns[2:-1])
# data = data.drop(columns=features)
data_features = data.columns[2:-1]
features = [[], [], [], []]
for feature in data_features:
if 'aha' in feature:
features[0].append(feature)
if 'lmao' in feature:
features[1].append(feature)
if 'lmf' in feature or "fao" in feature:
features[2].append(feature)
if 'jus' in feature:
features[3].append(feature)
features.append(["huh", "hun"])
features.append(["taco", "tacos"])
features.append(["icheated", "icheatedbecause"])
features.append(["lt", "ltlt", "ltreply"])
features.append(["mad", "madd"])
features.append(["b", "be"])
features.append(["n", "and"])
features.append(["u", "you"])
features.append(["flex", "flexin"])
features.append(["dam", "damn", 'da'])
features.append(["kno", "know", 'knw'])
features.append(["dat", "dats"])
features.append(["gon", "gone"])
features.append(["iono", "ion"])
features.append(["factaboutme", "factsaboutme"])
features.append(["bt", "btwn"])
features.append(["loll", "lolss", "lolsz"])
features.append(["cali", "california"])
for f in features:
data = merge_similar_feature(data, f)
data['class'] = data['class'].map(MAP)
_columns = data.columns
users = set(data['user-id'])
new_df = | DataFrame(columns=data.columns[2:]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 20 16:11:49 2019
@author: Nate
"""
import pandas as pd
import numpy as np
from difflib import SequenceMatcher
from tqdm import tqdm
from langdetect import detect
import swifter
from sklearn.utils import shuffle
# read in data and drop duplicate content
R = pd.read_csv('R.csv', index_col='Unnamed: 0').drop_duplicates(subset='content')
RC = pd.read_csv('RC.csv', index_col='Unnamed: 0').drop_duplicates(subset='content')
C = pd.read_csv('C.csv', index_col='Unnamed: 0').drop_duplicates(subset='content')
LC = pd.read_csv('LC.csv', index_col='Unnamed: 0').drop_duplicates(subset='content')
L = pd.read_csv('L.csv', index_col='Unnamed: 0').drop_duplicates(subset='content')
# exception safe function for language detection
def try_me(text):
try:
return detect(text)
except:
print(text)
return 'xx'
# remove non english articles
pbar = tqdm(total=5)
R = R[R['content'].swifter.allow_dask_on_strings().apply(try_me) == 'en']
pbar.update(1)
RC = RC[RC['content'].swifter.allow_dask_on_strings().apply(try_me) == 'en']
pbar.update(1)
C = C[C['content'].swifter.allow_dask_on_strings().apply(try_me) == 'en']
pbar.update(1)
LC = LC[LC['content'].swifter.allow_dask_on_strings().apply(try_me) == 'en']
pbar.update(1)
L = L[L['content'].swifter.allow_dask_on_strings().apply(try_me) == 'en']
pbar.update(1)
pbar.close()
"""
# character length 'len' and word length 'count' analysis
R['len'] = R['content'].apply(len)
RC['len'] = RC['content'].apply(len)
C['len'] = C['content'].apply(len)
LC['len'] = LC['content'].apply(len)
L['len'] = L['content'].apply(len)
R['count'] = R['content'].apply(lambda x: len(x.split(' ')))
RC['count'] = RC['content'].apply(lambda x: len(x.split(' ')))
C['count'] = C['content'].apply(lambda x: len(x.split(' ')))
L['count'] = L['content'].apply(lambda x: len(x.split(' ')))
LC['count'] = LC['content'].apply(lambda x: len(x.split(' ')))
"""
# shuffle
L = shuffle(L)
LC = shuffle(LC)
C = shuffle(C)
RC = shuffle(RC)
R = shuffle(R)
# add labels
L['label'] = 'L'
LC['label'] = 'LC'
C['label'] = 'C'
RC['label'] = 'RC'
R['label'] = 'R'
# under sampling
low = min(map(len, [R, RC, C, LC, L]))
R = R[:low]
RC = RC[:low]
C = C[:low]
LC = LC[:low]
L = L[:low]
size = 344
# train and test datasets
train = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
@file
@brief Various function to download data about population
"""
import gzip
import os
import numpy
import pandas
from pyquickhelper.loghelper import noLOG
from pyensae.datasource import download_data
from .data_exceptions import DataFormatException
def population_france_year(url="https://www.insee.fr/fr/statistiques/fichier/1892086/pop-totale-france.xls",
sheet_name=0, year=2020):
"""
Downloads the data for the French population from INSEE website
@param url url
@param sheet_name sheet index
@param year last year to find
@return DataFrame
The sheet index is 0 for the whole of France, 1 for metropolitan France.
The last row aggregates multiple birth years (``1914 ou avant``); they remain
aggregated but the label is changed to 1914. ``100 ou plus`` is replaced by 100.
By default, the data is coming from `INSEE, Bilan Démographique <https://www.insee.fr/fr/statistiques/1892086?sommaire=1912926>`_.
**2017/01**: pandas does not seem to be able to read the format (old format).
You should convert the file in txt with Excel.
"""
try:
df = pandas.read_excel(url, sheet_name=sheet_name)
skiprows = 5
except Exception as e: # pragma: no cover
# we try to find a local version
this = os.path.dirname(__file__)
name = os.path.join(this, "data_population", url.split(
"/")[-1].replace(".xls", ".xlsx"))
if not os.path.exists(name):
raise FileNotFoundError(
"Unable to find a replacement for '{0}' as '{1}'".format(url, name)) from e
df = pandas.read_excel(name, sheet_name=sheet_name)
url = name
skiprows = 0
col = df.columns[0]
if len(col) == 0:
raise DataFormatException( # pragma: no cover
"Unable to find {0} (year) in table at url '{1}'".format(year, url))
if skiprows > 0 and str(year) not in col:
raise DataFormatException( # pragma: no cover
"Unable to find {0} (year) in first column name '{1}' at url "
"'{2}'".format(year, col, url))
table = pandas.read_excel(url, sheet_name=sheet_name, skiprows=skiprows)
table.columns = ["naissance", "age", "hommes", "femmes", "ensemble"]
table = table[(table.naissance != 'Champ : France y c. Mayotte.') &
table.naissance.apply(lambda s: "Source" not in str(s))].copy()
table["naissance"] = table.apply(lambda r: r["naissance"] if isinstance(r["naissance"], (int, float)) else
r["naissance"].replace(" ou avant", ""), axis=1)
table["age"] = table.apply(lambda r: r["age"] if isinstance(r["age"], (int, float)) else
r["age"].replace(" ou plus", "") if isinstance(
r["age"], str) else r["age"],
axis=1)
table = table.dropna(axis=0)
for c in table.columns:
table[c] = table[c].astype(int)
return table
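# Illustrative call (downloads from INSEE or falls back to the bundled local copy):
#   table = population_france_year(sheet_name=0, year=2020)
#   table.columns   # ['naissance', 'age', 'hommes', 'femmes', 'ensemble']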
def table_mortalite_france_00_02(homme=None, femme=None):
"""
Download the mortality tables for France, assuming they
are available in Excel format.
@param homme table for men
@param femme table for women
@return DataFrame
The final DataFrame merges both sheets.
The data is coming from
`Institut des Actuaires: Reférences de mortalité <http://www.institutdesactuaires.com/gene/main.php?base=2127>`_ or
`Références techniques <http://www.ressources-actuarielles.net/EXT/ISFA/fp-isfa.nsf/
34a14c286dfb0903c1256ffd00502d73/d62719e329025b94c12577c100545bb7?OpenDocument>`_.
"""
this = os.path.join(os.path.abspath(
os.path.dirname(__file__)), "data_population")
if homme is None:
homme = os.path.join(this, "TH00-02_D.xls")
sheeth = "Table"
else:
sheeth = 0
if femme is None:
femme = os.path.join(this, "TF00-02_D.xls")
sheetf = "Table"
else:
sheetf = 0
isexch = os.path.splitext(homme)[-1] in (".xls", ".xlsx")
dfh = pandas.read_excel(
homme, sheet_name=sheeth) if isexch else pandas.read_csv(homme, sep=";")
if dfh.shape[1] > 2:
dfh = dfh[dfh.columns[:2]]
isexcf = os.path.splitext(femme)[-1] in (".xls", ".xlsx")
dff = pandas.read_excel(
femme, sheet_name=sheetf) if isexcf else pandas.read_csv(femme, sep=";")
if dff.shape[1] > 2:
dff = dff[dff.columns[:2]]
df = dfh.merge(dff, on="Age")
df.columns = ["Age", "Homme", "Femme"]
return df.dropna().reset_index(drop=True)
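# Illustrative call using the packaged TH00-02 / TF00-02 tables:
#   df = table_mortalite_france_00_02()
#   df.columns   # ['Age', 'Homme', 'Femme']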
def fecondite_france(url=None):
"""
Download the fertility (fécondité) table for France (Excel format).
@param url source (url or file)
@return DataFrame
By default, the data is coming from a local file
which is a copy of
`INSEE: Fécondité selon l'âge détaillé de la mère <https://www.insee.fr/fr/statistiques/2045366?sommaire=2045470&q=fecondite>`_.
The original file cannot be read by pandas so we convert it first.
See also `INSEE Bilan Démographique 2016 <https://www.insee.fr/fr/statistiques/1892259?sommaire=1912926>`_.
"""
if url is None:
this = os.path.abspath(os.path.dirname(__file__))
url = os.path.join(this, "data_population", "irsocsd2014_G10.xlsx")
df = | pandas.read_excel(url) | pandas.read_excel |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
290: pd.Timestamp("2013-02-16 00:00:00"),
291: pd.Timestamp("2013-02-17 00:00:00"),
292: pd.Timestamp("2013-02-18 00:00:00"),
293: pd.Timestamp("2013-02-19 00:00:00"),
294: pd.Timestamp("2013-02-20 00:00:00"),
295: pd.Timestamp("2013-02-21 00:00:00"),
296: pd.Timestamp("2013-02-22 00:00:00"),
297: pd.Timestamp("2013-02-23 00:00:00"),
298: pd.Timestamp("2013-02-24 00:00:00"),
299: pd.Timestamp("2013-02-25 00:00:00"),
300: pd.Timestamp("2013-02-26 00:00:00"),
301: pd.Timestamp("2013-02-27 00:00:00"),
302: pd.Timestamp("2013-02-28 00:00:00"),
303: pd.Timestamp("2013-03-01 00:00:00"),
304: pd.Timestamp("2013-03-02 00:00:00"),
305: pd.Timestamp("2013-03-03 00:00:00"),
306: pd.Timestamp("2013-03-04 00:00:00"),
307: pd.Timestamp("2013-03-05 00:00:00"),
308: pd.Timestamp("2013-03-06 00:00:00"),
309: pd.Timestamp("2013-03-07 00:00:00"),
310: pd.Timestamp("2013-03-08 00:00:00"),
311: pd.Timestamp("2013-03-09 00:00:00"),
312: pd.Timestamp("2013-03-10 00:00:00"),
313: pd.Timestamp("2013-03-11 00:00:00"),
314: pd.Timestamp("2013-03-12 00:00:00"),
315: pd.Timestamp("2013-03-13 00:00:00"),
316: pd.Timestamp("2013-03-14 00:00:00"),
317: pd.Timestamp("2013-03-15 00:00:00"),
318: pd.Timestamp("2013-03-16 00:00:00"),
319: pd.Timestamp("2013-03-17 00:00:00"),
320: pd.Timestamp("2013-03-18 00:00:00"),
321: pd.Timestamp("2013-03-19 00:00:00"),
322: pd.Timestamp("2013-03-20 00:00:00"),
323: pd.Timestamp("2013-03-21 00:00:00"),
324: pd.Timestamp("2013-03-22 00:00:00"),
325: pd.Timestamp("2013-03-23 00:00:00"),
326: pd.Timestamp("2013-03-24 00:00:00"),
327: pd.Timestamp("2013-03-25 00:00:00"),
328: pd.Timestamp("2013-03-26 00:00:00"),
329: pd.Timestamp("2013-03-27 00:00:00"),
330: pd.Timestamp("2013-03-28 00:00:00"),
331: pd.Timestamp("2013-03-29 00:00:00"),
332: pd.Timestamp("2013-03-30 00:00:00"),
333: pd.Timestamp("2013-03-31 00:00:00"),
334: pd.Timestamp("2013-04-01 00:00:00"),
335: pd.Timestamp("2013-04-02 00:00:00"),
336: pd.Timestamp("2013-04-03 00:00:00"),
337: pd.Timestamp("2013-04-04 00:00:00"),
338: pd.Timestamp("2013-04-05 00:00:00"),
339: pd.Timestamp("2013-04-06 00:00:00"),
340: pd.Timestamp("2013-04-07 00:00:00"),
341: pd.Timestamp("2013-04-08 00:00:00"),
342: pd.Timestamp("2013-04-09 00:00:00"),
343: pd.Timestamp("2013-04-10 00:00:00"),
344: pd.Timestamp("2013-04-11 00:00:00"),
345: pd.Timestamp("2013-04-12 00:00:00"),
346: pd.Timestamp("2013-04-13 00:00:00"),
347: pd.Timestamp("2013-04-14 00:00:00"),
348: pd.Timestamp("2013-04-15 00:00:00"),
349: pd.Timestamp("2013-04-16 00:00:00"),
350: pd.Timestamp("2013-04-17 00:00:00"),
351: pd.Timestamp("2013-04-18 00:00:00"),
352: pd.Timestamp("2013-04-19 00:00:00"),
353: pd.Timestamp("2013-04-20 00:00:00"),
354: pd.Timestamp("2013-04-21 00:00:00"),
355: pd.Timestamp("2013-04-22 00:00:00"),
356: pd.Timestamp("2013-04-23 00:00:00"),
357: pd.Timestamp("2013-04-24 00:00:00"),
358: pd.Timestamp("2013-04-25 00:00:00"),
359: pd.Timestamp("2013-04-26 00:00:00"),
360: pd.Timestamp("2013-04-27 00:00:00"),
361: pd.Timestamp("2013-04-28 00:00:00"),
362: pd.Timestamp("2013-04-29 00:00:00"),
363: pd.Timestamp("2013-04-30 00:00:00"),
364: pd.Timestamp("2013-05-01 00:00:00"),
365: pd.Timestamp("2013-05-02 00:00:00"),
366: pd.Timestamp("2013-05-03 00:00:00"),
367: pd.Timestamp("2013-05-04 00:00:00"),
368: pd.Timestamp("2013-05-05 00:00:00"),
369: pd.Timestamp("2013-05-06 00:00:00"),
370: pd.Timestamp("2013-05-07 00:00:00"),
371: pd.Timestamp("2013-05-08 00:00:00"),
372: pd.Timestamp("2013-05-09 00:00:00"),
373: pd.Timestamp("2013-05-10 00:00:00"),
374: pd.Timestamp("2013-05-11 00:00:00"),
375: pd.Timestamp("2013-05-12 00:00:00"),
376: pd.Timestamp("2013-05-13 00:00:00"),
377: pd.Timestamp("2013-05-14 00:00:00"),
378: pd.Timestamp("2013-05-15 00:00:00"),
379: pd.Timestamp("2013-05-16 00:00:00"),
380: pd.Timestamp("2013-05-17 00:00:00"),
381: pd.Timestamp("2013-05-18 00:00:00"),
382: pd.Timestamp("2013-05-19 00:00:00"),
383: pd.Timestamp("2013-05-20 00:00:00"),
384: pd.Timestamp("2013-05-21 00:00:00"),
385: pd.Timestamp("2013-05-22 00:00:00"),
386: pd.Timestamp("2013-05-23 00:00:00"),
387: pd.Timestamp("2013-05-24 00:00:00"),
388: pd.Timestamp("2013-05-25 00:00:00"),
389: pd.Timestamp("2013-05-26 00:00:00"),
390: pd.Timestamp("2013-05-27 00:00:00"),
391: pd.Timestamp("2013-05-28 00:00:00"),
392: pd.Timestamp("2013-05-29 00:00:00"),
393: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.348604308646497,
1: 8.348964254851197,
2: 8.349324201055898,
3: 8.349684147260598,
4: 8.350044093465298,
5: 8.350404039669998,
6: 8.3507639858747,
7: 8.3511239320794,
8: 8.3514838782841,
9: 8.351843824488801,
10: 8.352203770693501,
11: 8.352563716898201,
12: 8.352923663102903,
13: 8.353283609307603,
14: 8.353643555512303,
15: 8.354003501717003,
16: 8.354363447921704,
17: 8.354723394126404,
18: 8.355083340331104,
19: 8.355443286535806,
20: 8.355803232740506,
21: 8.356163178945206,
22: 8.356523125149906,
23: 8.356883071354607,
24: 8.357243017559307,
25: 8.357602963764007,
26: 8.357962909968709,
27: 8.358322856173409,
28: 8.358682802378109,
29: 8.35904274858281,
30: 8.35940269478751,
31: 8.35976264099221,
32: 8.36012258719691,
33: 8.360482533401612,
34: 8.360842479606312,
35: 8.361202425811012,
36: 8.361562372015714,
37: 8.361922318220413,
38: 8.362282264425113,
39: 8.362642210629813,
40: 8.363002156834515,
41: 8.363362103039215,
42: 8.363722049243915,
43: 8.364081995448617,
44: 8.364441941653316,
45: 8.364801887858016,
46: 8.365161834062716,
47: 8.365521780267418,
48: 8.365881726472118,
49: 8.366241672676818,
50: 8.36660161888152,
51: 8.36696156508622,
52: 8.36732151129092,
53: 8.367681457495621,
54: 8.368041403700321,
55: 8.368401349905021,
56: 8.36876129610972,
57: 8.369121242314423,
58: 8.369481188519122,
59: 8.369841134723822,
60: 8.370201080928524,
61: 8.370561027133224,
62: 8.370920973337924,
63: 8.371280919542624,
64: 8.371640865747326,
65: 8.372000811952026,
66: 8.372360758156725,
67: 8.372720704361427,
68: 8.373080650566127,
69: 8.373440596770827,
70: 8.373800542975529,
71: 8.374160489180229,
72: 8.374520435384929,
73: 8.374880381589628,
74: 8.37524032779433,
75: 8.37560027399903,
76: 8.37596022020373,
77: 8.376320166408432,
78: 8.376680112613132,
79: 8.377040058817832,
80: 8.377400005022531,
81: 8.377759951227233,
82: 8.378119897431933,
83: 8.378479843636633,
84: 8.378839789841335,
85: 8.379199736046035,
86: 8.379559682250735,
87: 8.379919628455436,
88: 8.380279574660136,
89: 8.380639520864836,
90: 8.380999467069536,
91: 8.381359413274238,
92: 8.381719359478938,
93: 8.382079305683638,
94: 8.38243925188834,
95: 8.38279919809304,
96: 8.38315914429774,
97: 8.383519090502439,
98: 8.38387903670714,
99: 8.38423898291184,
100: 8.38459892911654,
101: 8.384958875321242,
102: 8.385318821525942,
103: 8.385678767730642,
104: 8.386038713935344,
105: 8.386398660140044,
106: 8.386758606344744,
107: 8.387118552549444,
108: 8.387478498754145,
109: 8.387838444958845,
110: 8.388198391163545,
111: 8.388558337368247,
112: 8.388918283572947,
113: 8.389278229777647,
114: 8.389638175982347,
115: 8.389998122187048,
116: 8.390358068391748,
117: 8.390718014596448,
118: 8.39107796080115,
119: 8.39143790700585,
120: 8.39179785321055,
121: 8.392157799415251,
122: 8.392517745619951,
123: 8.392877691824651,
124: 8.393237638029351,
125: 8.393597584234053,
126: 8.393957530438753,
127: 8.394317476643453,
128: 8.394677422848154,
129: 8.395037369052854,
130: 8.395397315257554,
131: 8.395757261462254,
132: 8.396117207666956,
133: 8.396477153871656,
134: 8.396837100076356,
135: 8.397197046281057,
136: 8.397556992485757,
137: 8.397916938690457,
138: 8.398276884895157,
139: 8.398636831099859,
140: 8.398996777304559,
141: 8.399356723509259,
142: 8.39971666971396,
143: 8.40007661591866,
144: 8.40043656212336,
145: 8.400796508328062,
146: 8.401156454532762,
147: 8.401516400737462,
148: 8.401876346942162,
149: 8.402236293146863,
150: 8.402596239351563,
151: 8.402956185556263,
152: 8.403316131760965,
153: 8.403676077965665,
154: 8.404036024170365,
155: 8.404395970375065,
156: 8.404755916579767,
157: 8.405115862784466,
158: 8.405475808989166,
159: 8.405835755193868,
160: 8.406195701398568,
161: 8.406555647603268,
162: 8.40691559380797,
163: 8.40727554001267,
164: 8.40763548621737,
165: 8.40799543242207,
166: 8.408355378626771,
167: 8.408715324831471,
168: 8.409075271036171,
169: 8.409435217240873,
170: 8.409795163445573,
171: 8.410155109650272,
172: 8.410515055854972,
173: 8.410875002059674,
174: 8.411234948264374,
175: 8.411594894469074,
176: 8.411954840673776,
177: 8.412314786878476,
178: 8.412674733083175,
179: 8.413034679287877,
180: 8.413394625492577,
181: 8.413754571697277,
182: 8.414114517901977,
183: 8.414474464106679,
184: 8.414834410311379,
185: 8.415194356516078,
186: 8.41555430272078,
187: 8.41591424892548,
188: 8.41627419513018,
189: 8.41663414133488,
190: 8.416994087539582,
191: 8.417354033744282,
192: 8.417713979948982,
193: 8.418073926153683,
194: 8.418433872358383,
195: 8.418793818563083,
196: 8.419153764767785,
197: 8.419513710972485,
198: 8.419873657177185,
199: 8.420233603381885,
200: 8.420593549586586,
201: 8.420953495791286,
202: 8.421313441995986,
203: 8.421673388200688,
204: 8.422033334405388,
205: 8.422393280610088,
206: 8.422753226814788,
207: 8.42311317301949,
208: 8.42347311922419,
209: 8.423833065428889,
210: 8.42419301163359,
211: 8.42455295783829,
212: 8.42491290404299,
213: 8.42527285024769,
214: 8.425632796452392,
215: 8.425992742657092,
216: 8.426352688861792,
217: 8.426712635066494,
218: 8.427072581271194,
219: 8.427432527475894,
220: 8.427792473680595,
221: 8.428152419885295,
222: 8.428512366089995,
223: 8.428872312294695,
224: 8.429232258499397,
225: 8.429592204704097,
226: 8.429952150908797,
227: 8.430312097113498,
228: 8.430672043318198,
229: 8.431031989522898,
230: 8.431391935727598,
231: 8.4317518819323,
232: 8.432111828137,
233: 8.4324717743417,
234: 8.432831720546401,
235: 8.433191666751101,
236: 8.433551612955801,
237: 8.433911559160503,
238: 8.434271505365203,
239: 8.434631451569903,
240: 8.434991397774603,
241: 8.435351343979304,
242: 8.435711290184004,
243: 8.436071236388704,
244: 8.436431182593406,
245: 8.436791128798106,
246: 8.437151075002806,
247: 8.437511021207506,
248: 8.437870967412207,
249: 8.438230913616907,
250: 8.438590859821607,
251: 8.438950806026309,
252: 8.439310752231009,
253: 8.439670698435709,
254: 8.44003064464041,
255: 8.44039059084511,
256: 8.44075053704981,
257: 8.44111048325451,
258: 8.441470429459212,
259: 8.441830375663912,
260: 8.442190321868612,
261: 8.442550268073314,
262: 8.442910214278013,
263: 8.443270160482713,
264: 8.443630106687413,
265: 8.443990052892115,
266: 8.444349999096815,
267: 8.444709945301515,
268: 8.445069891506217,
269: 8.445429837710916,
270: 8.445789783915616,
271: 8.446149730120318,
272: 8.446509676325018,
273: 8.446869622529718,
274: 8.447229568734418,
275: 8.44758951493912,
276: 8.44794946114382,
277: 8.44830940734852,
278: 8.448669353553221,
279: 8.449029299757921,
280: 8.449389245962621,
281: 8.449749192167321,
282: 8.450109138372023,
283: 8.450469084576723,
284: 8.450829030781422,
285: 8.451188976986124,
286: 8.451548923190824,
287: 8.451908869395524,
288: 8.452268815600226,
289: 8.452628761804926,
290: 8.452988708009626,
291: 8.453348654214325,
292: 8.453708600419027,
293: 8.454068546623727,
294: 8.454428492828427,
295: 8.454788439033129,
296: 8.455148385237829,
297: 8.455508331442529,
298: 8.455868277647228,
299: 8.45622822385193,
300: 8.45658817005663,
301: 8.45694811626133,
302: 8.457308062466032,
303: 8.457668008670732,
304: 8.458027954875432,
305: 8.458387901080131,
306: 8.458747847284833,
307: 8.459107793489533,
308: 8.459467739694233,
309: 8.459827685898935,
310: 8.460187632103635,
311: 8.460547578308335,
312: 8.460907524513036,
313: 8.461267470717736,
314: 8.461627416922436,
315: 8.461987363127136,
316: 8.462347309331838,
317: 8.462707255536538,
318: 8.463067201741238,
319: 8.46342714794594,
320: 8.46378709415064,
321: 8.46414704035534,
322: 8.464506986560039,
323: 8.46486693276474,
324: 8.46522687896944,
325: 8.46558682517414,
326: 8.465946771378842,
327: 8.466306717583542,
328: 8.466666663788242,
329: 8.467026609992944,
330: 8.467386556197644,
331: 8.467746502402344,
332: 8.468106448607044,
333: 8.468466394811745,
334: 8.468826341016445,
335: 8.469186287221145,
336: 8.469546233425847,
337: 8.469906179630547,
338: 8.470266125835247,
339: 8.470626072039947,
340: 8.470986018244648,
341: 8.471345964449348,
342: 8.471705910654048,
343: 8.47206585685875,
344: 8.47242580306345,
345: 8.47278574926815,
346: 8.473145695472851,
347: 8.473505641677551,
348: 8.473865587882251,
349: 8.474225534086951,
350: 8.474585480291653,
351: 8.474945426496353,
352: 8.475305372701053,
353: 8.475665318905754,
354: 8.476025265110454,
355: 8.476385211315154,
356: 8.476745157519854,
357: 8.477105103724556,
358: 8.477465049929256,
359: 8.477824996133956,
360: 8.478184942338657,
361: 8.478544888543357,
362: 8.478904834748057,
363: 8.479264780952759,
364: 8.479624727157459,
365: 8.479984673362159,
366: 8.480344619566859,
367: 8.48070456577156,
368: 8.48106451197626,
369: 8.48142445818096,
370: 8.481784404385662,
371: 8.482144350590362,
372: 8.482504296795062,
373: 8.482864242999762,
374: 8.483224189204464,
375: 8.483584135409163,
376: 8.483944081613863,
377: 8.484304027818565,
378: 8.484663974023265,
379: 8.485023920227965,
380: 8.485383866432667,
381: 8.485743812637367,
382: 8.486103758842066,
383: 8.486463705046766,
384: 8.486823651251468,
385: 8.487183597456168,
386: 8.487543543660868,
387: 8.48790348986557,
388: 8.48826343607027,
389: 8.48862338227497,
390: 8.48898332847967,
391: 8.489343274684371,
392: 8.489703220889071,
393: 8.490063167093771,
},
"fcst_lower": {
0: -np.inf,
1: -np.inf,
2: -np.inf,
3: -np.inf,
4: -np.inf,
5: -np.inf,
6: -np.inf,
7: -np.inf,
8: -np.inf,
9: -np.inf,
10: -np.inf,
11: -np.inf,
12: -np.inf,
13: -np.inf,
14: -np.inf,
15: -np.inf,
16: -np.inf,
17: -np.inf,
18: -np.inf,
19: -np.inf,
20: -np.inf,
21: -np.inf,
22: -np.inf,
23: -np.inf,
24: -np.inf,
25: -np.inf,
26: -np.inf,
27: -np.inf,
28: -np.inf,
29: -np.inf,
30: -np.inf,
31: -np.inf,
32: -np.inf,
33: -np.inf,
34: -np.inf,
35: -np.inf,
36: -np.inf,
37: -np.inf,
38: -np.inf,
39: -np.inf,
40: -np.inf,
41: -np.inf,
42: -np.inf,
43: -np.inf,
44: -np.inf,
45: -np.inf,
46: -np.inf,
47: -np.inf,
48: -np.inf,
49: -np.inf,
50: -np.inf,
51: -np.inf,
52: -np.inf,
53: -np.inf,
54: -np.inf,
55: -np.inf,
56: -np.inf,
57: -np.inf,
58: -np.inf,
59: -np.inf,
60: -np.inf,
61: -np.inf,
62: -np.inf,
63: -np.inf,
64: -np.inf,
65: -np.inf,
66: -np.inf,
67: -np.inf,
68: -np.inf,
69: -np.inf,
70: -np.inf,
71: -np.inf,
72: -np.inf,
73: -np.inf,
74: -np.inf,
75: -np.inf,
76: -np.inf,
77: -np.inf,
78: -np.inf,
79: -np.inf,
80: -np.inf,
81: -np.inf,
82: -np.inf,
83: -np.inf,
84: -np.inf,
85: -np.inf,
86: -np.inf,
87: -np.inf,
88: -np.inf,
89: -np.inf,
90: -np.inf,
91: -np.inf,
92: -np.inf,
93: -np.inf,
94: -np.inf,
95: -np.inf,
96: -np.inf,
97: -np.inf,
98: -np.inf,
99: -np.inf,
100: -np.inf,
101: -np.inf,
102: -np.inf,
103: -np.inf,
104: -np.inf,
105: -np.inf,
106: -np.inf,
107: -np.inf,
108: -np.inf,
109: -np.inf,
110: -np.inf,
111: -np.inf,
112: -np.inf,
113: -np.inf,
114: -np.inf,
115: -np.inf,
116: -np.inf,
117: -np.inf,
118: -np.inf,
119: -np.inf,
120: -np.inf,
121: -np.inf,
122: -np.inf,
123: -np.inf,
124: -np.inf,
125: -np.inf,
126: -np.inf,
127: -np.inf,
128: -np.inf,
129: -np.inf,
130: -np.inf,
131: -np.inf,
132: -np.inf,
133: -np.inf,
134: -np.inf,
135: -np.inf,
136: -np.inf,
137: -np.inf,
138: -np.inf,
139: -np.inf,
140: -np.inf,
141: -np.inf,
142: -np.inf,
143: -np.inf,
144: -np.inf,
145: -np.inf,
146: -np.inf,
147: -np.inf,
148: -np.inf,
149: -np.inf,
150: -np.inf,
151: -np.inf,
152: -np.inf,
153: -np.inf,
154: -np.inf,
155: -np.inf,
156: -np.inf,
157: -np.inf,
158: -np.inf,
159: -np.inf,
160: -np.inf,
161: -np.inf,
162: -np.inf,
163: -np.inf,
164: -np.inf,
165: -np.inf,
166: -np.inf,
167: -np.inf,
168: -np.inf,
169: -np.inf,
170: -np.inf,
171: -np.inf,
172: -np.inf,
173: -np.inf,
174: -np.inf,
175: -np.inf,
176: -np.inf,
177: -np.inf,
178: -np.inf,
179: -np.inf,
180: -np.inf,
181: -np.inf,
182: -np.inf,
183: -np.inf,
184: -np.inf,
185: -np.inf,
186: -np.inf,
187: -np.inf,
188: -np.inf,
189: -np.inf,
190: -np.inf,
191: -np.inf,
192: -np.inf,
193: -np.inf,
194: -np.inf,
195: -np.inf,
196: -np.inf,
197: -np.inf,
198: -np.inf,
199: -np.inf,
200: -np.inf,
201: -np.inf,
202: -np.inf,
203: -np.inf,
204: -np.inf,
205: -np.inf,
206: -np.inf,
207: -np.inf,
208: -np.inf,
209: -np.inf,
210: -np.inf,
211: -np.inf,
212: -np.inf,
213: -np.inf,
214: -np.inf,
215: -np.inf,
216: -np.inf,
217: -np.inf,
218: -np.inf,
219: -np.inf,
220: -np.inf,
221: -np.inf,
222: -np.inf,
223: -np.inf,
224: -np.inf,
225: -np.inf,
226: -np.inf,
227: -np.inf,
228: -np.inf,
229: -np.inf,
230: -np.inf,
231: -np.inf,
232: -np.inf,
233: -np.inf,
234: -np.inf,
235: -np.inf,
236: -np.inf,
237: -np.inf,
238: -np.inf,
239: -np.inf,
240: -np.inf,
241: -np.inf,
242: -np.inf,
243: -np.inf,
244: -np.inf,
245: -np.inf,
246: -np.inf,
247: -np.inf,
248: -np.inf,
249: -np.inf,
250: -np.inf,
251: -np.inf,
252: -np.inf,
253: -np.inf,
254: -np.inf,
255: -np.inf,
256: -np.inf,
257: -np.inf,
258: -np.inf,
259: -np.inf,
260: -np.inf,
261: -np.inf,
262: -np.inf,
263: -np.inf,
264: -np.inf,
265: -np.inf,
266: -np.inf,
267: -np.inf,
268: -np.inf,
269: -np.inf,
270: -np.inf,
271: -np.inf,
272: -np.inf,
273: -np.inf,
274: -np.inf,
275: -np.inf,
276: -np.inf,
277: -np.inf,
278: -np.inf,
279: -np.inf,
280: -np.inf,
281: -np.inf,
282: -np.inf,
283: -np.inf,
284: -np.inf,
285: -np.inf,
286: -np.inf,
287: -np.inf,
288: -np.inf,
289: -np.inf,
290: -np.inf,
291: -np.inf,
292: -np.inf,
293: -np.inf,
294: -np.inf,
295: -np.inf,
296: -np.inf,
297: -np.inf,
298: -np.inf,
299: -np.inf,
300: -np.inf,
301: -np.inf,
302: -np.inf,
303: -np.inf,
304: -np.inf,
305: -np.inf,
306: -np.inf,
307: -np.inf,
308: -np.inf,
309: -np.inf,
310: -np.inf,
311: -np.inf,
312: -np.inf,
313: -np.inf,
314: -np.inf,
315: -np.inf,
316: -np.inf,
317: -np.inf,
318: -np.inf,
319: -np.inf,
320: -np.inf,
321: -np.inf,
322: -np.inf,
323: -np.inf,
324: -np.inf,
325: -np.inf,
326: -np.inf,
327: -np.inf,
328: -np.inf,
329: -np.inf,
330: -np.inf,
331: -np.inf,
332: -np.inf,
333: -np.inf,
334: -np.inf,
335: -np.inf,
336: -np.inf,
337: -np.inf,
338: -np.inf,
339: -np.inf,
340: -np.inf,
341: -np.inf,
342: -np.inf,
343: -np.inf,
344: -np.inf,
345: -np.inf,
346: -np.inf,
347: -np.inf,
348: -np.inf,
349: -np.inf,
350: -np.inf,
351: -np.inf,
352: -np.inf,
353: -np.inf,
354: -np.inf,
355: -np.inf,
356: -np.inf,
357: -np.inf,
358: -np.inf,
359: -np.inf,
360: -np.inf,
361: -np.inf,
362: -np.inf,
363: -np.inf,
364: -np.inf,
365: -np.inf,
366: -np.inf,
367: -np.inf,
368: -np.inf,
369: -np.inf,
370: -np.inf,
371: -np.inf,
372: -np.inf,
373: -np.inf,
374: -np.inf,
375: -np.inf,
376: -np.inf,
377: -np.inf,
378: -np.inf,
379: -np.inf,
380: -np.inf,
381: -np.inf,
382: -np.inf,
383: -np.inf,
384: -np.inf,
385: -np.inf,
386: -np.inf,
387: -np.inf,
388: -np.inf,
389: -np.inf,
390: -np.inf,
391: -np.inf,
392: -np.inf,
393: -np.inf,
},
"fcst_upper": {
0: np.inf,
1: np.inf,
2: np.inf,
3: np.inf,
4: np.inf,
5: np.inf,
6: np.inf,
7: np.inf,
8: np.inf,
9: np.inf,
10: np.inf,
11: np.inf,
12: np.inf,
13: np.inf,
14: np.inf,
15: np.inf,
16: np.inf,
17: np.inf,
18: np.inf,
19: np.inf,
20: np.inf,
21: np.inf,
22: np.inf,
23: np.inf,
24: np.inf,
25: np.inf,
26: np.inf,
27: np.inf,
28: np.inf,
29: np.inf,
30: np.inf,
31: np.inf,
32: np.inf,
33: np.inf,
34: np.inf,
35: np.inf,
36: np.inf,
37: np.inf,
38: np.inf,
39: np.inf,
40: np.inf,
41: np.inf,
42: np.inf,
43: np.inf,
44: np.inf,
45: np.inf,
46: np.inf,
47: np.inf,
48: np.inf,
49: np.inf,
50: np.inf,
51: np.inf,
52: np.inf,
53: np.inf,
54: np.inf,
55: np.inf,
56: np.inf,
57: np.inf,
58: np.inf,
59: np.inf,
60: np.inf,
61: np.inf,
62: np.inf,
63: np.inf,
64: np.inf,
65: np.inf,
66: np.inf,
67: np.inf,
68: np.inf,
69: np.inf,
70: np.inf,
71: np.inf,
72: np.inf,
73: np.inf,
74: np.inf,
75: np.inf,
76: np.inf,
77: np.inf,
78: np.inf,
79: np.inf,
80: np.inf,
81: np.inf,
82: np.inf,
83: np.inf,
84: np.inf,
85: np.inf,
86: np.inf,
87: np.inf,
88: np.inf,
89: np.inf,
90: np.inf,
91: np.inf,
92: np.inf,
93: np.inf,
94: np.inf,
95: np.inf,
96: np.inf,
97: np.inf,
98: np.inf,
99: np.inf,
100: np.inf,
101: np.inf,
102: np.inf,
103: np.inf,
104: np.inf,
105: np.inf,
106: np.inf,
107: np.inf,
108: np.inf,
109: np.inf,
110: np.inf,
111: np.inf,
112: np.inf,
113: np.inf,
114: np.inf,
115: np.inf,
116: np.inf,
117: np.inf,
118: np.inf,
119: np.inf,
120: np.inf,
121: np.inf,
122: np.inf,
123: np.inf,
124: np.inf,
125: np.inf,
126: np.inf,
127: np.inf,
128: np.inf,
129: np.inf,
130: np.inf,
131: np.inf,
132: np.inf,
133: np.inf,
134: np.inf,
135: np.inf,
136: np.inf,
137: np.inf,
138: np.inf,
139: np.inf,
140: np.inf,
141: np.inf,
142: np.inf,
143: np.inf,
144: np.inf,
145: np.inf,
146: np.inf,
147: np.inf,
148: np.inf,
149: np.inf,
150: np.inf,
151: np.inf,
152: np.inf,
153: np.inf,
154: np.inf,
155: np.inf,
156: np.inf,
157: np.inf,
158: np.inf,
159: np.inf,
160: np.inf,
161: np.inf,
162: np.inf,
163: np.inf,
164: np.inf,
165: np.inf,
166: np.inf,
167: np.inf,
168: np.inf,
169: np.inf,
170: np.inf,
171: np.inf,
172: np.inf,
173: np.inf,
174: np.inf,
175: np.inf,
176: np.inf,
177: np.inf,
178: np.inf,
179: np.inf,
180: np.inf,
181: np.inf,
182: np.inf,
183: np.inf,
184: np.inf,
185: np.inf,
186: np.inf,
187: np.inf,
188: np.inf,
189: np.inf,
190: np.inf,
191: np.inf,
192: np.inf,
193: np.inf,
194: np.inf,
195: np.inf,
196: np.inf,
197: np.inf,
198: np.inf,
199: np.inf,
200: np.inf,
201: np.inf,
202: np.inf,
203: np.inf,
204: np.inf,
205: np.inf,
206: np.inf,
207: np.inf,
208: np.inf,
209: np.inf,
210: np.inf,
211: np.inf,
212: np.inf,
213: np.inf,
214: np.inf,
215: np.inf,
216: np.inf,
217: np.inf,
218: np.inf,
219: np.inf,
220: np.inf,
221: np.inf,
222: np.inf,
223: np.inf,
224: np.inf,
225: np.inf,
226: np.inf,
227: np.inf,
228: np.inf,
229: np.inf,
230: np.inf,
231: np.inf,
232: np.inf,
233: np.inf,
234: np.inf,
235: np.inf,
236: np.inf,
237: np.inf,
238: np.inf,
239: np.inf,
240: np.inf,
241: np.inf,
242: np.inf,
243: np.inf,
244: np.inf,
245: np.inf,
246: np.inf,
247: np.inf,
248: np.inf,
249: np.inf,
250: np.inf,
251: np.inf,
252: np.inf,
253: np.inf,
254: np.inf,
255: np.inf,
256: np.inf,
257: np.inf,
258: np.inf,
259: np.inf,
260: np.inf,
261: np.inf,
262: np.inf,
263: np.inf,
264: np.inf,
265: np.inf,
266: np.inf,
267: np.inf,
268: np.inf,
269: np.inf,
270: np.inf,
271: np.inf,
272: np.inf,
273: np.inf,
274: np.inf,
275: np.inf,
276: np.inf,
277: np.inf,
278: np.inf,
279: np.inf,
280: np.inf,
281: np.inf,
282: np.inf,
283: np.inf,
284: np.inf,
285: np.inf,
286: np.inf,
287: np.inf,
288: np.inf,
289: np.inf,
290: np.inf,
291: np.inf,
292: np.inf,
293: np.inf,
294: np.inf,
295: np.inf,
296: np.inf,
297: np.inf,
298: np.inf,
299: np.inf,
300: np.inf,
301: np.inf,
302: np.inf,
303: np.inf,
304: np.inf,
305: np.inf,
306: np.inf,
307: np.inf,
308: np.inf,
309: np.inf,
310: np.inf,
311: np.inf,
312: np.inf,
313: np.inf,
314: np.inf,
315: np.inf,
316: np.inf,
317: np.inf,
318: np.inf,
319: np.inf,
320: np.inf,
321: np.inf,
322: np.inf,
323: np.inf,
324: np.inf,
325: np.inf,
326: np.inf,
327: np.inf,
328: np.inf,
329: np.inf,
330: np.inf,
331: np.inf,
332: np.inf,
333: np.inf,
334: np.inf,
335: np.inf,
336: np.inf,
337: np.inf,
338: np.inf,
339: np.inf,
340: np.inf,
341: np.inf,
342: np.inf,
343: np.inf,
344: np.inf,
345: np.inf,
346: np.inf,
347: np.inf,
348: np.inf,
349: np.inf,
350: np.inf,
351: np.inf,
352: np.inf,
353: np.inf,
354: np.inf,
355: np.inf,
356: np.inf,
357: np.inf,
358: np.inf,
359: np.inf,
360: np.inf,
361: np.inf,
362: np.inf,
363: np.inf,
364: np.inf,
365: np.inf,
366: np.inf,
367: np.inf,
368: np.inf,
369: np.inf,
370: np.inf,
371: np.inf,
372: np.inf,
373: np.inf,
374: np.inf,
375: np.inf,
376: np.inf,
377: np.inf,
378: np.inf,
379: np.inf,
380: np.inf,
381: np.inf,
382: np.inf,
383: np.inf,
384: np.inf,
385: np.inf,
386: np.inf,
387: np.inf,
388: np.inf,
389: np.inf,
390: np.inf,
391: np.inf,
392: np.inf,
393: np.inf,
},
}
)
PEYTON_FCST_LINEAR_INVALID_NEG_ONE = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
            191: pd.Timestamp("2012-11-09 00:00:00"),
from tqdm import tqdm
import pandas as pd
def balanced_sampling(df, num_instances=500):
L = []
counter = 1
categories = df['category'].unique()
for category in tqdm(categories):
sample = df[df.category==category]
        try:
            sample_reliable = sample[sample.label_quality == 'reliable'].sample(num_instances, replace=True)
            L.append(sample_reliable)
        except ValueError:
            # no reliable rows for this category; skip the reliable half of the sample
            pass
sample_unreliable = sample[sample.label_quality=='unreliable'].sample(num_instances,replace=True)
L.append(sample_unreliable)
counter+=1
    return pd.concat(L)
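
# Hypothetical usage sketch (not part of the original sample): assumes the input frame
# has 'category' and 'label_quality' columns; the file name below is illustrative only.
# if __name__ == "__main__":
#     items = pd.read_csv("items.csv")
#     balanced = balanced_sampling(items, num_instances=500)
#     print(balanced.groupby(["category", "label_quality"]).size())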
"""
This module merges temperature, humidity, and influenza data together
"""
import pandas as pd
import ast
__author__ = '<NAME>'
__license__ = 'MIT'
__status__ = 'release'
__url__ = 'https://github.com/caominhduy/TH-Flu-Modulation'
__version__ = '1.0.0'
def merge_flu(path='data/epidemiology/processed_CDC_2008_2021.csv'):
df = pd.read_csv(path, low_memory=False)
df['week'] = df['week'].astype('int')
df['year'] = df['year'].astype('int')
cols = ['state', 'week', 'year', 'level']
df = df.reindex(columns=cols)
return df
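
# Hypothetical usage sketch (not in the original module): merge_flu() yields weekly flu
# severity levels keyed by ['state', 'week', 'year'], ready to join with the weather
# frames assembled in merge_weather() below.
# flu = merge_flu()
# print(flu.head())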
def merge_weather():
with open('data/geodata/state_abbr.txt', 'r') as f:
contents = f.read()
state_abbr_dict = ast.literal_eval(contents)
states = list(state_abbr_dict.values())
df_temp = pd.DataFrame(columns=['week', 'temp', 'state', 'year'])
df_humid = pd.DataFrame(columns=['week', 'humid', 'state', 'year'])
for year in list(range(2008, 2020)):
y = str(year)
df = pd.read_csv('data/weather/' + y + '-temp.csv')
temps = df[states[0]]
weeks = df['week']
snames = pd.Series(states)
snames = snames.repeat(len(weeks)).reset_index(drop=True)
for s in states[1:]:
temps = temps.append(df[s]).reset_index(drop=True)
weeks = weeks.append(df['week']).reset_index(drop=True)
frames = {'week': weeks, 'temp': temps, 'state': snames}
df2 = pd.DataFrame(frames)
df2['year'] = y
df_temp = df_temp.append(df2)
for year in list(range(2008, 2020)):
y = str(year)
df = pd.read_csv('data/weather/' + y + '-humid.csv')
humids = df[states[0]]
weeks = df['week']
        snames = pd.Series(states)
import logging
import os
import ast
import pandas as pd
from pandas.io.json import json_normalize
import sys
import json
import numpy as np
def main(argv=None):
"""
Utilize Pandas library to read in both UNSD M49 country and area .csv file
(tab delimited) as well as the UNESCO heritage site .csv file (tab delimited).
Extract regions, sub-regions, intermediate regions, country and areas, and
other column data. Filter out duplicate values and NaN values and sort the
series in alphabetical order. Write out each series to a .csv file for inspection.
"""
if argv is None:
argv = sys.argv
msg = [
'Source file read {0}',
'UNSD M49 regions written to file {0}',
'UNSD M49 sub-regions written to file {0}',
'UNSD M49 intermediate regions written to file {0}',
'UNSD M49 countries and areas written to file {0}',
'UNSD M49 development status written to file {0}',
'UNESCO heritage site countries/areas written to file {0}',
'UNESCO heritage site categories written to file {0}',
'UNESCO heritage site regions written to file {0}',
'UNESCO heritage site transboundary values written to file {0}'
]
# Creating small sample of data:
business_json = './input/json/yelp_academic_dataset_business.json'
business_df = pd.read_json(business_json, lines=True, encoding='utf8')
## Creating full clean business csv
    attributes_df = business_df[['business_id', 'attributes']].copy()  # copy to avoid SettingWithCopyWarning when adding columns below
attire = []
noise = []
    for val in attributes_df['attributes']:
        # an attributes entry may be None or missing a key; fall back to an empty string
        try:
            attire.append(val['RestaurantsAttire'])
        except (KeyError, TypeError):
            attire.append("")
        try:
            noise.append(val['NoiseLevel'])
        except (KeyError, TypeError):
            noise.append("")
attributes_df['Attire'] = pd.Series(attire)
    attributes_df['Noise'] = pd.Series(noise)
import pandas as pd
from sodapy import Socrata
import datetime
import definitions
# global variables for main data:
hhs_data, test_data, nyt_data_us, nyt_data_state, max_hosp_date = [],[],[],[],[]
"""
get_data()
Fetches data from API, filters, cleans, and combines with provisional.
After running, global variables are filled for use in subsequent functions
"""
def get_data():
global nyt_data_us
global nyt_data_state
global test_data
global hhs_data
global max_hosp_date
nyt_data_us = pd.read_csv("https://raw.githubusercontent.com/nytimes/covid-19-data/master/rolling-averages/us.csv")
nyt_data_state = pd.read_csv("https://raw.githubusercontent.com/nytimes/covid-19-data/master/rolling-averages/us-states.csv")
client = Socrata("healthdata.gov", None)
results = client.get("g62h-syeh", limit=2000000)
test_results = client.get("j8mb-icvb", limit=2000000)
print("LOG: Fetched all raw data")
# Filter data to get columns of interest
hhs_data = pd.DataFrame.from_records(results)[['state', 'date', 'inpatient_beds_used_covid']]
hhs_data.inpatient_beds_used_covid = hhs_data.inpatient_beds_used_covid.fillna(0)
hhs_data = hhs_data.astype({'inpatient_beds_used_covid': 'int32'})
test_data = pd.DataFrame.from_records(test_results)[['state', 'date', 'overall_outcome', 'new_results_reported']]
test_data.new_results_reported = test_data.new_results_reported.fillna(0)
test_data = test_data.astype({'new_results_reported': 'int32'})
print("LOG: Filtered Data")
# For provisional data, gets days since most recent update of HHS time series
max_date = hhs_data.date.max()
max_hosp_date = max_date
provisional = client.get("4cnb-m4rz", limit=2000000, where=f"update_date > '{max_date}'")
hhs_provisional = pd.DataFrame.from_records(provisional)[['update_date', 'archive_link']]
hhs_provisional.update_date = hhs_provisional.update_date.apply(lambda x: x[:10])
    hhs_provisional.update_date = pd.to_datetime(hhs_provisional.update_date)
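
# Hypothetical usage sketch (not in the original module): get_data() fills the
# module-level frames declared above; the column names below come from the code above.
# get_data()
# print(hhs_data.groupby("state")["inpatient_beds_used_covid"].max().head())
# print(test_data.overall_outcome.value_counts())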