prompt
stringlengths 19
1.03M
| completion
stringlengths 4
2.12k
| api
stringlengths 8
90
|
---|---|---|
import unittest
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from yitian.datasource import *
from yitian.datasource import preprocess
class Test(unittest.TestCase):
# def test_standardize_date(self):
# data_pd = pd.DataFrame([
# ['01/01/2019', 11.11],
# ['01/04/2019', 44.44],
# ['01/03/2019', 33.33],
# ['01/02/2019', 22.22]
# ], columns=['Trade Date', 'price'])
#
# expect_pd = pd.DataFrame([
# ['01/01/2019', 11.11],
# ['01/04/2019', 44.44],
# ['01/03/2019', 33.33],
# ['01/02/2019', 22.22]
# ], columns=['date', 'price'])
#
# assert_frame_equal(expect_pd, preprocess.standardize_date(data_pd))
#
# def test_standardize_date_with_multi_date_column(self):
# data_pd = pd.DataFrame([
# ['2019-01-01 00:00:00', '2019-01-01 00:00:00', 11.11],
# ['2019-01-02 00:00:00', '2019-01-01 00:00:00', 22.22],
# ['2019-01-03 00:00:00', '2019-01-01 00:00:00', 33.33],
# ['2019-01-04 00:00:00', '2019-01-01 00:00:00', 44.44],
# ], columns=['DATE', 'date', 'price'])
#
# with self.assertRaises(ValueError) as context:
# preprocess.standardize_date(data_pd)
#
# assert str(context.exception) == \
# str("Original cols ({cols}) cannot be reconnciled with date options ({option})"\
# .format(cols=data_pd.columns.tolist(), option=RAW_DATE_OPTIONS))
def test_create_ts_pd(self):
data_pd = pd.DataFrame([
['01/01/2019', 11.11],
['01/04/2019', 44.44],
['01/03/2019', 33.33],
['01/02/2019', 22.22]
], columns=['date', 'price'])
expect_pd = pd.DataFrame([
[pd.Timestamp('2019-01-01'), 11.11],
[pd.Timestamp('2019-01-02'), 22.22],
[pd.Timestamp('2019-01-03'), 33.33],
[pd.Timestamp('2019-01-04'), 44.44]
], columns=['date', 'price']).set_index('date')
assert_frame_equal(expect_pd, preprocess.create_ts_pd(data_pd))
def test_create_ts_pd_datetime(self):
data_pd = pd.DataFrame([
['2019-01-01 11:11:11', 11.11],
['2019-01-04 04:44:44', 44.44],
['2019-01-03 03:33:33', 33.33],
['2019-01-02 22:22:22', 22.22]
], columns=['datetime', 'price'])
expect_pd = pd.DataFrame([
[pd.Timestamp('2019-01-01 11:11:11'), 11.11],
[pd.Timestamp('2019-01-02 22:22:22'), 22.22],
[pd.Timestamp('2019-01-03 03:33:33'), 33.33],
[pd.Timestamp('2019-01-04 04:44:44'), 44.44]
], columns=['datetime', 'price']).set_index('datetime')
assert_frame_equal(expect_pd, preprocess.create_ts_pd(data_pd, index_col=DATETIME))
def test_add_ymd(self):
data_pd = pd.DataFrame([
[pd.Timestamp('2019-01-01'), 11.11],
[pd.Timestamp('2019-02-02'), 22.22],
[pd.Timestamp('2019-03-03'), 33.33],
[pd.Timestamp('2019-04-04'), 44.44]
], columns=['date', 'price']).set_index('date')
expect_pd = pd.DataFrame([
[pd.Timestamp('2019-01-01'), 11.11, 2019, 1, 1],
[pd.Timestamp('2019-02-02'), 22.22, 2019, 2, 2],
[pd.Timestamp('2019-03-03'), 33.33, 2019, 3, 3],
[pd.Timestamp('2019-04-04'), 44.44, 2019, 4, 4]
], columns=['date', 'price', 'year', 'month', 'day']).set_index('date')
assert_frame_equal(expect_pd, preprocess.add_ymd(data_pd))
def test_add_ymd_datetime(self):
data_pd = pd.DataFrame([
[pd.Timestamp('2019-01-01 11:11:11'), 11.11],
[pd.Timestamp('2019-02-02 22:22:22'), 22.22],
[pd.Timestamp('2019-03-03 03:33:33'), 33.33],
[pd.Timestamp('2019-04-04 04:44:44'), 44.44]
], columns=['datetime', 'price']).set_index('datetime')
expect_pd = pd.DataFrame([
[pd.Timestamp('2019-01-01 11:11:11'), 11.11, 2019, 1, 1],
[pd.Timestamp('2019-02-02 22:22:22'), 22.22, 2019, 2, 2],
[pd.Timestamp('2019-03-03 03:33:33'), 33.33, 2019, 3, 3],
[pd.Timestamp('2019-04-04 04:44:44'), 44.44, 2019, 4, 4]
], columns=['datetime', 'price', 'year', 'month', 'day']).set_index('datetime')
assert_frame_equal(expect_pd, preprocess.add_ymd(data_pd, index_col=DATETIME))
def test_filter_dates(self):
data_pd = pd.DataFrame([
[pd.Timestamp('2019-01-01'), 11.11],
[pd.Timestamp('2019-01-04'), 44.44],
[pd.Timestamp('2019-01-03'), 33.33],
[pd.Timestamp('2019-01-02'), 22.22],
[pd.Timestamp('2019-01-06'), 66.66],
[pd.Timestamp('2019-01-07'), 77.77],
], columns=['date', 'price']).set_index('date')
expect_pd_one = pd.DataFrame([
[pd.Timestamp('2019-01-01 00:00:00'), 11.11],
[pd.Timestamp('2019-01-02 00:00:00'), 22.22],
[pd.Timestamp('2019-01-03 00:00:00'), 33.33],
[pd.Timestamp('2019-01-04 00:00:00'), 44.44],
[pd.Timestamp('2019-01-05 00:00:00'), np.nan],
[pd.Timestamp('2019-01-06 00:00:00'), 66.66],
[pd.Timestamp('2019-01-07 00:00:00'), 77.77]
], columns=['date', 'price']).set_index('date')
expect_pd_two = pd.DataFrame([
[pd.Timestamp('2019-01-04 00:00:00'), 44.44],
[pd.Timestamp('2019-01-05 00:00:00'), np.nan],
[pd.Timestamp('2019-01-06 00:00:00'), 66.66],
[ | pd.Timestamp('2019-01-07 00:00:00') | pandas.Timestamp |
# -*- coding: utf-8 -*-
import os
import sys
import time
import openpyxl as openpyxl
import pandas
import pandas as pd
import tushare as ts
import numpy as np
from datetime import datetime, timedelta
import matplotlib.ticker as ticker
import matplotlib.dates as mdates
import mplfinance as mpf
import matplotlib.pyplot as plt
from PyQt5.QtWidgets import QApplication, QMessageBox
from dateutil.relativedelta import relativedelta
from mpl_finance import candlestick_ohlc, candlestick2_ohlc
import numpy as np
import decimal
import sys
from PyQt5 import QtGui, QtWidgets, QtCore
from PyQt5.QtWidgets import QDialog, QApplication
from primodial import Ui_Dialog
from numpy import long
# author : ye
mode = 0
fig, ax = plt.subplots()
datadir = './data/'
strategydir = './strategy/'
financialdir = './financialdata/'
x, y, lastday, xminnow, xmaxnow = 1, 1, 0, 0, 0
# 云层细代表震荡,越来越细改变的趋势也越大,要看有没有最高点
# to avoid data collection, change return value to suffix of the file in 'data' dictionary -> enter offline mode!
def endDate():
return time.strftime('%Y%m%d')
# return '20210818'
# 1:excel 0:tushare
def getDataByTscode(ts_code, mode):
if mode == 1:
filedir = os.path.join(datadir, nameStrategy(ts_code))
byexcel = pd.read_excel(filedir)
byexcel.index = byexcel['Unnamed: 0']
byexcel = byexcel.drop(columns=['Unnamed: 0'])
return byexcel
if mode == 0:
ts.set_token('<KEY>')
pro = ts.pro_api()
t1 = endDate()
t2 = (datetime.now() - relativedelta(years=1)).strftime('%Y%m%d')
df = pro.daily(ts_code=ts_code, start_date=t2, end_date=t1)
df = df.iloc[::-1]
return df
def nameStrategy(code):
return code + '-' + endDate() + '.xlsx'
def vision(data, ts_name):
ichimoku = Ichimoku(data)
ichimoku.run()
ichimoku.plot(ts_name)
def call_back(event):
axtemp = event.inaxes
x_min, x_max = axtemp.get_xlim()
fanwei = (x_max - x_min) / 10
if event.button == 'up':
axtemp.set(xlim=(x_min + fanwei, x_max - fanwei))
elif event.button == 'down':
axtemp.set(xlim=(x_min - fanwei, x_max + fanwei))
fig.canvas.draw_idle()
def button_press_callback(click):
global x
global y
x = click.xdata
y = click.ydata
point = (click.xdata, click.ydata)
print(point)
def motion_notify_callback(event):
global x, xminnow, xmaxnow
if event.button != 1: return
xnow = event.xdata
print(x)
delta = x - xnow
plt.xlim(xmin=xminnow + delta, xmax=xmaxnow + delta)
xminnow = xminnow + delta
xmaxnow = xmaxnow + delta
x = xnow
point = (event.xdata, event.ydata, xminnow, xmaxnow)
print(point)
fig.canvas.draw_idle()
class Ichimoku():
"""
@param: ohcl_df <DataFrame>
Required columns of ohcl_df are:
Date<Float>,Open<Float>,High<Float>,Close<Float>,Low<Float>
"""
def __init__(self, ohcl_df):
self.ohcl_df = ohcl_df
ohcl_df['trade_date'] = pandas.to_datetime(ohcl_df['trade_date'].astype(str))
def run(self):
tenkan_window = 9
kijun_window = 26
senkou_span_b_window = 52
cloud_displacement = 26
chikou_shift = -26
ohcl_df = self.ohcl_df
# Dates are floats in mdates like 736740.0
# the period is the difference of last two dates
last_date = ohcl_df["trade_date"].iloc[-1].date()
period = 1
# Add rows for N periods shift (cloud_displacement)
ext_beginning = last_date + timedelta(days=1)
ext_end = last_date + timedelta(days=((period * cloud_displacement) + period))
dates_ext = | pd.date_range(start=ext_beginning, end=ext_end) | pandas.date_range |
#coding: UTF-8
#開始運行前,請先全選doc,按ctrl + shift + f9移除所有超鏈接並保存
#如果有超鏈接,轉換會出錯
#执行宏操作把doc中的编号转成固定文本
#編譯辭書格式:
#第一章 XXX
#第二章 XXX
#第三章 XX鎮、鄉、區
# 第一節 鎮名緣起
# 第二節 自然環境
# 第三節 區域特色
# 第四節 各里地名釋義
# 第一項 XX里(村) <-------------------------------抓取起始位置
# 里(村)名由來
# 里(村)的description,若干行
# 地名釋義
# (一)具體地名1
# 具體地名1的description,若干行
# (二)具體地名2
# 具體地名2的description,若干行
# ......
# 其他
# (一)具體地名1
# 具體地名1的description,若干行
# (二)具體地名2
# 具體地名2的description,若干行
# ......
# 第二項 XX里(村)
# ......
# 第三項 XX里(村)
# ......
#第四章 XX鎮、鄉、區
#基本算法架構:step1:根據“第X項”找到里(村)作為開始,以“章”結束一個鎮、鄉、區,據此找到每個里(村)相關內容的上下邊界
# step2:在每個里(村)中根據“里(村)名由來”找到每個里(村)description的上下邊界
# step3:在每個里(村)中根據“地名釋義”和“()”找到每個里(村)的具體地名及其上下邊界
# step4:返回正文,從根據上下邊界抓取具體內文,輸出到csv的固定位置
#***************************************************************************************************************
import numpy as np
import pandas as pd
from docx import Document
import win32com.client as wc
import os
import re
from tqdm import tqdm
#數據處理,doc轉docx轉txt存入本地
class data_processor():
def __init__(self):
self.dir_name = 'F:/street_name/book/'
#self.dir_name = 'D:/street_name/book/'
def doc2docx(self):
#doc转docx
word = wc.Dispatch("Word.Application")
for i in os.listdir(self.dir_name):
if i.endswith('.doc') and not i .startswith('~$'):
doc_path = os.path.join(self.dir_name, i)
doc = word.Documents.Open(doc_path)
rename = os.path.splitext(i)
save_path = os.path.join(self.dir_name, rename[0] + '.docx')
doc.SaveAs(save_path, 12)
doc.Close()
print(i)
word.Quit()
def docx2txt(self):
#docx转txt,去除所有不必要的格式
for i in os.listdir(self.dir_name):
if i.endswith('.docx') and not i.startswith('~$'):
docx_path = os.path.join(self.dir_name, i)
document = Document(docx_path)
txt_path = os.path.join(self.dir_name, str(i).replace('.docx', '.txt'))
txt_file = open(txt_path, 'w', encoding = 'utf-8')
mode = False
for paragraph in tqdm(document.paragraphs):
new_paragraph = paragraph.text.strip('/r')
new_paragraph = new_paragraph.strip()
new_paragraph = new_paragraph.replace(' ', '')
new_paragraph = new_paragraph.replace(' ', '')
if new_paragraph == '註:':
mode = True
continue
if mode:
if new_paragraph.startswith('('):
continue
else:
mode = False
if new_paragraph != '':
txt_file.write(new_paragraph + '\n')
txt_file.close()
#删除使用过的docx
os.remove(docx_path)
#分行
class word_cut():
def __init__(self):
#初始化全局變量
#工作目錄
self.dir_name = 'F:/street_name/book/'
#self.dir_name = 'D:/street_name/book/'
#中文字符常量
self.chinese = ['一', '二', '三', '四', '五', '六', '七', '八', '九', '十']
self.img_id = []
self.tab_id = []
self.tab_id_xiang = []
self.img_id2 = []
self.tab_id2 = []
for i in range(1, 30):
for j in range(1, 100):
self.img_id.append('(如圖{i}-{j}所示)')
self.tab_id.append('(如表{i}-{j})')
self.tab_id_xiang.append('(詳如表{i}-{j})')
self.img_id2.append('圖{i}-{j}')
self.tab_id2.append('表{i}-{j}')
def run(self):
for i in os.listdir(self.dir_name):
if i.endswith('.txt') and not i .startswith('~$'):
self.save_name = i.replace('txt', 'csv')
print('Begin read ' + str(i))
self.get_txt(i)
self.get_vil_index_up_down()
self.cut_vli_by_index()
self.get_small_name()
self.re_index() #若註解此行則以村里為單位標記No
self.split_taipei()
self.save_csv()
def get_txt(self, file_name):
txt_path = os.path.join(self.dir_name, str(file_name))
with open(txt_path, 'r', encoding = 'utf-8') as txt_file:
#获得txt文本存入list
self.document_list = txt_file.readlines()
txt_file.close()
#定義一個df存放村裡對應的行號上下界
self.vil_df_index = pd.DataFrame(columns = ['No', 'dist_name', 'vil_name', 'vil_index_down', 'vil_index_up'], dtype = int)
#定義一個df供保存需要的數據
self.df_save = | pd.DataFrame(columns = ['No', 'name_dist', 'name_li', 'name', 'name_eng', 'location', 'description']) | pandas.DataFrame |
# built-in modules
from pathlib import Path
import re
from typing import List, Union
# third-party modules
import pandas as pd
# local modules
from bitome.features import Gene, TranscriptionFactor, TFBindingSite, IModulon
DATA_DIR = Path(Path(__file__).parent.parent, 'data')
def load_locus_tag_yome_lookup() -> dict:
"""
Load y-ome information and return a dictionary with locus tags as keys and y-ome categorization as values
:return dict y_ome_lookup:
"""
y_ome_df = pd.read_csv(Path(DATA_DIR, 'y-ome', 'y-ome-genes.tsv'), sep='\t')
y_ome_lookup = {}
for y_ome_row in y_ome_df.itertuples(index=False):
y_ome_lookup[y_ome_row.locus_id] = y_ome_row.category
return y_ome_lookup
def load_ytfs_and_binding_sites(
existing_tfs: List[TranscriptionFactor] = None
) -> List[Union[TranscriptionFactor, TFBindingSite]]:
"""
Loads BitomeFeature objects for yTF binding sites observed experimentally in SBRG via ChIP-exo experiments
:param List[TranscriptionFactor] existing_tfs: a list of pre-loaded TranscriptionFactor objects; if provided, and if
a matching yTF is present, its binding sites will be added to this TF
:return List[Union[TranscriptionFactor, TFBindingSite]] ytfs_and_binding_sites: a list of ytfs and binding sites
"""
ytf_df = load_ytf_df()
ytf_names = list(set(ytf_df['TF']))
# ytfs_for_binding_sites includes all TFs, EVEN IF already existing in passed list; ytfs is the final return, NOT
# including the pre-existing objects (all to avoid duplicates at higher level)
ytfs_for_binding_sites = []
ytfs = []
for i, ytf_name in enumerate(ytf_names):
if existing_tfs is not None:
existing_tf_objs = [tf for tf in existing_tfs if tf.name == ytf_name]
if existing_tf_objs:
existing_tf = existing_tf_objs[0]
else:
existing_tf = None
else:
existing_tf = None
if existing_tf is None:
transcription_factor = TranscriptionFactor('yTF_' + str(i), ytf_name)
else:
transcription_factor = existing_tf
ytf_site_df = ytf_df[ytf_df['TF'] == ytf_name]
ytf_final_states = list(set(ytf_site_df['TF_FINAL_STATE']))
for ytf_final_state in ytf_final_states:
ytf_final_state_site_df = ytf_site_df[ytf_site_df['TF_FINAL_STATE'] == ytf_final_state]
ytf_sites = []
for ytf_site_row in ytf_final_state_site_df.itertuples(index=False):
left_right = ytf_site_row.Start, ytf_site_row.End
site_obj = TFBindingSite(
ytf_site_row.SITE_ID,
left_right
)
ytf_sites.append(site_obj)
transcription_factor.add_binding_sites(ytf_sites, ytf_final_state)
# don't append to the final ytf return list if we already had a TF object
ytfs_for_binding_sites.append(transcription_factor)
if existing_tf is None:
ytfs.append(transcription_factor)
ytf_binding_sites = []
for ytf in ytfs_for_binding_sites:
# TranscriptionFactor object stores binding sites in a dictionary, keys are conformation names, values are sites
for conf_binding_sites in ytf.binding_sites.values():
ytf_binding_sites += conf_binding_sites
return ytfs + ytf_binding_sites
def load_ytf_df() -> pd.DataFrame:
"""
Reads a file containing yTF binding sites and parses into a single dataframe
:return pd.DataFrame ytf_df: a DataFrame listing binding sites for yTFs experimentally determined by ChIP-exo
"""
# read_excel will return an OrderedDict with sheet_name: sheet_df items when sheet_name is None
ytf_data_dict = pd.read_excel(Path(DATA_DIR, 'ytf_binding_sites.xlsx'), sheet_name=None)
final_state_names = []
tf_names = []
dfs = []
for ytf_name, ytf_df in ytf_data_dict.items():
# capitalize the yTF name for cross-referencing later with RegulonDB; also swap underscore for dash to be
# consistent with RegulonDB final conformation state nomenclature
final_state_name = re.sub('_', '-', ytf_name[0].upper() + ytf_name[1:])
tf_name = final_state_name[:4]
final_state_names.append(final_state_name)
tf_names.append(tf_name)
dfs.append(ytf_df)
for i, (final_name, base_tf_name) in enumerate(zip(final_state_names, tf_names)):
dfs[i]['TF_FINAL_STATE'] = final_name
dfs[i]['Transcription factors'] = base_tf_name
full_ytf_df = | pd.concat(dfs, ignore_index=True) | pandas.concat |
# pylint: disable-msg=E1101,W0612
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.core.sparse.api import SparseDtype
class TestSparseSeriesIndexing(object):
def setup_method(self, method):
self.orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
self.sparse = self.orig.to_sparse()
def test_getitem(self):
orig = self.orig
sparse = self.sparse
assert sparse[0] == 1
assert np.isnan(sparse[1])
assert sparse[3] == 3
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array (actuary it coerces to normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse[:2], orig[:2].to_sparse())
tm.assert_sp_series_equal(sparse[4:2], orig[4:2].to_sparse())
tm.assert_sp_series_equal(sparse[::2], orig[::2].to_sparse())
tm.assert_sp_series_equal(sparse[-5:], orig[-5:].to_sparse())
def test_getitem_int_dtype(self):
# GH 8292
s = pd.SparseSeries([0, 1, 2, 3, 4, 5, 6], name='xxx')
res = s[::2]
exp = pd.SparseSeries([0, 2, 4, 6], index=[0, 2, 4, 6], name='xxx')
tm.assert_sp_series_equal(res, exp)
assert res.dtype == SparseDtype(np.int64)
s = pd.SparseSeries([0, 1, 2, 3, 4, 5, 6], fill_value=0, name='xxx')
res = s[::2]
exp = pd.SparseSeries([0, 2, 4, 6], index=[0, 2, 4, 6],
fill_value=0, name='xxx')
tm.assert_sp_series_equal(res, exp)
assert res.dtype == SparseDtype(np.int64)
def test_getitem_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
assert sparse[0] == 1
assert np.isnan(sparse[1])
assert sparse[2] == 0
assert sparse[3] == 3
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# sparse array (actuary it coerces to normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_ellipsis(self):
# GH 9467
s = pd.SparseSeries([1, np.nan, 2, 0, np.nan])
tm.assert_sp_series_equal(s[...], s)
s = pd.SparseSeries([1, np.nan, 2, 0, np.nan], fill_value=0)
tm.assert_sp_series_equal(s[...], s)
def test_getitem_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse[:2],
orig[:2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[4:2],
orig[4:2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[::2],
orig[::2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[-5:],
orig[-5:].to_sparse(fill_value=0))
def test_loc(self):
orig = self.orig
sparse = self.sparse
assert sparse.loc[0] == 1
assert np.isnan(sparse.loc[1])
result = sparse.loc[[1, 3, 4]]
exp = orig.loc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# exceeds the bounds
result = sparse.reindex([1, 3, 4, 5])
exp = orig.reindex([1, 3, 4, 5]).to_sparse()
tm.assert_sp_series_equal(result, exp)
# padded with NaN
assert np.isnan(result[-1])
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array (actuary it coerces to normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_loc_index(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=list('ABCDE'))
sparse = orig.to_sparse()
assert sparse.loc['A'] == 1
assert np.isnan(sparse.loc['B'])
result = sparse.loc[['A', 'C', 'D']]
exp = orig.loc[['A', 'C', 'D']].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array (actuary it coerces to normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_loc_index_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
assert sparse.loc['A'] == 1
assert np.isnan(sparse.loc['B'])
result = sparse.loc[['A', 'C', 'D']]
exp = orig.loc[['A', 'C', 'D']].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# sparse array (actuary it coerces to normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
def test_loc_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse.loc[2:], orig.loc[2:].to_sparse())
def test_loc_slice_index_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.loc['C':],
orig.loc['C':].to_sparse(fill_value=0))
def test_loc_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.loc[2:],
orig.loc[2:].to_sparse(fill_value=0))
def test_iloc(self):
orig = self.orig
sparse = self.sparse
assert sparse.iloc[3] == 3
assert np.isnan(sparse.iloc[2])
result = sparse.iloc[[1, 3, 4]]
exp = orig.iloc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
result = sparse.iloc[[1, -2, -4]]
exp = orig.iloc[[1, -2, -4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
with pytest.raises(IndexError):
sparse.iloc[[1, 3, 5]]
def test_iloc_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
assert sparse.iloc[3] == 3
assert np.isnan(sparse.iloc[1])
assert sparse.iloc[4] == 0
result = sparse.iloc[[1, 3, 4]]
exp = orig.iloc[[1, 3, 4]].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
def test_iloc_slice(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.iloc[2:], orig.iloc[2:].to_sparse())
def test_iloc_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.iloc[2:],
orig.iloc[2:].to_sparse(fill_value=0))
def test_at(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
sparse = orig.to_sparse()
assert sparse.at[0] == orig.at[0]
assert np.isnan(sparse.at[1])
assert np.isnan(sparse.at[2])
assert sparse.at[3] == orig.at[3]
assert np.isnan(sparse.at[4])
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('abcde'))
sparse = orig.to_sparse()
assert sparse.at['a'] == orig.at['a']
assert np.isnan(sparse.at['b'])
assert np.isnan(sparse.at['c'])
assert sparse.at['d'] == orig.at['d']
assert np.isnan(sparse.at['e'])
def test_at_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0],
index=list('abcde'))
sparse = orig.to_sparse(fill_value=0)
assert sparse.at['a'] == orig.at['a']
assert np.isnan(sparse.at['b'])
assert sparse.at['c'] == orig.at['c']
assert sparse.at['d'] == orig.at['d']
assert sparse.at['e'] == orig.at['e']
def test_iat(self):
orig = self.orig
sparse = self.sparse
assert sparse.iat[0] == orig.iat[0]
assert np.isnan(sparse.iat[1])
assert np.isnan(sparse.iat[2])
assert sparse.iat[3] == orig.iat[3]
assert np.isnan(sparse.iat[4])
assert np.isnan(sparse.iat[-1])
assert sparse.iat[-5] == orig.iat[-5]
def test_iat_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse()
assert sparse.iat[0] == orig.iat[0]
assert np.isnan(sparse.iat[1])
assert sparse.iat[2] == orig.iat[2]
assert sparse.iat[3] == orig.iat[3]
assert sparse.iat[4] == orig.iat[4]
assert sparse.iat[-1] == orig.iat[-1]
assert sparse.iat[-5] == orig.iat[-5]
def test_get(self):
s = pd.SparseSeries([1, np.nan, np.nan, 3, np.nan])
assert s.get(0) == 1
assert np.isnan(s.get(1))
assert s.get(5) is None
s = pd.SparseSeries([1, np.nan, 0, 3, 0], index=list('ABCDE'))
assert s.get('A') == 1
assert np.isnan(s.get('B'))
assert s.get('C') == 0
assert s.get('XX') is None
s = pd.SparseSeries([1, np.nan, 0, 3, 0], index=list('ABCDE'),
fill_value=0)
assert s.get('A') == 1
assert np.isnan(s.get('B'))
assert s.get('C') == 0
assert s.get('XX') is None
def test_take(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.take([0]),
orig.take([0]).to_sparse())
tm.assert_sp_series_equal(sparse.take([0, 1, 3]),
orig.take([0, 1, 3]).to_sparse())
tm.assert_sp_series_equal(sparse.take([-1, -2]),
orig.take([-1, -2]).to_sparse())
def test_take_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0],
index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([0]),
orig.take([0]).to_sparse(fill_value=0))
exp = orig.take([0, 1, 3]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([0, 1, 3]), exp)
exp = orig.take([-1, -2]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([-1, -2]), exp)
def test_reindex(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse()
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse()
tm.assert_sp_series_equal(res, exp)
# all missing & fill_value
res = sparse.reindex(['B', 'E', 'C'])
exp = orig.reindex(['B', 'E', 'C']).to_sparse()
tm.assert_sp_series_equal(res, exp)
orig = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse()
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse()
tm.assert_sp_series_equal(res, exp)
def test_fill_value_reindex(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# includes missing and fill_value
res = sparse.reindex(['A', 'B', 'C'])
exp = orig.reindex(['A', 'B', 'C']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# all missing
orig = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# all fill_value
orig = pd.Series([0., 0., 0., 0., 0.],
index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
def test_fill_value_reindex_coerces_float_int(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
def test_reindex_fill_value(self):
floats = pd.Series([1., 2., 3.]).to_sparse()
result = floats.reindex([1, 2, 3], fill_value=0)
expected = pd.Series([2., 3., 0], index=[1, 2, 3]).to_sparse()
tm.assert_sp_series_equal(result, expected)
def test_reindex_nearest(self):
s = pd.Series(np.arange(10, dtype='float64')).to_sparse()
target = [0.1, 0.9, 1.5, 2.0]
actual = s.reindex(target, method='nearest')
expected = pd.Series(np.around(target), target).to_sparse()
tm.assert_sp_series_equal(expected, actual)
actual = s.reindex(target, method='nearest', tolerance=0.2)
expected = pd.Series([0, 1, np.nan, 2], target).to_sparse()
tm.assert_sp_series_equal(expected, actual)
actual = s.reindex(target, method='nearest',
tolerance=[0.3, 0.01, 0.4, 3])
expected = pd.Series([0, np.nan, np.nan, 2], target).to_sparse()
tm.assert_sp_series_equal(expected, actual)
def tests_indexing_with_sparse(self):
# GH 13985
for kind in ['integer', 'block']:
for fill in [True, False, np.nan]:
arr = pd.SparseArray([1, 2, 3], kind=kind)
indexer = pd.SparseArray([True, False, True], fill_value=fill,
dtype=bool)
tm.assert_sp_array_equal(pd.SparseArray([1, 3], kind=kind),
arr[indexer],)
s = pd.SparseSeries(arr, index=['a', 'b', 'c'],
dtype=np.float64)
exp = pd.SparseSeries(
[1, 3], index=['a', 'c'],
dtype=SparseDtype(np.float64, s.fill_value),
kind=kind
)
tm.assert_sp_series_equal(s[indexer], exp)
tm.assert_sp_series_equal(s.loc[indexer], exp)
tm.assert_sp_series_equal(s.iloc[indexer], exp)
indexer = pd.SparseSeries(indexer, index=['a', 'b', 'c'])
tm.assert_sp_series_equal(s[indexer], exp)
tm.assert_sp_series_equal(s.loc[indexer], exp)
msg = ("iLocation based boolean indexing cannot use an "
"indexable as a mask")
with tm.assert_raises_regex(ValueError, msg):
s.iloc[indexer]
class TestSparseSeriesMultiIndexing(TestSparseSeriesIndexing):
def setup_method(self, method):
# Mi with duplicated values
idx = pd.MultiIndex.from_tuples([('A', 0), ('A', 1), ('B', 0),
('C', 0), ('C', 1)])
self.orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=idx)
self.sparse = self.orig.to_sparse()
def test_getitem_multi(self):
orig = self.orig
sparse = self.sparse
assert sparse[0] == orig[0]
assert np.isnan(sparse[1])
assert sparse[3] == orig[3]
tm.assert_sp_series_equal(sparse['A'], orig['A'].to_sparse())
tm.assert_sp_series_equal(sparse['B'], orig['B'].to_sparse())
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array (actuary it coerces to normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
| tm.assert_sp_series_equal(result, exp) | pandas.util.testing.assert_sp_series_equal |
"""Evaluate and train Keras Classifiers."""
from sklearn.model_selection import StratifiedKFold
from sklearn.pipeline import Pipeline
from sklearn.dummy import DummyClassifier
from pandas import read_csv, DataFrame
# from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
stderr = sys.stderr
sys.stderr = open(os.devnull, 'w')
from keras.wrappers.scikit_learn import KerasClassifier
sys.stderr = stderr
from autoclf.classification import eval_utils as eu
from autoclf import auto_utils as au
from autoclf.classification import neuralnets as nn
from autoclf.classification import train_calibrate as tc
import autoclf.getargs as ga
from autoclf.classification.param_grids_distros import Keras_param_grid
from autoclf.classification.evaluate import create_keras_classifiers
from autoclf.classification.evaluate import create_best_keras_clf_architecture
from pkg_resources import resource_string
from io import StringIO
from sklearn.datasets import load_iris
from sklearn.datasets import load_digits
def select_cv_method():
is_valid = 0
choice = 0
while not is_valid:
try:
choice = int(input("Select cv method: [1] Classical CV, [2] Nested-CV?\n"))
if choice in (1, 2):
is_valid = 1
else:
print("Invalid number. Try again...")
except ValueError as e:
print("'%s' is not a valid integer." % e.args[0].split(": ")[1])
return choice
def select_dataset():
df = None
is_valid = 0
choice = 0
while not is_valid:
try:
choice = int(input("Select dataset: "
"[1] German credit, [2] Credit Card, [3] Iris, [4] Digits\n"))
if choice in (1, 2, 3, 4):
is_valid = 1
if choice == 1:
names = [
'checkin_acc', 'duration', 'credit_history', 'purpose', 'amount',
'saving_acc', 'present_emp_since', 'inst_rate', 'personal_status',
'other_debtors', 'residing_since', 'property', 'age',
'inst_plans', 'housing', 'num_credits', 'job', 'dependents',
'telephone', 'foreign_worker', 'status']
german_bytes = resource_string(
"autoclf", os.path.join("datasets", 'german-credit.csv'))
german_file = StringIO(str(german_bytes,'utf-8'))
df = read_csv(german_file, header=None, delimiter=" ",
names=names)
target = 'status'
d_name = "GermanCr"
elif choice == 2:
# imb_credit_train.csv
data_bytes = resource_string(
"autoclf", os.path.join("datasets", 'creditcard.csv'))
data_file = StringIO(str(data_bytes,'utf-8'))
df = read_csv(data_file, delimiter=",")
target = 'Class'
d_name = 'ImbCredit'
elif choice == 3:
iris = load_iris()
f_names = [name.strip(' (cm)') for name in iris.feature_names]
df = DataFrame(data=iris.data, columns=f_names)
target = 'class'
df[target] = DataFrame(data=iris.target, columns=[target])
d_name = 'Iris'
else:
digits = load_digits()
df = DataFrame(data=digits.data)
target = 'class'
df[target] = | DataFrame(data=digits.target, columns=[target]) | pandas.DataFrame |
#%%
import pandas as pd
import numpy as np
from datetime import datetime
import os
import pickle
import matplotlib.pyplot as plt
exec(open('../env_vars.py').read())
dir_data = os.environ['dir_data']
dir_picklejar = os.environ['dir_picklejar']
dir_code_methods = os.environ['dir_code_methods']
#%%
###############################################################################
# Dictionaries for latent variable models
###############################################################################
filename = os.path.join(os.path.realpath(dir_picklejar), 'dict_selfreport')
infile = open(filename,'rb')
dict_selfreport = pickle.load(infile)
infile.close()
filename = os.path.join(os.path.realpath(dir_picklejar), 'dict_random_ema')
infile = open(filename,'rb')
dict_random_ema = pickle.load(infile)
infile.close()
filename = os.path.join(os.path.realpath(dir_picklejar), 'dict_puffmarker')
infile = open(filename,'rb')
dict_puffmarker = pickle.load(infile)
infile.close()
#%%
###############################################################################
# Create a data frame with records of start & end of day timestamps
# for each participant-day
###############################################################################
# output of this script is the data frame data_day_limits
exec(open(os.path.join(os.path.realpath(dir_code_methods), 'setup-day-limits.py')).read())
data_reference = data_day_limits.loc[:,['participant_id','study_day']].groupby('participant_id').count().reset_index()
data_reference = data_reference.rename(columns = {'study_day':'max_study_day'})
# SANITY CHECK
#data_reference['max_study_day'].value_counts() # this is equal to 14
#%%
###############################################################################
# Knit together various data streams
###############################################################################
all_participant_id = data_hq_episodes['id'].drop_duplicates()
all_participant_id.index = np.array(range(0,len(all_participant_id.index)))
all_dict = {}
# %%
for i in range(0, len(all_participant_id)):
current_participant = all_participant_id[i]
current_dict = {}
for j in range(1, 15):
this_study_day = j
# Lets work with selfeport first ##########################################
current_dict_selfreport = dict_selfreport[current_participant][j]
if len(current_dict_selfreport['hours_since_start_day'])==0:
tmp_selfreport = pd.DataFrame({})
else:
tmp_selfreport = pd.DataFrame({'assessment_type':'selfreport',
'hours_since_start_day': current_dict_selfreport['hours_since_start_day'],
'smoke': 'Yes',
'when_smoke': current_dict_selfreport['message'],
'delta': current_dict_selfreport['delta']
})
# Now let's work with Random EMA ##########################################
current_dict_random_ema = dict_random_ema[current_participant][j]
if len(current_dict_random_ema['hours_since_start_day'])==0:
tmp_random_ema = pd.DataFrame({})
else:
tmp_random_ema = pd.DataFrame({'assessment_type':'random_ema',
'hours_since_start_day': current_dict_random_ema['hours_since_start_day'],
'smoke': current_dict_random_ema['smoke'],
'when_smoke': current_dict_random_ema['when_smoke'],
'delta': current_dict_random_ema['delta']
})
# Now, let's concatanate ##################################################
frames = [tmp_selfreport, tmp_random_ema]
result = pd.concat(frames)
if len(result.index) > 0:
# important step to sort according to hours_since_start_day
result.sort_values(by=['hours_since_start_day'], inplace=True)
result['hours_since_start_day_shifted'] = result['hours_since_start_day'].shift(periods=+1)
result['hours_since_start_day_shifted'] = np.where(pd.isna(result['hours_since_start_day_shifted']), 0, result['hours_since_start_day_shifted'])
result['time_between'] = result['hours_since_start_day'] - result['hours_since_start_day_shifted']
# Let's create a time variable that depends on the value of 'smoke' #######
result['delta'] = np.where(np.logical_and(result['assessment_type']=='selfreport', result['when_smoke']==4), result['time_between']/2, result['delta'])
result['delta'] = np.where(np.logical_and(result['assessment_type']=='random_ema', result['when_smoke']==6), result['time_between']/2, result['delta'])
result['puff_time'] = np.where(result['smoke']=='Yes', result['hours_since_start_day']-result['delta'], np.nan)
# Rearrange columns #######################################################
#result = result.loc[:, ['assessment_type', 'smoke','hours_since_start_day_shifted','hours_since_start_day','time_between','puff_time']]
# Combine information into a dictionary ###################################
new_dict = {this_study_day: result}
current_dict.update(new_dict)
# Update participant ########################################################
all_dict.update({current_participant:current_dict})
#%%
filename = os.path.join(os.path.realpath(dir_picklejar), 'dict_knitted')
outfile = open(filename, 'wb')
pickle.dump(all_dict, outfile)
outfile.close()
###############################################################################
# Do checks on dict_knitted
###############################################################################
dict_knitted = all_dict
#%%
all_participant_id = data_hq_episodes['id'].drop_duplicates()
all_participant_id.index = np.array(range(0,len(all_participant_id.index)))
total_count_flagged = 0
total_count_rows = 0
#%%
for i in range(0, len(all_participant_id)):
current_participant = all_participant_id[i]
for j in range(1, 15):
this_study_day = j
dat = dict_knitted[current_participant][this_study_day]
if len(dat.index)>0:
dat['flag'] = np.where(dat['puff_time'] < dat['hours_since_start_day_shifted'], 1, 0)
dat['flag'] = np.where(pd.isna(dat['puff_time']), np.nan, dat['flag'])
total_count_flagged += dat['flag'].sum()
total_count_rows += len(dat.index)
else:
next
#%%
collect_dat = pd.DataFrame({})
for i in range(0, len(all_participant_id)):
current_participant = all_participant_id[i]
for j in range(1, 15):
this_study_day = j
dat = dict_knitted[current_participant][this_study_day]
if len(dat.index)>0:
subset_dat = dat.loc[dat['flag'] == 1]
else:
next
# Now, let's concatanate ##################################################
collect_dat = [collect_dat, subset_dat]
collect_dat = | pd.concat(collect_dat) | pandas.concat |
"""Tests for model_selection.py."""
import numpy as np
import pandas as pd
import pytest
from fclearn.model_selection import create_rolling_forward_indices, train_test_split
groupby = ["SKUID", "ForecastGroupID"]
class TestTrainTestSplit:
"""Test train_test_split()."""
def test_one(self, demand_df):
"""Whether splits on date."""
X = pd.DataFrame(
data=[["2017-01-02", 1], ["2017-01-03", 2]], columns=["Date", "value"]
)
y = pd.DataFrame(
data=[["2017-01-02", 3], ["2017-01-03", 4]], columns=["Date", "value"]
)
X["Date"] = pd.to_datetime(X["Date"])
y["Date"] = | pd.to_datetime(y["Date"]) | pandas.to_datetime |
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
import numpy as np
import pandas as pd
from typing import Tuple, Optional
from cmocean import cm
from .logging_utils import logger
plt.style.use('fivethirtyeight')
plt.rcParams['svg.fonttype'] = 'none'
def clean_axis(ax, ts=11, ga=0.4):
ax.xaxis.set_tick_params(labelsize=ts)
ax.yaxis.set_tick_params(labelsize=ts)
for i in ['top', 'bottom', 'left', 'right']:
ax.spines[i].set_visible(False)
ax.grid(which='major', linestyle='--', alpha=ga)
ax.figure.patch.set_alpha(0)
ax.patch.set_alpha(0)
return True
def plot_graph_qc(g):
_, axis = plt.subplots(1, 2, figsize=(12, 4))
ax = axis[0]
x = np.array((g != 0).sum(axis=0))[0]
y = pd.Series(x).value_counts().sort_index()
ax.bar(y.index, y.values, width=0.5)
xlim = np.percentile(x, 99.5) + 5
ax.set_xlim((0, xlim))
ax.set_xlabel('Node degree')
ax.set_ylabel('Frequency')
ax.text(xlim, y.values.max(), f"plot is clipped (max degree: {y.index.max()})",
ha='right', fontsize=9)
clean_axis(ax)
ax = axis[1]
ax.hist(g.data, bins=30)
ax.set_xlabel('Edge weight')
ax.set_ylabel('Frequency')
clean_axis(ax)
plt.tight_layout()
plt.show()
def plot_qc(data: pd.DataFrame, color: str = 'steelblue', cmap: str = 'tab20',
fig_size: tuple = None, label_size: float = 10.0, title_size: float = 10,
sup_title: str = None, sup_title_size: float = 12, scatter_size: float = 1.0,
max_points: int = 10000, show_on_single_row: bool = True):
n_plots = data.shape[1] - 1
n_groups = data['groups'].nunique()
if n_groups > 5 and show_on_single_row is True:
logger.info(f"Too many groups in the plot. If you think that plot is too wide then consider turning "
f"`show_on_single_row` parameter to False")
if show_on_single_row is True:
n_rows = 1
n_cols = n_plots
else:
n_rows = n_plots
n_cols = 1
if fig_size is None:
fig_width = min(15, n_groups+(2*n_cols))
fig_height = 1+2.5*n_rows
fig_size = (fig_width, fig_height)
fig = plt.figure(figsize=fig_size)
grouped = data.groupby('groups')
for i in range(n_plots):
if data.columns[i] == 'groups':
continue
vals = {'g': [], 'v': []}
for j in sorted(data['groups'].unique()):
val = grouped.get_group(j)[data.columns[i]].values
vals['g'].extend([j for _ in range(len(val))])
vals['v'].extend(list(val))
vals = pd.DataFrame(vals)
ax = fig.add_subplot(n_rows, n_cols, i+1)
if n_groups == 1:
sns.violinplot(y='v', x='g', data=vals, linewidth=1, orient='v', alpha=0.6,
inner=None, cut=0, color=color)
else:
sns.violinplot(y='v', x='g', data=vals, linewidth=1, orient='v', alpha=0.6,
inner=None, cut=0, palette=cmap)
if len(vals) > max_points:
sns.stripplot(x='g', y='v', data=vals.sample(n=max_points), jitter=0.4, ax=ax, orient='v',
s=scatter_size, color='k', alpha=0.4)
else:
sns.stripplot(x='g', y='v', data=vals, jitter=0.4, ax=ax, orient='v',
s=scatter_size, color='k', alpha=0.4)
ax.set_ylabel(data.columns[i], fontsize=label_size)
ax.set_xlabel('')
if n_groups == 1:
ax.set_xticklabels([])
if data['groups'].nunique() == 1:
ax.set_title('Median: %.1f' % (int(np.median(vals['v']))), fontsize=title_size)
clean_axis(ax)
fig.suptitle(sup_title, fontsize=sup_title_size)
plt.tight_layout()
plt.show()
def plot_mean_var(nzm: np.ndarray, fv: np.ndarray, n_cells: np.ndarray, hvg: np.ndarray,
ax_label_fs: float = 12, fig_size: Tuple[float, float] = (4.5, 4.0),
ss: Tuple[float, float] = (3, 30), cmaps: Tuple[str, str] = ('winter', 'magma_r')):
_, ax = plt.subplots(1, 1, figsize=fig_size)
nzm = np.log2(nzm)
fv = np.log2(fv)
ax.scatter(nzm[~hvg], fv[~hvg], alpha=0.6, c=n_cells[~hvg], cmap=cmaps[0], s=ss[0])
ax.scatter(nzm[hvg], fv[hvg], alpha=0.8, c=n_cells[hvg], cmap=cmaps[1], s=ss[1], edgecolor='k', lw=0.5)
ax.set_xlabel('Log mean non-zero expression', fontsize=ax_label_fs)
ax.set_ylabel('Log corrected variance', fontsize=ax_label_fs)
clean_axis(ax)
plt.tight_layout()
plt.show()
def plot_heatmap(cdf, fontsize: float = 10, width_factor: float = 0.03, height_factor: float = 0.02,
cmap=cm.matter_r, savename: str = None, save_dpi: int = 300, figsize=None):
if figsize is None:
figsize = (cdf.shape[1]*fontsize*width_factor, fontsize*cdf.shape[0]*height_factor)
cgx = sns.clustermap(cdf, yticklabels=cdf.index, xticklabels=cdf.columns, method='ward',
figsize=figsize, cmap=cmap, rasterized=True)
cgx.ax_heatmap.set_yticklabels(cdf.index[cgx.dendrogram_row.reordered_ind], fontsize=fontsize)
cgx.ax_heatmap.set_xticklabels(cdf.columns[cgx.dendrogram_col.reordered_ind], fontsize=fontsize)
cgx.ax_heatmap.figure.patch.set_alpha(0)
cgx.ax_heatmap.patch.set_alpha(0)
if savename:
plt.savefig(savename, dpi=save_dpi)
plt.show()
return None
def _scatter_fix_type(v: pd.Series, ints_as_cats: bool) -> pd.Series:
vt = v.dtype
if v.nunique() == 1:
return pd.Series(np.ones(len(v)), index=v.index).astype(np.float_)
if vt in [np.bool_]:
# converting first to int to handle bool
return v.astype(np.int_).astype('category')
if vt in [str, object] or vt.name == 'category':
return v.astype('category')
elif np.issubdtype(vt.type, np.integer) and ints_as_cats:
if v.nunique() > 100:
logger.warning("Too many categories. set force_ints_as_cats to false")
return v.astype(np.int_).astype('category')
else:
return v.astype(np.float_)
def _scatter_fix_mask(v: pd.Series, mask_vals: list, mask_name: str) -> pd.Series:
if mask_vals is None:
mask_vals = []
mask_vals += [np.NaN]
iscat = False
if v.dtype.name == 'category':
iscat = True
v = v.astype(object)
# There is a bug in pandas which causes failure above 1M rows
# v[v.isin(mask_vals)] = mask_name
v[np.isin(v, mask_vals)] = mask_name
if iscat:
v = v.astype('category')
return v
def _scatter_make_colors(v: pd.Series, cmap, color_key: Optional[dict], mask_color: str, mask_name: str):
from matplotlib.cm import get_cmap
if v.dtype.name != 'category':
if cmap is None:
return cm.deep, None
else:
return get_cmap(cmap), None
else:
if cmap is None:
cmap = 'tab20'
na_idx = v == mask_name
uv = v[~na_idx].unique()
if color_key is not None:
for i in uv:
if i not in color_key:
raise KeyError(f"ERROR: key {i} missing in `color_key`")
if na_idx.sum() > 0:
if mask_name not in color_key:
color_key[mask_name] = mpl.colors.to_hex(mask_color)
return None, color_key
else:
pal = sns.color_palette(cmap, n_colors=len(uv)).as_hex()
color_key = dict(zip(sorted(uv), pal))
if na_idx.sum() > 0:
color_key[mask_name] = mpl.colors.to_hex(mask_color)
return None, color_key
def _scatter_cleanup(ax, sw: float, sc: str, ds: tuple) -> None:
for i in ['bottom', 'left', 'top', 'right']:
spine = ax.spines[i]
if i in ds:
spine.set_visible(True)
spine.set_linewidth(sw)
spine.set_edgecolor(sc)
else:
spine.set_visible(False)
ax.figure.patch.set_alpha(0)
ax.patch.set_alpha(0)
ax.set_aspect('auto')
return None
def _scatter_label_axis(df, ax, fs: float, fo: float):
x, y = df.columns[:2]
ax.set_xlabel(x, fontsize=fs)
ax.set_ylabel(y, fontsize=fs)
vmin, vmax = df[x].min(), df[x].max()
ax.set_xlim((vmin - abs(vmin * fo), vmax + abs(vmax * fo)))
vmin, vmax = df[y].min(), df[y].max()
ax.set_ylim((vmin - abs(vmin * fo), vmax + abs(vmax * fo)))
ax.set_xticks([])
ax.set_yticks([])
return None
def _scatter_legends(df, ax, fig, cmap, ck, ondata: bool, onside: bool, fontsize: float,
n_per_col: int, scale: float, ls: float, cs: float) -> None:
from matplotlib.colors import Normalize
from matplotlib.colorbar import ColorbarBase
x, y, vc = df.columns[:3]
v = df[vc]
if v.nunique() <= 1:
return None
if v.dtype.name == 'category':
centers = df[[x, y, vc]].groupby(vc).median().T
for i in centers:
if ondata:
ax.text(centers[i][x], centers[i][y], i, fontsize=fontsize,
ha='center', va='center')
if onside:
ax.scatter([float(centers[i][x])], [float(centers[i][y])],
c=ck[i], label=i, alpha=1, s=0.01)
if onside:
n_cols = v.nunique() // n_per_col
if v.nunique() % n_per_col > 0:
n_cols += 1
ax.legend(ncol=n_cols, loc=(1, 0), frameon=False, fontsize=fontsize,
markerscale=scale, labelspacing=ls, columnspacing=cs)
else:
if fig is not None:
cbaxes = fig.add_axes([0.2, 1, 0.6, 0.05])
norm = Normalize(vmin=v.min(), vmax=v.max())
cb = ColorbarBase(cbaxes, cmap=cmap, norm=norm, orientation='horizontal')
cb.set_label(vc, fontsize=fontsize)
cb.ax.xaxis.set_label_position('top')
else:
logger.warning("Not plotting the colorbar because fig object was not passed")
return None
def plot_scatter(df, in_ax=None, fig=None, width: float = 6, height: float = 6,
default_color: str = 'steelblue', color_map=None, color_key: dict = None,
mask_values: list = None, mask_name: str = 'NA', mask_color: str = 'k',
point_size: float = 10, ax_label_size: float = 12, frame_offset: float = 0.05,
spine_width: float = 0.5, spine_color: str = 'k', displayed_sides: tuple = ('bottom', 'left'),
legend_ondata: bool = True, legend_onside: bool = True,
legend_size: float = 12, legends_per_col: int = 20,
marker_scale: float = 70, lspacing: float = 0.1, cspacing: float = 1,
savename: str = None, dpi: int = 300, force_ints_as_cats: bool = True, scatter_kwargs: dict = None):
from matplotlib.colors import to_hex
def _handle_scatter_kwargs(sk):
if sk is None:
sk = {}
if 'c' in sk:
logger.warning('scatter_kwarg value `c` will be ignored')
del sk['c']
if 's' in sk:
logger.warning('scatter_kwarg value `s` will be ignored')
del sk['s']
if 'lw' not in sk:
sk['lw'] = 0.1
if 'edgecolors' not in sk:
sk['edgecolors'] = 'k'
return sk
dim1, dim2, vc = df.columns[:3]
v = _scatter_fix_mask(df[vc].copy(), mask_values, mask_name)
v = _scatter_fix_type(v, force_ints_as_cats)
df[vc] = v
color_map, color_key = _scatter_make_colors(v, color_map, color_key,
mask_color, mask_name)
if v.dtype.name == 'category':
df['c'] = [color_key[x] for x in v]
else:
if v.nunique() == 1:
df['c'] = [default_color for _ in v]
else:
v = v.copy().fillna(0)
# FIXME: Why have the following line?
pal = color_map
mmv = (v - v.min()) / (v.max() - v.min())
df['c'] = [to_hex(pal(x)) for x in mmv]
if 's' not in df:
df['s'] = [point_size for _ in df.index]
scatter_kwargs = _handle_scatter_kwargs(sk=scatter_kwargs)
if in_ax is None:
fig, ax = plt.subplots(1, 1, figsize=(width, height))
else:
ax = in_ax
ax.scatter(df[dim1].values, df[dim2].values, c=df['c'].values, s=df['s'].values,
rasterized=True, **scatter_kwargs)
_scatter_label_axis(df, ax, ax_label_size, frame_offset)
_scatter_cleanup(ax, spine_width, spine_color, displayed_sides)
_scatter_legends(df, ax, fig, color_map, color_key, legend_ondata, legend_onside,
legend_size, legends_per_col, marker_scale, lspacing, cspacing)
if in_ax is None:
if savename:
plt.savefig(savename, dpi=dpi)
plt.show()
else:
return ax
def shade_scatter(df, figsize: float = 6, pixels: int = 1000, sampling: float = 0.1,
spread_px: int = 1, spread_threshold: float = 0.2, min_alpha: int = 10,
color_map=None, color_key: dict = None,
mask_values: list = None, mask_name: str = 'NA', mask_color: str = 'k',
ax_label_size: float = 12, frame_offset: float = 0.05,
spine_width: float = 0.5, spine_color: str = 'k', displayed_sides: tuple = ('bottom', 'left'),
legend_ondata: bool = True, legend_onside: bool = True,
legend_size: float = 12, legends_per_col: int = 20,
marker_scale: float = 70, lspacing: float = 0.1, cspacing: float = 1,
savename: str = None, dpi: int = 300, force_ints_as_cats: bool = True):
from holoviews.plotting import mpl as hmpl
from holoviews.operation.datashader import datashade, dynspread
import holoviews as hv
import datashader as dsh
from IPython.display import display
dim1, dim2, vc = df.columns[:3]
v = _scatter_fix_mask(df[vc].copy(), mask_values, mask_name)
v = _scatter_fix_type(v, force_ints_as_cats)
df[vc] = v
color_map, color_key = _scatter_make_colors(v, color_map, color_key,
mask_color, mask_name)
if v.dtype.name == 'category':
agg = dsh.count_cat(vc)
else:
if v.nunique() == 1:
agg = dsh.count(vc)
else:
agg = dsh.mean(vc)
points = hv.Points(df, kdims=[dim1, dim2], vdims=vc)
shader = datashade(points, aggregator=agg, cmap=color_map, color_key=color_key,
height=pixels, width=pixels,
x_sampling=sampling, y_sampling=sampling, min_alpha=min_alpha)
shader = dynspread(shader, threshold=spread_threshold, max_px=spread_px)
renderer = hmpl.MPLRenderer.instance()
fig = renderer.get_plot(shader.opts(fig_inches=(figsize, figsize))).state
ax = fig.gca()
_scatter_label_axis(df, ax, ax_label_size, frame_offset)
_scatter_cleanup(ax, spine_width, spine_color, displayed_sides)
_scatter_legends(df, ax, fig, color_map, color_key, legend_ondata, legend_onside,
legend_size, legends_per_col, marker_scale, lspacing, cspacing)
if savename:
fig.savefig(savename, dpi=dpi)
display(fig)
def _draw_pie(ax, dist, colors, xpos, ypos, size):
# https://stackoverflow.com/questions/56337732/how-to-plot-scatter-pie-chart-using-matplotlib
cumsum = np.cumsum(dist)
cumsum = cumsum / cumsum[-1]
pie = [0] + cumsum.tolist()
for r1, r2, c in zip(pie[:-1], pie[1:], colors):
angles = np.linspace(2 * np.pi * r1, 2 * np.pi * r2)
x = [0] + np.cos(angles).tolist()
y = [0] + np.sin(angles).tolist()
xy = np.column_stack([x, y])
ax.scatter([xpos], [ypos], marker=xy, s=size, c=c)
def plot_cluster_hierarchy(sg, clusts, color_values=None, force_ints_as_cats: bool = True,
width: float = 2, lvr_factor: float = 0.5, vert_gap: float = 0.2,
min_node_size: float = 10, node_size_multiplier: float = 1e4, node_power: float = 1,
root_size: float = 100, non_leaf_size: float = 10,
show_labels: bool = False, fontsize=10,
root_color: str = '#C0C0C0', non_leaf_color: str = 'k',
cmap: str = None, color_key: bool = None, edgecolors: str = 'k',
edgewidth: float = 1, alpha: float = 0.7, figsize=(5, 5), ax=None, show_fig: bool = True,
savename: str = None, save_dpi=300):
import networkx as nx
import EoN
import math
from matplotlib.colors import to_hex
if color_values is None:
color_values = pd.Series(clusts)
using_clust_for_colors = True
else:
color_values = | pd.Series(color_values) | pandas.Series |
# !/usr/bin/python
# -*- coding:utf-8 -*-
# @Time : 2021/3/16 19:40
# @Author : JamesYang
# @File : TextProcessing.py
import pandas as pd
"""
密文预处理
"""
def bitwise_32(file_dir): # 每32个bit计算一下1的个数
with open(file_dir, 'r', encoding='utf-8') as f:
line = f.readline()
transform = []
for i in range(1024):
temp = line[i * 8:(i + 1) * 8]
rlt = bin(int(temp, 16))[2:]
transform.append(rlt.count('1'))
return transform
def bitwise_16(file_dir):
"""
读取一个密文数据并且生成特征 密文最少有4096个十六进制数
:param file_dir: 文件地址
:return: 特征向量 [1024]
"""
with open(file_dir, 'r', encoding='utf-8') as f:
line = f.readline()
transform = []
for i in range(1024): # 1024维的一个特征
temp = line[i * 4:(i + 1) * 4]
rlt = bin(int(temp, 16))[2:]
transform.append(rlt.count('1'))
return transform
def bitwise_8(file_dir): # 每8个bit计算一下1的个数 2KB(2048个16进制数)
"""
读取一个密文数据并且生成特征 密文最少有2048个十六进制数
:param file_dir: 文件地址
:return: 特征向量 [1024]
"""
with open(file_dir, 'r', encoding='utf-8') as f:
line = f.readline()
transform = []
for i in range(1024): # 1024维的一个特征
temp = line[i * 2:(i + 1) * 2]
if temp == "":
continue
rlt = bin(int(temp, 16))[2:]
transform.append(rlt.count('1'))
return transform
def bitwise_4(file_dir): # 每4个bit计算一下1的个数
with open(file_dir, 'r', encoding='utf-8') as f:
line = f.readline()
transform = []
for i in range(1024):
temp = line[i * 1:(i + 1) * 1]
rlt = bin(int(temp, 16))[2:]
transform.append(rlt.count('1'))
return transform
# deprecated below
def record(line: str):
# 原本的特征提取方法
transform = []
# lens = 4100 // 4
for i in range(1024):
temp = line[i * 4:(i + 1) * 4]
# 计算01串中1的个数
rlt = bin(int(temp, 16))[2:]
transform.append(rlt.count('1'))
# 计算数值
# rlt = int(temp, 16)
# transform.append(int(rlt))
return transform
def get_bitwise_16(filename):
transforms = []
counter = 0
with open(filename, 'r') as f:
lines = f.readlines()
for line in lines:
counter += 1
line = line.replace('\n', '')
if line == '""':
continue
if len(line) > 4096:
transforms.append(str(record(line)))
if counter > 8000:
break
dataframe = pd.DataFrame({'data_frame': transforms})
new_filename = filename.replace('.csv', '') + '_ones.csv'
dataframe.to_csv(new_filename, ',')
return new_filename
# ifile.writelines(transforms)
def get_bitwise_8(filename):
transforms = []
counter = 0
with open(filename, 'r') as f:
lines = f.readlines()
for line in lines:
counter += 1
line = line.replace('\n', '')
if line == '""':
continue
if len(line) > 4096:
transforms.append(str(record(line)))
if counter > 8000:
break
dataframe = | pd.DataFrame({'data_frame': transforms}) | pandas.DataFrame |
"""Access NLDI and WaterData databases."""
import numbers
from json import JSONDecodeError
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
import geopandas as gpd
import networkx as nx
import numpy as np
import pandas as pd
import pygeoogc as ogc
import pygeoutils as geoutils
from pandas._libs.missing import NAType
from pygeoogc import WFS, ArcGISRESTful, MatchCRS, RetrySession, ServiceURL
from requests import Response
from shapely.geometry import MultiPolygon, Polygon
from .exceptions import InvalidInputType, InvalidInputValue, MissingItems, ZeroMatched
DEF_CRS = "epsg:4326"
ALT_CRS = "epsg:4269"
class WaterData:
"""Access to `Water Data <https://labs.waterdata.usgs.gov/geoserver>`__ service.
Parameters
----------
layer : str
A valid layer from the WaterData service. Valid layers are:
``nhdarea``, ``nhdwaterbody``, ``catchmentsp``, ``nhdflowline_network``
``gagesii``, ``huc08``, ``huc12``, ``huc12agg``, and ``huc12all``. Note that
the layers' worksapce for the Water Data service is ``wmadata`` which will
be added to the given ``layer`` argument if it is not provided.
crs : str, optional
The target spatial reference system, defaults to ``epsg:4326``.
"""
def __init__(
self,
layer: str,
crs: str = DEF_CRS,
) -> None:
self.layer = layer if ":" in layer else f"wmadata:{layer}"
self.crs = crs
self.wfs = WFS(
ServiceURL().wfs.waterdata,
layer=self.layer,
outformat="application/json",
version="2.0.0",
crs=ALT_CRS,
)
def bybox(
self, bbox: Tuple[float, float, float, float], box_crs: str = DEF_CRS
) -> gpd.GeoDataFrame:
"""Get features within a bounding box."""
resp = self.wfs.getfeature_bybox(bbox, box_crs, always_xy=True)
return self.to_geodf(resp)
def bygeom(
self,
geometry: Union[Polygon, MultiPolygon],
geo_crs: str = DEF_CRS,
xy: bool = True,
predicate: str = "INTERSECTS",
) -> gpd.GeoDataFrame:
"""Get features within a geometry.
Parameters
----------
geometry : shapely.geometry
The input geometry
geom_crs : str, optional
The CRS of the input geometry, default to epsg:4326.
xy : bool, optional
Whether axis order of the input geometry is xy or yx.
predicate : str, optional
The geometric prediacte to use for requesting the data, defaults to
INTERSECTS. Valid predicates are:
EQUALS, DISJOINT, INTERSECTS, TOUCHES, CROSSES, WITHIN, CONTAINS,
OVERLAPS, RELATE, DWITHIN, BEYOND
Returns
-------
geopandas.GeoDataFrame
The requested features in the given geometry.
"""
resp = self.wfs.getfeature_bygeom(geometry, geo_crs, always_xy=not xy, predicate=predicate)
return self.to_geodf(resp)
def byid(self, featurename: str, featureids: Union[List[str], str]) -> gpd.GeoDataFrame:
"""Get features based on IDs."""
resp = self.wfs.getfeature_byid(featurename, featureids)
return self.to_geodf(resp)
def byfilter(self, cql_filter: str, method: str = "GET") -> gpd.GeoDataFrame:
"""Get features based on a CQL filter."""
resp = self.wfs.getfeature_byfilter(cql_filter, method)
return self.to_geodf(resp)
def to_geodf(self, resp: Response) -> gpd.GeoDataFrame:
"""Convert a response from WaterData to a GeoDataFrame.
Parameters
----------
resp : Response
A response from a WaterData request.
Returns
-------
geopandas.GeoDataFrame
The requested features in a GeoDataFrames.
"""
return geoutils.json2geodf(resp.json(), ALT_CRS, self.crs)
class NHDPlusHR:
"""Access NHDPlus HR database through the National Map ArcGISRESTful.
Parameters
----------
layer : str
A valid service layer. For a list of available layers pass an empty string to
the class.
outfields : str or list, optional
Target field name(s), default to "*" i.e., all the fileds.
crs : str, optional
Target spatial reference, default to EPSG:4326
"""
def __init__(self, layer: str, outfields: Union[str, List[str]] = "*", crs: str = DEF_CRS):
self.service = ArcGISRESTful(
ServiceURL().restful.nhdplushr,
outformat="json",
outfields=outfields,
crs=crs,
)
valid_layers = self.service.get_validlayers()
self.valid_layers = {v.lower(): k for k, v in valid_layers.items()}
if layer not in self.valid_layers:
raise InvalidInputValue("layer", list(self.valid_layers))
self.service.layer = self.valid_layers[layer]
self.outfields = outfields
self.crs = crs
def bygeom(
self,
geom: Union[Polygon, Tuple[float, float, float, float]],
geo_crs: str = "epsg:4326",
sql_clause: str = "",
return_m: bool = False,
) -> gpd.GeoDataFrame:
"""Get feature within a geometry that can be combined with a SQL where clause.
Parameters
----------
geom : Polygon or tuple
A geometry (Polgon) or bounding box (tuple of length 4).
geo_crs : str
The spatial reference of the input geometry.
sql_clause : str, optional
A valid SQL 92 WHERE clause, defaults to an empty string.
return_m : bool
Whether to activate the Return M (measure) in the request, defaults to False.
Returns
-------
geopandas.GeoDataFrame
The requested features as a GeoDataFrame.
"""
self.service.oids_bygeom(geom, geo_crs=geo_crs, sql_clause=sql_clause)
return self._getfeatures(return_m)
def byids(
self, field: str, fids: Union[str, List[str]], return_m: bool = False
) -> gpd.GeoDataFrame:
"""Get features based on a list of field IDs.
Parameters
----------
field : str
Name of the target field that IDs belong to.
fids : str or list
A list of target field ID(s).
return_m : bool
Whether to activate the Return M (measure) in the request, defaults to False.
Returns
-------
geopandas.GeoDataFrame
The requested features as a GeoDataFrame.
"""
self.service.oids_byfield(field, fids)
return self._getfeatures(return_m)
def bysql(self, sql_clause: str, return_m: bool = False) -> gpd.GeoDataFrame:
"""Get feature IDs using a valid SQL 92 WHERE clause.
Notes
-----
Not all web services support this type of query. For more details look
`here <https://developers.arcgis.com/rest/services-reference/query-feature-service-.htm#ESRI_SECTION2_07DD2C5127674F6A814CE6C07D39AD46>`__
Parameters
----------
sql_clause : str
A valid SQL 92 WHERE clause.
return_m : bool
Whether to activate the Return M (measure) in the request, defaults to False.
Returns
-------
geopandas.GeoDataFrame
The requested features as a GeoDataFrame.
"""
self.service.oids_bysql(sql_clause)
return self._getfeatures(return_m)
def _getfeatures(self, return_m: bool = False):
"""Send a request for getting data based on object IDs.
Parameters
----------
return_m : bool
Whether to activate the Return M (measure) in the request, defaults to False.
Returns
-------
geopandas.GeoDataFrame
The requested features as a GeoDataFrame.
"""
resp = self.service.get_features(return_m)
return geoutils.json2geodf(resp)
class NLDI:
"""Access the Hydro Network-Linked Data Index (NLDI) service."""
def __init__(self) -> None:
self.base_url = ServiceURL().restful.nldi
self.session = RetrySession()
resp = self.session.get("/".join([self.base_url, "linked-data"])).json()
self.valid_fsources = {r["source"]: r["sourceName"] for r in resp}
resp = self.session.get("/".join([self.base_url, "lookups"])).json()
self.valid_chartypes = {r["type"]: r["typeName"] for r in resp}
def getfeature_byid(self, fsource: str, fid: str, basin: bool = False) -> gpd.GeoDataFrame:
"""Get features of a single id.
Parameters
----------
fsource : str
The name of feature source. The valid sources are:
comid, huc12pp, nwissite, wade, wqp
fid : str
The ID of the feature.
basin : bool
Whether to return the basin containing the feature.
Returns
-------
geopandas.GeoDataFrame
NLDI indexed features in EPSG:4326.
"""
self._validate_fsource(fsource)
url = "/".join([self.base_url, "linked-data", fsource, fid])
if basin:
url += "/basin"
return geoutils.json2geodf(self._geturl(url), ALT_CRS, DEF_CRS)
def getcharacteristic_byid(
self,
comids: Union[List[str], str],
char_type: str,
char_ids: str = "all",
values_only: bool = True,
) -> Union[pd.DataFrame, Tuple[pd.DataFrame, pd.DataFrame]]:
"""Get characteristics using a list ComIDs.
Parameters
----------
comids : str or list
The ID of the feature.
char_type : str
Type of the characteristic. Valid values are ``local`` for
individual reach catchments, ``tot`` for network-accumulated values
using total cumulative drainage area and ``div`` for network-accumulated values
using divergence-routed.
char_ids : str or list, optional
Name(s) of the target characteristics, default to all.
values_only : bool, optional
Whether to return only ``characteristic_value`` as a series, default to True.
If is set to False, ``percent_nodata`` is returned as well.
Returns
-------
pandas.DataFrame or tuple of pandas.DataFrame
Either only ``characteristic_value`` as a dataframe or
or if ``values_only`` is Fale return ``percent_nodata`` is well.
"""
if char_type not in self.valid_chartypes:
valids = [f'"{s}" for {d}' for s, d in self.valid_chartypes.items()]
raise InvalidInputValue("char", valids)
comids = comids if isinstance(comids, list) else [comids]
v_dict, nd_dict = {}, {}
if char_ids == "all":
payload = None
else:
_char_ids = char_ids if isinstance(char_ids, list) else [char_ids]
valid_charids = self.get_validchars(char_type)
idx = valid_charids.index
if any(c not in idx for c in _char_ids):
vids = valid_charids["characteristic_description"]
raise InvalidInputValue("char_id", [f'"{s}" for {d}' for s, d in vids.items()])
payload = {"characteristicId": ",".join(_char_ids)}
for comid in comids:
url = "/".join([self.base_url, "linked-data", "comid", comid, char_type])
rjson = self._geturl(url, payload)
char = pd.DataFrame.from_dict(rjson["characteristics"], orient="columns").T
char.columns = char.iloc[0]
char = char.drop(index="characteristic_id")
v_dict[comid] = char.loc["characteristic_value"]
if values_only:
continue
nd_dict[comid] = char.loc["percent_nodata"]
def todf(df_dict: Dict[str, pd.DataFrame]) -> pd.DataFrame:
df = pd.DataFrame.from_dict(df_dict, orient="index")
df[df == ""] = np.nan
df.index = df.index.astype("int64")
return df.astype("f4")
chars = todf(v_dict)
if values_only:
return chars
return chars, todf(nd_dict)
def get_validchars(self, char_type: str) -> pd.DataFrame:
"""Get all the avialable characteristics IDs for a give characteristics type."""
resp = self.session.get("/".join([self.base_url, "lookups", char_type, "characteristics"]))
c_list = ogc.utils.traverse_json(resp.json(), ["characteristicMetadata", "characteristic"])
return pd.DataFrame.from_dict(
{c.pop("characteristic_id"): c for c in c_list}, orient="index"
)
def navigate_byid(
self,
fsource: str,
fid: str,
navigation: str,
source: str,
distance: int = 500,
) -> gpd.GeoDataFrame:
"""Navigate the NHDPlus databse from a single feature id up to a distance.
Parameters
----------
fsource : str
The name of feature source. The valid sources are:
comid, huc12pp, nwissite, wade, WQP.
fid : str
The ID of the feature.
navigation : str
The navigation method.
source : str, optional
Return the data from another source after navigating
the features using fsource, defaults to None.
distance : int, optional
Limit the search for navigation up to a distance in km,
defaults is 500 km. Note that this is an expensive request so you
have be mindful of the value that you provide.
Returns
-------
geopandas.GeoDataFrame
NLDI indexed features in EPSG:4326.
"""
self._validate_fsource(fsource)
url = "/".join([self.base_url, "linked-data", fsource, fid, "navigation"])
valid_navigations = self._geturl(url)
if navigation not in valid_navigations.keys():
raise InvalidInputValue("navigation", list(valid_navigations.keys()))
url = valid_navigations[navigation]
r_json = self._geturl(url)
valid_sources = {s["source"].lower(): s["features"] for s in r_json}
if source not in valid_sources:
raise InvalidInputValue("source", list(valid_sources.keys()))
url = f"{valid_sources[source]}?distance={int(distance)}"
return geoutils.json2geodf(self._geturl(url), ALT_CRS, DEF_CRS)
def navigate_byloc(
self,
coords: Tuple[float, float],
navigation: Optional[str] = None,
source: Optional[str] = None,
loc_crs: str = DEF_CRS,
distance: int = 500,
comid_only: bool = False,
) -> gpd.GeoDataFrame:
"""Navigate the NHDPlus databse from a coordinate.
Parameters
----------
coordinate : tuple
A tuple of length two (x, y).
navigation : str, optional
The navigation method, defaults to None which throws an exception
if comid_only is False.
source : str, optional
Return the data from another source after navigating
the features using fsource, defaults to None which throws an exception
if comid_only is False.
loc_crs : str, optional
The spatial reference of the input coordinate, defaults to EPSG:4326.
distance : int, optional
Limit the search for navigation up to a distance in km,
defaults to 500 km. Note that this is an expensive request so you
have be mindful of the value that you provide. If you want to get
all the available features you can pass a large distance like 9999999.
comid_only : bool, optional
Whether to return the nearest comid without navigation.
Returns
-------
geopandas.GeoDataFrame
NLDI indexed features in EPSG:4326.
"""
_coords = MatchCRS().coords(((coords[0],), (coords[1],)), loc_crs, DEF_CRS)
lon, lat = _coords[0][0], _coords[1][0]
url = "/".join([self.base_url, "linked-data", "comid", "position"])
payload = {"coords": f"POINT({lon} {lat})"}
rjson = self._geturl(url, payload)
comid = geoutils.json2geodf(rjson, ALT_CRS, DEF_CRS).comid.iloc[0]
if comid_only:
return comid
if navigation is None or source is None:
raise MissingItems(["navigation", "source"])
return self.navigate_byid("comid", comid, navigation, source, distance)
def characteristics_dataframe(
self,
char_type: str,
char_id: str,
filename: Optional[str] = None,
metadata: bool = False,
) -> Union[Dict[str, Any], pd.DataFrame]:
"""Get a NHDPlus-based characteristic from sciencebase.gov as dataframe.
Parameters
----------
char_type : str
Characteristic type. Valid values are ``local`` for
individual reach catchments, ``tot`` for network-accumulated values
using total cumulative drainage area and ``div`` for network-accumulated values
using divergence-routed.
char_id : str
Characteristic ID.
filename : str, optional
File name, defaults to None that throws an error and shows
a list of available files.
metadata : bool
Whether to only return the metadata for the selected characteristic,
defaults to False. Useful for getting information about the dataset
such as citation, units, column names, etc.
Returns
-------
pandas.DataFrame or dict
The requested characteristic as a dataframe or if ``metadata`` is True
the metadata as a dictionary.
"""
if char_type not in self.valid_chartypes:
valids = [f'"{s}" for {d}' for s, d in self.valid_chartypes.items()]
raise InvalidInputValue("char", valids)
valid_charids = self.get_validchars(char_type)
if char_id not in valid_charids.index:
vids = valid_charids["characteristic_description"]
raise InvalidInputValue("char_id", [f'"{s}" for {d}' for s, d in vids.items()])
meta = self.session.get(
valid_charids.loc[char_id, "dataset_url"], {"format": "json"}
).json()
if metadata:
return meta
flist = {
f["name"]: f["downloadUri"] for f in meta["files"] if f["name"].split(".")[-1] == "zip"
}
if filename not in flist:
raise InvalidInputValue("filename", list(flist.keys()))
return | pd.read_csv(flist[filename], compression="zip") | pandas.read_csv |
"""
Script for plotting Figures 3, 5, 6
"""
import numpy as np
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.lines import Line2D
import matplotlib.ticker as mtick
import seaborn as sns
from datetime import timedelta
import plotly.graph_objects as go
countries = ['Brazil', 'Canada', 'England', 'France', 'Germany', 'India', 'Japan', 'Scotland', 'USA', 'Wales']
samp_entropy_df = {}
for country in countries:
df = pd.read_csv(f'data/entropy/monthly/fast_samp_entropy_monthly_{country}.csv')
df['Date'] = | pd.to_datetime(df['Date']) | pandas.to_datetime |
import pandas as pd
from .constants import *
def compare_frameworks(results_raw, frameworks=None, banned_datasets=None, folds_to_keep=None, filter_errors=True, verbose=True, columns_to_agg_extra=None, datasets=None):
columns_to_agg = [DATASET, FRAMEWORK, PROBLEM_TYPE, TIME_TRAIN_S, METRIC_ERROR]
if columns_to_agg_extra:
columns_to_agg += columns_to_agg_extra
if frameworks is None:
frameworks = sorted(list(results_raw[FRAMEWORK].unique()))
if filter_errors: # FIXME: This should not be toggled, instead filter_errors should be passed to filter_results
results = filter_results(results_raw=results_raw, valid_frameworks=frameworks, banned_datasets=banned_datasets, folds_to_keep=folds_to_keep)
else:
results = results_raw.copy()
results_agg = results[columns_to_agg].groupby([DATASET, FRAMEWORK, PROBLEM_TYPE]).mean().reset_index()
worst_scores = results_agg.sort_values(METRIC_ERROR, ascending=False).drop_duplicates(DATASET)
worst_scores = worst_scores[[DATASET, METRIC_ERROR]]
worst_scores.columns = [DATASET, 'WORST_ERROR']
best_scores = results_agg.sort_values(METRIC_ERROR, ascending=True).drop_duplicates(DATASET)
best_scores = best_scores[[DATASET, METRIC_ERROR]]
best_scores.columns = [DATASET, 'BEST_ERROR']
results_agg = results_agg.merge(best_scores, on=DATASET)
results_agg = results_agg.merge(worst_scores, on=DATASET)
results_agg[BESTDIFF] = 1 - (results_agg['BEST_ERROR'] / results_agg[METRIC_ERROR])
results_agg[LOSS_RESCALED] = (results_agg[METRIC_ERROR] - results_agg['BEST_ERROR']) / (results_agg['WORST_ERROR'] - results_agg['BEST_ERROR'])
results_agg[BESTDIFF] = results_agg[BESTDIFF].fillna(0)
results_agg[LOSS_RESCALED] = results_agg[LOSS_RESCALED].fillna(0)
results_agg = results_agg.drop(['BEST_ERROR'], axis=1)
results_agg = results_agg.drop(['WORST_ERROR'], axis=1)
valid_tasks = list(results_agg[DATASET].unique())
results_ranked, results_ranked_by_dataset = rank_result(results_agg)
rank_1 = results_ranked_by_dataset[results_ranked_by_dataset[RANK] == 1]
rank_1_count = rank_1[FRAMEWORK].value_counts()
results_ranked['rank=1_count'] = rank_1_count
results_ranked['rank=1_count'] = results_ranked['rank=1_count'].fillna(0).astype(int)
rank_2 = results_ranked_by_dataset[(results_ranked_by_dataset[RANK] > 1) & (results_ranked_by_dataset[RANK] <= 2)]
rank_2_count = rank_2[FRAMEWORK].value_counts()
results_ranked['rank=2_count'] = rank_2_count
results_ranked['rank=2_count'] = results_ranked['rank=2_count'].fillna(0).astype(int)
rank_3 = results_ranked_by_dataset[(results_ranked_by_dataset[RANK] > 2) & (results_ranked_by_dataset[RANK] <= 3)]
rank_3_count = rank_3[FRAMEWORK].value_counts()
results_ranked['rank=3_count'] = rank_3_count
results_ranked['rank=3_count'] = results_ranked['rank=3_count'].fillna(0).astype(int)
rank_l3 = results_ranked_by_dataset[(results_ranked_by_dataset[RANK] > 3)]
rank_l3_count = rank_l3[FRAMEWORK].value_counts()
results_ranked['rank>3_count'] = rank_l3_count
results_ranked['rank>3_count'] = results_ranked['rank>3_count'].fillna(0).astype(int)
if datasets is None:
datasets = sorted(list(results_ranked_by_dataset[DATASET].unique()))
errors_list = []
for framework in frameworks:
results_framework = filter_results(results_raw=results_raw, valid_frameworks=[framework], banned_datasets=banned_datasets, folds_to_keep=folds_to_keep)
results_framework_agg = results_framework[columns_to_agg].groupby([DATASET, FRAMEWORK, PROBLEM_TYPE]).mean().reset_index()
num_valid = len(results_framework_agg[results_framework_agg[FRAMEWORK] == framework])
num_errors = len(datasets) - num_valid
errors_list.append(num_errors)
errors_series = | pd.Series(data=errors_list, index=frameworks) | pandas.Series |
from ..common import convert_to_instance, convert_to_model, match_instance_to_data, match_model_to_data, convert_to_instance_with_index, convert_to_link, IdentityLink, convert_to_data, DenseData, SparseData
from scipy.special import binom
import numpy as np
import pandas as pd
import scipy as sp
import logging
import copy
import itertools
import warnings
from sklearn.linear_model import LassoLarsIC, Lasso, lars_path
from sklearn.cluster import KMeans
from tqdm.auto import tqdm
from .explainer import Explainer
from Generators.DropoutVAE import DropoutVAE
from sklearn.preprocessing import MinMaxScaler
log = logging.getLogger('shap')
def kmeans(X, k, round_values=True):
""" Summarize a dataset with k mean samples weighted by the number of data points they
each represent.
Parameters
----------
X : numpy.array or pandas.DataFrame
Matrix of data samples to summarize (# samples x # features)
k : int
Number of means to use for approximation.
round_values : bool
For all i, round the ith dimension of each mean sample to match the nearest value
from X[:,i]. This ensures discrete features always get a valid value.
Returns
-------
DenseData object.
"""
group_names = [str(i) for i in range(X.shape[1])]
if str(type(X)).endswith("'pandas.core.frame.DataFrame'>"):
group_names = X.columns
X = X.values
kmeans = KMeans(n_clusters=k, random_state=0).fit(X)
if round_values:
for i in range(k):
for j in range(X.shape[1]):
ind = np.argmin(np.abs(X[:,j] - kmeans.cluster_centers_[i,j]))
kmeans.cluster_centers_[i,j] = X[ind,j]
return DenseData(kmeans.cluster_centers_, group_names, None, 1.0*np.bincount(kmeans.labels_))
class KernelExplainer(Explainer):
"""Uses the Kernel SHAP method to explain the output of any function.
Kernel SHAP is a method that uses a special weighted linear regression
to compute the importance of each feature. The computed importance values
are Shapley values from game theory and also coefficents from a local linear
regression.
Parameters
----------
model : function or iml.Model
User supplied function that takes a matrix of samples (# samples x # features) and
computes a the output of the model for those samples. The output can be a vector
(# samples) or a matrix (# samples x # model outputs).
data : numpy.array or pandas.DataFrame or shap.common.DenseData or any scipy.sparse matrix
The background dataset to use for integrating out features. To determine the impact
of a feature, that feature is set to "missing" and the change in the model output
is observed. Since most models aren't designed to handle arbitrary missing data at test
time, we simulate "missing" by replacing the feature with the values it takes in the
background dataset. So if the background dataset is a simple sample of all zeros, then
we would approximate a feature being missing by setting it to zero. For small problems
this background dataset can be the whole training set, but for larger problems consider
using a single reference value or using the kmeans function to summarize the dataset.
Note: for sparse case we accept any sparse matrix but convert to lil format for
performance.
link : "identity" or "logit"
A generalized linear model link to connect the feature importance values to the model
output. Since the feature importance values, phi, sum up to the model output, it often makes
sense to connect them to the ouput with a link function where link(outout) = sum(phi).
If the model output is a probability then the LogitLink link function makes the feature
importance values have log-odds units.
Additional arguments required for use of the generators (all are located in kwargs):
generator: "DropoutVAE", "RBF" or "Forest". If this argument is not given, basic perturbation sampling
is used
generator_specs: Dictionary with values, required by the generator (required if generator is given)
dummy_idcs: List of lists of indeces that represent one-hot encoding of the same categorical feature
integer_idcs: List of indeces of integer features
instance_multiplier: Integer, size of the generated distribution set (set to 100 if not given)
"""
def __init__(self, model, data, link=IdentityLink(), **kwargs):
# Check if we have a generator
self.generator = kwargs.get("generator", None)
# Check for generator_specs
if self.generator is not None:
if kwargs.get("generator_specs") is None:
raise ValueError("Argument generator_specs required")
self.generator_specs = kwargs.get("generator_specs")
# Needed if we use generators
self.dummy_idcs = kwargs.get("dummy_idcs", [])
# Train the generator and Scaler if needed
if self.generator == "DropoutVAE":
if self.generator_specs.get("original_dim") is None or self.generator_specs.get("latent_dim") is None:
raise ValueError("original_dim and latent_dim should not be None")
self.generator = DropoutVAE(original_dim = self.generator_specs["original_dim"],
input_shape = (self.generator_specs["original_dim"],),
intermediate_dim = self.generator_specs.get("intermediate_dim", 128),
dropout = self.generator_specs.get("dropout", 0.3),
latent_dim = self.generator_specs["latent_dim"])
self.scaler = MinMaxScaler()
data_train = self.scaler.fit_transform(data)
self.generator.fit_unsplit(data_train, epochs = self.generator_specs.get("epochs", 100))
self.integer_idcs = kwargs.get("integer_idcs", [])
self.instance_multiplier = kwargs.get("instance_multiplier", 100)
# convert incoming inputs to standardized iml objects
self.link = convert_to_link(link)
self.model = convert_to_model(model)
self.keep_index = kwargs.get("keep_index", False)
self.keep_index_ordered = kwargs.get("keep_index_ordered", False)
if self.generator in ["RBF", "Forest"]:
if self.generator_specs.get("experiment") is None or self.generator_specs.get("feature_names") is None:
raise ValueError("feature_names and experiment should not be None")
if self.generator == "RBF":
if self.generator_specs["experiment"] == "Compas":
df = pd.read_csv("..\Data\compas_RBF.csv")
elif self.generator_specs["experiment"] == "German":
df = pd.read_csv("..\Data\german_RBF.csv")
else:
df = pd.read_csv("..\Data\cc_RBF.csv")
if self.generator_specs["experiment"] != "CC":
df = pd.get_dummies(df)
df = df[self.generator_specs["feature_names"]]
else:
if self.generator_specs["experiment"] == "Compas":
df = pd.read_csv("..\Data\compas_forest.csv")
elif self.generator_specs["experiment"] == "German":
df = pd.read_csv("..\Data\german_forest.csv")
else:
df = pd.read_csv("..\Data\cc_forest.csv")
# pri CC presledke spremeni v pike
if self.generator_specs["experiment"] != "CC":
df = pd.get_dummies(df)
df = df[self.generator_specs["feature_names"]]
# Set the groups, so dummies for the same feature are recognized as one feature
groups = list(range(df.shape[1]))
for dummy in self.dummy_idcs:
for idx in dummy:
groups.remove(idx)
groups.append(np.array(dummy))
# List that can be sorted
groups = [np.array([el]) if not isinstance(el, np.ndarray) else el for el in groups]
sortable = [el[0] for el in groups]
groups = np.array(groups)
# Put the groups indeces in correct order
groups = groups[np.argsort(sortable)]
# Typecast back to list
groups = groups.tolist()
# Names of the groups are just indeces
group_names = [str(i) for i in range(len(groups))]
# Set the distribution set
self.data = DenseData(df.values, group_names, groups)
# Basic option (distribution set obtained by k means)
else:
self.data = convert_to_data(data, keep_index=self.keep_index)
model_null = match_model_to_data(self.model, self.data)
# enforce our current input type limitations
assert isinstance(self.data, DenseData) or isinstance(self.data, SparseData), \
"Shap explainer only supports the DenseData and SparseData input currently."
assert not self.data.transposed, "Shap explainer does not support transposed DenseData or SparseData currently."
# warn users about large background data sets
# (ignore the warning if you are using data generators, true distribution set is set in shap_values)
if len(self.data.weights) > 100:
log.warning("Using " + str(len(self.data.weights)) + " background data samples could cause " +
"slower run times. Consider using shap.sample(data, K) or shap.kmeans(data, K) to " +
"summarize the background as K samples.")
# init our parameters
self.N = self.data.data.shape[0]
self.P = self.data.data.shape[1]
self.linkfv = np.vectorize(self.link.f)
self.nsamplesAdded = 0
self.nsamplesRun = 0
# find E_x[f(x)]
if isinstance(model_null, (pd.DataFrame, pd.Series)):
model_null = np.squeeze(model_null.values)
self.fnull = np.sum((model_null.T * self.data.weights).T, 0)
self.expected_value = self.linkfv(self.fnull)
# see if we have a vector output
self.vector_out = True
if len(self.fnull.shape) == 0:
self.vector_out = False
self.fnull = np.array([self.fnull])
self.D = 1
self.expected_value = float(self.expected_value)
else:
self.D = self.fnull.shape[0]
def shap_values(self, X, **kwargs):
""" Estimate the SHAP values for a set of samples.
Parameters
----------
X : numpy.array or pandas.DataFrame or any scipy.sparse matrix
A matrix of samples (# samples x # features) on which to explain the model's output.
nsamples : "auto" or int
Number of times to re-evaluate the model when explaining each prediction. More samples
lead to lower variance estimates of the SHAP values. The "auto" setting uses
`nsamples = 2 * X.shape[1] + 2048`.
l1_reg : "num_features(int)", "auto" (default for now, but deprecated), "aic", "bic", or float
The l1 regularization to use for feature selection (the estimation procedure is based on
a debiased lasso). The auto option currently uses "aic" when less that 20% of the possible sample
space is enumerated, otherwise it uses no regularization. THE BEHAVIOR OF "auto" WILL CHANGE
in a future version to be based on num_features instead of AIC.
The "aic" and "bic" options use the AIC and BIC rules for regularization.
Using "num_features(int)" selects a fix number of top features. Passing a float directly sets the
"alpha" parameter of the sklearn.linear_model.Lasso model used for feature selection.
Additional arguments if Forest is used as generator (located in kwargs):
fill_data: Boolean, if True the data fill option is used (different distribution set for every instance).
False by default.
data_location: The path to a file, where distribution sets are stored (This is only required because
treeEnsemble is not yet implemented in Python, so distribution sets have to be pregenerated in R).
Returns
-------
For models with a single output this returns a matrix of SHAP values
(# samples x # features). Each row sums to the difference between the model output for that
sample and the expected value of the model output (which is stored as expected_value
attribute of the explainer). For models with vector outputs this returns a list
of such matrices, one for each output.
"""
# convert dataframes
if str(type(X)).endswith("pandas.core.series.Series'>"):
X = X.values
elif str(type(X)).endswith("'pandas.core.frame.DataFrame'>"):
if self.keep_index:
index_value = X.index.values
index_name = X.index.name
column_name = list(X.columns)
X = X.values
x_type = str(type(X))
arr_type = "'numpy.ndarray'>"
# if sparse, convert to lil for performance
if sp.sparse.issparse(X) and not sp.sparse.isspmatrix_lil(X):
X = X.tolil()
assert x_type.endswith(arr_type) or sp.sparse.isspmatrix_lil(X), "Unknown instance type: " + x_type
assert len(X.shape) == 1 or len(X.shape) == 2, "Instance must have 1 or 2 dimensions!"
# single instance
if len(X.shape) == 1:
data = X.reshape((1, X.shape[0]))
if self.keep_index:
data = convert_to_instance_with_index(data, column_name, index_name, index_value)
explanation = self.explain(data, **kwargs)
# vector-output
s = explanation.shape
if len(s) == 2:
outs = [np.zeros(s[0]) for j in range(s[1])]
for j in range(s[1]):
outs[j] = explanation[:, j]
return outs
# single-output
else:
out = np.zeros(s[0])
out[:] = explanation
return out
# explain the whole dataset
elif len(X.shape) == 2:
if self.generator == "Forest":
# We check if Forest is set to fill data (generate around point)
self.fill_data = kwargs.get("fill_data", False)
if self.fill_data:
# Beginning of the distribution set for current instance
self.forest_index = 0
self.distribution_size = kwargs.get("distribution_size", 100)
# Location of the data on the hard drive
path = kwargs.get("data_location", None)
if path == None:
raise ValueError("Given location of the generated data is not valid.")
self.forest_data = pd.read_csv(path)
if self.generator_specs["experiment"] != "CC":
self.forest_data = | pd.get_dummies(self.forest_data) | pandas.get_dummies |
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
from datetime import datetime
from io import StringIO
import os
import pytest
from pandas import DataFrame, Index, MultiIndex
import pandas._testing as tm
@pytest.mark.parametrize(
"data,kwargs,expected",
[
(
"""foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
""",
{"index_col": 0, "names": ["index", "A", "B", "C", "D"]},
DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
index=Index(["foo", "bar", "baz", "qux", "foo2", "bar2"], name="index"),
columns=["A", "B", "C", "D"],
),
),
(
"""foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
""",
{"index_col": [0, 1], "names": ["index1", "index2", "A", "B", "C", "D"]},
DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
index=MultiIndex.from_tuples(
[
("foo", "one"),
("foo", "two"),
("foo", "three"),
("bar", "one"),
("bar", "two"),
],
names=["index1", "index2"],
),
columns=["A", "B", "C", "D"],
),
),
],
)
def test_pass_names_with_index(all_parsers, data, kwargs, expected):
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index_col", [[0, 1], [1, 0]])
def test_multi_index_no_level_names(all_parsers, index_col):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
headless_data = "\n".join(data.split("\n")[1:])
names = ["A", "B", "C", "D"]
parser = all_parsers
result = parser.read_csv(
StringIO(headless_data), index_col=index_col, header=None, names=names
)
expected = parser.read_csv(StringIO(data), index_col=index_col)
# No index names in headless data.
expected.index.names = [None] * 2
tm.assert_frame_equal(result, expected)
def test_multi_index_no_level_names_implicit(all_parsers):
parser = all_parsers
data = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=MultiIndex.from_tuples(
[
("foo", "one"),
("foo", "two"),
("foo", "three"),
("bar", "one"),
("bar", "two"),
]
),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,expected,header",
[
("a,b", DataFrame(columns=["a", "b"]), [0]),
(
"a,b\nc,d",
DataFrame(columns=MultiIndex.from_tuples([("a", "c"), ("b", "d")])),
[0, 1],
),
],
)
@pytest.mark.parametrize("round_trip", [True, False])
def test_multi_index_blank_df(all_parsers, data, expected, header, round_trip):
# see gh-14545
parser = all_parsers
data = expected.to_csv(index=False) if round_trip else data
result = parser.read_csv(StringIO(data), header=header)
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(all_parsers):
parser = all_parsers
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
result = parser.read_csv(StringIO(data), sep=" ")
expected = DataFrame(
[[0, 1, 0, "a", "b"], [1, 2, 0, "c", "d"], [2, 2, 2, "e", "f"]],
columns=["Unnamed: 0", "id", "c0", "c1", "c2"],
)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_explicit(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=0)
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=Index(["foo", "bar", "baz", "qux", "foo", "bar"], name="index"),
)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(all_parsers):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=Index(["foo", "bar", "baz", "qux", "foo", "bar"]),
)
tm.assert_frame_equal(result, expected)
def test_read_csv_no_index_name(all_parsers, csv_dir_path):
parser = all_parsers
csv2 = os.path.join(csv_dir_path, "test2.csv")
result = parser.read_csv(csv2, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738, "foo"],
[1.047916, -0.041232, -0.16181208307, 0.212549, "bar"],
[0.498581, 0.731168, -0.537677223318, 1.346270, "baz"],
[1.120202, 1.567621, 0.00364077397681, 0.675253, "qux"],
[-0.487094, 0.571455, -1.6116394093, 0.103469, "foo2"],
],
columns=["A", "B", "C", "D", "E"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
]
),
)
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
# -*- coding: utf-8 -*-
"""
Copyright 2018 Infosys Ltd.
Use of this source code is governed by MIT license that can be found in the LICENSE file or at
https://opensource.org/licenses/MIT.
@author: zineb , Mohan
# -*- coding: utf-8 -*-
"""
#%%
import xml.dom.minidom
import pandas as pd
import requests
import datetime as DT
from dateutil.parser import parse
import win32com.client as win32
import newspaper
from newspaper import Article
import nltk
nltk.download('all')
from TwitterSearch import TwitterSearchOrder
from TwitterSearch import TwitterUserOrder
from TwitterSearch import TwitterSearchException
from TwitterSearch import TwitterSearch
from bs4 import BeautifulSoup as bs
import urllib3
import xmltodict
import traceback2 as traceback
import re
import warnings
import contextlib
from urllib3.exceptions import InsecureRequestWarning
import Algo_4
#%%
old_merge_environment_settings = requests.Session.merge_environment_settings
@contextlib.contextmanager
def no_ssl_verification():
opened_adapters = set()
def merge_environment_settings(self, url, proxies, stream, verify, cert):
# Verification happens only once per connection so we need to close
# all the opened adapters once we're done. Otherwise, the effects of
# verify=False persist beyond the end of this context manager.
opened_adapters.add(self.get_adapter(url))
settings = old_merge_environment_settings(self, url, proxies, stream, verify, cert)
settings['verify'] = False
return settings
requests.Session.merge_environment_settings = merge_environment_settings
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore', InsecureRequestWarning)
yield
finally:
requests.Session.merge_environment_settings = old_merge_environment_settings
for adapter in opened_adapters:
try:
adapter.close()
except:
pass
keywords=[]
companies_names=[]
days_count=[]
emails=[]
persons_names=[]
connections=[]
companies_ids=[]
relevantsubject=[]
twitter_list=[]
main_list=[]
log_date=[]
def main():
#COMPANIES.XML
doc_comp = xml.dom.minidom.parse("companies.xml");
# print(doc.nodeName)
# print(doc.firstChild.tagName)
companies=doc_comp.getElementsByTagName("company")
for company in companies:
#print(company.getElementsByTagName("name"))
company_name=company.getElementsByTagName("c_name")[0].childNodes[0]
companies_names.append(company_name.nodeValue)
keyword=company.getElementsByTagName("keyword")
x=[]
for word in keyword:
x.append(word.childNodes[0].nodeValue)
keywords.append(x)
company_id=company.getElementsByTagName("c_id")[0].childNodes[0]
companies_ids.append(company_id.nodeValue)
twitter=company.getElementsByTagName("twitter_name")[0].childNodes[0].nodeValue
youtube=company.getElementsByTagName("youtube")[0].childNodes[0].nodeValue
hashtag=company.getElementsByTagName("hashtag")
z=[]
for word in hashtag:
z.append(word.childNodes[0].nodeValue)
twitter_list.append([twitter,z])
main_list.append([company_name.nodeValue,x,twitter,z,youtube])
#NEW DATE
doc_log = xml.dom.minidom.parse("log.xml");
log_date.append(doc_log.getElementsByTagName('day')[0].childNodes[0].nodeValue)
#PEOPLE.XML
doc = xml.dom.minidom.parse("people_v2.xml");
#print(doc.nodeName)
#print(doc.firstChild.tagName)
person=doc.getElementsByTagName("person")
for info in person:
# print(company.getElementsByTagName("name"))
person_name=info.getElementsByTagName("p_name")[0].childNodes[0]
#print(person_name)
persons_names.append(person_name.nodeValue)
email=info.getElementsByTagName("email")[0].childNodes[0]
emails.append(email.nodeValue)
grouped_company=info.getElementsByTagName("group")
group=[]
for g in grouped_company:
group_name=g.getElementsByTagName("g_name")[0].childNodes[0]
#group.append(group_name.nodeValue)
comp_name=g.getElementsByTagName("comp_id")
comp=[]
for c in range(len(comp_name)):
comp.append(comp_name[c].childNodes[0].nodeValue)
group.append([group_name.nodeValue,comp])
#connections.append(group)
single_companies=info.getElementsByTagName("single")[0]
cs_name=single_companies.getElementsByTagName("comp_id")
single_comp=[]
for s in range(len(cs_name)):
single_name=cs_name[s].childNodes[0].nodeValue
single_comp.append(single_name)
group.append(single_comp)
connections.append(group)
#Keywords.XML
doc_words = xml.dom.minidom.parse("keywords_list.xml");
#print(doc_date.nodeName)
for i in range(len(doc_words.getElementsByTagName('word'))):
word=doc_words.getElementsByTagName('word')[i]
l=word.childNodes[0].nodeValue
relevantsubject.append(l)
if __name__ == "__main__":
main();
#%%
urls=[]
current_companies=[]
datasets={}
API_KEY = ''
def content():
today = DT.date.today()
# days_ago = today - DT.timedelta(days=int(days_count[0]))
todayf = today.strftime("%Y-%m-%d")
# days_agof = days_ago.strftime("%Y-%m-%d")
#URLS
url = 'https://newsapi.org/v2/everything?q='
url_p2='&from='+log_date[0]+'&to='+todayf+'+&sortBy=publishedAt&language=en&apiKey='+ API_KEY
for company in range(len(keywords)):
# print(company)
# print(len(company))
if len(keywords[company]) == 0 :
print('no keywords given')
if len(keywords[company]) > 1 :
new_url = url + keywords[company][0]
for i in range(1,len(keywords[company])):
new_url = new_url + "%20AND%20"+ keywords[company][i]
final_url = new_url + url_p2
else:
final_url= url + keywords[company][0] + url_p2
# print(url)
urls.append(final_url)
# Build df with article info + create excel sheet
count = 0
# current_companies=[]
# datasets={}
for url in urls:
JSONContent = requests.get(url).json()
#content = json.dumps(JSONContent, indent = 4, sort_keys=True)
article_list = []
for i in range(len(JSONContent['articles'])):
article_list.append([JSONContent['articles'][i]['source']['name'],
JSONContent['articles'][i]['title'],
JSONContent['articles'][i]['publishedAt'],
JSONContent['articles'][i]['url']
])
#print(article_list)
if article_list != []:
datasets[companies_names[count]]= pd.DataFrame(article_list)
datasets[companies_names[count]].columns = ['Source/User','Title/Tweet','Date','Link']
datasets[companies_names[count]]['Date']=datasets[companies_names[count]]['Date'].str.replace('T',' ')
datasets[companies_names[count]]['Date']=datasets[companies_names[count]]['Date'].str.replace('Z','')
datasets[companies_names[count]]['Date']=datasets[companies_names[count]]['Date'].str.split(expand=True)
for i in range(len(datasets[companies_names[count]]['Date'])):
datasets[companies_names[count]]['Date'][i]=parse(datasets[companies_names[count]]['Date'][i])
datasets[companies_names[count]]['Date'][i]=datasets[companies_names[count]]['Date'][i].date()
#datasets[companies_names[count]]['Date'][i]=datasets[companies_names[count]]['Date'][i].str.split(expand=True)
#ds = '2012-03-01T10:00:00Z' # or any date sting of differing formats.
#date = parser.parse(ds)
#datasets[companies_names[count]]['Date']=pd.to_datetime(datasets[companies_names[count]]['Date'])
#print(datasets[companies_names[count]])
current_companies.append(companies_names[count])
count=count+1
else:
None
count=count+1
content()
duplicate_df=[]
def duplicate_check():
for article in datasets:
d=datasets[article][datasets[article].duplicated(['Title/Tweet'],keep='first')==True]
print(d)
if d.empty == False:
duplicate_df.append(d)
else:
None
#duplicate_article.append(d)
#duplicate_article = duplicate_article.concat([duplicate_article,d], axis=0)
#print(d)
duplicate_check()
def duplicate_drop():
for article in datasets:
datasets[article]=datasets[article].drop_duplicates(['Title/Tweet'],keep='first')
datasets[article]=datasets[article].reset_index()
datasets[article]=datasets[article].drop(['index'], axis=1)
duplicate_drop()
#%%
def Scoring():
for a in datasets:
try:
datasets[a].insert(0,'Category','Article')
datasets[a].insert(1,'Company',str(a))
datasets[a].insert(3,'Keywords','none')
datasets[a].insert(4,'Subjects/Views','none')
for i in range(len(datasets[a]['Link'])):
r=[]
article = Article(datasets[a]['Link'][i])
article.download()
article.html
article.parse()
txt=article.text.encode('ascii','ignore').decode('ascii')
#f=requests.get(datasets[article]['Link'][i])
#txt=f.text.encode('ascii','ignore').decode('ascii')
txt=txt.lower()
#total_word= wordcounter(txt).get_word_count()
for word in relevantsubject:
result=txt.count(word)
if result != 0:
r.append(word +'('+ str(txt.count(word)) +')')
else:
None
# relevanceLink.append(r)
r=', '.join(word for word in r)
if r != []:
datasets[a]['Subjects/Views'][i]=str(r + ' (totalWords:'+ str(len(txt.split()))+')')
else:
datasets[a]['Subjects/Views'][i]=str('None')
article.nlp()
k=', '.join(keyword for keyword in article.keywords)
datasets[a]['Keywords'][i]=str(k)
except newspaper.article.ArticleException:
None
#k= []
#for keyword in article.keywords:
# k.append[keyword]
# k=', '.join(keyword for keyword in k)
# datasets[a]['Keywords'][i]=str(k)
Scoring()
# datasets[article]
#%% Formatting
companies_dic=dict(zip(companies_names, companies_ids))
people_comp_dic=dict(zip(persons_names, connections))
people_email_dic=dict(zip(persons_names, emails))
Subject = pd.DataFrame(relevantsubject)
Subject.columns=['Subject Interest']
Companies = pd.DataFrame(companies_names)
Companies.columns=['Companies Interest']
CS = pd.concat([Subject, Companies], axis=1)
CS.fillna('',inplace=True)
MainDF=pd.DataFrame(main_list)
MainDF.columns=['company','keywords','twitter','hashtag','youtube']
#import re
def Find(string):
url = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+] |[!*\(\), ]|(?:%[0-9a-fA-F][0-9a-fA-F])|(?:%[0-9a-fA-F]|[$-_@.&+]|[!*\(\), ]|[0-9a-fA-F]))+', string)
return url
#%%
tweets_datasets={}
tw_current_companies=[]
today = DT.date.today()
#days_ago = today - DT.timedelta(days=int(days_count[0]))
new_date = parse(log_date[0]).date()
def Tweets():
try:
max_feeds=10
tso = TwitterSearchOrder() # create a TwitterSearchOrder object
tso.set_language('en')
tso.set_include_entities(False) # and don't give us all those entity information
tso.set_until(new_date)
tso.arguments.update({'tweet_mode':'extended'})
tso.arguments.update({'truncated': 'False' })
ts = TwitterSearch(
consumer_key = '',
consumer_secret = '',
access_token = '',
access_token_secret = '',
proxy='http://proxy_address'
)
for c in range(len(MainDF)):
count=0
#kw=[MainDF['twitter'][c]]
#for h in MainDF['hashtag'][c]:
# kw.append(h)
tso.set_keywords(MainDF['hashtag'][c])
tweets_list=[]
tuo = TwitterUserOrder(MainDF['twitter'][c])
# tuo.set_language('en')
tuo.set_include_entities(False) # and don't give us all those entity information
# tuo.set_until(days_ago)
# tuo.set_count(15)
tuo.arguments.update({'tweet_mode':'extended'})
tuo.arguments.update({'truncated': 'False' })
#for tweet in ts.search_tweets_iterable(tso):
# print(tweet)
# tweets_list.append([tweet['user']['screen_name'],tweet['full_text']])
for tweet in ts.search_tweets_iterable(tso):
if 'retweeted_status' in tweet:
None
#tweets_list.append([tweet['user']['screen_name'],tweet['retweeted_status']['full_text'],'Retweet of ' + tweet['retweeted_status']['user']['screen_name']])
else:
links=Find(tweet['full_text'])
links=', '.join(link for link in links)
#print(tweet)
tweets_list.append([MainDF['company'][c],tweet['user']['screen_name'],tweet['full_text'],tweet['created_at'],links])
for tweet in ts.search_tweets_iterable(tuo):
if tweet['lang'] != 'en':
#print(tweet)
None
else:
# print(tweet)
links=Find(tweet['full_text'])
links=', '.join(link for link in links)
tweets_list.append([MainDF['company'][c],tweet['user']['screen_name'],tweet['full_text'],tweet['created_at'],links])
count=count+1
if count == max_feeds:
break
if tweets_list != []:
tweets_datasets[MainDF['company'][c]]= pd.DataFrame(tweets_list)
tweets_datasets[MainDF['company'][c]].columns = ['Company','Source/User','Title/Tweet','Date','Link']
tweets_datasets[MainDF['company'][c]].insert(0,'Category','Twitter')
for i in range(len(tweets_datasets[MainDF['company'][c]]['Date'])):
tweets_datasets[MainDF['company'][c]]['Date'][i]=parse(tweets_datasets[MainDF['company'][c]]['Date'][i])
tweets_datasets[MainDF['company'][c]]['Date'][i]=tweets_datasets[MainDF['company'][c]]['Date'][i].date()
#print(datasets[companies_names[count]])
tw_current_companies.append(MainDF['company'][c])
else:
None
#tweets_list.append()
#print( '@%s tweeted: %s' % ( tweet['user']['screen_name'], tweet['text'] ) )
except TwitterSearchException as e: # take care of all those ugly errors if there are some
print(e)
with no_ssl_verification():
Tweets()
#%% Filters only for todays
for comp in tweets_datasets:
tweets_datasets[comp]=tweets_datasets[comp].loc[tweets_datasets[comp]['Date'] >= new_date]
for comp in list(tweets_datasets.keys()):
if tweets_datasets[comp].empty == True:
del tweets_datasets[comp]
#re-indexing
for comp in tweets_datasets:
tweets_datasets[comp]=tweets_datasets[comp].reset_index()
tweets_datasets[comp]=tweets_datasets[comp].drop(['index'], axis=1)
#tweets_datasets = tweets_datasets.loc[tweets_datasets[comp].empty == False]
#%%
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
Double_df=[]
for comp in tweets_datasets:
for i in range(len(tweets_datasets[comp])):
doubles=[]
#doubles.append(comp)
X =tweets_datasets[comp]['Title/Tweet'][i]
X_list = word_tokenize(X)
sw = stopwords.words('english')
X_set = {w for w in X_list if not w in sw}
for n in range(len(tweets_datasets[comp])):
Y =tweets_datasets[comp]['Title/Tweet'][n]
# tokenization
Y_list = word_tokenize(Y)
# sw contains the list of stopwords
# sw = stopwords.words('english')
l1 =[];l2 =[]
# remove stop words from string
#X_set = {w for w in X_list if not w in sw}
Y_set = {w for w in Y_list if not w in sw}
# form a set containing keywords of both strings
rvector = X_set.union(Y_set)
for w in rvector:
if w in X_set: l1.append(1) # create a vector
else: l1.append(0)
if w in Y_set: l2.append(1)
else: l2.append(0)
c = 0
# cosine formula
for i in range(len(rvector)):
c+= l1[i]*l2[i]
cosine = c / float((sum(l1)*sum(l2))**0.5)
print(tweets_datasets[comp]['Title/Tweet'][n])
print("similarity: ", cosine)
if (Y == X)== True:
#None
print('Same')
else:
if 0.80 <= cosine <= 0.99 :
print('Yes!')
doubles.append(tweets_datasets[comp].iloc[[n]])
#d=tweets_datasets[comp][tweets_datasets[comp]['Title/Tweet'][n]]
#doubles.append(d)
else:
None
if doubles != []:
d=pd.concat(doubles)
d=d.reset_index()
d=d.drop(['index'],axis=1)
Double_df.append(d)
else:
None
def drop_similar():
for comp in tweets_datasets:
for i in range(len(Double_df)):
for n in range(len(Double_df[i])):
for r in range(len(tweets_datasets[comp].copy())):
if Double_df[i]['Title/Tweet'][n] != tweets_datasets[comp]['Title/Tweet'][r]:
None
else:
tweets_datasets[comp]=tweets_datasets[comp].drop(r)
tweets_datasets[comp]=tweets_datasets[comp].reset_index()
tweets_datasets[comp]=tweets_datasets[comp].drop(['index'], axis=1)
drop_similar()
#%%
tw_duplicate_df=[]
def tw_duplicate_check():
try:
for article in tweets_datasets:
d=tweets_datasets[article][tweets_datasets[article].duplicated(subset=['Title/Tweet'],keep='first')==True]
print(d)
if d.empty == False:
tw_duplicate_df.append(d)
else:
None
except:
None
tw_duplicate_check()
def tw_duplicate_drop():
if tw_duplicate_df != []:
for article in tweets_datasets:
tweets_datasets[article]=tweets_datasets[article].drop_duplicates(subset=['Title/Tweet'],keep='first', inplace=True)
tweets_datasets[article]=tweets_datasets[article].reset_index()
tweets_datasets[article]=tweets_datasets[article].drop(['index'], axis=1)
else:
None
tw_duplicate_drop()
#%%
def Scoring_Tweet():
for a in tweets_datasets:
#datasets[a].insert(0,'Company',str(a))
tweets_datasets[a].insert(3,'Subjects/Views','none')
for i in range(len(tweets_datasets[a]['Title/Tweet'])):
r=[]
txt=tweets_datasets[a]['Title/Tweet'][i].encode('ascii','ignore').decode('ascii')
#f=requests.get(datasets[article]['Link'][i])
#txt=f.text.encode('ascii','ignore').decode('ascii')
txt=txt.lower()
#total_word= wordcounter(txt).get_word_count()
for word in relevantsubject:
result=txt.count(word)
if result != 0:
r.append(word +'('+ str(txt.count(word)) +')')
else:
None
# relevanceLink.append(r)
r=', '.join(word for word in r)
if r != []:
tweets_datasets[a]['Subjects/Views'][i]=str(r + ' (totalWords:'+ str(len(txt.split()))+')')
else:
tweets_datasets[a]['Subjects/Views'][i]=str('None')
Scoring_Tweet()
#%%
general_df = {}
general_df = tweets_datasets.copy()
for n in datasets:
if n in general_df:
general_df[n]=pd.concat([datasets[n],general_df[n]], axis=0, sort=False)
else:
general_df.update({str(n):datasets[n]})
for comp in general_df:
general_df[comp]=general_df[comp].reset_index()
general_df[comp]=general_df[comp].drop(['index'], axis=1)
#%%
Youtube_dataset ={}
base = "https://www.youtube.com/user/{}/videos"
from textblob import TextBlob
#qstring = "snowflakecomputing"
for i in range(len(MainDF)):
qstring= MainDF['youtube'][i]
with no_ssl_verification():
r = requests.get(base.format(qstring) )
page = r.text
soup=bs(page,'html.parser')
vids= soup.findAll('a',attrs={'class':'yt-uix-tile-link'})
duration=soup.findAll('span',attrs={'class':'accessible description'})
date=soup.findAll('ul',attrs={'class':'yt-lockup-meta-info'})
videolist=[]
for v in vids:
tmp = 'https://www.youtube.com' + v['href']
videolist.append([v['title'],tmp])
infos=[]
for d in date:
x=d.findAll('li')
infos.append([x[0].text,x[1].text])
youtubeDF= | pd.DataFrame(videolist) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 20 14:08:35 2019
@author: Team BTC - <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
"""
#sorry the code isnt very efficient. because of time constraints and the number of people working on the project, we couldnt do all the automatizations we would have liked to do.
#Code in block comment should not be run as it will make change to the cloud database
# %% Importing libraries
# You may need to install dnspython in order to work with cloud server
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
import json
import pandas as pd
import numpy as np
from tqdm import tqdm
from datetime import datetime as dt
import os
import time
import re
import copy
from textblob import TextBlob
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from datetime import timedelta
from pymongo import MongoClient
import statsmodels.formula.api as smf
import statsmodels.api as sm
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.graphics.tsaplots import plot_acf
from statsmodels.graphics.tsaplots import plot_pacf
from statsmodels.tsa.stattools import adfuller
import matplotlib.pyplot as plt
from statsmodels.tsa.api import VAR
#os.chdir('H:/Documents/Alternance/Project/')
# %% Function to scrap data from Stocktwit and add to the cloud server
# The function have 2 inputs:
# - Symbol of the asset in string
# - Rate limit: number of requests per execution, in integer
def get_stwits_data(symbol,rate_limit):
client = MongoClient('mongodb+srv://Group_fintech:[email protected]/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
exist=0
for q in db['{}'.format(symbol)].aggregate([
{ "$group": {
"_id": None,
"min": { "$min": "$ID" }
}}
]):
exist=1
min_prev_id=q['min']
http = urllib3.PoolManager()
mid=[]
duplicates=0
for j in tqdm(range(rate_limit)):
if exist==0:
url = "https://api.stocktwits.com/api/2/streams/symbol/{}.json".format(symbol)
elif exist!=0 and len(mid)==0:
url = "https://api.stocktwits.com/api/2/streams/symbol/{}.json?max={}".format(symbol,min_prev_id)
else:
min_ID=min(mid)
url = "https://api.stocktwits.com/api/2/streams/symbol/{}.json?max={}".format(symbol,min_ID)
r = http.request('GET', url)
try:
data = json.loads(r.data)
except:
print('Decode error, retry again')
continue
if duplicates==1:
print('\nThere are duplicates in the result. Other people are maybe running. \nPlease try again later.')
break
if data["response"]["status"] != 200:
print("\nYour request was denied, retry in 1 hour")
time.sleep(3600)
continue
# insert_element=[]
# break
for element in data["messages"]:
mid.append(element["id"])
symbol_list=[]
for s in element['symbols']:
symbol_list.append(s['symbol'])
try:
insert_element = {"ID": element["id"], "TimeStamp": element["created_at"], "User": element["user"]["username"], "Content": element["body"],"Sentiment": (element["entities"]["sentiment"]["basic"]=="Bullish")*2-1,'Symbols':symbol_list}
except:
insert_element = {"ID": element["id"], "TimeStamp": element["created_at"], "User": element["user"]["username"], "Content": element["body"],"Sentiment": 0,'Symbols':symbol_list}
try:
result = db['{}'.format(symbol)].insert_one(insert_element)
except:
duplicates=1
break
return insert_element
# %% Execution of the function
symbol='BTC.X'
rate_limit=2000
last_ele=get_stwits_data(symbol,rate_limit)
# %% #Creating custom lexicon
#%% Finding the time interval of the database
client = MongoClient('mongodb+srv://Group_fintech:<EMAIL>/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
#Getting the minimum id
for q in db['BTC.X'].aggregate([
{ "$group": {
"_id": None,
"min": { "$min": "$ID" }
}}
]):
minID=q['min']
#Getting the timestamp from the min ID
for post in db['BTC.X'].find({'ID':minID}):
start_time=post['TimeStamp']
#Getting the max id
for q in db['BTC.X'].aggregate([
{ "$group": {
"_id": None,
"max": { "$max": "$ID" }
}}
]):
maxID=q['max']
#Getting the timestamp from the max ID
for post in db['BTC.X'].find({'ID':maxID}):
end_time=post['TimeStamp']
start_time=dt.strptime(start_time,'%Y-%m-%dT%H:%M:%SZ')
end_time=dt.strptime(end_time,'%Y-%m-%dT%H:%M:%SZ')
period=np.arange(dt(start_time.year,start_time.month,start_time.day),dt(end_time.year,end_time.month,end_time.day),timedelta(days=1))
#%% Creating dictionary
#Creating function to find words in positive and negative function
def create_positive_dictionary_by_day(day):
dictionary=pd.DataFrame(columns=['Word','Frequency'])
client = MongoClient('mongodb+srv://Group_fintech:<EMAIL>/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
sentimental=1
for documents in db['BTC.X'].find({'Sentiment':sentimental,"TimeStamp":{"$regex": u"{}-{:02d}-{:02d}".format(day.astype(object).year,day.astype(object).month,day.astype(object).day)}}):
word_list=re.findall(r"[\w']+|[.,!?;$]", documents['Content'])
word_list = [porter.stem(t) for t in word_list]
for word in word_list:
if word in dictionary['Word'].tolist():
frq=copy.copy(dictionary.iloc[dictionary.index[dictionary['Word']==word].tolist()[0]][1])+1
dictionary.at[dictionary.index[dictionary['Word']==word].tolist()[0],'Frequency']=frq
else:
dictionary=dictionary.append({'Word': word ,'Frequency':1}, ignore_index=True)
return dictionary
def create_negative_dictionary_by_day(day):
dictionary=pd.DataFrame(columns=['Word','Frequency'])
client = MongoClient('mongodb+srv://Group_fintech:<EMAIL>/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
sentimental=-1
for documents in db['BTC.X'].find({'Sentiment':sentimental,"TimeStamp":{"$regex": u"{}-{:02d}-{:02d}".format(day.astype(object).year,day.astype(object).month,day.astype(object).day)}}):
word_list=re.findall(r"[\w']+|[.,!?;$]", documents['Content'])
word_list = [porter.stem(t) for t in word_list]
for word in word_list:
if word in dictionary['Word'].tolist():
frq=copy.copy(dictionary.iloc[dictionary.index[dictionary['Word']==word].tolist()[0]][1])+1
dictionary.at[dictionary.index[dictionary['Word']==word].tolist()[0],'Frequency']=frq
else:
dictionary=dictionary.append({'Word': word ,'Frequency':1}, ignore_index=True)
return dictionary
from multiprocessing import Pool
pool = Pool()
#creating positive dictionary
df=list(tqdm(pool.imap(create_positive_dictionary_by_day, period), total=len(period)))
positive_dictionary=df[0].set_index('Word')
for i in tqdm(range(1,len(df))):
positive_dictionary=positive_dictionary.add(df[i].set_index('Word'), fill_value=0)
#creating negative dictionary
df=list(tqdm(pool.imap(create_negative_dictionary_by_day, period), total=len(period)))
negative_dictionary=df[0].set_index('Word')
for i in tqdm(range(1,len(df))):
negative_dictionary=negative_dictionary.add(df[i].set_index('Word'), fill_value=0)
negative_dictionary=negative_dictionary.sort_values('Frequency',ascending=False)
positive_dictionary=positive_dictionary.sort_values('Frequency',ascending=False)
positive_dictionary.columns=['Positive Freq']
negative_dictionary.columns=['Negative Freq']
positive_dictionary=positive_dictionary/db['BTC.X'].count_documents({'Sentiment':1})
negative_dictionary=negative_dictionary/db['BTC.X'].count_documents({'Sentiment':-1})
#Combining both dictionary
final_dict=positive_dictionary.add(negative_dictionary, fill_value=0).sort_values('Positive Freq',ascending=False)
final_dict['Pos over Neg']=final_dict['Positive Freq']/final_dict['Negative Freq']
#Removing stopwords from the dictionary
nltk.download('stopwords')
stop_words = set(stopwords.words('english'))
final_dict=final_dict.reset_index()
for i in final_dict['Word']:
if i in stop_words:
final_dict=final_dict[final_dict['Word']!=i]
#Removing words below the threshold
final_dic=final_dict.fillna(value=0)
final_dict=final_dict[(final_dict['Negative Freq']>0.0005) | (final_dict['Positive Freq']>0.0005)]
final_dict.fillna(value=0).sort_values('Pos over Neg',ascending=False).to_csv('Simple_Dictionary2.csv')
#%% Creating positive and negative word list from the lexicon
os.chdir('H:/Documents/Alternance/Project/')
lexicon=pd.read_csv('Simple_Dictionary2.csv')
lexicon=lexicon[['Word','Classification']]
neg_list=list(lexicon[lexicon['Classification']==-1]['Word'])
pos_list=list(lexicon[lexicon['Classification']==1]['Word'])
# Update lexicon result to the database
import nltk
porter = nltk.PorterStemmer()
import re
import copy
client = MongoClient('mongodb+srv://Group_fintech:<EMAIL>/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
for i in range(32):
for documents in tqdm(db['BTC.X'].find({'Custom_Lexicon_Sentiment':{ "$exists" : False }},limit=10000)):
if documents['Sentiment']==0:
score=0
word_list=re.findall(r"[\w']+|[.,!?;$]", documents['Content'])
word_list = [porter.stem(t) for t in word_list]
for word in word_list:
if word in neg_list:
score+=-1
if word in pos_list:
score+=1
if score >0:
senti=1
elif score <0:
senti=-1
else:
senti=0
db['BTC.X'].update_one({'_id':documents['_id']},{'$set':{'Custom_Lexicon_Sentiment':senti}})
else:
db['BTC.X'].update_one({'_id':documents['_id']},{'$set':{'Custom_Lexicon_Sentiment':documents['Sentiment']}})
#%% Creating positive and negative word list from the teacher lexicon
os.chdir('H:/Documents/Alternance/Project/')
lexicon=pd.read_csv('l2_lexicon.csv',sep=';')
neg_list=list(lexicon[lexicon['sentiment']=='negative']['keyword'])
pos_list=list(lexicon[lexicon['sentiment']=='positive']['keyword'])
# Update lexicon result to the database
pattern = r'''(?x) # set flag to allow verbose regexps
(?:[A-Z]\.)+ # abbreviations, e.g. U.S.A.
| \w+(?:-\w+)* # words with optional internal hyphens
| \$?\w+(?:\.\w+)?%? # tickers
| \@?\w+(?:\.\w+)?%? # users
| \.\.\. # ellipsis
| [][.,;"'?!():_`-] # these are separate tokens; includes ], [
'''
client = MongoClient('mongodb+srv://Group_fintech:<EMAIL>/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
cursor=db['BTC.X'].find({'Prof_Lexicon_Sentiment':{ "$exists" : False }},limit=10000)
for i in range(32):
for documents in tqdm(cursor):
if documents['Sentiment']==0:
score=0
word_list=nltk.regexp_tokenize(documents['Content'], pattern)
# word_list=re.findall(r"[\w']+|[.,!?;$]", documents['Content'])
# word_list = [porter.stem(t) for t in word_list]
for word in word_list:
if word in neg_list:
score+=-1
if word in pos_list:
score+=1
if score >0:
senti=1
elif score <0:
senti=-1
else:
senti=0
db['BTC.X'].update_one({'_id':documents['_id']},{'$set':{'Prof_Lexicon_Sentiment':senti}})
else:
db['BTC.X'].update_one({'_id':documents['_id']},{'$set':{'Prof_Lexicon_Sentiment':documents['Sentiment']}})
#%% Adding Vader analysis value to the database
# Connecting to the database
client = MongoClient('mongodb+srv://Group_fintech:<EMAIL>/test?retryWrites=true')
db=client['SorbonneBigData']
collection= db['BTC.X']
# Applying Vader
analyser = SentimentIntensityAnalyzer()
for i in tqdm(range(31)):
for documents in collection.find({'Vader_sentiment2':{ "$exists" : False }},limit=10000):
doc_id = documents['_id']
Vaderdoc = analyser.polarity_scores(documents['Content'])
Vaderdoc= Vaderdoc.get('compound')
if Vaderdoc> 0.33:
Sentiment_vader=1
elif Vaderdoc< -0.33:
Sentiment_vader=-1
else:
Sentiment_vader=0
print (Sentiment_vader)
#Insert Vader value to the database
db['BTC.X'].update_one({'_id':documents['_id']},{'$set':{'Vader_sentiment2':Sentiment_vader}})
db['BTC.X'].update_one({'_id':documents['_id']},{'$set':{'Vader_sentiment':Vaderdoc}})
#%% Adding Textblob analysis value to the database
# Connecting to the database
client = MongoClient('mongodb+srv://Group_fintech:<EMAIL>/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
collection= db['BTC.X']
# Applying Vader
analyser = SentimentIntensityAnalyzer()
#Vader=[] 54452
for i in tqdm(range(31)):
for documents in collection.find({'Textblob_Sentiment2':{'$exists':False}},limit=10000):
doc_id = documents['_id']
pola = TextBlob(documents['Content']).sentiment.polarity
# Vader.append(Vaderdoc)
if pola> 0.33:
Sentiment_txt=1
elif pola< -0.33:
Sentiment_txt=-1
else:
Sentiment_txt=0
db['BTC.X'].update_one({'_id':documents['_id']},{'$set':{'Textblob_Sentiment2':Sentiment_txt}})
db['BTC.X'].update_one({'_id':documents['_id']},{'$set':{'Textblob_Sentiment':pola}})
#%% Econometric testing
#%% Import BTC price time series
client = MongoClient('mongodb+srv://Group_fintech:[email protected]/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
price=[]
for documents in db['BTC.Price'].find({}):
price.append([documents['Time'],documents['Price']])
price=pd.DataFrame(price,columns=['Time','Price'])
price['Time']=pd.to_datetime(price['Time'])
price=price.set_index('Time')
price=price[price.index<=dt(2019,9,21,14)]
plt.figure()
price.plot()
price['r_btc'] = (price.Price - price.Price.shift(1)) / price.Price.shift(1)
#%% Import all sentiment time series
client = MongoClient('mongodb+srv://Group_fintech:[email protected]/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
sentimental=[]
for documents in tqdm(db['BTC'].find({})):
sentimental.append([documents['TimeStamp'],documents['Custom_Lexicon_Sentiment'],documents['Prof_Lexicon_Sentiment'],documents['Textblob_Sentiment'],documents['Textblob_Sentiment2'],documents['Vader_sentiment'],documents['Vader_sentiment2'],documents['Sentiment']])
sentimental=pd.DataFrame(sentimental,columns=['Time','Custom_Lexicon_Sentiment','Prof_Lexicon_Sentiment','Textblob_Sentiment_prob','Textblob_Sentiment_binary','Vader_sentiment_prob','Vader_sentiment_binary','Origin_sentiment'])
sentimental=sentimental.set_index('Time')
sentimental.index=pd.to_datetime(sentimental.index.tz_localize(None))
# Resample time series into hour
sentiment_1h=sentimental.resample('1H').mean()
sentiment_1h.plot()
sentiment_1h=sentiment_1h[sentiment_1h.index > dt(2019,1,1) ]
# Export the time series to database
for i in tqdm(range(len(sentiment_1h))):
insert_element = {"Time": sentiment_1h.index[i], "{}".format(sentiment_1h.columns[0]): sentiment_1h["{}".format(sentiment_1h.columns[0])][i],"{}".format(sentiment_1h.columns[1]): sentiment_1h["{}".format(sentiment_1h.columns[1])][i], "{}".format(sentiment_1h.columns[2]): sentiment_1h["{}".format(sentiment_1h.columns[2])][i], "{}".format(sentiment_1h.columns[3]): sentiment_1h["{}".format(sentiment_1h.columns[3])][i], "{}".format(sentiment_1h.columns[4]): sentiment_1h["{}".format(sentiment_1h.columns[4])][i], "{}".format(sentiment_1h.columns[5]): sentiment_1h["{}".format(sentiment_1h.columns[5])][i], "{}".format(sentiment_1h.columns[6]): sentiment_1h["{}".format(sentiment_1h.columns[6])][i]}
result = db['Time_series_Data'].insert_one(insert_element)
#
sentiment_1h=[]
for documents in tqdm(db['Time_series_Data'].find({})):
sentiment_1h.append([documents['Time'],documents['Custom_Lexicon_Sentiment'],documents['Prof_Lexicon_Sentiment'],documents['Textblob_Sentiment_prob'],documents['Textblob_Sentiment_binary'],documents['Vader_sentiment_prob'],documents['Vader_sentiment_binary'],documents['Origin_sentiment']])
sentiment_1h=pd.DataFrame(sentiment_1h,columns=['Time','Custom_Lexicon_Sentiment','Prof_Lexicon_Sentiment','Textblob_Sentiment_prob','Textblob_Sentiment_binary','Vader_sentiment_prob','Vader_sentiment_binary','Origin_sentiment'])
sentiment_1h=sentiment_1h.set_index('Time')
sentiment_1h.index=pd.to_datetime(sentiment_1h.index.tz_localize(None))
#%% Correlation Matrix
test_data=pd.concat([price,sentiment_1h],axis=1)
test_data=test_data.fillna(value=0)
corr_matrix=test_data.corr()
#==============================================================================
#%%Time series analysis for custom lexicon and professor's lexicon
#analyse each timeseries by plotting them
sentiment_1h=sentiment_1h.dropna()
sentiprof=sentiment_1h.iloc[:,1]
senticustom=sentiment_1h.iloc[:,0]
sentiprof=sentiprof.dropna()
senticustom=senticustom.dropna()
sentiprof.astype(float)
senticustom.astype(float)
plt.figure()
btweet= sentiprof.plot(title='One hour average sentiment value(sentiprof)')
plt.figure()
btweetc=senticustom.plot(title='One hour average sentiment value2(senticustom)')
#from this graph, we can find our two sentiment values fluctuates, but 'quite stable'.
sentiprof.mean()
senticustom.mean()
#sentiprof mean value is 0.3615, it is lower than senticustom mean value which is 0.44
#Through this grough,we can observe a positive sentiment of btcoin on tweet from janurary 2019.
price.astype(float)
plt.figure()
priceg= price.Price.plot(title='Price of Bitcoin since Jan 2019(one hour)')
#Through this graph, we can find price of Bitcoin has an increasing trend from Jan 2019 to July 2019)
preturn=(price.Price-price.Price.shift(1))/price.Price.shift(1)
preturn=preturn.dropna()
preturn.mean()
plt.figure()
preturn.plot(title='Price return of Bitcoin since Jan 2019(one hour)')
#From this graph of price return, we can find it has some fluctuations, but 'quite stable' for us.
#%%Stationarity test, Unitroot test
#<NAME>
adfuller(sentiprof,regression='ct')
adfuller(sentiprof,regression='nc')
#p value is small enough, at 95% confidence interval, we can say there is no unitroot in sentiprof, the series is quite stationary.
#Custom Lexicon
adfuller(senticustom,regression='ct')
adfuller(senticustom,regression='nc')
##the p-value is low enough, at 95% confidence level, we can reject the null typothesis which there is a unitroot.
adfuller(price.Price,regression='ct')
##p value is high,0.83. like what we saw in the graph, it has an obvious increasing trend since Jan 2019.
adfuller(preturn,regression='ct')
adfuller(preturn,regression='nc')
#p value is very low to reject the null hypothesis, there is no unitroot for Bitcoin price return.
#%%Set the same datatime and merge all datas togther.
dates2 = | pd.date_range('2018-12-22', '2019-09-24', freq='h') | pandas.date_range |
from __future__ import absolute_import, division, print_function
import re
import numpy as np
import pandas as pd
from sklearn.metrics import (accuracy_score, cohen_kappa_score, f1_score, precision_score, recall_score)
pd.set_option('display.max_rows', 20)
pd.set_option('precision', 4)
np.set_printoptions(precision=4)
class Mura(object):
"""`MURA <https://stanfordmlgroup.github.io/projects/mura/>`_ Dataset :
Towards Radiologist-Level Abnormality Detection in Musculoskeletal Radiographs.
"""
url = "https://cs.stanford.edu/group/mlgroup/mura-v1.0.zip"
filename = "mura-v1.0.zip"
md5_checksum = '4c36feddb7f5698c8bf291b912c438b1'
_patient_re = re.compile(r'patient(\d+)')
_study_re = re.compile(r'study(\d+)')
_image_re = re.compile(r'image(\d+)')
_study_type_re = re.compile(r'_(\w+)_patient')
def __init__(self, image_file_names, y_true, y_pred=None):
self.imgs = image_file_names
df_img = pd.Series(np.array(image_file_names), name='img')
self.y_true = y_true
df_true = pd.Series(np.array(y_true), name='y_true')
self.y_pred = y_pred
# number of unique classes
self.patient = []
self.study = []
self.study_type = []
self.image_num = []
self.encounter = []
for img in image_file_names:
self.patient.append(self._parse_patient(img))
self.study.append(self._parse_study(img))
self.image_num.append(self._parse_image(img))
self.study_type.append(self._parse_study_type(img))
self.encounter.append("{}_{}_{}".format(
self._parse_study_type(img),
self._parse_patient(img),
self._parse_study(img), ))
self.classes = np.unique(self.y_true)
df_patient = pd.Series(np.array(self.patient), name='patient')
df_study = pd.Series(np.array(self.study), name='study')
df_image_num = pd.Series(np.array(self.image_num), name='image_num')
df_study_type = pd.Series(np.array(self.study_type), name='study_type')
df_encounter = pd.Series(np.array(self.encounter), name='encounter')
self.data = pd.concat(
[
df_img,
df_encounter,
df_true,
df_patient,
df_patient,
df_study,
df_image_num,
df_study_type,
], axis=1)
if self.y_pred is not None:
self.y_pred_probability = self.y_pred.flatten()
self.y_pred = self.y_pred_probability.round().astype(int)
df_y_pred = | pd.Series(self.y_pred, name='y_pred') | pandas.Series |
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Author: <NAME>
date: 2019/11/7 14:06
contact: <EMAIL>
desc: 不需要控制速度, 但是需要伪装游览器, 不然会在第一次请求就被封 IP, 目前采用银保监会采用两套反扒方案
1. 20191114在周内运行
2. 20191115在周末运行
3. 已实现自动切换
"""
import requests
import pandas as pd
from bs4 import BeautifulSoup
from akshare.bank.cons import cbirc_headers_without_cookie_2019
def bank_page_list(page=5):
"""
想要获取多少页的内容
http://www.cbirc.gov.cn/cn/list/9103/910305/ybjfjcf/1.html
:param page: int 输入从第 1 页到 all_page 页的内容
:return: pd.DataFrame 另存为 csv 文件
"""
big_url_list = []
big_title_list = []
flag = True
cbirc_headers = cbirc_headers_without_cookie_2019.copy()
for i_page in range(1, page):
# i_page = 1
print(i_page)
main_url = "http://www.cbirc.gov.cn/cn/list/9103/910305/ybjfjcf/{}.html".format(
i_page
)
if flag:
res = requests.get(main_url, headers=cbirc_headers)
cbirc_headers.update({"Cookie": res.headers["Set-Cookie"].split(";")[0]})
res = requests.get(main_url, headers=cbirc_headers)
soup = BeautifulSoup(res.text, "lxml")
url_list = [
item.find("a")["href"]
for item in soup.find_all(attrs={"class": "zwbg-2"})
]
title_list = [
item.find("a").get_text()
for item in soup.find_all(attrs={"class": "zwbg-2"})
]
big_url_list.extend(url_list)
big_title_list.extend(title_list)
flag = 0
else:
res = requests.get(main_url, headers=cbirc_headers)
soup = BeautifulSoup(res.text, "lxml")
url_list = [
item.find("a")["href"]
for item in soup.find_all(attrs={"class": "zwbg-2"})
]
title_list = [
item.find("a").get_text()
for item in soup.find_all(attrs={"class": "zwbg-2"})
]
big_url_list.extend(url_list)
big_title_list.extend(title_list)
temp_df = pd.DataFrame([big_title_list, big_url_list]).T
return temp_df, cbirc_headers
def bank_fjcf(page=3):
"""
获取每个具体页面的表格内容
:return: pandas.DataFrame 另存为 csv 文件
"""
big_df = pd.DataFrame()
temp_df, cbirc_headers = bank_page_list(page)
for i in range(len(temp_df)):
# i = 1
print(i)
try:
res = requests.get(
"http://www.cbirc.gov.cn" + temp_df.iloc[:, 1][i], headers=cbirc_headers
)
table_list = | pd.read_html(res.text) | pandas.read_html |
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 16 09:28:30 2020.
@author: <NAME>
"""
from selenium.webdriver import Firefox
from selenium.webdriver.firefox.options import Options
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
import time
import pandas as pd
import re
import os
import datetime
from timeit import default_timer as timer
import glob
# search for all red wines between 10-40$ with a rating of 3.5 or above
# URL = 'https://www.vivino.com/explore?e=eJwNyUEKgCAQBdDb_LVC21lE3SIiJptESI1RrG6fm7d5UckihkTWIPJLg4H7aBrhOjPuvv6kxhqk8oW8k3INyZeNmyh7QaZDisNTl5XsD-oNGk4='
# search for all red wines between 10-25$ with a rating of 3.5 or above
URL = 'https://www.vivino.com/explore?e=eJwNxEEKgCAUBcDbvGVk4fItom4RET8zEdLCxOr2NYsJiW2lEXykqhHkYaNhXvYdzN-AkwpuY5HkbZYdx8Ik2Ud3zVJsEmdxcLWXwZ3HieoDC54atg=='
# number of seconds to wait before each scroll when infinite scrolling to botom
# may not get to the botom if too short
SCROLL_PAUSE_TIME = 0.8
class element_present_after_scrolling():
"""
A custom selenium expectation for scrolling until an element is present.
Thanks to MP https://medium.com/@randombites/how-to-scroll-while-waiting-for-an-element-20208f65b576
Parameters
----------
locator : tuple
Used to find the element.
Returns
-------
elements : WebElement
Once it has the particular element.
"""
def __init__(self, locator, driver):
"""Attributes."""
self.locator = locator
self.driver = driver
def __call__(self, driver):
"""Scroll by 500px increments."""
elements = driver.find_elements(*self.locator) # Finding the referenced element
if len(elements) > 0:
return elements
else:
self.driver.execute_script("window.scrollBy(0, 500);")
class wine_data():
"""Scrape wine data and reviews from Vivino."""
def __init__(self,scroll_to_bottom=False,save_path=None,timeout=20,\
no_scrape=False):
"""
Scrape data using selenium with Firefox and store as a pandas DataFrame.
Parameters
----------
scroll_to_bottom : bool, optional
If True scroll to bottom of the search page to get all the results.
The default is False.
save_path : NoneType or str, optional
If a file path is provided, save the wine and review data to csv.
The default is None.
timeout : int
Timeout in seconds for page load checks. The default is 20.
no_scrape : bool or str, optional
If not False, read in pre-scraped .csv format data instead of
scraping new data. If not False, must be folder path to both
wine_data and review_data csv files. This folder must contain only
one of each of wine_data and review_data csv files. File name
format must be wine_file*.csv and review_file*.csv.
The default is False.
Attributes
----------
number_of_results : int
Number of search results.
wine_data : DataFrame
Collected wine data.
results_data : DataFrame
Collected review data.
Returns
-------
None.
"""
# Parameters
self.timeout = timeout # timeout for page load checks
self.scroll_to_bottom = scroll_to_bottom
self.save_path = save_path
self.no_scrape = no_scrape
# if no_scrape is false, scrape Vivino
if not self.no_scrape:
opts = Options()
opts.headless = True #use a headless browser
self.driver = Firefox(options=opts)
self.driver.get(URL)
# check that page has loaded
try:
element_present = EC.presence_of_element_located((By.CLASS_NAME,\
'vintageTitle__winery--2YoIr'))
WebDriverWait(self.driver, self.timeout).until(element_present)
except TimeoutException:
print("Timed out waiting for page to load")
# get main window handle
self._main_window = self.driver.current_window_handle
# get number of results
number_of_results = self.driver.find_element_by_class_name\
('querySummary__querySummary--39WP2').text
self.number_of_results = int(re.findall('\d+',number_of_results)[0]) # extract number of results using regular expressions
print("Found {} wines.".format(self.number_of_results))
if self.scroll_to_bottom:
self._infinity_scroll()
self.wine_data, self.review_data = self._get_wine_info()
# save to .csv if a path is provided
if self.save_path:
date = str(datetime.date.today())
filename_wine = 'wine_data_' + date + '.csv'
filename_review = 'review_data_' + date + '.csv'
filepath_wine = os.path.join(self.save_path,filename_wine)
filepath_review = os.path.join(self.save_path,filename_review)
self.wine_data.to_csv(filepath_wine)
self.review_data.to_csv(filepath_review)
else: # open pre-scraped data
filepath_wine = os.path.join(self.no_scrape,'wine_data*.csv')
filepath_review = os.path.join(self.no_scrape,'review_data*.csv')
# check to make sure folder only contains on set of data files
wine_file_loc = glob.glob(filepath_wine)
review_file_loc = glob.glob(filepath_review)
if len(wine_file_loc) > 1 or len(review_file_loc) > 1:
raise Exception('More than 1 wine_file*.csv and/or review_file*.csv in folder.')
else: # open files
self.wine_data = pd.read_csv(wine_file_loc[0],index_col=0)
self.review_data = pd.read_csv(review_file_loc[0],index_col=0)
def _infinity_scroll(self,element=False):
"""
Infinite scroll to bottom of a page or element. Breaks when done.
Parameters
----------
element : WebElement, optional
WebElement to scroll to the botom of instead of the whole page. The
default is False.
Returns
-------
None.
"""
if element: # scroll the page if no element is provided
el = element
else:
el = self.driver.find_element_by_class_name('inner-page')
# Get scroll height
last_height = self.driver.execute_script\
("return arguments[0].scrollHeight", el)
while True:
# Scroll down to bottom
if element: #scroll the element
self.driver.execute_script\
('arguments[0].scrollTop = arguments[0].scrollHeight', el)
else: #scroll the window
self.driver.execute_script\
("window.scrollTo(0, arguments[0].scrollHeight);", el)
# Wait to load page
time.sleep(SCROLL_PAUSE_TIME)
# Calculate new scroll height and compare with last scroll height
new_height = self.driver.execute_script\
("return arguments[0].scrollHeight", el)
if new_height == last_height:
break #break at the bottom
last_height = new_height
def _get_wine_info(self):
"""
Iterate through tabs and scrape data.
Returns
-------
wine_data : DataFrame
Collected wine data.
results_data : DataFrame
Collected review data.
"""
# start timing the scraping process
start = timer()
print("Starting scrape...")
discover_wines = self.driver.find_elements_by_class_name\
('vintageTitle__winery--2YoIr')
global wine_dict_list # global in case of premature end of run
global review_dict_list
wine_dict_list = []
review_dict_list = []
##TEST
# discover_wines = discover_wines[0:50]
# for i, wine in enumerate(discover_wines):
for i, wine in enumerate(discover_wines):
# open wine page in new tab
attempts = 0
while attempts < 100: # in case of connection issue
try:
wine.click()
# switch to latest tab (firefox always opens a new tab next to the main tab)
self.driver.switch_to.window(self.driver.window_handles[1])
# make sure top of page is loaded
element_present = EC.presence_of_element_located\
((By.CLASS_NAME, 'inner-page'))
WebDriverWait(self.driver, self.timeout).until(element_present)
break
except TimeoutException:
attempts += 1
self.driver.close() # close the unloaded tab
self.driver.switch_to.window(self._main_window) # back to main window
print("Timed out waiting for wine tab to load")
time.sleep(10) # wait for 10 seconds
# if show more reviews button is below the loaded page, scroll until it loads
try:
element_present = element_present_after_scrolling((By.CLASS_NAME,\
'anchor__anchor--3DOSm.communityReviews__showAllReviews--1e12c.anchor__vivinoLink--29E1-'),\
self.driver)
WebDriverWait(self.driver, self.timeout).until(element_present)
except TimeoutException:
print("Timed out waiting for show more reviews button")
# get wine info
winery_name = self.driver.find_element_by_class_name('winery').text
wine_name = self.driver.find_element_by_class_name('vintage').text
wine_country = self.driver.find_element_by_class_name\
('wineLocationHeader__country--1RcW2').text
wine_rating = self.driver.find_element_by_class_name\
('vivinoRatingWide__averageValue--1zL_5').text
wine_rating_number = self.driver.find_element_by_class_name\
('vivinoRatingWide__basedOn--s6y0t').text
wine_price = float(self.driver.find_element_by_class_name\
('purchaseAvailabilityPPC__amount--2_4GT').text.split('$')[1])
wine_dict = {'WineName':wine_name,'Winery':winery_name,\
'Country':wine_country,'Rating':wine_rating,\
'NumberOfRatings':wine_rating_number,'Price':wine_price}
wine_dict_list.append(wine_dict)
# get reviews
review_link = self.driver.find_element_by_class_name\
('anchor__anchor--3DOSm.communityReviews__showAllReviews--1e12c.anchor__vivinoLink--29E1-')
review_link.click()
try: #make sure review popup has loaded
element_present = EC.presence_of_element_located\
((By.CLASS_NAME, 'allReviews__reviews--EpUem'))
WebDriverWait(self.driver, self.timeout).until(element_present)
except TimeoutException:
print("Timed out waiting for review popup to load")
review_pane = self.driver.find_element_by_class_name\
('baseModal__window--3r5PC.baseModal__themeNoPadding--T_ROG')
# scroll to the botom of the reviews
self._infinity_scroll(element=review_pane)
# get review info
discover_reviews = self.driver.find_elements_by_class_name\
('reviewCard__reviewContainer--1kMJM')
# discard last 3 since they are duplicates
discover_reviews = discover_reviews[:-3]
# print what tab we are on
for review in discover_reviews:
user_name = review.find_element_by_class_name\
('anchor__anchor--3DOSm.reviewCard__userName--2KnRl').text
rating_elem = review.find_element_by_class_name\
('rating__rating--ZZb_x')
rating = float(rating_elem.get_attribute("aria-label").split()[1])
review_dict = {'Username':user_name,'WineName':wine_name,\
'Winery':winery_name,'Rating':rating}
review_dict_list.append(review_dict)
print('Completed wine {tab_num} of {tab_total}. Scrapabale reviews: {rev_num}'\
.format(tab_num=i+1,tab_total=len(discover_wines),\
rev_num=len(discover_reviews)))
##TEST
# break
self.driver.close() # close the tab when done
self.driver.switch_to.window(self._main_window)
time.sleep(1) # pause for 1 second
wine_data = pd.DataFrame(wine_dict_list)
review_data = | pd.DataFrame(review_dict_list) | pandas.DataFrame |
import os
import sys
import math
import itertools
import warnings
import json
import time
import psutil
import socket
import shelve
import threading
import traceback
import inspect
import logging
from multiprocessing import Pool, Value, Manager, Queue
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from io import StringIO
from types import SimpleNamespace
from typing import Tuple, Dict, List, Any, Union, TypeVar, Type, Sequence
from datetime import datetime, timedelta
from dateutil import tz
from functools import partial
from dataclasses import dataclass, field
from collections import OrderedDict
import numpy as np
import pandas as pd
import h5py
import humanize
from scipy.stats import pearsonr
from sklearn.utils import shuffle, resample
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import (
precision_recall_fscore_support,
f1_score,
accuracy_score,
confusion_matrix,
confusion_matrix,
)
from sklearn.exceptions import UndefinedMetricWarning
from sklearn.dummy import DummyClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier, RadiusNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neural_network import MLPClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
import fire
from utils.libutils import swfe # pylint: disable-msg=E0611
np.set_printoptions(precision=4, linewidth=120)
pd.set_option("precision", 4)
pd.set_option("display.width", 300)
@dataclass(frozen=True)
class Results:
experiment: str
timestamp: int
class_: str
seed: int
foldoutter: int
foldinner: int
classifier: str
classifiercfg: int
classifiercfgs: int
f1binp: float
f1binn: float
f1micro: float
f1macro: float
f1weighted: float
f1samples: float
precision: float
recall: float
accuracy: float
accuracy2: float
timeinnertrain: float
timeouttertrain: float
positiveclasses: str
negativeclasses: str
features: str
nfeaturesvar: int
nfeaturestotal: int
ynegfinaltrain: int
yposfinaltrain: int
ynegfinaltest: int
yposfinaltest: int
yposfinalpred: int
ynegfinalpred: int
yfinaltrain: int
yfinaltest: int
yfinalpred: int
postrainsamples: int
negtrainsamples: int
postestsamples: int
negtestsamples: int
tp: int
tn: int
fp: int
fn: int
bestfeatureidx: int
bestvariableidx: int
featurerank: str # em ordem decrescente, tem o IDX da feature
rankfeature: str # em ordem das features, tem o RANK de cada uma
def __post_init__(self):
pass
P = TypeVar("T")
def humantime(*args, **kwargs):
"""
Return time (duration) in human readable format.
>>> humantime(seconds=3411)
56 minutes, 51 seconds
>>> humantime(seconds=800000)
9 days, 6 hours, 13 minutes, 20 seconds
"""
secs = float(timedelta(*args, **kwargs).total_seconds())
units = [("day", 86400), ("hour", 3600), ("minute", 60), ("second", 1)]
parts = []
for unit, mul in units:
if secs / mul >= 1 or mul == 1:
if mul > 1:
n = int(math.floor(secs / mul))
secs -= n * mul
else:
# n = secs if secs != int(secs) else int(secs)
n = int(secs) if secs != int(secs) else int(secs)
parts.append("%s %s%s" % (n, unit, "" if n == 1 else "s"))
return ", ".join(parts)
def get_md5(params):
import hashlib
experiment = f'nr{params.nrounds}_nf{params.nfolds}_w{params.windowsize}_s{params.stepsize}'.encode('utf-8')
return hashlib.md5(experiment).hexdigest()
def loggerthread(q):
"""
Main process thread receiver (handler) for log records.
"""
while True:
record = q.get()
if record is None:
break
logger = logging.getLogger(record.name)
logger.handle(record)
def one_hot(array, num_classes):
return np.squeeze(np.eye(num_classes)[array.reshape(-1)])
def one_hot_inverse(array):
return np.argmax(array, axis=1)
def readdir(path) -> Dict[str, List[Tuple[np.ndarray, str]]]:
"""
Read the CSV content of a directory into a list of numpy arrays.
The return type is actually a dict with the "class" as key.
"""
well_vars = [
"P-PDG",
"P-TPT",
"T-TPT",
"P-MON-CKP",
"T-JUS-CKP",
"P-JUS-CKGL",
"T-JUS-CKGL",
"QGL",
]
columns = ["timestamp"] + well_vars + ["class"]
r = []
class_ = path[-1]
with os.scandir(path) as it:
for entry in it:
if not entry.name.startswith(".") and entry.is_file():
frame = pd.read_csv(entry, sep=",", header=0, names=columns)
# str timestamp to float
frame["timestamp"] = np.array(
[
pd.to_datetime(d).to_pydatetime().timestamp()
for d in frame.loc[:, "timestamp"]
],
dtype=np.float64,
)
# cast int to float
frame["class"] = frame["class"].astype(np.float64)
# remember that scikit has problems with float64
array = frame.loc[:, columns].to_numpy()
r.append((array, entry.name))
rd = {}
rd[class_] = r
return rd
def get_logging_config():
return {
"version": 1,
"formatters": {
"detailed": {
"class": "logging.Formatter",
"format": (
"%(asctime)s %(name)-12s %(levelname)-8s %(processName)-10s "
"%(module)-12s %(funcName)-15s %(message)s"
),
}
},
"handlers": {
"console": {"class": "logging.StreamHandler", "level": "INFO",},
"file": {
"class": "logging.FileHandler",
"filename": "experiment1a.log",
"mode": "w",
"formatter": "detailed",
},
"errors": {
"class": "logging.FileHandler",
"filename": "experiment1a_errors.log",
"mode": "w",
"level": "ERROR",
"formatter": "detailed",
},
},
"root": {"level": "DEBUG", "handlers": ["console", "file", "errors"]},
}
def readdirparallel(path):
"""
Read all CSV content of a directory in parallel.
"""
njobs = psutil.cpu_count()
results = []
# with Pool(processes=njobs) as p:
with ThreadPoolExecutor(max_workers=njobs) as p:
# with ProcessPoolExecutor(max_workers=njobs) as p:
# results = p.starmap(
results = p.map(
readdir, [os.path.join(path, str(c)) for c in [0, 1, 2, 3, 4, 5, 6, 7, 8]],
)
return results
def csv2bin(*args, **kwargs) -> None:
"""
Read 3W dataset CSV files and save in a single numpy binary file.
"""
raise Exception("not implemented")
def csv2hdf(*args, **kwargs) -> None:
"""
Read 3W dataset CSV files and save in a single HDF5 file.
"""
path: str = kwargs.get("path")
useclasses = [0, 1, 2, 3, 4, 5, 6, 7, 8]
well_vars = [
"P-PDG",
"P-TPT",
"T-TPT",
"P-MON-CKP",
"T-JUS-CKP",
"P-JUS-CKGL",
"T-JUS-CKGL",
"QGL",
]
columns = ["timestamp"] + well_vars + ["class"]
print("read CSV and save HDF5 ...", end="", flush=True)
t0 = time.time()
with h5py.File("datasets.h5", "w") as f:
for c in useclasses:
grp = f.create_group(f"/{c}")
with os.scandir(os.path.join(path, str(c))) as it:
for entry in it:
if not entry.name.startswith(".") and entry.is_file() and 'WELL' in entry.name:
frame = pd.read_csv(entry, sep=",", header=0, names=columns)
# str timestamp to float
frame["timestamp"] = np.array(
[
pd.to_datetime(d).to_pydatetime().timestamp()
for d in frame.loc[:, "timestamp"]
],
dtype=np.float64,
)
# cast int to float
frame["class"] = frame["class"].astype(np.float64)
# remember that scikit has problems with float64
array = frame.loc[:, columns].to_numpy()
# entire dataset is float, incluinding timestamp & class labels
grp.create_dataset(
f"{entry.name}", data=array, dtype=np.float64
)
print(f"finished in {time.time()-t0:.1}s.")
def csv2hdfpar(*args, **kwargs) -> None:
"""
Read 3W dataset CSV files and save in a single HDF5 file.
"""
path: str = kwargs.get("path")
print("read CSV and save HDF5 ...", end="", flush=True)
t0 = time.time()
with h5py.File("datasets.h5", "w") as f:
datalist = readdirparallel(path)
for dd in datalist:
for key in dd:
grp = f.create_group(f"/{key}")
for (array, name) in dd[key]:
grp.create_dataset(f"{name}", data=array, dtype=np.float64)
print(
f"finished {humanize.naturalsize(os.stat('datasets.h5').st_size)} "
f"in {humantime(seconds=time.time()-t0)}."
)
def cleandataset(*args, **kwargs) -> None:
"""
Read the the single file (with whole dataset), remove NaN and save 1 file per class.
"""
well_vars = [
"P-PDG",
"P-TPT",
"T-TPT",
"P-MON-CKP",
"T-JUS-CKP",
"P-JUS-CKGL",
"T-JUS-CKGL",
"QGL",
]
columns = ["timestamp"] + well_vars + ["class"]
print("Reading dataset...")
with h5py.File("datasets.h5", "r") as f:
for c in range(0, 9):
print(f"Processing class {c}")
k = f"/{c}"
soma = 0
for s in f[k]:
n = f[k][s].shape[0]
soma = soma + n
data = np.zeros([soma, 10], dtype=np.float64)
i1 = 0
# manual concatenation
for s in f[k]:
i2 = i1 + f[k][s].shape[0]
data[i1:i2, :] = f[k][s][()]
i1 = i2
frame = pd.DataFrame(data=data, columns=columns)
for col in ["P-PDG", "P-TPT", "T-TPT", "P-MON-CKP", "T-JUS-CKP"]:
frame[col].fillna(method="ffill", axis=0, inplace=True)
fp = np.memmap(
f"datasets_clean_{c}.dat", dtype="float64", mode="w+", shape=frame.shape
)
fp[:, ...] = frame.to_numpy()
del fp
print("finished")
def cleandataseth5(*args, **kwargs) -> None:
"""
Read the the single file (with whole dataset), remove NaN and save 1 file per class.
"""
well_vars = [
"P-PDG",
"P-TPT",
"T-TPT",
"P-MON-CKP",
"T-JUS-CKP",
"P-JUS-CKGL",
"T-JUS-CKGL",
"QGL",
]
columns = ["timestamp"] + well_vars + ["class"]
logger = logging.getLogger(f"clean")
formatter = logging.Formatter(
"%(asctime)s %(name)-12s %(levelname)-8s %(lineno)-5d %(funcName)-10s %(module)-10s %(message)s"
)
fh = logging.FileHandler(f"experiments_clean.log")
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
logger.setLevel(logging.INFO)
usecols = [1, 2, 3, 4, 5]
good = [columns[i] for i, _ in enumerate(columns) if i in usecols]
with h5py.File("datasets.h5", "r") as f:
logger.debug("reading input file")
with h5py.File("datasets_clean.h5", "w") as fc:
logger.debug("created output file")
for c in range(0, 9):
grp = fc.create_group(f"/{c}")
logger.debug(f"Processing class {c}")
k = f"/{c}"
for s in f[k]:
if s[0] != "W":
continue
logger.debug(f"{c} {s}")
data = f[k][s][()]
frame = pd.DataFrame(data=data, columns=columns)
frame.dropna(inplace=True, how="any", subset=good, axis=0)
array = frame.to_numpy()
n = check_nan(array[:, [1, 2, 3, 4, 5]], logger)
if n > 0:
logger.info(f"{c} {s} dataset contains NaN")
grp.create_dataset(f"{s}", data=array, dtype=np.float64)
return None
def check_nan(array, logger) -> None:
"""
Check array for inf, nan of null values.
"""
logger.debug("*" * 50)
n = 0
test = array[array > np.finfo(np.float32).max]
logger.debug(f"test for numpy float32overflow {test.shape}")
n = n + test.shape[0]
test = array[~np.isfinite(array)]
logger.debug(f"test for numpy non finite {test.shape}")
n = n + test.shape[0]
test = array[np.isinf(array)]
logger.debug(f"test for numpy inf {test.shape}")
n = n + test.shape[0]
test = array[np.isnan(array)]
logger.debug(f"test for numpy NaN {test.shape}")
n = n + test.shape[0]
test = array[pd.isna(array)]
logger.debug(f"test for pandas NA {test.shape}")
n = n + test.shape[0]
test = array[pd.isnull(array)]
logger.debug(f"test for pandas isnull {test.shape}")
n = n + test.shape[0]
logger.debug("*" * 50)
return n
def get_config_combination_list(settings, default=None) -> List:
"""
Given a list of hyperparameters return all combinations of that.
"""
keys = list(settings)
r = []
for values in itertools.product(*map(settings.get, keys)):
d = dict(zip(keys, values))
if default is not None:
d.update(default)
r.append(d)
return r
def get_classifiers(clflist, n_jobs=1, default=False) -> Dict:
"""
Classifiers and combinations of hyperparameters.
"""
classifiers = OrderedDict()
classifiers["ADA"] = {
"config": get_config_combination_list(
{
"n_estimators": [5, 25, 50, 75, 100, 250, 500, 1000],
"algorithm": ["SAMME", "SAMME.R"],
},
{
"random_state": None
},
),
"default": {"random_state": None},
"model": AdaBoostClassifier,
}
classifiers["DT"] = {
"config": get_config_combination_list(
{
"criterion": ["gini", "entropy"],
"splitter": ["best", "random"],
"max_depth": [None, 5, 10, 50],
"min_samples_split": [2, 5, 10],
},
{"random_state": None},
),
"default": {"random_state": None},
"model": DecisionTreeClassifier,
}
classifiers["GBOOST"] = {
"config": get_config_combination_list(
{
"n_estimators": [50, 100, 250],
"min_samples_split": [2, 5, 10],
"max_depth": [5, 10, 50],
},
{"random_state": None},
),
"default": {"random_state": None},
"model": GradientBoostingClassifier,
}
classifiers["1NN"] = {
"config": [],
"default": {
"n_neighbors": 1,
"weights": "distance",
"algorithm": "auto",
"leaf_size": 30,
"p": 2,
"n_jobs": 1,
},
"model": KNeighborsClassifier,
}
classifiers["5NN"] = {
"config": [],
"default": {
"n_neighbors": 5,
"weights": "distance",
"algorithm": "auto",
"leaf_size": 30,
"p": 2,
"n_jobs": 1,
},
"model": KNeighborsClassifier,
}
classifiers["3NN"] = {
"config": [],
"default": {
"n_neighbors": 3,
"weights": "distance",
"algorithm": "auto",
"leaf_size": 30,
"p": 2,
"n_jobs": 1,
},
"model": KNeighborsClassifier,
}
classifiers["KNN"] = {
"config": get_config_combination_list(
{
"n_neighbors": [1, 3, 5, 7, 10, 15],
},
{"n_jobs": n_jobs}
),
"default": {"n_jobs": n_jobs},
"model": KNeighborsClassifier,
}
classifiers["RF"] = {
"config": get_config_combination_list(
{
'bootstrap': [True],
"criterion": ["gini"],
'max_features': ['auto'],
"max_depth": [None, 5, 10, 50],
"min_samples_split": [2],
'min_samples_leaf': [1],
"n_estimators": [10, 25, 50, 100, 500],
},
{"n_jobs": n_jobs, "random_state": None},
),
"default": {"random_state": None},
"model": RandomForestClassifier,
}
classifiers["SVM"] = {
"config": get_config_combination_list({
#"kernel": ["linear", "poly", "rbf", "sigmoid", "precomputed"],
"kernel": ["linear", "poly", "rbf", "sigmoid"],
#"gamma": ["scale", "auto"],
"gamma": [0.001, 0.01, 0.1, 1.0],
#"C": [1.0, 2.0],
"C": [1.0],
}),
"default": {},
"model": SVC,
}
classifiers["GNB"] = {
"config": [],
"default": {},
"model": "GaussianNB",
}
classifiers["LDA"] = {
"config": [],
"default": {},
"model": "LinearDiscriminantAnalysis",
}
classifiers["QDA"] = {
"config": [],
"default": {},
"model": "QuadraticDiscriminantAnalysis"
}
classifiers["MLP"] = {
"config": get_config_combination_list({
"hidden_layer_sizes": [128, 256, 512],
"solver": ["lbfgs", "sgd", "adam"],
"activation": ["relu"],
"max_iter": [500],
"shuffle": [True],
"momentum": [0.9],
"power_t": [0.5],
"learning_rate": ["constant"],
"batch_size": ["auto"],
"alpha": [0.0001],
}),
"default": {"random_state": None},
"model": MLPClassifier,
}
classifiers["ZERORULE"] = {
"config": get_config_combination_list(
{
"strategy": [
"constant"
],
},
{"random_state": None},
),
"default": {"strategy": "most_frequent", "random_state": None},
"model": DummyClassifier,
}
if isinstance(clflist, str):
if not clflist or clflist.lower() != "all":
clflist = clflist.split(",")
elif isinstance(clflist, tuple):
clflist = list(clflist)
if default:
for c in classifiers.keys():
"""
if c[:5] != "DUMMY":
classifiers[c]['config'] = {}
else:
del classifiers[c]
"""
classifiers[c]["config"] = {}
return classifiers
else:
ret = {}
for c in clflist:
if c in classifiers.keys():
ret[c] = classifiers[c]
return ret
def _split_data(n: int, folds: int) -> Sequence[Tuple[int, int]]:
"""
Return list of tuples with array index for each fold.
"""
raise Exception("depends on which experiment")
def _read_and_split_h5(fold: int, params: P) -> Tuple:
"""
HDF files offer at least a couple advantages:
1 - reading is faster than CSV
2 - you dont have to read the whole dataset to get its size (shape)
H5PY fancy indexing is very slow.
https://github.com/h5py/h5py/issues/413
"""
raise Exception("depends on which experiment")
def _read_and_split_bin(fold: int, params: P) -> Tuple:
"""
HDF files offer at least a couple advantages:
1 - reading is faster than CSV
2 - you dont have to read the whole dataset to get its size (shape)
Numpy needs to know 'shape' beforehand.
https://numpy.org/doc/stable/reference/generated/numpy.memmap.html
"""
raise Exception("depends on which experiment")
def train_test_binary(
classif, xtrain, ytrain, xtest, ytest
) -> Tuple[Tuple, Tuple, Tuple, Tuple]:
"""
Execute training and testing (binary) and return metrics (F1, Accuracy, CM)
"""
p = 0.0
r = 0.0
f1binp, f1binn = 0.0, 0.0
f1mic = 0.0
f1mac = 0.0
f1weigh = 0.0
f1sam = 0.0
acc = 0.0
excp = []
excptb = []
ypred = np.full([xtrain.shape[0]], 0, dtype="int")
ynpred = 0
yppred = 0
tn = 0
tp = 0
fp = 0
fn = 0
trt1 = 0.0
try:
trt0 = time.time()
classif.fit(xtrain, ytrain)
trt1 = time.time() - trt0
try:
ypred = classif.predict(xtest)
yppred = np.sum(ypred)
ynpred = ytest.shape[0] - yppred
try:
p, r, f1binp, _ = precision_recall_fscore_support(
ytest, ypred, average="binary", pos_label=1,
)
_, _, f1binn, _ = precision_recall_fscore_support(
ytest, ypred, average="binary", pos_label=0,
)
except Exception as ef1bin:
excp.append(ef1bin)
try:
f1mic = f1_score(ytest, ypred, average="micro")
except Exception as e2:
excp.append(e2)
try:
f1mac = f1_score(ytest, ypred, average="macro")
except Exception as e3:
excp.append(e3)
try:
f1weigh = f1_score(ytest, ypred, average="weighted")
except Exception as e4:
excp.append(e4)
try:
acc = accuracy_score(ytest, ypred)
except Exception as e_acc:
excp.append(e_acc)
try:
tn, fp, fn, tp = confusion_matrix(
ytest, ypred, labels=[0, 1], sample_weight=None, normalize=None,
).ravel()
except Exception as ecm:
excp.append(ecm)
except Exception as e_pred:
excp.append(e_pred)
raise e_pred
except Exception as efit:
excp.append(efit)
einfo = sys.exc_info()
excptb.append(einfo[2])
raise efit
return (
(ypred, ynpred, yppred, trt1),
(f1binp, f1binn, f1mic, f1mac, f1weigh, f1sam, p, r, acc),
(tn, fp, fn, tp),
tuple(excp),
)
def vertical_split_bin(negative, positive):
x = np.concatenate([negative, positive], axis=0).astype(np.float32)
y = np.concatenate(
[
np.zeros(negative.shape[0], dtype=np.int32),
np.ones(positive.shape[0], dtype=np.int32),
],
axis=0,
)
return x, y
def horizontal_split_file(fold, nfolds, files, seed):
count = 0
samples = []
sessions = []
for s in files:
if s[0] != "W":
continue
n = files[s].shape[0]
count += 1
samples.append(n)
sessions.append(s)
samples = np.array(samples)
nf = int(count / nfolds)
testfidx = np.reshape(np.arange(nf * nfolds), (5, -1)).T
testsize = sum(samples[testfidx[:, fold]])
test = np.zeros((testsize, 10), dtype=np.float64)
stest = [sessions[i] for i in testfidx[:, fold]]
i1 = 0
i2 = 0
for s in stest:
i2 = i1 + files[s].shape[0]
test[i1:i2, :] = files[s][()]
i1 = i2
print(f"fold {fold} ate {i2}")
nstp = 0
for s in sessions:
if s in stest:
continue
nstp += files[s].shape[0]
train = np.zeros((nstp, 10), dtype=np.float64)
i1 = 0
i2 = 0
for k, s in enumerate(sessions):
if s in stest:
continue
n = files[s].shape[0]
i2 = i1 + n
train[i1:i2, :] = files[s][()]
i1 = i2
return train, test
def horizontal_split_well(fold, nfolds, file, seed=None):
wellstest = [1, 2, 4, 5, 7]
welltest = wellstest[fold]
count = 0
wells = {}
stest = []
for s in file:
if s[0] != "W":
continue
n = file[s].shape[0]
count += 1
welli = int(str(s[6:10]))
if welli not in wells:
wells[welli] = 0
wells[welli] += n
if welli == welltest:
stest.append(s)
if wellstest[fold] in wells:
ntest = wells[wellstest[fold]]
test = np.zeros((ntest, 10), dtype=np.float64)
i1 = 0
i2 = 0
for s in stest:
if s[0] != "W":
continue
i2 = i1 + file[s].shape[0]
test[i1:i2, :] = file[s][()]
i1 = i2
else:
print("data for this fault and well not available")
test = np.empty((0, 10), dtype=np.float64)
ntrain = sum(wells[k] for k in wells if k != welltest)
train = np.zeros((ntrain, 10), dtype=np.float64)
i1 = 0
i2 = 0
for s in file:
if s[0] != "W":
continue
if s in stest:
continue
i2 = i1 + file[s].shape[0]
train[i1:i2, :] = file[s][()]
i1 = i2
# print('well', s, i1, i2, ntrain)
return train, test
def drop_nan(*args):
for a in args:
mask = np.any(
np.isnan(a)
# | (trainnegative > np.finfo(np.float32).max)
| np.isinf(a) | ~np.isfinite(a),
axis=1,
)
a = a[~mask]
return args
def get_mask(*args):
m = []
for a in args:
mask = np.any(
np.isnan(a)
# | (trainnegative > np.finfo(np.float32).max)
| np.isinf(a) | ~np.isfinite(a),
axis=1,
)
m.append(mask)
return m
def split_and_save1(params, case, group, classes):
"""
"""
win = params.windowsize
step = params.stepsize
#filename = get_md5(params)
with h5py.File(f"datasets_clean.h5", "r") as file:
n = 0
skipped = 0
for c in classes:
f = file[f"/{c}"]
for s in f:
if s[0] != "W":
continue
if len(params.skipwell) > 0:
# skip well by ID
if s[:10] in params.skipwell:
skipped += f[s].shape[0]
continue
n += f[s].shape[0]
data = np.zeros([n, 10], dtype=np.float64)
test = np.zeros((skipped, 10), dtype=np.float64)
for c in classes:
f = file[f"/{c}"]
for s in f:
i1, i2 = 0, 0
j1, j2 = 0, 0
for s in f:
if s[0] != "W":
continue
if len(params.skipwell) > 0:
if s[:10] in params.skipwell:
j2 = j1 + f[s].shape[0]
test[j1:j2, :] = f[s][()]
j1 = j2
continue
i2 = i1 + f[s].shape[0]
data[i1:i2, :] = f[s][()]
i1 = i2
xdata = swfe(params.windowsize, n, params.stepsize, data[:, params.usecols],)
tdata = swfe(
params.windowsize, skipped, params.stepsize, test[:, params.usecols],
)
if group == "pos":
ydata = np.ones(xdata.shape[0], dtype=np.float64)
elif group == "neg":
ydata = np.zeros(xdata.shape[0], dtype=np.float64)
with h5py.File(f"datasets_folds_exp{case}.h5", "a") as ffolds:
for round_ in range(1, params.nrounds + 1):
if params.shuffle:
kf = KFold(
n_splits=params.nfolds, random_state=round_, shuffle=True
)
else:
kf = KFold(n_splits=params.nfolds, random_state=None, shuffle=False)
for fold, (train_index, test_index) in enumerate(kf.split(xdata)):
gk = f"/case{case}_{group}_r{round_}_nf{params.nfolds}_f{fold}_w{win}_s{step}"
if gk in ffolds:
del ffolds[gk]
grp = ffolds.create_group(gk)
xtrain, ytrain = xdata[train_index], ydata[train_index]
xtest, ytest = xdata[test_index], ydata[test_index]
print(
gk,
"original data shape",
data.shape,
"final",
xdata.shape,
"xtrain",
xtrain.shape,
"xtest",
xtest.shape,
)
grp.create_dataset(f"xtrain", data=xtrain, dtype=np.float64)
grp.create_dataset(f"ytrain", data=ytrain, dtype=np.float64)
grp.create_dataset(f"xvalid", data=xtest, dtype=np.float64)
grp.create_dataset(f"yvalid", data=ytest, dtype=np.float64)
if tdata.shape[0] > 0:
gkt = f"/case{case}_{group}_r{round_}_nf{params.nfolds}_f-test_w{win}_s{step}"
if gkt in ffolds:
del ffolds[gkt]
grpt = ffolds.create_group(gkt)
grpt.create_dataset(f"xtest", data=tdata, dtype=np.float64)
def split_and_save2(params, case, group, classes):
win = params.windowsize
step = params.stepsize
nfolds = params.nfolds
filename = get_md5(params)
with h5py.File(f"datasets_clean.h5", "r") as file:
with h5py.File(f"datasets_folds_exp{case}.h5", "a") as ffolds:
for round_ in range(1, params.nrounds + 1):
samples = []
sessions = []
for class_ in classes:
files = file[f"/{class_}"]
for s in files:
if s[0] != "W":
continue
n = files[s].shape[0]
samples.append(n)
sessions.append(f"/{class_}/{s}")
count = len(samples)
samples = np.array(samples)
nf = int(count / nfolds)
# random
if params.shuffle:
testfidx = np.random.RandomState(round_).choice(
range(0, count), size=(nf, params.nfolds), replace=False,
)
else:
# sequence
testfidx = np.reshape(np.arange(nf * nfolds), (5, -1)).T
for fold in range(0, params.nfolds):
gk = f"/case{case}_{group}_r{round_}_nf{params.nfolds}_f{fold}_w{win}_s{step}"
testsize = sum(samples[testfidx[:, fold]])
test = np.zeros((testsize, 10), dtype=np.float64)
stest = [sessions[i] for i in testfidx[:, fold]]
i1, i2 = 0, 0
#for class_ in classes:
# files = file[f"/{class_}"]
for s in stest:
i2 = i1 + file[s].shape[0]
test[i1:i2, :] = file[s][()]
i1 = i2
# print(s)
# print(f'fold {fold} ate {i2}')
nstp = 0
for s in sessions:
if s in stest:
continue
nstp += file[s].shape[0]
train = np.zeros((nstp, 10), dtype=np.float64)
i1, i2 = 0, 0
for s in sessions:
if s in stest:
continue
i2 = i1 + file[s].shape[0]
train[i1:i2, :] = file[s][()]
i1 = i2
xtrain = swfe(
params.windowsize,
nstp,
params.stepsize,
train[:, params.usecols],
)
if classes == params.negative:
ytrain = np.zeros(xtrain.shape[0], dtype=np.float64)
else:
ytrain = np.ones(xtrain.shape[0], dtype=np.float64)
xtest = swfe(
params.windowsize,
testsize,
params.stepsize,
test[:, params.usecols],
)
if classes == params.negative:
ytest = np.zeros(xtest.shape[0], dtype=np.float64)
else:
ytest = np.ones(xtest.shape[0], dtype=np.float64)
if gk in ffolds:
del ffolds[gk]
grp = ffolds.create_group(gk)
print(
gk,
"original data shape",
np.sum(samples),
"train",
train.shape,
"test",
test.shape,
"xtrain",
xtrain.shape,
"xtest",
xtest.shape,
)
grp.create_dataset(f"xtrain", data=xtrain, dtype=np.float64)
grp.create_dataset(f"ytrain", data=ytrain, dtype=np.float64)
grp.create_dataset(f"xvalid", data=xtest, dtype=np.float64)
grp.create_dataset(f"yvalid", data=ytest, dtype=np.float64)
def split_and_save3(params, case, group, classes):
win = params.windowsize
step = params.stepsize
wellstest = [1, 2, 4, 5, 7]
filename = get_md5(params)
with h5py.File(f"datasets_clean.h5", "r") as clean:
with h5py.File(f"datasets_folds_exp{case}.h5", "a") as ffolds:
for round_ in range(1, params.nrounds + 1):
for fold in range(0, params.nfolds):
gk = f"/case{case}_{group}_r{round_}_nf{params.nfolds}_f{fold}_w{win}_s{step}"
welltest = wellstest[fold]
count = 0
wells = {}
strain = []
stest = []
n = 0
for class_ in classes:
files = clean[f"/{class_}"]
for s in files:
if s[0] != "W":
continue
count += 1
welli = int(str(s[6:10]))
if welli not in wells:
wells[welli] = 0
wells[welli] += files[s].shape[0]
if welli == welltest:
stest.append(f"/{class_}/{s}")
else:
strain.append(f"/{class_}/{s}")
ntrain = sum(wells[k] for k in wells if k != welltest)
train = np.zeros((ntrain, 10), dtype=np.float64)
if wellstest[fold] in wells:
ntest = wells[wellstest[fold]]
test = np.zeros((ntest, 10), dtype=np.float64)
i1, i2 = 0, 0
for s in stest:
i2 = i1 + clean[s].shape[0]
test[i1:i2, :] = clean[s][()]
i1 = i2
else:
print("data for this fault and well not available")
test = np.empty((0, 10), dtype=np.float64)
i1, i2 = 0, 0
for s in strain:
i2 = i1 + clean[s].shape[0]
train[i1:i2, :] = clean[s][()]
i1 = i2
xtrain = swfe(
params.windowsize,
ntrain,
params.stepsize,
train[:, params.usecols],
)
if classes == params.negative:
ytrain = np.zeros(xtrain.shape[0], dtype=np.float64)
else:
ytrain = np.ones(xtrain.shape[0], dtype=np.float64)
xtest = swfe(
params.windowsize,
ntest,
params.stepsize,
test[:, params.usecols],
)
if classes == params.negative:
ytest = np.zeros(xtest.shape[0], dtype=np.float64)
else:
ytest = np.ones(xtest.shape[0], dtype=np.float64)
if params.shuffle:
xtrain, ytrain = resample(xtrain, ytrain, random_state=round_, replace=False)
xtest, ytest = resample(xtest, ytest, random_state=round_, replace=False)
if gk in ffolds:
del ffolds[gk]
grp = ffolds.create_group(gk)
print(gk, "xtrain", xtrain.shape, "xtest", xtest.shape)
grp.create_dataset(f"xtrain", data=xtrain, dtype=np.float64)
grp.create_dataset(f"ytrain", data=ytrain, dtype=np.float64)
grp.create_dataset(f"xvalid", data=xtest, dtype=np.float64)
grp.create_dataset(f"yvalid", data=ytest, dtype=np.float64)
def foldfn(round_: int, fold: int, params: P) -> List[Dict]:
"""
Run one fold.
It can be executed in parallel.
"""
logging.captureWarnings(True)
logger = logging.getLogger(f"fold{fold}")
formatter = logging.Formatter(params.logformat)
fh = logging.FileHandler(f"{params.experiment}_fold{fold}.log")
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
logger.setLevel(logging.DEBUG)
logger.debug(f"round {round_}")
logger.debug(f"fold {fold}")
# 0 => timestamp
# 1 "P-PDG",
# 2 "P-TPT",
# 3 "T-TPT",
# 4 "P-MON-CKP",
# 5 "T-JUS-CKP",
# 6 "P-JUS-CKGL",
# 7 "T-JUS-CKGL",
# 8 "QGL",
# 9 => class label
# 6, 7, 8 are gas lift related
# usecols = [1, 2, 3, 4, 5]
# usecols = params.usecols
classifiers = get_classifiers(params.classifierstr)
try:
# ==============================================================================
# Read data from disk and split in folds
# ==============================================================================
logger.debug(f"read and split data in folds")
try:
xtrainneg, xtrainpos, xtestneg, xtestpos, xfn, xfp = params.read_and_split(
fold, round_, params
)
except Exception as e000:
print(e000)
raise e000
x_outter_train, y_outter_train = vertical_split_bin(xtrainneg, xtrainpos)
x_outter_test, y_outter_test = vertical_split_bin(xtestneg, xtestpos)
if len(params.usecols) > 0:
usecols = []
for c in params.usecols:
for ck in range((c - 1) * params.nfeaturesvar, c * params.nfeaturesvar):
usecols.append(ck)
print('use measured variables', str(params.usecols), ' keep features ', str(usecols))
logger.info('use measured variables' + str(params.usecols) + ' keep features ' + str(usecols))
x_outter_train = x_outter_train[:, usecols]
x_outter_test = x_outter_test[:, usecols]
logger.debug(f"train neg={str(xtrainneg.shape)} pos={str(xtrainpos.shape)}")
logger.debug(f"test neg={str(xtestneg.shape)} pos={str(xtestpos.shape)}")
if 0 in xtestpos.shape or 0 in xtestneg.shape:
breakpoint()
raise Exception("dimension zero")
if xtestpos.shape[0] > xtestneg.shape[0]:
#
# print('Binary problem with unbalanced classes: NEG is not > POS')
logger.warn("Binary problem with unbalanced classes: NEG is not > POS")
# raise Exception()
logger.debug(
f"shapes train={str(x_outter_train.shape)} test={str(x_outter_test.shape)}"
)
# ==============================================================================
# After feature extraction, some NaN appear again in the arrays
# ==============================================================================
logger.debug(f"check NaN #1")
mask = np.any(
np.isnan(x_outter_train)
| (x_outter_train > np.finfo(np.float32).max)
| np.isinf(x_outter_train)
| ~np.isfinite(x_outter_train),
axis=1,
)
# breakpoint()
logger.debug(f"NaN Mask size {np.count_nonzero(mask, axis=0)}")
x_outter_train = x_outter_train[~mask]
y_outter_train = y_outter_train[~mask]
mask = np.any(
np.isnan(x_outter_test)
| (x_outter_test > np.finfo(np.float32).max)
| np.isinf(x_outter_test)
| ~np.isfinite(x_outter_test),
axis=1,
)
# mask = ~mask
x_outter_test = x_outter_test[~mask]
y_outter_test = y_outter_test[~mask]
logger.debug(f"check NaN #2")
check_nan(x_outter_train, logger)
logger.debug(
f"shapes train={str(x_outter_train.shape)} test={str(x_outter_test.shape)}"
)
# ==================
# Feature selection
# ==================
# 0 - MAX
# 1 - Mean
# 2 - Median
# 3 - Min
# 4 - Std
# 5 - Var
# usefeatures = [1, 4, 5]
# usefeatures = [0, 1, 2, 3, 4, 5]
# logger.info('Use features ' + str(usefeatures))
# x_outter_train = x_outter_train[:, usefeatures]
# x_outter_test = x_outter_test[:, usefeatures]
# ==============================================================================
# Normalization
# ==============================================================================
logger.debug(f"normalization AFTER feature extraction")
scalerafter = StandardScaler()
scalerafter.fit(x_outter_train)
x_outter_train = scalerafter.transform(x_outter_train)
x_outter_test = scalerafter.transform(x_outter_test)
# ==============================================================================
# Covariance and Person's Correlation
# ==============================================================================
logger.info("Covariance and correlation - over features")
nc = len(usecols) * 6
corr1 = np.zeros((nc, nc), dtype=np.float64)
pers1 = np.zeros((nc, nc), dtype=np.float64)
corr2 = np.zeros((nc, nc), dtype=np.float64)
pers2 = np.zeros((nc, nc), dtype=np.float64)
for a, b in itertools.combinations(range(0, nc), 2):
try:
corr1[a, b], pers1[a, b] = pearsonr(
x_outter_train[:, a], x_outter_train[:, b]
)
except:
corr1[a, b], pers1[a, b] = 0.0, 0.0
try:
corr2[a, b], pers2[a, b] = pearsonr(
x_outter_test[:, a], x_outter_test[:, b]
)
except:
corr2[a, b], pers2[a, b] = 0.0, 0.0
logger.info("Pearson R train \n" + str(corr1))
logger.info("Pearson R test \n" + str(corr2))
resultlist = []
for clf in classifiers:
logger.debug(f'Classifier {clf}')
if isinstance(classifiers[clf]["model"], str):
model = eval(classifiers[clf]["model"])
elif callable(classifiers[clf]["model"]):
model = classifiers[clf]["model"]
if params.gridsearch != 0:
#raise Exception("not implemented")
inner = []
innerkf = KFold(n_splits=4, shuffle=True, random_state=round_)
nxotr = x_outter_train.shape[0] // 4
for kfi, (itrainidx, itestidx) in enumerate(innerkf.split(x_outter_train)):
logger.debug(f'inner fold {kfi}')
x_inner_train = x_outter_train[itrainidx, :]
y_inner_train = y_outter_train[itrainidx]
x_inner_test = x_outter_train[itestidx, :]
y_inner_test = y_outter_train[itestidx]
for cfgk, cfg in enumerate(classifiers[clf]["config"]):
logger.debug(f'classifier config {cfgk+1:3d}/{len(classifiers[clf]["config"])}')
if clf == "ELM":
cfg['inputs'] = x_inner_train.shape[1]
yintr = one_hot(y_inner_train, 2)
else:
yintr = y_inner_train
classif_in = model(**cfg)
f1_in = 0.0
trt0 = time.time()
try:
classif_in.fit(x_inner_train, yintr)
trt1 = time.time() - trt0
p_in, r_in, f1_in = 0.0, 0.0, 0.0
try:
ypred_in = classif_in.predict(x_inner_test)
try:
p_in, r_in, f1_in, _ = precision_recall_fscore_support(
y_inner_test, ypred_in, average="macro",
)
logger.debug(f'classifier config {cfgk:3d} F1={f1_in:.4f} time={trt1:11.4f}')
except Exception as inef1:
print(cfgk, inef1)
except Exception as inetr:
print(cfgk, inetr)
except Exception as e_inner:
logger.exception(e_inner)
inner.append({
'config': cfgk,
'metric': f1_in,
})
del classif_in
validf = pd.DataFrame(data=inner)
valid = pd.pivot_table(validf, index='config', values=['metric'], aggfunc={'metric': ['mean']})
valid = valid['metric']
valid = valid.reindex(
valid.sort_values(by=['mean', 'config'], axis=0, ascending=[False, True], inplace=False).index
)
idx = list(valid.index)[0]
logger.debug(f'best config {idx}, F1={valid.iat[0, 0]}')
best_config = classifiers[clf]["config"][idx]
# ===============
# FIM GRID SEARCH
# ===============
else:
idx = 0
best_config = classifiers[clf]["default"]
if "random_state" in best_config:
best_config["random_state"] = round_
classif = None
if clf == "ZERORULE":
pass
if clf == "ELM":
best_config['inputs'] = x_outter_train.shape[1]
youttr = one_hot(y_outter_train, 2)
else:
youttr = y_outter_train
classif = model(**best_config)
r1, r2, r3, r4 = train_test_binary(
classif, x_outter_train, youttr, x_outter_test, y_outter_test
)
y_outter_pred, ynpred, yppred, traintime = r1
f1bin4, f1bin0, f1mic, f1mac, f1weigh, f1sam, p, r, acc = r2
tn, fp, fn, tp = r3
for exp in r4:
logger.exception(exp)
logger.info(f"Classifier {clf} acc={acc:.4f} f1mac={f1mac:.4f} f1bin4={f1bin4:.4f} f1bin0={f1bin0:.4f}")
resultlist.append(
vars(
Results(
class_="4",
experiment=params.experiment,
nfeaturestotal=0,
timestamp=int(f"{params.sessionts:%Y%m%d%H%M%S}"),
seed=round_,
foldoutter=fold,
foldinner=-1,
classifier=clf,
classifiercfg=idx,
classifiercfgs=len(classifiers[clf]["config"]),
f1binp=f1bin4,
f1binn=f1bin0,
f1micro=f1mic,
f1macro=f1mac,
f1weighted=f1weigh,
f1samples=f1sam,
precision=p,
recall=r,
accuracy=acc,
accuracy2=0.0,
timeinnertrain=0,
timeouttertrain=traintime,
positiveclasses="4",
negativeclasses="0",
features="",
nfeaturesvar=6,
postrainsamples=0,
negtrainsamples=0,
postestsamples=0,
negtestsamples=0,
ynegfinaltrain=xtrainneg.shape[0],
yposfinaltrain=xtrainpos.shape[0],
ynegfinaltest=xtestneg.shape[0],
yposfinaltest=xtestpos.shape[0],
yposfinalpred=yppred,
ynegfinalpred=ynpred,
yfinaltrain=y_outter_train.shape[0],
yfinaltest=y_outter_test.shape[0],
yfinalpred=y_outter_pred.shape[0],
tp=tp,
tn=tn,
fp=fp,
fn=fn,
bestfeatureidx='',
bestvariableidx='',
featurerank='',
rankfeature='',
)
)
)
if len(params.skipwell) > 0 and xfn is not None:
yft = np.concatenate(
(
np.zeros(xfn.shape[0], dtype=np.int32),
np.ones(xfp.shape[0], dtype=np.int32),
),
axis=0,
)
try:
yftpred = classif.predict(np.concatenate((xfn, xfp), axis=0))
try:
p, r, f1bin, _ = precision_recall_fscore_support(
yft, yftpred, average="binary",
)
except Exception as ef1bin:
print(ef1bin)
acc = 0.0
try:
acc = accuracy_score(yft, yftpred)
except Exception as eacc:
print(eacc)
raise eacc
resultlist.append(
vars(
Results(
class_="4",
experiment=params.experiment,
nfeaturestotal=0,
timestamp=int(f"{params.sessionts:%Y%m%d%H%M%S}"),
seed=round_,
foldoutter=-1,
foldinner=-1,
classifier=clf,
classifiercfg=idx,
f1bin=f1bin,
f1micro=0,
f1macro=0,
f1weighted=0,
f1samples=0,
precision=p,
recall=r,
accuracy=0.0,
accuracy2=0.0,
timeinnertrain=0,
timeouttertrain=0,
positiveclasses="4",
negativeclasses="0",
features="",
nfeaturesvar=6,
postrainsamples=0,
negtrainsamples=0,
postestsamples=0,
negtestsamples=0,
ynegfinaltrain=0,
yposfinaltrain=0,
ynegfinaltest=xfn.shape[0],
yposfinaltest=xfp.shape[0],
yposfinalpred=0,
ynegfinalpred=0,
yfinaltrain=0,
yfinaltest=0,
yfinalpred=0,
tp=0,
tn=0,
fp=0,
fn=0,
)
)
)
except Exception as eft:
print(eft)
raise eft
return resultlist
except Exception as efold:
logger.exception(efold)
breakpoint()
raise efold
return []
def runexperiment(params, *args, **kwargs) -> None:
"""
Run experiment - train, validation (optional) and test.
"""
all_results_list = []
partial = []
logger = logging.getLogger(__name__)
formatter = logging.Formatter(params.logformat)
fh = logging.FileHandler(f"{params.experiment}.log")
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
logger.setLevel(logging.DEBUG)
logger.info(params.name)
logger.info("=" * 79)
t0 = time.time()
for round_ in range(1, params.nrounds + 1):
logger.info(f"Round {round_}")
if params.njobs > 1:
logger.debug(f"Running {params.njobs} parallel jobs")
with Pool(processes=params.njobs) as p:
# 'results' is a List of List
results = p.starmap(
foldfn,
zip(
[round_] * params.nfolds,
range(params.nfolds),
[params] * params.nfolds,
),
)
results_list_round = []
for r in results:
results_list_round.extend(r)
else:
logger.debug(f"Running single core")
results_list_round = []
for foldout in range(0, params.nfolds):
results_list_round.extend(foldfn(round_, foldout, params))
partial = pd.DataFrame(data=results_list_round)
try:
partial.to_excel(f"{params.experiment}_parcial_{round_}.xlsx", index=False)
except Exception as e1:
logger.exception(e1)
all_results_list.extend(results_list_round)
results = pd.DataFrame(data=all_results_list)
results["e"] = params.experiment[:-1]
results["case"] = params.experiment[-1]
try:
results.to_excel(f"{params.experiment}_final.xlsx", index=False)
except Exception as e2:
logger.exception(e2)
try:
markdown = str(pd.pivot_table(
data=results[results["foldoutter"] >= 0],
values=["f1macro", "ynegfinaltest", "yposfinaltest"],
index=["classifier", "e"],
aggfunc={
"f1macro": ["mean", "std"],
"ynegfinaltest": "max",
"yposfinaltest": "max",
},
columns=["case"],
).to_markdown(buf=None, index=True))
logger.debug(markdown)
except:
pass
try:
with open(f"{params.experiment}_final.md", "w") as f:
f.writelines("\n")
f.writelines("All rounds\n")
pd.pivot_table(
data=results[results["foldoutter"] >= 0],
values=["f1macro", "ynegfinaltest", "yposfinaltest", "class_"],
index=["classifier", "e"],
aggfunc={
"f1macro": ["mean", "std"],
"ynegfinaltest": "max",
"yposfinaltest": "max",
"class_": "count"
},
columns=["case"],
).to_markdown(buf=f, index=True)
f.writelines("\n\n")
f.writelines(
# folds de "teste"
pd.pivot_table(
data=results[results["foldoutter"] < 0],
values=["f1macro", "ynegfinaltest", "yposfinaltest"],
index=["classifier", "e"],
aggfunc={
"f1macro": ["mean", "std"],
"ynegfinaltest": "max",
"yposfinaltest": "max",
},
columns=["case"],
).to_markdown(buf=None, index=True)
)
f.writelines("\n\n\n")
f.writelines("Round 1\n")
pd.pivot_table(
data=results[(results["foldoutter"] >= 0) & (results["foldinner"] < 0) &(results["seed"] == 1)],
values=["f1macro", "ynegfinaltest", "yposfinaltest", "class_"],
index=["classifier", "e"],
aggfunc={
"f1macro": ["mean", "std"],
"ynegfinaltest": "max",
"yposfinaltest": "max",
"class_": "count",
},
columns=["case"],
).to_markdown(buf=f, index=True)
f.writelines("\n")
except Exception as e3:
logger.exception(e3)
logger.debug(f"finished in {humantime(seconds=(time.time()-t0))}")
@dataclass(frozen=False)
class DefaultParams:
"""
Experiment configuration (and model hyperparameters)
DataClass offers a little advantage over simple dictionary in that it checks if
the parameter actually exists. A dict would accept anything.
"""
name: str = ""
experiment: str = ""
nrounds: int = 1
nfolds: int = 5
njobs: int = 1
windowsize: int = 900
stepsize: int = 900
gridsearch: int = 0
classifierstr: str = "1NN,3NN,QDA,LDA,GNB,RF,ZERORULE"
usecolsstr: str = "1,2,3,4,5"
usecols: list = field(default_factory=list)
nfeaturesvar: int = 6
hostname: str = socket.gethostname()
ncpu: int = psutil.cpu_count()
datasetcols: list = field(default_factory=list)
tzsp = tz.gettz("America/Sao_Paulo")
logformat: str = "%(asctime)s %(levelname)-8s %(name)-12s %(funcName)-12s %(lineno)-5d %(message)s"
shuffle: bool = True
skipwellstr: str = ""
read_and_split = None
def __post_init__(self):
self.njobs = max(min(self.nfolds, self.njobs, self.ncpu), 1)
if isinstance(self.classifierstr, str):
self.classifiers = self.classifierstr.split(",")
elif isinstance(self.classifierstr, tuple):
self.classifiers = list(self.classifierstr)
self.skipwell = self.skipwellstr.split(",")
self.sessionts = datetime.now(tz=self.tzsp)
if isinstance(self.usecolsstr, str):
self.usecols = list(map(int, self.usecolsstr.split(",")))
elif isinstance(self.usecolsstr, tuple):
self.usecols = list(self.usecolsstr)
elif isinstance(self.usecolsstr, int):
self.usecols = [self.usecolsstr]
self.datasetcols = [
"timestamp",
"P-PDG",
"P-TPT",
"T-TPT",
"P-MON-CKP",
"T-JUS-CKP",
"P-JUS-CKGL",
"T-JUS-CKGL",
"QGL",
"class",
]
def concat_excel(*files, output='compilado.xlsx'):
lista = []
for f in files:
frame = | pd.read_excel(f, header=0) | pandas.read_excel |
# Utility data handling,
# - Convert parquet to csv dataframes.
# - Consolidate parquet data files.
# - load parquet
import sys
#import glob
import pprint
import re
import string
import time
from pathlib import Path
ROOT_PATH = Path('D:/OneDrive - Microsoft/data/20news') # /20news-bydate-train/train_clean'
import pyarrow
import pandas as pd
def cnvt2csv(path):
'path = path to the train_clean directory'
try:
adf = pd.read_parquet(path)
print(path, ': ', adf.shape)
new_name = Path(path.parent) / Path(path.stem + '.csv')
# create strings from txt lists
msg_col = adf['msg']
msg_col = msg_col.apply(flatten_msg)
item_col = adf['item']
item_col = item_col.apply(lambda x: Path(x).name)
adf['msg'] = msg_col
adf['item'] = item_col
adf.to_csv(new_name)
except Exception as e:
print(f"for file {new_name} got exception {e}.")
print('wrote ', new_name)
def flatten_msg(msg):
'Concatenate lines to form a single string, removing punctuation.'
# Convert array to one text string.
txt = ' '.join(list(msg))
# Remove punct.
txt =''.join([k for k in txt if k not in string.punctuation])
return txt
def consolidate_parquet(path):
'Combine all parquet files into one df'
full_df = pd.DataFrame()
globpath = Path(path)
for a_file in globpath.glob('*.parquet'):
try:
adf = | pd.read_parquet(a_file) | pandas.read_parquet |
import datetime
from unittest import TestCase
import numpy as np
import pandas as pd
from mlnext import pipeline
class TestColumnSelector(TestCase):
def setUp(self):
data = np.arange(8).reshape(-1, 2)
cols = ['a', 'b']
self.df = pd.DataFrame(data, columns=cols)
def test_select_columns(self):
t = pipeline.ColumnSelector(keys=['a'])
expected = self.df.loc[:, ['a']]
result = t.fit_transform(self.df)
pd.testing.assert_frame_equal(result, expected)
class TestColumnDropper(TestCase):
def setUp(self):
data = np.arange(8).reshape(-1, 2)
cols = ['a', 'b']
self.df = pd.DataFrame(data, columns=cols)
def test_drop_columns(self):
t = pipeline.ColumnDropper(columns=['b'])
expected = self.df.loc[:, ['a']]
result = t.fit_transform(self.df)
| pd.testing.assert_frame_equal(result, expected) | pandas.testing.assert_frame_equal |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 2 21:40:29 2020
@author: Phil
"""
import pandas as pd
import numpy as np
import json
import h5py
import os.path
from typing import Union
from ccdef import __DEBUG__, __VERSION__, __DATA_VERSION__
#%% file helper functions
def slice_hd5 (infile, outfile, start_time, duration):
# get dataset names
hdf = pd.HDFStore(infile)
keys = hdf.keys()
hdf.close()
end_time = pd.to_datetime(start_time) + pd.to_timedelta(duration, 'S')
print ('File {} has the following datasets: {}'.format(infile, keys))
for key in keys:
# *may* need to modify this for large slices... read in chunks and loop
df = pd.read_hdf(infile, key, where='index>start_time & index<end_time')
df.to_hdf(outfile, key, append = True, format = 't')
def hdf_stats (infile):
hdf = | pd.HDFStore(infile) | pandas.HDFStore |
import os
from os.path import join
import tempfile
import shutil
import math
import json
import numpy as np
import pandas as pd
import zarr
from numcodecs import Zlib
from scipy import sparse
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from generate_tiff_offsets import get_offsets
from starlette.responses import JSONResponse, UJSONResponse
from starlette.routing import Route, Mount
from starlette.staticfiles import StaticFiles
from .constants import (
CoordinationType as ct,
Component as cm,
DataType as dt,
FileType as ft,
)
from .entities import Cells, CellSets, GenomicProfiles
from .routes import range_repsonse
VAR_CHUNK_SIZE = 10
class JsonRoute(Route):
def __init__(self, path, endpoint, data_json):
super().__init__(path, endpoint)
self.data_json = data_json
class AbstractWrapper:
"""
An abstract class that can be extended when
implementing custom dataset object wrapper classes.
TODO: Add some useful tests.
>>> assert True
"""
def __init__(self, **kwargs):
"""
Abstract constructor to be inherited by dataset wrapper classes.
:param str out_dir: The path to a local directory used for data processing outputs. By default, uses a temp. directory.
"""
self.out_dir = kwargs['out_dir'] if 'out_dir' in kwargs else tempfile.mkdtemp()
self.routes = []
self.is_remote = False
self.file_def_creators = []
def convert_and_save(self, dataset_uid, obj_i):
"""
Fill in the file_def_creators array.
Each function added to this list should take in a base URL and generate a Vitessce file definition.
If this wrapper is wrapping local data, then create routes and fill in the routes array.
This method is void, should not return anything.
:param str dataset_uid: A unique identifier for this dataset.
:param int obj_i: Within the dataset, the index of this data wrapper object.
"""
os.makedirs(self._get_out_dir(dataset_uid, obj_i), exist_ok=True)
def get_routes(self):
"""
Obtain the routes that have been created for this wrapper class.
:returns: A list of server routes.
:rtype: list[starlette.routing.Route]
"""
return self.routes
def get_file_defs(self, base_url):
"""
Obtain the file definitions for this wrapper class.
:param str base_url: A base URL to prepend to relative URLs.
:returns: A list of file definitions.
:rtype: list[dict]
"""
file_defs_with_base_url = []
for file_def_creator in self.file_def_creators:
file_def = file_def_creator(base_url)
if file_def is not None:
file_defs_with_base_url.append(file_def)
return file_defs_with_base_url
def get_out_dir_route(self, dataset_uid, obj_i):
"""
Obtain the Mount for the `out_dir`
:param str dataset_uid: A dataset unique identifier for the Mount
:param str obj_i: A index of the current vitessce.wrappers.AbstractWrapper among all other wrappers in the view config
:returns: A starlette Mount of the the `out_dir`
:rtype: list[starlette.routing.Mount]
"""
if not self.is_remote:
out_dir = self._get_out_dir(dataset_uid, obj_i)
return [Mount(self._get_route_str(dataset_uid, obj_i),
app=StaticFiles(directory=out_dir, html=False))]
return []
def _get_url(self, base_url, dataset_uid, obj_i, *args):
return base_url + self._get_route_str(dataset_uid, obj_i, *args)
def _get_route_str(self, dataset_uid, obj_i, *args):
return "/" + "/".join(map(str, [dataset_uid, obj_i, *args]))
def _get_out_dir(self, dataset_uid, obj_i, *args):
return join(self.out_dir, dataset_uid, str(obj_i), *args)
def auto_view_config(self, vc):
"""
Auto view configuration is intended to be used internally by the `VitessceConfig.from_object` method.
Each subclass of `AbstractWrapper` may implement this method which takes in a `VitessceConfig` instance
and modifies it by adding datasets, visualization components, and view coordinations.
Implementations of this method may create an opinionated view config based on inferred use cases.
:param vc: The view config instance.
:type vc: VitessceConfig
"""
raise NotImplementedError("Auto view configuration has not yet been implemented for this data object wrapper class.")
class MultiImageWrapper(AbstractWrapper):
"""
Wrap multiple imaging datasets by creating an instance of the ``MultiImageWrapper`` class.
:param list image_wrappers: A list of imaging wrapper classes (only :class:`~vitessce.wrappers.OmeTiffWrapper` supported now)
:param \\*\\*kwargs: Keyword arguments inherited from :class:`~vitessce.wrappers.AbstractWrapper`
"""
def __init__(self, image_wrappers, use_physical_size_scaling=False, **kwargs):
super().__init__(**kwargs)
self.image_wrappers = image_wrappers
self.use_physical_size_scaling = use_physical_size_scaling
def convert_and_save(self, dataset_uid, obj_i):
for image in self.image_wrappers:
image.convert_and_save(dataset_uid, obj_i)
file_def_creator = self.make_raster_file_def_creator(dataset_uid, obj_i)
routes = self.make_raster_routes()
self.file_def_creators.append(file_def_creator)
self.routes += routes
def make_raster_routes(self):
obj_routes = []
for num, image in enumerate(self.image_wrappers):
obj_routes = obj_routes + image.get_routes()
return obj_routes
def make_raster_file_def_creator(self, dataset_uid, obj_i):
def raster_file_def_creator(base_url):
raster_json = {
"schemaVersion": "0.0.2",
"usePhysicalSizeScaling": self.use_physical_size_scaling,
"images": [],
"renderLayers": []
}
for image in self.image_wrappers:
image_json = image.make_image_def(dataset_uid, obj_i, base_url)
raster_json['images'].append(image_json)
raster_json['renderLayers'].append(image.name)
return {
"type": dt.RASTER.value,
"fileType": ft.RASTER_JSON.value,
"options": raster_json
}
return raster_file_def_creator
class OmeTiffWrapper(AbstractWrapper):
"""
Wrap an OME-TIFF File by creating an instance of the ``OmeTiffWrapper`` class.
:param str img_path: A local filepath to an OME-TIFF file.
:param str offsets_path: A local filepath to an offsets.json file.
:param str img_url: A remote URL of an OME-TIFF file.
:param str offsets_url: A remote URL of an offsets.json file.
:param str name: The display name for this OME-TIFF within Vitessce.
:param list[number] transformation_matrix: A column-major ordered matrix for transforming this image (see http://www.opengl-tutorial.org/beginners-tutorials/tutorial-3-matrices/#homogeneous-coordinates for more information).
:param bool is_bitmask: Whether or not this image is a bitmask.
:param \\*\\*kwargs: Keyword arguments inherited from :class:`~vitessce.wrappers.AbstractWrapper`
"""
def __init__(self, img_path=None, offsets_path=None, img_url=None, offsets_url=None, name="", transformation_matrix=None, is_bitmask=False,
**kwargs):
super().__init__(**kwargs)
self.name = name
self._img_path = img_path
self._img_url = img_url
self._offsets_url = offsets_url
self._transformation_matrix = transformation_matrix
self.is_remote = img_url is not None
self.is_bitmask = is_bitmask
if img_url is not None and (img_path is not None or offsets_path is not None):
raise ValueError("Did not expect img_path or offsets_path to be provided with img_url")
def convert_and_save(self, dataset_uid, obj_i):
# Only create out-directory if needed
if not self.is_remote:
super().convert_and_save(dataset_uid, obj_i)
file_def_creator = self.make_raster_file_def_creator(dataset_uid, obj_i)
routes = self.make_raster_routes(dataset_uid, obj_i)
self.file_def_creators.append(file_def_creator)
self.routes += routes
def make_raster_routes(self, dataset_uid, obj_i):
if self.is_remote:
return []
else:
offsets = get_offsets(self._img_path)
async def response_func(req):
return UJSONResponse(offsets)
routes = [
Route(self._get_route_str(dataset_uid, obj_i, self._get_img_filename()), lambda req: range_repsonse(req, self._img_path)),
JsonRoute(self._get_route_str(dataset_uid, obj_i, self.get_offsets_path_name()), response_func, offsets)
]
return routes
def make_image_def(self, dataset_uid, obj_i, base_url):
img_url = self.get_img_url(base_url, dataset_uid, obj_i)
offsets_url = self.get_offsets_url(base_url, dataset_uid, obj_i)
return self.create_image_json(img_url, offsets_url)
def make_raster_file_def_creator(self, dataset_uid, obj_i):
def raster_file_def_creator(base_url):
raster_json = {
"schemaVersion": "0.0.2",
"images": [self.make_image_def(dataset_uid, obj_i, base_url)],
}
return {
"type": dt.RASTER.value,
"fileType": ft.RASTER_JSON.value,
"options": raster_json
}
return raster_file_def_creator
def create_image_json(self, img_url, offsets_url=None):
metadata = {}
image = {
"name": self.name,
"type": "ome-tiff",
"url": img_url,
}
if offsets_url is not None:
metadata["omeTiffOffsetsUrl"] = offsets_url
if self._transformation_matrix is not None:
metadata["transform"] = {
"matrix": self._transformation_matrix
}
metadata["isBitmask"] = self.is_bitmask
# Only attach metadata if there is some - otherwise schema validation fails.
if len(metadata.keys()) > 0:
image["metadata"] = metadata
return image
def _get_image_dir(self):
return os.path.dirname(self._img_path)
def _get_img_filename(self):
return os.path.basename(self._img_path)
def get_img_url(self, base_url="", dataset_uid="", obj_i=""):
if self._img_url is not None:
return self._img_url
img_url = self._get_url(base_url, dataset_uid, obj_i, self._get_img_filename())
return img_url
def get_offsets_path_name(self):
return f"{self._get_img_filename().split('ome.tif')[0]}offsets.json"
def get_offsets_url(self, base_url="", dataset_uid="", obj_i=""):
if self._offsets_url is not None or self._img_url is not None:
return self._offsets_url
offsets_url = self._get_url(base_url, dataset_uid, obj_i, self.get_offsets_path_name())
return offsets_url
# class OmeZarrWrapper(AbstractWrapper):
# def __init__(self, z, name="", **kwargs):
# super().__init__(**kwargs)
# self.z = z
# self.name = name
# def create_raster_json(self, img_url):
# raster_json = {
# "schemaVersion": "0.0.2",
# "images": [
# {
# "name": self.name,
# "type": "zarr",
# "url": img_url,
# "metadata": {
# "dimensions": [
# {
# "field": "channel",
# "type": "nominal",
# "values": [
# "DAPI - Hoechst (nuclei)",
# "FITC - Laminin (basement membrane)",
# "Cy3 - Synaptopodin (glomerular)",
# "Cy5 - THP (thick limb)"
# ]
# },
# {
# "field": "y",
# "type": "quantitative",
# "values": None
# },
# {
# "field": "x",
# "type": "quantitative",
# "values": None
# }
# ],
# "isPyramid": True,
# "transform": {
# "scale": 1,
# "translate": {
# "x": 0,
# "y": 0,
# }
# }
# }
# }
# ],
# }
# return raster_json
# def get_raster(self, base_url, dataset_uid, obj_i):
# obj_routes = []
# obj_file_defs = []
# if type(self.z) == zarr.hierarchy.Group:
# img_dir_path = self.z.store.path
# raster_json = self.create_raster_json(
# self._get_url(base_url, dataset_uid, obj_i, "raster_img"),
# )
# obj_routes = [
# Mount(self._get_route_str(dataset_uid, obj_i, "raster_img"),
# app=StaticFiles(directory=img_dir_path, html=False)),
# JsonRoute(self._get_route_str(dataset_uid, obj_i, "raster"),
# self._create_response_json(raster_json), raster_json)
# ]
# obj_file_defs = [
# {
# "type": dt.RASTER.value,
# "fileType": ft.RASTER_JSON.value,
# "url": self._get_url(base_url, dataset_uid, obj_i, "raster")
# }
# ]
# return obj_file_defs, obj_routes
class AnnDataWrapper(AbstractWrapper):
def __init__(self, adata=None, adata_url=None, expression_matrix=None, matrix_gene_var_filter=None, gene_var_filter=None, cell_set_obs=None, cell_set_obs_names=None, spatial_centroid_obsm=None, spatial_polygon_obsm=None, mappings_obsm=None, mappings_obsm_names=None, mappings_obsm_dims=None, request_init=None, factors_obs=None, **kwargs):
"""
Wrap an AnnData object by creating an instance of the ``AnnDataWrapper`` class.
:param adata: An AnnData object containing single-cell experiment data.
:type adata: anndata.AnnData
:param str adata_url: A remote url pointing to a zarr-backed AnnData store.
:param str expression_matrix: Location of the expression (cell x gene) matrix, like `X` or `obsm/highly_variable_genes_subset`
:param str gene_var_filter: A string like `highly_variable` (from `var` in the AnnData stored) used in conjunction with expression_matrix if expression_matrix points to a subset of `X` of the full `var` list.
:param str matrix_gene_var_filter: A string like `highly_variable` (from `var` in the AnnData stored) used in conjunction with expression_matrix if expression_matrix points to a subset of `X` of the full `var` list.
:param list[str] factors_obs: Column names like `['top_marker_gene', 'sex']` for showing factors when cells are hovered over
:param list[str] cell_set_obs: Column names like `['louvain', 'cellType']` for showing cell sets from `obs`
:param list[str] cell_set_obs_names: Names to display in place of those in `cell_set_obs`, like `['Louvain', 'Cell Type']
:param str spatial_centroid_obsm: Column name in `obsm` that contains centroid coordinates for displaying centroids in the spatial viewer
:param str spatial_polygon_obsm: Column name in `obsm` that contains polygonal coordinates for displaying outlines in the spatial viewer
:param list[str] mappings_obsm: Column names like `['X_umap', 'X_pca']` for showing scatterplots from `obsm`
:param list[str] mappings_obsm_names: Overriding names like `['UMAP', 'PCA'] for displaying above scatterplots
:param list[str] mappings_obsm_dims: Dimensions along which to get data for the scatterplot, like [[0, 1], [4, 5]] where [0, 1] is just the normal x and y but [4, 5] could be comparing the third and fourth principal components, for example.
:param dict request_init: options to be passed along with every fetch request from the browser, like { "header": { "Authorization": "Bearer <PASSWORD>" } }
:param \\*\\*kwargs: Keyword arguments inherited from :class:`~vitessce.wrappers.AbstractWrapper`
"""
super().__init__(**kwargs)
self._adata = adata
self._adata_url = adata_url
if adata is not None:
self.is_remote = False
self.zarr_folder = 'anndata.zarr'
else:
self.is_remote = True
self.zarr_folder = None
self._expression_matrix = expression_matrix
self._cell_set_obs_names = cell_set_obs_names
self._mappings_obsm_names = mappings_obsm_names
self._gene_var_filter = "var/" + gene_var_filter if gene_var_filter is not None else gene_var_filter
self._matrix_gene_var_filter = "var/" + matrix_gene_var_filter if matrix_gene_var_filter is not None else matrix_gene_var_filter
self._cell_set_obs = ["obs/" + i for i in cell_set_obs] if cell_set_obs is not None else cell_set_obs
self._factors_obs = ["obs/" + i for i in factors_obs] if factors_obs is not None else factors_obs
self._spatial_centroid_obsm = "obsm/" + spatial_centroid_obsm if spatial_centroid_obsm is not None else spatial_centroid_obsm
self._spatial_polygon_obsm = "obsm/" + spatial_polygon_obsm if spatial_polygon_obsm is not None else spatial_polygon_obsm
self._mappings_obsm = ["obsm/" + i for i in mappings_obsm] if mappings_obsm is not None else mappings_obsm
self._mappings_obsm_dims = mappings_obsm_dims
self._request_init = request_init
def convert_and_save(self, dataset_uid, obj_i):
# Only create out-directory if needed
if not self.is_remote:
super().convert_and_save(dataset_uid, obj_i)
zarr_filepath = self.get_zarr_path(dataset_uid, obj_i)
# In the future, we can use sparse matrices with equal performance:
# https://github.com/theislab/anndata/issues/524
if isinstance(self._adata.X, sparse.spmatrix):
self._adata.X = self._adata.X.todense()
self._adata.write_zarr(zarr_filepath, chunks=[self._adata.shape[0], VAR_CHUNK_SIZE])
cells_file_creator = self.make_cells_file_def_creator(dataset_uid, obj_i)
cell_sets_file_creator = self.make_cell_sets_file_def_creator(dataset_uid, obj_i)
expression_matrix_file_creator = self.make_expression_matrix_file_def_creator(dataset_uid, obj_i)
self.file_def_creators += [cells_file_creator, cell_sets_file_creator, expression_matrix_file_creator]
self.routes += self.get_out_dir_route(dataset_uid, obj_i)
def get_zarr_path(self, dataset_uid, obj_i):
out_dir = self._get_out_dir(dataset_uid, obj_i)
zarr_filepath = join(out_dir, self.zarr_folder)
return zarr_filepath
def get_zarr_url(self, base_url="", dataset_uid="", obj_i=""):
if self.is_remote:
return self._adata_url
else:
return self._get_url(base_url, dataset_uid, obj_i, self.zarr_folder)
def make_cells_file_def_creator(self, dataset_uid, obj_i):
def get_cells(base_url):
options = {}
if self._spatial_centroid_obsm is not None:
options["xy"] = self._spatial_centroid_obsm
if self._spatial_polygon_obsm is not None:
options["poly"] = self._spatial_polygon_obsm
if self._mappings_obsm is not None:
options["mappings"] = {}
if self._mappings_obsm_names is not None:
for key, mapping in zip(self._mappings_obsm_names, self._mappings_obsm):
options["mappings"][key] = {
"key": mapping,
"dims": [0, 1]
}
else:
for mapping in self._mappings_obsm:
mapping_key = mapping.split('/')[-1]
self._mappings_obsm_names = mapping_key
options["mappings"][mapping_key] = {
"key": mapping,
"dims": [0, 1]
}
if self._mappings_obsm_dims is not None:
for dim, key in zip(self._mappings_obsm_dims, self._mappings_obsm_names):
options["mappings"][key]['dims'] = dim
if self._factors_obs is not None:
options["factors"] = []
for obs in self._factors_obs:
options["factors"].append(obs)
if len(options.keys()) > 0:
obj_file_def = {
"type": dt.CELLS.value,
"fileType": ft.ANNDATA_CELLS_ZARR.value,
"url": self.get_zarr_url(base_url, dataset_uid, obj_i),
"options": options
}
if self._request_init is not None:
obj_file_def['requestInit'] = self._request_init
return obj_file_def
return None
return get_cells
def make_cell_sets_file_def_creator(self, dataset_uid, obj_i):
def get_cell_sets(base_url):
if self._cell_set_obs is not None:
options = []
if self._cell_set_obs_names is not None:
names = self._cell_set_obs_names
else:
names = [obs.split('/')[-1] for obs in self._cell_set_obs]
for obs, name in zip(self._cell_set_obs, names):
options.append({
"groupName": name,
"setName": obs
})
obj_file_def = {
"type": dt.CELL_SETS.value,
"fileType": ft.ANNDATA_CELL_SETS_ZARR.value,
"url": self.get_zarr_url(base_url, dataset_uid, obj_i),
"options": options
}
if self._request_init is not None:
obj_file_def['requestInit'] = self._request_init
return obj_file_def
return None
return get_cell_sets
def make_expression_matrix_file_def_creator(self, dataset_uid, obj_i):
def get_expression_matrix(base_url):
options = {}
if self._expression_matrix is not None:
options["matrix"] = self._expression_matrix
if self._gene_var_filter is not None:
options["geneFilter"] = self._gene_var_filter
if self._matrix_gene_var_filter is not None:
options["matrixGeneFilter"] = self._matrix_gene_var_filter
obj_file_def = {
"type": dt.EXPRESSION_MATRIX.value,
"fileType": ft.ANNDATA_EXPRESSION_MATRIX_ZARR.value,
"url": self.get_zarr_url(base_url, dataset_uid, obj_i),
"options": options
}
if self._request_init is not None:
obj_file_def['requestInit'] = self._request_init
return obj_file_def
return None
return get_expression_matrix
def auto_view_config(self, vc):
dataset = vc.add_dataset().add_object(self)
mapping_name = self._mappings_obsm_names[0] if (self._mappings_obsm_names is not None) else self._mappings_obsm[0].split('/')[-1]
scatterplot = vc.add_view(dataset, cm.SCATTERPLOT, mapping=mapping_name)
cell_sets = vc.add_view(dataset, cm.CELL_SETS)
genes = vc.add_view(dataset, cm.GENES)
heatmap = vc.add_view(dataset, cm.HEATMAP)
if self._spatial_polygon_obsm is not None or self._spatial_centroid_obsm is not None:
spatial = vc.add_view(dataset, cm.SPATIAL)
vc.layout((scatterplot | spatial) / (heatmap | (cell_sets / genes)))
else:
vc.layout((scatterplot | (cell_sets / genes)) / heatmap)
class SnapWrapper(AbstractWrapper):
# The Snap file is difficult to work with.
# For now we can use the processed cell-by-bin MTX file
# However, the HuBMAP pipeline currently computes this with resolution 5000
# TODO: Make a PR to sc-atac-seq-pipeline to output this at a higher resolution (e.g. 200)
# https://github.com/hubmapconsortium/sc-atac-seq-pipeline/blob/develop/bin/snapAnalysis.R#L93
def __init__(self, in_mtx, in_barcodes_df, in_bins_df, in_clusters_df, starting_resolution=5000, **kwargs):
super().__init__(**kwargs)
self.in_mtx = in_mtx # scipy.sparse.coo.coo_matrix (filtered_cell_by_bin.mtx)
self.in_barcodes_df = in_barcodes_df # pandas dataframe (barcodes.txt)
self.in_bins_df = in_bins_df # pandas dataframe (bins.txt)
self.in_clusters_df = in_clusters_df # pandas dataframe (umap_coords_clusters.csv)
self.zarr_folder = 'profiles.zarr'
self.starting_resolution = starting_resolution
# Convert to dense matrix if sparse.
if type(in_mtx) == coo_matrix:
self.in_mtx = in_mtx.toarray()
def convert_and_save(self, dataset_uid, obj_i):
super().convert_and_save(dataset_uid, obj_i)
out_dir = self._get_out_dir(dataset_uid, obj_i)
zarr_filepath = join(out_dir, self.zarr_folder)
self.create_genomic_multivec_zarr(zarr_filepath)
with open(join(out_dir, 'cell-sets'), 'w') as f:
f.write(json.dumps(self.create_cell_sets_json()))
with open(join(out_dir, 'cells'), 'w') as f:
f.write(json.dumps(self.create_cells_json()))
cells_file_creator = self.make_cells_file_def_creator(dataset_uid, obj_i)
cell_sets_file_creator = self.make_cell_sets_file_def_creator(dataset_uid, obj_i)
genomic_profiles_file_creator = self.make_genomic_profiles_file_def_creator(dataset_uid, obj_i)
self.file_def_creators += [cells_file_creator, cell_sets_file_creator, genomic_profiles_file_creator]
self.routes += self.get_out_dir_route(dataset_uid, obj_i)
def create_genomic_multivec_zarr(self, zarr_filepath):
in_mtx = self.in_mtx
in_clusters_df = self.in_clusters_df
in_barcodes_df = self.in_barcodes_df
in_bins_df = self.in_bins_df
starting_resolution = self.starting_resolution
# The bin datafram consists of one column like chrName:binStart-binEnd
def convert_bin_name_to_chr_name(bin_name):
try:
return bin_name[:bin_name.index(':')]
except ValueError:
return np.nan
def convert_bin_name_to_chr_start(bin_name):
try:
return int(bin_name[bin_name.index(':')+1:bin_name.index('-')])
except ValueError:
return np.nan
def convert_bin_name_to_chr_end(bin_name):
try:
return int(bin_name[bin_name.index('-')+1:])
except ValueError:
return np.nan
# The genome assembly is GRCh38 but the chromosome names in the bin names do not start with the "chr" prefix.
# This is incompatible with the chromosome names from `negspy`, so we need to append the prefix.
in_bins_df[0] = in_bins_df[0].apply(lambda x: "chr" + x)
in_bins_df["chr_name"] = in_bins_df[0].apply(convert_bin_name_to_chr_name)
in_bins_df["chr_start"] = in_bins_df[0].apply(convert_bin_name_to_chr_start)
in_bins_df["chr_end"] = in_bins_df[0].apply(convert_bin_name_to_chr_end)
# Drop any rows that had incorrect bin strings (missing a chromosome name, bin start, or bin end value).
in_bins_df = in_bins_df.dropna(subset=["chr_name", "chr_start", "chr_end"]).copy()
# Ensure that the columns have the expect types.
in_bins_df["chr_name"] = in_bins_df["chr_name"].astype(str)
in_bins_df["chr_start"] = in_bins_df["chr_start"].astype(int)
in_bins_df["chr_end"] = in_bins_df["chr_end"].astype(int)
# Create the Zarr store for the outputs.
out_f = zarr.open(zarr_filepath, mode='w')
# Get a list of clusters.
in_clusters_df["cluster"] = in_clusters_df["cluster"].astype(str)
cluster_ids = in_clusters_df["cluster"].unique().tolist()
cluster_ids.sort(key=int)
cluster_paths = [ [ "Clusters", cluster_id ] for cluster_id in cluster_ids ]
# "SnapTools performs quantification using a specified aligner, and HuBMAP has standardized on BWA with the GRCh38 reference genome"
# Reference: https://github.com/hubmapconsortium/sc-atac-seq-pipeline/blob/bb023f95ca3330128bfef41cc719ffcb2ee6a190/README.md
genomic_profiles = GenomicProfiles(out_f, profile_paths=cluster_paths, assembly='hg38', starting_resolution=starting_resolution)
chrom_name_to_length = genomic_profiles.chrom_name_to_length
# Create each chromosome dataset.
for chr_name, chr_len in chrom_name_to_length.items():
# The bins dataframe frustratingly does not contain every bin.
# We need to figure out which bins are missing.
# We want to check for missing bins in each chromosome separately,
# otherwise too much memory is used during the join step.
chr_bins_in_df = in_bins_df.loc[in_bins_df["chr_name"] == chr_name]
if chr_bins_in_df.shape[0] == 0:
# No processing or output is necessary if there is no data for this chromosome.
# Continue on through all resolutions of this chromosome to the next chromosome.
continue
# Determine the indices of the matrix at which the bins for this chromosome start and end.
chr_bin_i_start = int(chr_bins_in_df.head(1).iloc[0].name)
chr_bin_i_end = int(chr_bins_in_df.tail(1).iloc[0].name) + 1
# Extract the part of the matrix corresponding to the current chromosome.
chr_mtx = in_mtx[:,chr_bin_i_start:chr_bin_i_end]
# Create a list of the "ground truth" bins (all bins from position 0 to the end of the chromosome).
# We will join the input bins onto this dataframe to determine which bins are missing.
chr_bins_gt_df = pd.DataFrame()
chr_bins_gt_df["chr_start"] = np.arange(0, math.ceil(chr_len/starting_resolution)) * starting_resolution
chr_bins_gt_df["chr_end"] = chr_bins_gt_df["chr_start"] + starting_resolution
chr_bins_gt_df["chr_start"] = chr_bins_gt_df["chr_start"] + 1
chr_bins_gt_df["chr_start"] = chr_bins_gt_df["chr_start"].astype(int)
chr_bins_gt_df["chr_end"] = chr_bins_gt_df["chr_end"].astype(int)
chr_bins_gt_df["chr_name"] = chr_name
chr_bins_gt_df[0] = chr_bins_gt_df.apply(lambda r: f"{r['chr_name']}:{r['chr_start']}-{r['chr_end']}", axis='columns')
# We will add a new column "i", which should match the _old_ index, so that we will be able join with the data matrix on the original indices.
# For the new rows, we will add values for the "i" column that are greater than any of the original indices,
# to prevent any joining with the incoming data matrix onto these bins for which the data is missing.
chr_bins_in_df = chr_bins_in_df.reset_index(drop=True)
chr_bins_in_df["i"] = chr_bins_in_df.index.values
chr_bins_gt_df["i"] = chr_bins_gt_df.index.values + (in_mtx.shape[1] + 1)
# Set the full bin string column as the index of both data frames.
chr_bins_gt_df = chr_bins_gt_df.set_index(0)
chr_bins_in_df = chr_bins_in_df.set_index(0)
# Join the input bin subset dataframe right onto the full bin ground truth dataframe.
chr_bins_in_join_df = chr_bins_in_df.join(chr_bins_gt_df, how='right', lsuffix="", rsuffix="_gt")
# The bins which were not present in the input will have NaN values in the "i" column.
# For these rows, we replace the NaN values with the much higher "i_gt" values which will not match to any index of the data matrix.
chr_bins_in_join_df["i"] = chr_bins_in_join_df.apply(lambda r: r['i'] if | pd.notna(r['i']) | pandas.notna |
# coding: utf-8
# In[9]:
import pandas as pd
import random
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import plotly
import plotly.graph_objs as go
import numpy as np
import datetime
import calendar as cd
import seaborn as sns
sns.set(style="whitegrid")
import warnings
warnings.filterwarnings('ignore')
# In[10]:
twitter= pd.read_csv("./datasets/twitter.csv")
twitter['Hashtags'] = twitter.Hashtags.str.lower()
twitter.drop_duplicates(inplace=True)
twitter.reset_index(drop=True,inplace=True)
# In[11]:
twitter.describe()
# In[12]:
regions = pd.DataFrame({'Regions':pd.unique(twitter.Regions)})
regions=regions[regions.Regions!='United States']
regions_2 = random.sample(list(regions.Regions),2)
print("Regions:",regions_2)
# In[13]:
#Wordcloud of HashTags for a particular region
for reg in regions_2:
reg_data = twitter[twitter['Regions']==reg]
print(reg)
grp_tags = pd.DataFrame({'freq' : reg_data.groupby(['Hashtags']).size()}).sort_values(by=['freq'],ascending=False).reset_index()
top_50_tags=grp_tags.head(50)
#top_10_tags['Hashtags']=pd.DataFrame(tag_list)
print(top_50_tags.head(10))
d = {}
for tag, freq in grp_tags.values:
d[tag] = freq
wordcloud = WordCloud(
stopwords=STOPWORDS,
background_color='white',
width=4000,
height=3500
)
wordcloud.generate_from_frequencies(frequencies=d)
plt.figure(figsize = (10,10))
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
print("===========")
# In[14]:
grp_data= pd.DataFrame({'freq' : twitter.groupby(['Date','Hashtags']).size()}).sort_values(by=['Date'],ascending=True).reset_index()
grp_entire_data= pd.DataFrame({'freq' : twitter.groupby(['Regions','Date','Hashtags']).size()}).sort_values(by=['Date'],ascending=True).reset_index()
unique_tags = list(pd.unique(grp_data.Hashtags))
rndm5_tags=[]
while True:
tag = random.sample(unique_tags,1)[0]
x=grp_data[grp_data['Hashtags']==tag]
if len(rndm5_tags)==5:
break
if(len(x)<30):
continue
elif (tag not in rndm5_tags):
rndm5_tags.append(tag)
print("Hashtags: ",rndm5_tags)
month_wise_count=[]
for tag,k in zip(rndm5_tags,range(1,6)):
x=grp_entire_data[grp_entire_data['Hashtags']==tag]
for i in x.Date:
date=datetime.datetime.strptime(i, "%Y-%m-%d")
month=cd.month_name[date.month]
x.loc[x.Date==i,'Date'] = month
x.rename(columns={'Date':'Month'},inplace=True)
month_wise_count.append(x)
# In[15]:
#Bar plot of hashtags
month_wise_cnt=[]
for df in month_wise_count:
df.drop_duplicates(inplace=True)
month_wise_cnt.append(pd.DataFrame(df.groupby(['Month'],sort=False)['freq'].sum()).reset_index())
months = ['January','February','March','April','May','June','July','August','September','October','November','December']
for i in months:
for j in range(len(month_wise_cnt)):
if i not in list(month_wise_cnt[j].Month):
ind = months.index(i)
line = | pd.DataFrame({"Month": i, "freq": 0}, index=[ind]) | pandas.DataFrame |
"""Get the log of the simulation objects in a pandas dataframe."""
import pandas as pd
def get_log_dataframe(simulation_object, activities=[]):
"""Get the log of the simulation objects in a pandas dataframe."""
id_map = {act.id: act.name for act in activities}
df = (
pd.DataFrame(simulation_object.log)
.sort_values(by=["Timestamp"])
.sort_values(by=["Timestamp"])
)
return pd.concat(
[
(
df.filter(items=["ActivityID"])
.rename(columns={"ActivityID": "Activity"})
.replace(id_map)
),
| pd.DataFrame(simulation_object.log) | pandas.DataFrame |
import warnings
import numpy as np
import datetime as dt
import os
import json
import pandas as pd
from datetimerange import DateTimeRange
import dateparser
OPERAND_MAPPING_DICT = {
">": 5,
">=": 4,
"=": 3,
"<=": 2,
"<": 1
}
def check_valid_signal(x):
"""Check whether signal is valid, i.e. an array_like numeric, or raise errors.
Parameters
----------
x :
array_like, array of signal
Returns
-------
"""
if isinstance(x, dict) or isinstance(x, tuple):
raise ValueError("Expected array_like input, instead found {"
"0}:".format(type(x)))
if len(x) == 0:
raise ValueError("Empty signal")
types = []
for i in range(len(x)):
types.append(str(type(x[i])))
type_unique = np.unique(np.array(types))
if len(type_unique) != 1 and (type_unique[0].find("int") != -1 or
type_unique[0].find("float") != -1):
raise ValueError("Invalid signal: Expect numeric array, instead found "
"array with types {0}: ".format(type_unique))
if type_unique[0].find("int") == -1 and type_unique[0].find("float") == -1:
raise ValueError("Invalid signal: Expect numeric array, instead found "
"array with types {0}: ".format(type_unique))
return True
def calculate_sampling_rate(timestamps):
"""
Parameters
----------
x : array_like of timestamps, float (unit second)
Returns
-------
float : sampling rate
"""
if isinstance(timestamps[0], float):
timestamps_second = timestamps
else:
try:
v_parse_datetime = np.vectorize(parse_datetime)
timestamps = v_parse_datetime(timestamps)
timestamps_second = []
timestamps_second.append(0)
for i in range(1, len(timestamps)):
timestamps_second.append((timestamps[i] - timestamps[
i - 1]).total_seconds())
except Exception:
sampling_rate = None
return sampling_rate
steps = np.diff(timestamps_second)
sampling_rate = round(1 / np.min(steps[steps != 0]))
return sampling_rate
def generate_timestamp(start_datetime, sampling_rate, signal_length):
"""
Parameters
----------
start_datetime :
sampling_rate : float
signal_length : int
Returns
-------
list : list of timestamps with length equal to signal_length.
"""
number_of_seconds = (signal_length - 1) / sampling_rate
if start_datetime is None:
start_datetime = dt.datetime.now()
end_datetime = start_datetime + dt.timedelta(seconds=number_of_seconds)
time_range = DateTimeRange(start_datetime, end_datetime)
timestamps = []
for value in time_range.range(dt.timedelta(seconds=1 / sampling_rate)):
timestamps.append(value)
return timestamps
def parse_datetime(string, type='datetime'):
"""
A simple dateparser that detects common datetime formats
Parameters
----------
string : str
a date string in format as denoted below.
Returns
-------
datetime.datetime
datetime object of a time.
"""
# some common formats.
date_formats = ['%Y-%m-%d',
'%d-%m-%Y',
'%d.%m.%Y',
'%Y.%m.%d',
'%d %b %Y',
'%Y/%m/%d',
'%d/%m/%Y']
datime_formats = ['%Y-%m-%d %H:%M:%S.%f',
'%d-%m-%Y %H:%M:%S.%f',
'%d.%m.%Y %H:%M:%S.%f',
'%Y.%m.%d %H:%M:%S.%f',
'%d %b %Y %H:%M:%S.%f',
'%Y/%m/%d %H:%M:%S.%f',
'%d/%m/%Y %H:%M:%S.%f',
'%Y-%m-%d %I:%M:%S.%f',
'%d-%m-%Y %I:%M:%S.%f',
'%d.%m.%Y %I:%M:%S.%f',
'%Y.%m.%d %I:%M:%S.%f',
'%d %b %Y %I:%M:%S.%f',
'%Y/%m/%d %I:%M:%S.%f',
'%d/%m/%Y %I:%M:%S.%f']
if type == 'date':
formats = date_formats
if type == 'datetime':
formats = datime_formats
for f in formats:
try:
return dt.datetime.strptime(string, f)
except:
pass
try:
return dateparser.parse(string)
except:
raise ValueError('Datetime string must be of standard Python format '
'(https://docs.python.org/3/library/time.html), '
'e.g., `%d-%m-%Y`, eg. `24-01-2020`')
def get_moving_average(q, w):
q_padded = np.pad(q, (w // 2, w - 1 - w // 2), mode='edge')
convole = np.convolve(q_padded, np.ones(w) / w, 'valid')
return convole
def parse_rule(name, source):
assert os.path.isfile(source) is True, 'Source file not found'
with open(source) as json_file:
all = json.load(json_file)
try:
sqi = all[name]
except:
raise Exception("SQI {0} not found".format(name))
rule_def, boundaries, label_list = update_rule(sqi['def'],
is_update=False)
return rule_def, \
boundaries, \
label_list
def update_rule(rule_def, threshold_list=[], is_update=True):
if rule_def is None or is_update:
all_rules = []
else:
all_rules = list(np.copy(rule_def))
for threshold in threshold_list:
all_rules.append(threshold)
df = sort_rule(all_rules)
df = decompose_operand(df.to_dict('records'))
boundaries = np.sort(df["value"].unique())
inteveral_label_list = get_inteveral_label_list(df, boundaries)
value_label_list = get_value_label_list(df, boundaries, inteveral_label_list)
label_list = []
for i in range(len(value_label_list)):
label_list.append(inteveral_label_list[i])
label_list.append(value_label_list[i])
label_list.append(inteveral_label_list[-1])
return all_rules, boundaries, label_list
def sort_rule(rule_def):
df = | pd.DataFrame(rule_def) | pandas.DataFrame |
""""""
"""
Copyright (c) 2021 <NAME> as part of Airlab Amsterdam
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import torch
import torch.utils.data as torchdata
import pandas as pd
#%% Training class
class timeseries_dataset():
def __init__(self, name, dim_inputseqlen, dim_outputseqlen, dim_maxseqlen):
self.name = name
self.dim_inputseqlen = dim_inputseqlen
self.dim_outputseqlen = dim_outputseqlen
self.dim_maxseqlen = dim_maxseqlen
def load(self, mode):
if self.name == 'uci_electricity':
output = uci_electricity(self.dim_inputseqlen, self.dim_outputseqlen, self.dim_maxseqlen, mode, self.name)
if self.name == 'uci_traffic':
output = uci_traffic(self.dim_inputseqlen, self.dim_outputseqlen, self.dim_maxseqlen, mode, self.name)
if self.name == 'kaggle_webtraffic':
output = kaggle_webtraffic(self.dim_inputseqlen, self.dim_outputseqlen, self.dim_maxseqlen, mode, self.name)
if self.name == 'kaggle_favorita':
output = kaggle_favorita(self.dim_inputseqlen, self.dim_outputseqlen, self.dim_maxseqlen, mode, self.name)
if self.name == 'kaggle_m5':
output = kaggle_m5(self.dim_inputseqlen, self.dim_outputseqlen, self.dim_maxseqlen, mode, self.name)
return output
#%% UCI - Electricity
# Source: https://archive.ics.uci.edu/ml/datasets/ElectricityLoadDiagrams20112014
class uci_electricity(torchdata.Dataset):
def __init__(self, dim_inputseqlen, dim_outputseqlen, dim_maxseqlen, mode, name):
"""
Load UCI Electricity dataset in format [samples, seqlen, features]
"""
self.dim_inputseqlen = dim_inputseqlen
self.dim_outputseqlen = dim_outputseqlen
self.window = dim_inputseqlen + dim_outputseqlen
self.dim_maxseqlen = dim_maxseqlen
self.mode = mode
self.p_train = 0.8
self.p_validate = 0.1
self.name = name
self.X, self.Y = self.get_data()
def __len__(self):
return len(self.index)
def __getitem__(self, idx):
x = self.X[self.index[idx, 0], self.index[idx, 1]]
y = self.Y[self.index[idx, 0], self.index[idx, 1]]
return x, y
def get_data(self):
# Read data from source
df = | pd.read_csv('data/uci_electricity/LD2011_2014.txt', sep = ';', parse_dates=[0], infer_datetime_format=True, dtype='float32', decimal=',', index_col=[0]) | pandas.read_csv |
import numpy as np
import pandas as pd
import scipy
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
sample = pd.read_csv('/home/vitorbootz/research/aux_files/abundancias_sample.csv')
control = pd.read_csv('/home/vitorbootz/research/aux_files/abundancias_control.csv')
sdss = pd.read_csv('/home/vitorbootz/research/aux_files/abundancias_sdss.csv')
fig, ax = plt.subplots()
ax.grid(axis='y', alpha=0.8)
control.OH.plot.hist(density=True, ax=ax, histtype='step', color='#EBA592', lw=3, fill=True, alpha=0.9, bins=5, edgecolor='#EBA592', label='LCGs isoladas')
sample.OH.plot.hist(density=True, ax=ax, histtype='step', color='#429E83', linewidth=3, hatch='/', alpha=0.9, bins=5, label='LCGs em grupos')
ax1 = sns.kdeplot(control.OH, color='#EB5E5C', label='_nolegend_')
ax2 = sns.kdeplot(sample.OH, color='#429E83', shade=False, ls='--', label='_nolegend_')
stats, pvalue = stats.ks_2samp(control.OH, sample.OH, mode = "asymp")
m1 = ax1.axvline(control.OH.median(), c='#EB5E5C', ls='--', lw=1)
m2 = ax2.axvline(sample.OH.median(), c='#429E83', ls='--', lw=1)
ax.text(0.65,0.9, 'KS '+r'$\it{p}$'+'-value = ' + str(round(pvalue,2)), fontsize=12, transform=ax.transAxes)
ax.set_xlabel('12+log(O/H)', fontsize=15)
ax.set_ylabel('Densidade', fontsize=15)
ax.legend(loc='upper left')
fig.savefig('/home/vitorbootz/research/TCC_images/abundancia_oxigenio/hist_abundancia_LCGs_sample_control.pdf', format='pdf', bbox_inches='tight')
sample = pd.read_csv('/home/vitorbootz/research/aux_files/abundancias_sample.csv')
control = pd.read_csv('/home/vitorbootz/research/aux_files/abundancias_control.csv')
sdss = pd.read_csv('/home/vitorbootz/research/aux_files/abundancias_sdss.csv')
sample_gemini = | pd.read_csv('/home/vitorbootz/research/aux_files/abundancias_sample_lcgs_gemini.csv') | pandas.read_csv |
""" Data collection pipeline """
import logging
from concurrent.futures.thread import ThreadPoolExecutor
from io import BytesIO
from pathlib import Path
from typing import List, Union, Optional, Tuple
from zipfile import ZipFile, BadZipFile
import pandas as pd
from requests.exceptions import InvalidURL
from wetterdienst.dwd.observations.fileindex import (
create_file_list_for_climate_observations,
)
from wetterdienst.util.cache import payload_cache_five_minutes
from wetterdienst.dwd.util import (
check_parameters,
parse_enumeration_from_template,
create_humanized_column_names_mapping,
)
from wetterdienst.dwd.metadata.column_names import DWDMetaColumns
from wetterdienst.dwd.metadata.parameter import Parameter
from wetterdienst.dwd.metadata.period_type import PeriodType
from wetterdienst import TimeResolution
from wetterdienst.dwd.metadata.constants import DWD_FOLDER_MAIN
from wetterdienst.exceptions import (
InvalidParameterCombination,
FailedDownload,
ProductFileNotFound,
)
from wetterdienst.dwd.observations.parser import (
parse_climate_observations_data,
)
from wetterdienst.dwd.network import download_file_from_dwd
from wetterdienst.dwd.observations.store import (
store_climate_observations,
restore_climate_observations,
_build_local_store_key,
)
log = logging.getLogger(__name__)
POSSIBLE_ID_VARS = (
DWDMetaColumns.STATION_ID.value,
DWDMetaColumns.DATE.value,
DWDMetaColumns.FROM_DATE.value,
DWDMetaColumns.TO_DATE.value,
)
POSSIBLE_DATE_VARS = (
DWDMetaColumns.DATE.value,
DWDMetaColumns.FROM_DATE.value,
DWDMetaColumns.TO_DATE.value,
)
def collect_climate_observations_data(
station_ids: List[int],
parameter: Union[Parameter, str],
time_resolution: Union[TimeResolution, str],
period_type: Union[PeriodType, str],
folder: Union[str, Path] = DWD_FOLDER_MAIN,
prefer_local: bool = False,
write_file: bool = False,
tidy_data: bool = True,
humanize_column_names: bool = False,
run_download_only: bool = False,
) -> Optional[pd.DataFrame]:
"""
Function that organizes the complete pipeline of data collection, either
from the internet or from a local file. It therefor goes through every given
station id and, given by the parameters, either tries to get data from local
store and/or if fails tries to get data from the internet. Finally if wanted
it will try to store the data in a hdf file.
:param station_ids: station ids that are trying to be loaded
:param parameter: Parameter as enumeration
:param time_resolution: Time resolution as enumeration
:param period_type: Period type as enumeration
:param folder: Folder for local file interaction
:param prefer_local: Local data should be preferred
:param write_file: Write data to local storage
:param tidy_data: Tidy up data so that there's only one set of values
for a datetime in a row, e.g. station_id, parameter,
element, datetime, value, quality.
:param humanize_column_names: Yield column names for human consumption
:param run_download_only: Run only the download and storing process
:return: All the data given by the station ids.
"""
parameter = parse_enumeration_from_template(parameter, Parameter)
time_resolution = parse_enumeration_from_template(time_resolution, TimeResolution)
period_type = parse_enumeration_from_template(period_type, PeriodType)
if not check_parameters(parameter, time_resolution, period_type):
raise InvalidParameterCombination(
f"The combination of {parameter.value}, {time_resolution.value}, "
f"{period_type.value} is invalid."
)
# List for collected pandas DataFrames per each station id
data = []
for station_id in set(station_ids):
# Just for logging.
request_string = _build_local_store_key(
station_id, parameter, time_resolution, period_type
)
if prefer_local:
# Try restoring data
station_data = restore_climate_observations(
station_id, parameter, time_resolution, period_type, folder
)
# When successful append data and continue with next iteration
if not station_data.empty:
log.info(f"Data for {request_string} restored from local.")
data.append(station_data)
continue
log.info(f"Acquiring observations data for {request_string}")
remote_files = create_file_list_for_climate_observations(
[station_id], parameter, time_resolution, period_type
)
if len(remote_files) == 0:
log.info(f"No files found for {request_string}. Station will be skipped.")
continue
filenames_and_files = download_climate_observations_data_parallel(remote_files)
station_data = parse_climate_observations_data(
filenames_and_files, parameter, time_resolution
)
if write_file:
store_climate_observations(
station_data,
station_id,
parameter,
time_resolution,
period_type,
folder,
)
data.append(station_data)
if run_download_only:
return None
try:
data = pd.concat(data)
except ValueError:
return pd.DataFrame()
if tidy_data:
data = _tidy_up_data(data, parameter)
# Assign meaningful column names (humanized).
if humanize_column_names:
hcnm = create_humanized_column_names_mapping(time_resolution, parameter)
if tidy_data:
data[DWDMetaColumns.ELEMENT.value] = data[
DWDMetaColumns.ELEMENT.value
].apply(lambda x: hcnm[x])
else:
data = data.rename(columns=hcnm)
return data
def _tidy_up_data(df: pd.DataFrame, parameter: Parameter) -> pd.DataFrame:
"""
Function to create a tidy DataFrame by reshaping it, putting quality in a
separate column and setting an extra column with the parameter.
:param df: DataFrame to be tidied
:param parameter: the parameter that is written in a column to identify a set of
different parameters amongst each other
:return: The tidied DataFrame
"""
id_vars = []
date_vars = []
# Add id columns based on metadata columns
for column in POSSIBLE_ID_VARS:
if column in df:
id_vars.append(column)
if column in POSSIBLE_DATE_VARS:
date_vars.append(column)
# Extract quality
# Set empty quality for first columns until first QN column
quality = pd.Series(dtype=int)
column_quality = | pd.Series(dtype=int) | pandas.Series |
import pandas as pd
from pandas.plotting import register_matplotlib_converters
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import os
import datetime
class PolarRRI:
def __init__(self, file_path=None):
if file_path is None:
self.rri_df = pd.DataFrame()
return
file_name = os.path.basename(file_path)
src_df = pd.read_csv(file_path, header=None, sep=' ')
src_df = src_df.rename(columns={0:'time', 1:'rri'})
src_df['rri'] = src_df['rri'] * 1000
file_date, file_time = file_name.split('.')[0].split('_')
_, yy, MM ,dd = file_date.split('-')
hh, mm ,ss = file_time.split('-')
start_datetime = datetime.datetime(int(yy), int(MM), int(dd), int(hh), int(mm), int(ss), 0, tzinfo=None)
func = lambda x: start_datetime + datetime.timedelta(seconds=x[0])
src_df['datetime'] = src_df.apply(func, axis=1)
self.rri_df = src_df.set_index(['datetime'])
def plot_rri(self):
| register_matplotlib_converters() | pandas.plotting.register_matplotlib_converters |
# -*- coding: utf-8 -*-
"""
Functions for cleaning mdredze Sandy Twitter dataset.
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from statsmodels.graphics.tsaplots import plot_acf
from twitterinfrastructure.tools import cross_corr, output, query
def create_timeseries_diff(df, col1, col2, zone_col, write_path=None):
"""Creates a dataframe where col1 and col2 columns are replaced by
first differenced time series.
Parameters
----------
df : Dataframe
Dataframe to containing time series data to difference (e.g. from
create_timeseries). Assumes dataframe is multi-indexed by zone_col and
timedelta (in hours).
col1 : str
Name of column containing first time series.
col2 : str
Name of column containing second time series.
zone_col : str
Name of zone column: 'zone_id' (nyiso zone), 'location_id' (taxi
zone), or 'borough' (taxi borough).
write_path : str or None
If str, then write a csv of the time series dataframe to the
specified path. Else, do not write.
Returns
-------
df_diff : dataframe
Notes
-----
"""
# create differenced time series dataframe
df_diff = pd.DataFrame(columns=[zone_col, 'timedelta',
col1, col2])
df_diff.set_index([zone_col, 'timedelta'], inplace=True)
zones = pd.unique(df.index.get_level_values(level=zone_col))
for zone in zones:
s_y1 = df[col1].xs(zone, level=0).dropna()
s_y2 = df[col2].xs(zone, level=0).dropna()
s_y1.index = pd.to_timedelta(s_y1.index.values, unit='h')
s_y2.index = pd.to_timedelta(s_y2.index.values, unit='h')
# difference both timeseries
s_y1_diff = pd.Series(data=np.diff(s_y1), index=s_y1.index.values[0:-1],
name=col1)
s_y2_diff = pd.Series(data=np.diff(s_y2), index=s_y2.index.values[0:-1],
name=col2)
df_zone = pd.concat([s_y1_diff, s_y2_diff], axis=1)
df_zone.index.name = 'timedelta'
df_zone = df_zone.reset_index()
df_zone[zone_col] = zone
df_zone = df_zone.set_index([zone_col, 'timedelta'])
# add zone to differenced dataframe
df_diff = df_diff.append(df_zone, ignore_index=False, sort='False')
# save to csv
if write_path:
df_csv = df_diff.reset_index()
df_csv['timedelta'] = df_csv['timedelta'].astype('timedelta64[h]')
df_csv.to_csv(write_path, index=False)
return df_diff
def create_timeseries_shift(df, df_max_rho, col1, col2, zone_col,
write_path=None):
"""Creates a dataframe where the 2nd time series column is time-shifted.
Parameters
----------
df : Dataframe
Dataframe to containing time series data to shift (e.g. from
create_timeseries). Assumes dataframe is multi-indexed by zone_col and
timedelta (in hours).
df_max_rho : Dataframe
Dataframe containing desired shifts for col2 in a 'max-lag' column,
indexed by zone_col.
col1 : str
Name of column containing first time series (copied).
col2 : str
Name of column containing second time series. This is the shifted
time series, where col2_shifted = col2 + shift.
zone_col : str
Name of zone column: 'zone_id' (nyiso zone), 'location_id' (taxi
zone), or 'borough' (taxi borough).
write_path : str or None
If str, then write a csv of the time series dataframe to the
specified path. Else, do not write.
Returns
-------
df_shift : dataframe
Notes
-----
"""
# create shifted time series dataframe
df_shift = pd.DataFrame(columns=[zone_col, 'timedelta', col1, col2])
df_shift.set_index([zone_col, 'timedelta'], inplace=True)
for zone in df_max_rho.index.values:
if not np.isnan(df_max_rho.loc[zone, 'max-rho']):
s_y1 = df[col1].xs(zone, level=0).dropna()
s_y2 = df[col2].xs(zone, level=0).dropna()
s_y1.index = pd.to_timedelta(s_y1.index.values, unit='h')
s_y2.index = pd.to_timedelta(s_y2.index.values, unit='h')
# shift 2nd time series
shift = df_max_rho.loc[zone, 'max-shift']
s_y2_shift = s_y2.shift(1, freq=pd.Timedelta(shift, unit='h'))
df_zone = pd.concat([s_y1, s_y2_shift], axis=1)
df_zone.index.name = 'timedelta'
df_zone = df_zone.reset_index()
df_zone[zone_col] = zone
df_zone = df_zone.set_index([zone_col, 'timedelta'])
# add zone to shifted dataframe
df_shift = df_shift.append(df_zone, ignore_index=False,
sort='False')
# save to csv
if write_path:
df_csv = df_shift.reset_index()
df_csv['timedelta'] = df_csv['timedelta'].astype('timedelta64[h]')
df_csv.to_csv(write_path, index=False)
return df_shift
def create_timeseries(df, zone_col, min_count, write_path=None, verbose=0):
"""Creates a time series dataframe where each column of df is
independently linearly interpolated over the total range of timedeltas of
each zone. Only time series with at least min_count data points are
included. Assumes the dataframe is indexed by a zone column (zone_col)
and a timedelta column (e.g. using index_timedelta).
Parameters
----------
df : Dataframe
Dataframe to calculate time series from.
zone_col : str
Name of zone column: 'zone_id' (nyiso zone), 'location_id' (taxi
zone), or 'borough' (taxi borough).
min_count : int
Minimum number of data points needed to convert to a time series.
write_path : str or None
If str, then write a csv of the time series dataframe to the
specified path. Else, do not write.
verbose : int
Defines verbosity for output statements.
Returns
-------
df_ts : dataframe
Notes
-----
"""
# loop through zones
df_ts = pd.DataFrame()
skipped = []
zones = pd.unique(df.index.get_level_values(zone_col))
for zone in zones:
df_zone = df.xs(zone, level=0)
# loop through columns (i.e. data to convert to time series)
y_interps = []
cols = df_zone.columns.values
for col in cols:
s = df_zone[col].dropna()
if s.count() < min_count:
skipped.append((zone, col))
else:
timedeltas = range(s.index.astype('timedelta64[h]').min(),
s.index.astype('timedelta64[h]').max() + 1)
y_interp = pd.Series(data=np.interp(
timedeltas, s.index.astype('timedelta64[h]'), s.values),
index=timedeltas, name=col)
y_interps.append(y_interp)
# add interpolated data to dataframe
if y_interps:
df_temp = pd.concat(objs=y_interps, axis=1, join='outer')
df_temp = df_temp.set_index(
pd.to_timedelta(df_temp.index.values, unit='h'))
df_temp[zone_col] = zone
df_temp.set_index(zone_col, append=True, inplace=True)
df_temp.index.names = ['timedelta', zone_col]
df_temp = df_temp.reorder_levels([1, 0])
df_ts = df_ts.append(df_temp, sort=False)
# save to csv
if write_path:
df_csv = df_ts.reset_index()
df_csv['timedelta'] = df_csv['timedelta'].astype('timedelta64[h]')
df_csv.to_csv(write_path, index=False)
if verbose >= 1:
output('skipped zones for having less than {min_count} data points '
'in original column data: {skipped}'.format(skipped=skipped,
min_count=min_count))
return df_ts
def index_timedelta(df, datetime_ref, datetime_col):
"""Indexes a dataframe on a timedelta calculated from datetime_col
relative to datetime_ref.
Parameters
----------
df : Dataframe
Dataframe to reindex on timedelta.
datetime_ref : Timestamp
Reference datetime to calculate timedelta relative to, specified as a
timezone-aware Pandas Timestamp object. Calculates timedelta as
datetime_col - datetime_ref.
e.g. enddate = pd.Timestamp('2012-11-03 00:00:00',
tz='America/New_York')
datetime_col : str
Name of column (or index) containing the datetime data to calculate
timedelta from.
Returns
-------
df : dataframe
Notes
-----
"""
indexes = df.index.names
df = df.reset_index()
# calculate and add timedelta
df['timedelta'] = df['datetimeNY'] - datetime_ref
# df['timedelta'] = [int(td.total_seconds() / 3600) for td
# in df['timedelta']]
# df['timedelta'] = pd.to_timedelta(df['timedelta'], unit='h')
# drop columns and reindex with datetime_col replaced by timedelta
df = df.drop([datetime_col], axis=1)
indexes = ['timedelta' if ind == datetime_col else ind for ind in indexes]
df = df.set_index(indexes)
df = df.sort_index(level=0)
return df
def load_nyctlc_zone(startdate, enddate, trip_type, trip_count_filter,
db_path, verbose=0):
"""Query and clean nyctlc dropoff or pickup data for the specified date
range from a sqlite database, grouped by zone. Assumes the database
contains a standard_zonedropoff_hour_sandy or
standard_zonepickup_hour_sandy table created using
create_standard_zone_hour.
Parameters
----------
startdate : Timestamp
Start date to include tweets from (inclusive), specified as a
timezone-aware Pandas Timestamp object.
E.g. startdate = pd.Timestamp('2012-10-28 00:00:00',
tz='America/New_York')
enddate : Timestamp
End date to include tweets from (exclusive), specified as a
timezone-aware Pandas Timestamp object.
e.g. enddate = pd.Timestamp('2012-11-03 00:00:00',
tz='America/New_York')
trip_type : str
Trip type: 'dropoff' or 'pickup'.
trip_count_filter : int
Minimum number of trips required to load a data point.
db_path : str
Path to sqlite database containing table.
verbose : int
Defines verbosity for output statements.
Returns
-------
df_taxi : dataframe
Notes
-----
Sqlite date queries are inclusive for start and end, datetimes in nyctlc
database are local (i.e. NY timezone).
"""
df_taxi = load_nyctlc_zone_hour(startdate, enddate, trip_type,
trip_count_filter, db_path, verbose=verbose)
# remove index, remove columns, and group by zone
df_taxi = df_taxi.reset_index()
df_taxi = df_taxi.drop(['datetimeNY'], axis=1)
df_taxi = df_taxi.groupby(['location_id']).mean()
if verbose >= 1:
if trip_type == 'dropoff':
output('[min, max] taxi pace and trips mean z-score: [' +
str(np.nanmin(df_taxi['zpace-drop'])) + ', ' +
str(np.nanmax(df_taxi['zpace-drop'])) + '], [' +
str(np.nanmin(df_taxi['ztrips-drop'])) + ', ' +
str(np.nanmax(df_taxi['ztrips-drop'])) + '].')
elif trip_type == 'pickup':
output('[min, max] taxi pace and trips mean z-score: [' +
str(np.nanmin(df_taxi['zpace-pick'])) + ', ' +
str(np.nanmax(df_taxi['zpace-pick'])) + '], [' +
str(np.nanmin(df_taxi['ztrips-pick'])) + ', ' +
str(np.nanmax(df_taxi['ztrips-pick'])) + '].')
return df_taxi
def load_nyctlc_zone_date(startdate, enddate, trip_type, trip_count_filter,
db_path, verbose=0):
"""Query and clean nyctlc dropoff or pickup data for the specified date
range from a sqlite database, grouped by zone and date. Assumes the database
contains a standard_zonedropoff_hour_sandy or
standard_zonepickup_hour_sandy table created using
create_standard_zone_hour.
Parameters
----------
startdate : Timestamp
Start date to include tweets from (inclusive), specified as a
timezone-aware Pandas Timestamp object.
E.g. startdate = pd.Timestamp('2012-10-28 00:00:00',
tz='America/New_York')
enddate : Timestamp
End date to include tweets from (exclusive), specified as a
timezone-aware Pandas Timestamp object.
e.g. enddate = pd.Timestamp('2012-11-03 00:00:00',
tz='America/New_York')
trip_type : str
Trip type: 'dropoff' or 'pickup'.
trip_count_filter : int
Minimum number of trips required to load a data point.
db_path : str
Path to sqlite database containing table.
verbose : int
Defines verbosity for output statements.
Returns
-------
df_taxi : dataframe
Notes
-----
Sqlite date queries are inclusive for start and end, datetimes in nyctlc
database are local (i.e. NY timezone).
"""
df_taxi = load_nyctlc_zone_hour(startdate, enddate, trip_type,
trip_count_filter, db_path, verbose=verbose)
# remove index, adjust datetime to date, and group by zone and date
df_taxi = df_taxi.reset_index()
df_taxi['datetimeNY'] = | pd.to_datetime(df_taxi['datetimeNY']) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 2 09:04:41 2019
@author: michaelek
"""
import io
import numpy as np
import requests
from gistools import vector
from allotools import AlloUsage
from hydrolm import LM
from tethysts import Tethys
from tethysts import utils
import os
import sys
import yaml
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point
from multiprocessing.pool import ThreadPool
import pyproj
try:
import plotly.offline as py
import plotly.graph_objs as go
except:
print('install plotly for plot functions to work')
#####################################
### Parameters
# base_dir = os.path.dirname(os.path.abspath( __file__ ))
base_dir = os.path.realpath(os.path.dirname(__file__))
print(base_dir)
with open(os.path.join(base_dir, 'parameters.yml')) as param2:
param = yaml.safe_load(param2)
# datasets_path = os.path.join(base_dir, 'datasets')
outputs = param['output']
catch_key_base = 'tethys/station_misc/{station_id}/catchment.geojson.zst'
####################################
### Testing
# base_dir = os.path.split(os.path.realpath(os.path.dirname(__file__)))[0]
# with open(os.path.join(base_dir, 'parameters.yml')) as param2:
# param1 = yaml.safe_load(param2)
# flow_remote = param1['remote']['flow']
# usage_remote = param1['remote']['usage']
#
# from_date='2010-07-01'
# from_date=None
# to_date='2020-06-30'
# product_code='quality_controlled_data'
# min_gaugings=10
# output_path=os.path.join(base_dir, 'tests')
# local_tz='Etc/GMT-12'
# station_id=['0bc0762fac7423261610b50f', '0ba603f66f55a19d18cbeb81', '0c6b76f9ff6fcf2e103f5e84', '2ec4a2cfa71dd4811eec25e4', '0d1024b9975b573e515ebd62']
# station_id=['0d1024b9975b573e515ebd62']
# ref=None
#
#
# self = FlowNat(flow_remote, usage_remote, from_date, to_date, product_code, min_gaugings, station_id, ref, output_path)
#
# stns_all = self.stations_all.station_id.unique().tolist().copy()
#
# stns1 = self.process_stations(stns_all)
#
# nat_flow = self.naturalisation()
# wap1 = 'SW/0082'
#
# a1 = AlloUsage(from_date='2015-06-30', to_date='2016-06-30', wap_filter={'wap': [wap1]})
#
# res1 = a1.get_ts(['allo', 'usage'], 'D', ['wap'])
#######################################
### Class
class FlowNat(object):
"""
Class to perform several operations to ultimately naturalise flow data.
Initialise the class with the following parameters.
Parameters
----------
from_date : str
The start date for the flow record.
to_date : str
The end of of the flow record.
min_gaugings : int
The minimum number of gaugings required for the regressions. Default is 8.
rec_data_code : str
Either 'RAW' for the raw telemetered recorder data, or 'Primary' for the quality controlled recorder data. Default is 'Primary'.
input_sites : str, int, list, or None
Flow sites (either recorder or gauging) to be naturalised. If None, then the input_sites need to be defined later. Default is None.
output_path : str or None
Path to save the processed data, or None to not save them.
load_rec : bool
should the REC rivers and catchment GIS layers be loaded in at initiation?
Returns
-------
FlowNat instance
"""
def __init__(self, flow_remote, usage_remote, from_date=None, to_date=None, product_code='quality_controlled_data', min_gaugings=10, station_id=None, ref=None, output_path=None, local_tz='Etc/GMT-12'):
"""
Class to perform several operations to ultimately naturalise flow data.
Initialise the class with the following parameters.
Parameters
----------
from_date : str
The start date for the flow record.
to_date : str
The end of of the flow record.
min_gaugings : int
The minimum number of gaugings required for the regressions. Default is 8.
rec_data_code : str
Either 'RAW' for the raw telemetered recorder data, or 'Primary' for the quality controlled recorder data. Default is 'Primary'.
input_sites : str, int, list, or None
Flow sites (either recorder or gauging) to be naturalised. If None, then the input_sites need to be defined later. Default is None.
output_path : str or None
Path to save the processed data, or None to not save them.
catch_del : str
Defines what should be used for the catchments associated with flow sites. 'rec' will perform a catchment delineation on-the-fly using the REC rivers and catchments GIS layers, 'internal' will use the pre-generated catchments stored in the package, or a path to a shapefile will use a user created catchments layer. The shapefile must at least have a column named ExtSiteID with the flow site numbers associated with the catchment geometry.
Returns
-------
FlowNat instance
"""
setattr(self, 'from_date', from_date)
setattr(self, 'to_date', to_date)
setattr(self, 'min_gaugings', min_gaugings)
setattr(self, 'flow_remote', flow_remote)
setattr(self, 'usage_remote', usage_remote)
setattr(self, 'product_code', product_code)
setattr(self, 'local_tz', local_tz)
# setattr(self, 'rec_data_code', rec_data_code)
# setattr(self, 'ts_server', param['input']['ts_server'])
# setattr(self, 'permit_server', param['input']['permit_server'])
self.save_path(output_path)
stns_summ = self.get_all_flow_stations()
if (isinstance(station_id, list)) or (isinstance(ref, list)):
stns1 = self.process_stations(station_id=station_id, ref=ref)
# summ1 = self.flow_datasets(from_date=from_date, to_date=to_date, min_gaugings=8, rec_data_code=rec_data_code)
# if input_sites is not None:
# input_summ1 = self.process_sites(input_sites)
#
# if not isinstance(catch_del, str):
# raise ValueError('catch_del must be a string')
#
# if catch_del == 'rec':
# self.load_rec()
# elif catch_del == 'internal':
# catch_gdf_all = pd.read_pickle(os.path.join(base_dir, 'datasets', param['input']['catch_del_file']))
# setattr(self, 'catch_gdf_all', catch_gdf_all)
# elif catch_del.endswith('shp'):
# catch_gdf_all = gpd.read_file(catch_del)
# setattr(self, 'catch_gdf_all', catch_gdf_all)
# else:
# raise ValueError('Please read docstrings for options for catch_del argument')
pass
# def flow_datasets_all(self, rec_data_code='Primary'):
# """
#
# """
# ## Get dataset types
# datasets1 = mssql.rd_sql(self.ts_server, param['input']['ts_database'], param['input']['ts_dataset_table'], where_in={'Feature': ['River'], 'MeasurementType': ['Flow'], 'DataCode': ['Primary', 'RAW']})
# man_datasets1 = datasets1[(datasets1['CollectionType'] == 'Manual Field') & (datasets1['DataCode'] == 'Primary')].copy()
# rec_datasets1 = datasets1[(datasets1['CollectionType'] == 'Recorder') & (datasets1['DataCode'] == rec_data_code)].copy()
#
# ## Get ts summaries
# man_summ1 = mssql.rd_sql(self.ts_server, param['input']['ts_database'], param['input']['ts_summ_table'], ['ExtSiteID', 'DatasetTypeID', 'Min', 'Median', 'Mean', 'Max', 'Count', 'FromDate', 'ToDate'], where_in={'DatasetTypeID': man_datasets1['DatasetTypeID'].tolist()}).sort_values('ToDate')
# man_summ2 = man_summ1.drop_duplicates(['ExtSiteID'], keep='last').copy()
# man_summ2['CollectionType'] = 'Manual Field'
#
# rec_summ1 = mssql.rd_sql(self.ts_server, param['input']['ts_database'], param['input']['ts_summ_table'], ['ExtSiteID', 'DatasetTypeID', 'Min', 'Median', 'Mean', 'Max', 'Count', 'FromDate', 'ToDate'], where_in={'DatasetTypeID': rec_datasets1['DatasetTypeID'].tolist()}).sort_values('ToDate')
# rec_summ2 = rec_summ1.drop_duplicates(['ExtSiteID'], keep='last').copy()
# rec_summ2['CollectionType'] = 'Recorder'
#
# ## Combine
# summ2 = pd.concat([man_summ2, rec_summ2], sort=False)
#
# summ2['FromDate'] = pd.to_datetime(summ2['FromDate'])
# summ2['ToDate'] = pd.to_datetime(summ2['ToDate'])
#
# ## Add in site info
# sites1 = mssql.rd_sql(self.ts_server, param['input']['ts_database'], param['input']['sites_table'], ['ExtSiteID', 'NZTMX', 'NZTMY', 'SwazGroupName', 'SwazName'])
#
# summ3 = pd.merge(summ2, sites1, on='ExtSiteID')
#
# ## Assign objects
# setattr(self, 'sites', sites1)
# setattr(self, 'rec_data_code', rec_data_code)
# setattr(self, 'summ_all', summ3)
def get_all_flow_stations(self):
"""
Function to process the flow datasets
Parameters
----------
from_date : str
The start date for the flow record.
to_date : str
The end of of the flow record.
min_gaugings : int
The minimum number of gaugings required for the regressions. Default is 8.
rec_data_code : str
Either 'RAW' for the raw telemetered recorder data, or 'Primary' for the quality controlled recorder data. Default is 'Primary'.
Returns
-------
DataFrame
"""
tethys1 = Tethys([self.flow_remote])
flow_ds = [ds for ds in tethys1.datasets if (ds['parameter'] == 'streamflow') and (ds['product_code'] == self.product_code) and (ds['frequency_interval'] == '24H') and (ds['utc_offset'] == '12H') and (ds['method'] == 'sensor_recording')]
flow_ds.extend([ds for ds in tethys1.datasets if (ds['parameter'] == 'streamflow') and (ds['product_code'] == self.product_code) and (ds['frequency_interval'] == 'T') and (ds['method'] == 'field_activity')])
stns_list = []
for ds in flow_ds:
stns1 = tethys1.get_stations(ds['dataset_id'])
stns_list.extend(stns1)
stns_list2 = [s for s in stns_list if s['stats']['count'] >= self.min_gaugings]
# stns_list2 = stns_list
stns_list3 = [{'dataset_id': s['dataset_id'], 'station_id': s['station_id'], 'ref': s['ref'], 'geometry': Point(s['geometry']['coordinates']), 'min': s['stats']['min'], 'max': s['stats']['max'], 'count': s['stats']['count'], 'from_date': s['time_range']['from_date'], 'to_date': s['time_range']['to_date']} for s in stns_list2]
[s.update({'from_date': s['from_date'] + '+00:00', 'to_date': s['to_date'] + '+00:00'}) for s in stns_list3 if not '+00:00' in s['from_date']]
stns_summ = gpd.GeoDataFrame(pd.DataFrame(stns_list3), geometry='geometry', crs=4326)
stns_summ['from_date'] = pd.to_datetime(stns_summ['from_date']).dt.tz_convert(self.local_tz).dt.tz_localize(None)
stns_summ['to_date'] = pd.to_datetime(stns_summ['to_date']).dt.tz_convert(self.local_tz).dt.tz_localize(None)
# stns_summ['from_date'] = pd.to_datetime(stns_summ['from_date']).dt.tz_localize(None)
# stns_summ['to_date'] = pd.to_datetime(stns_summ['to_date']).dt.tz_localize(None)
if isinstance(self.from_date, str):
from_date1 = pd.Timestamp(self.from_date)
stns_summ = stns_summ[stns_summ['from_date'] <= from_date1]
if isinstance(self.to_date, str):
to_date1 = pd.Timestamp(self.to_date)
stns_summ = stns_summ[stns_summ['to_date'] >= to_date1]
setattr(self, 'stations_all', stns_summ)
setattr(self, '_tethys_flow', tethys1)
setattr(self, 'flow_datasets_all', flow_ds)
return stns_summ
def save_path(self, output_path=None):
"""
"""
if output_path is None:
pass
elif isinstance(output_path, str):
if not os.path.exists(output_path):
os.makedirs(output_path)
setattr(self, 'output_path', output_path)
# output_dict1 = {k: v.split('_{run_date}')[0] for k, v in param['output'].items()}
# file_list = [f for f in os.listdir(output_path) if ('catch_del' in f) and ('.shp' in f)]
def process_stations(self, station_id=None, ref=None):
"""
Function to process the sites.
Parameters
----------
input_sites : str, int, list, or None
Flow sites (either recorder or gauging) to be naturalised. If None, then the input_sites need to be defined later. Default is None.
Returns
-------
DataFrame
"""
## Checks
# if isinstance(input_sites, (str, int)):
# input_sites = [input_sites]
# elif not isinstance(input_sites, list):
# raise ValueError('input_sites must be a str, int, or list')
if (not isinstance(station_id, list)) and (not isinstance(ref, list)):
raise ValueError('station_id and ref must be lists')
## Filter
stns1 = self.stations_all.copy()
bad_stns = []
if isinstance(station_id, list):
stns1 = stns1[stns1['station_id'].isin(station_id)]
[bad_stns.extend([s['ref']]) for i, s in stns1.iterrows() if s['station_id'] not in station_id]
if isinstance(ref, list):
stns1 = stns1[stns1['ref'].isin(ref)]
[bad_stns.extend([s['ref']]) for i, s in stns1.iterrows() if s['ref'] not in ref]
if bad_stns:
print(', '.join(bad_stns) + ' stations are not available for naturalisation')
## Save if required
if hasattr(self, 'output_path'):
run_time = pd.Timestamp.today().strftime('%Y-%m-%dT%H%M')
flow_sites_shp = outputs['flow_sites_shp'].format(run_date=run_time)
save1 = stns1.copy()
save1['from_date'] = save1['from_date'].astype(str)
save1['to_date'] = save1['to_date'].astype(str)
save1.to_file(os.path.join(self.output_path, flow_sites_shp))
## Drop duplicate stations
stns2 = stns1.sort_values('count', ascending=False).drop_duplicates('station_id')
# stns2 = stns1.drop_duplicates('station_id')
setattr(self, 'stations', stns2)
## Filter flow datasets
stn_ds = stns2['dataset_id'].unique()
flow_ds1 = self.flow_datasets_all.copy()
flow_ds2 = [ds for ds in flow_ds1 if ds['dataset_id'] in stn_ds]
setattr(self, 'flow_datasets', flow_ds2)
## Remove existing attributes if they exist
if hasattr(self, 'catch'):
delattr(self, 'catch')
if hasattr(self, 'waps'):
delattr(self, 'waps')
if hasattr(self, 'flow'):
delattr(self, 'flow')
if hasattr(self, 'usage_rate'):
delattr(self, 'usage_rate')
if hasattr(self, 'nat_flow'):
delattr(self, 'nat_flow')
return stns1
# def load_rec(self):
# """
#
# """
#
# if not hasattr(self, 'rec_rivers'):
# try:
# with lzma.open(os.path.join(datasets_path, param['input']['rec_rivers_file'])) as r:
# rec_rivers = pickle.loads(r.read())
# with lzma.open(os.path.join(datasets_path, param['input']['rec_catch_file'])) as r:
# rec_catch = pickle.loads(r.read())
# except:
# print('Downloading rivers and catchments files...')
#
# url1 = 'https://cybele.s3.us-west.stackpathstorage.com/mfe;rec;v2.4;rivers.gpd.pkl.xz'
# r_resp = requests.get(url1)
# with open(os.path.join(datasets_path, param['input']['rec_rivers_file']), 'wb') as r:
# r.write(r_resp.content)
# with lzma.open(os.path.join(datasets_path, param['input']['rec_rivers_file'])) as r:
# rec_rivers = pickle.loads(r.read())
#
# url2 = 'https://cybele.s3.us-west.stackpathstorage.com/mfe;rec;v2.4;catchments.gpd.pkl.xz'
# r_resp = requests.get(url2)
# with open(os.path.join(datasets_path, param['input']['rec_catch_file']), 'wb') as r:
# r.write(r_resp.content)
# with lzma.open(os.path.join(datasets_path, param['input']['rec_catch_file'])) as r:
# rec_catch = pickle.loads(r.read())
#
# rec_rivers.rename(columns={'order': 'ORDER'}, inplace=True)
# setattr(self, 'rec_rivers', rec_rivers)
# setattr(self, 'rec_catch', rec_catch)
#
# pass
@staticmethod
def _get_catchment(inputs):
"""
"""
station_id = inputs['station_id']
bucket = inputs['bucket']
conn_config = inputs['conn_config']
key1 = catch_key_base.format(station_id=station_id)
try:
obj1 = utils.get_object_s3(key1, conn_config, bucket, 'zstd', 0)
b2 = io.BytesIO(obj1)
c1 = gpd.read_file(b2)
except:
c1 = gpd.GeoDataFrame(columns=['id', 'area', 'dataset_id', 'distance', 'nzsegment', 'ref', 'station_id', 'geometry'])
return c1
def get_catchments(self, threads=30):
"""
"""
stns = self.stations.copy()
stn_ids = stns.station_id.unique()
conn_config = self.flow_remote['connection_config']
bucket = self.flow_remote['bucket']
input_list = [{'conn_config': conn_config, 'bucket': bucket, 'station_id': s} for s in stn_ids]
output = ThreadPool(threads).map(self._get_catchment, input_list)
catch1 = pd.concat(output).drop('id', axis=1)
catch1.crs = pyproj.CRS(2193)
catch1 = catch1.to_crs(4326)
## Save if required
if hasattr(self, 'output_path'):
run_time = pd.Timestamp.today().strftime('%Y-%m-%dT%H%M')
catch_del_shp = outputs['catch_del_shp'].format(run_date=run_time)
catch1.to_file(os.path.join(self.output_path, catch_del_shp))
setattr(self, 'catch', catch1)
return catch1
def get_waps(self):
"""
"""
tethys1 = Tethys([self.usage_remote])
usage_ds = [ds for ds in tethys1.datasets if (ds['parameter'] == 'water_use') and (ds['product_code'] == 'raw_data') and (ds['frequency_interval'] == '24H') and (ds['utc_offset'] == '12H') and (ds['method'] == 'sensor_recording')]
stns_list = []
for ds in usage_ds:
stns1 = tethys1.get_stations(ds['dataset_id'])
stns_list.extend(stns1)
stns_list3 = [{'dataset_id': s['dataset_id'], 'station_id': s['station_id'], 'ref': s['ref'], 'geometry': Point(s['geometry']['coordinates']), 'from_date': s['time_range']['from_date'], 'to_date': s['time_range']['to_date']} for s in stns_list]
[s.update({'from_date': s['from_date'] + '+00:00', 'to_date': s['to_date'] + '+00:00'}) for s in stns_list3 if not '+00:00' in s['from_date']]
stns_summ = gpd.GeoDataFrame(pd.DataFrame(stns_list3), geometry='geometry', crs=4326)
stns_summ['from_date'] = pd.to_datetime(stns_summ['from_date']).dt.tz_convert(self.local_tz).dt.tz_localize(None)
stns_summ['to_date'] = pd.to_datetime(stns_summ['to_date']).dt.tz_convert(self.local_tz).dt.tz_localize(None)
# stns_summ['from_date'] = pd.to_datetime(stns_summ['from_date']).dt.tz_localize(None)
# stns_summ['to_date'] = pd.to_datetime(stns_summ['to_date']).dt.tz_localize(None)
if isinstance(self.from_date, str):
from_date1 = pd.Timestamp(self.from_date)
stns_summ = stns_summ[stns_summ['to_date'] >= from_date1]
if isinstance(self.to_date, str):
to_date1 = pd.Timestamp(self.to_date)
stns_summ = stns_summ[stns_summ['from_date'] <= to_date1]
setattr(self, 'waps_all', stns_summ)
setattr(self, '_tethys_usage', tethys1)
setattr(self, 'usage_datasets', usage_ds)
return stns_summ
def get_upstream_waps(self):
"""
Function to determine the upstream water abstraction sites from the catchment delineation.
Returns
-------
DataFrame
allocation data
"""
if not hasattr(self, 'waps_all'):
waps = self.get_waps()
else:
waps = self.waps_all.copy()
if not hasattr(self, 'catch'):
catch1 = self.get_catchments()
else:
catch1 = self.catch.copy()
waps.rename(columns={'station_id': 'wap_stn_id', 'dataset_id': 'wap_ds_id', 'ref': 'wap'}, inplace=True)
### WAP selection
waps_catch, poly1 = vector.pts_poly_join(waps, catch1, 'station_id')
### Get crc data
if waps_catch.empty:
print('No WAPs were found in the polygon(s)')
else:
waps_sel = waps[waps.wap_stn_id.isin(waps_catch.wap_stn_id.unique())].copy()
## Save if required
if hasattr(self, 'output_path'):
run_time = pd.Timestamp.today().strftime('%Y-%m-%dT%H%M')
save1 = waps_sel.copy()
save1['from_date'] = save1['from_date'].astype(str)
save1['to_date'] = save1['to_date'].astype(str)
waps_shp = outputs['waps_shp'].format(run_date=run_time)
save1.to_file(os.path.join(self.output_path, waps_shp))
## Return
setattr(self, 'waps_catch', waps_catch)
return waps_catch
def get_usage(self):
"""
"""
if not hasattr(self, 'waps_catch'):
waps_catch = self.get_upstream_waps()
else:
waps_catch = self.waps_catch.copy()
waps2 = waps_catch.groupby(['wap_ds_id', 'wap_stn_id']).first().reset_index()
wap_ids = waps2.wap.unique().tolist()
allo1 = AlloUsage(wap_filter={'wap': wap_ids}, from_date=self.from_date, to_date=self.to_date)
# allo1 = AlloUsage(from_date=self.from_date, to_date=self.to_date)
usage1 = allo1.get_ts(['allo', 'usage', 'usage_est'], 'D', ['wap'])
usage1a = usage1[(usage1['total_allo'] > 0) & (usage1['sw_allo'] > 0)].copy()
if 'sw_usage_est' not in usage1a.columns:
usage1a['sw_usage_est'] = 0
usage2 = usage1a[['sw_allo', 'sw_usage', 'sw_usage_est']].reset_index().copy()
usage3 = pd.merge(waps_catch[['wap', 'station_id', 'wap_stn_id']], usage2, on='wap')
## Aggregate by flow station id and date
usage4 = usage3.groupby(['station_id', 'date'])[['sw_allo', 'sw_usage', 'sw_usage_est']].sum()
usage5 = (usage4 / 24 / 60 / 60).round(3)
# usage5 = usage4.copy()
# usage5.loc[(usage5['sw_usage'] > 0) & (usage5['sw_usage_est'] > 0), 'sw_usage_est'] = 0
usage5.rename(columns={'sw_allo': 'allocation', 'sw_usage': 'measured usage', 'sw_usage_est': 'estimated usage'}, inplace=True)
## Aggregate by flow station id, wap station id, and date
usage6 = usage3.groupby(['station_id', 'wap_stn_id', 'date'])[['sw_allo', 'sw_usage', 'sw_usage_est']].sum()
usage7 = (usage6 / 24 / 60 / 60).round(3)
# usage5 = usage4.copy()
# usage7.loc[(usage6['sw_usage'] > 0) & (usage6['sw_usage_est'] > 0), 'sw_usage_est'] = 0
usage7.rename(columns={'sw_allo': 'allocation', 'sw_usage': 'measured usage', 'sw_usage_est': 'estimated usage'}, inplace=True)
## Save results
if hasattr(self, 'output_path'):
run_time = pd.Timestamp.today().strftime('%Y-%m-%dT%H%M')
usage_rate_wap_csv = outputs['usage_rate_wap_csv'].format(run_date=run_time)
usage1.to_csv(os.path.join(self.output_path, usage_rate_wap_csv))
setattr(self, 'usage_rate', usage5.reset_index())
setattr(self, 'usage_rate_wap', usage7.reset_index())
return usage5.reset_index()
def get_flow(self, buffer_dis=60000, threads=30):
"""
Function to query and/or estimate flow at the input_sites.
Parameters
----------
buffer_dis : int
The search radius for the regressions in meters.
Returns
-------
DataFrame of Flow
"""
### Prep the stations and other inputs
flow_ds = self.flow_datasets.copy()
tethys1 = self._tethys_flow
stns = self.stations.copy()
rec_ds_id = [ds for ds in self.flow_datasets_all if ds['method'] == 'sensor_recording'][0]['dataset_id']
man_ds_id = [ds for ds in self.flow_datasets_all if ds['method'] == 'field_activity'][0]['dataset_id']
methods = [m['method'] for m in flow_ds]
rec_stns = self.stations_all[self.stations_all.dataset_id == rec_ds_id].to_crs(2193).copy()
if self.from_date is None:
from_date1 = None
else:
from_date1 = pd.Timestamp(self.from_date, tz=self.local_tz).tz_convert('utc').tz_localize(None)
if self.to_date is None:
to_date1 = None
else:
to_date1 = | pd.Timestamp(self.to_date, tz=self.local_tz) | pandas.Timestamp |
from datetime import datetime
from decimal import Decimal
import numpy as np
import pytest
import pytz
from pandas.compat import is_platform_little_endian
from pandas import CategoricalIndex, DataFrame, Index, Interval, RangeIndex, Series
import pandas._testing as tm
class TestFromRecords:
def test_from_records_with_datetimes(self):
# this may fail on certain platforms because of a numpy issue
# related GH#6140
if not is_platform_little_endian():
pytest.skip("known failure of test on non-little endian")
# construction with a null in a recarray
# GH#6140
expected = DataFrame({"EXPIRY": [datetime(2005, 3, 1, 0, 0), None]})
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [("EXPIRY", "<M8[ns]")]
try:
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
except (ValueError):
pytest.skip("known failure of numpy rec array creation")
result = DataFrame.from_records(recarray)
tm.assert_frame_equal(result, expected)
# coercion should work too
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [("EXPIRY", "<M8[m]")]
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
result = DataFrame.from_records(recarray)
tm.assert_frame_equal(result, expected)
def test_from_records_sequencelike(self):
df = DataFrame(
{
"A": np.array(np.random.randn(6), dtype=np.float64),
"A1": np.array(np.random.randn(6), dtype=np.float64),
"B": np.array(np.arange(6), dtype=np.int64),
"C": ["foo"] * 6,
"D": np.array([True, False] * 3, dtype=bool),
"E": np.array(np.random.randn(6), dtype=np.float32),
"E1": np.array(np.random.randn(6), dtype=np.float32),
"F": np.array(np.arange(6), dtype=np.int32),
}
)
# this is actually tricky to create the recordlike arrays and
# have the dtypes be intact
blocks = df._to_dict_of_blocks()
tuples = []
columns = []
dtypes = []
for dtype, b in blocks.items():
columns.extend(b.columns)
dtypes.extend([(c, np.dtype(dtype).descr[0][1]) for c in b.columns])
for i in range(len(df.index)):
tup = []
for _, b in blocks.items():
tup.extend(b.iloc[i].values)
tuples.append(tuple(tup))
recarray = np.array(tuples, dtype=dtypes).view(np.recarray)
recarray2 = df.to_records()
lists = [list(x) for x in tuples]
# tuples (lose the dtype info)
result = DataFrame.from_records(tuples, columns=columns).reindex(
columns=df.columns
)
# created recarray and with to_records recarray (have dtype info)
result2 = DataFrame.from_records(recarray, columns=columns).reindex(
columns=df.columns
)
result3 = DataFrame.from_records(recarray2, columns=columns).reindex(
columns=df.columns
)
# list of tupels (no dtype info)
result4 = DataFrame.from_records(lists, columns=columns).reindex(
columns=df.columns
)
tm.assert_frame_equal(result, df, check_dtype=False)
tm.assert_frame_equal(result2, df)
tm.assert_frame_equal(result3, df)
tm.assert_frame_equal(result4, df, check_dtype=False)
# tuples is in the order of the columns
result = DataFrame.from_records(tuples)
tm.assert_index_equal(result.columns, RangeIndex(8))
# test exclude parameter & we are casting the results here (as we don't
# have dtype info to recover)
columns_to_test = [columns.index("C"), columns.index("E1")]
exclude = list(set(range(8)) - set(columns_to_test))
result = DataFrame.from_records(tuples, exclude=exclude)
result.columns = [columns[i] for i in sorted(columns_to_test)]
tm.assert_series_equal(result["C"], df["C"])
tm.assert_series_equal(result["E1"], df["E1"].astype("float64"))
# empty case
result = DataFrame.from_records([], columns=["foo", "bar", "baz"])
assert len(result) == 0
tm.assert_index_equal(result.columns, Index(["foo", "bar", "baz"]))
result = DataFrame.from_records([])
assert len(result) == 0
assert len(result.columns) == 0
def test_from_records_dictlike(self):
# test the dict methods
df = DataFrame(
{
"A": np.array(np.random.randn(6), dtype=np.float64),
"A1": np.array(np.random.randn(6), dtype=np.float64),
"B": np.array(np.arange(6), dtype=np.int64),
"C": ["foo"] * 6,
"D": np.array([True, False] * 3, dtype=bool),
"E": np.array(np.random.randn(6), dtype=np.float32),
"E1": np.array(np.random.randn(6), dtype=np.float32),
"F": np.array(np.arange(6), dtype=np.int32),
}
)
# columns is in a different order here than the actual items iterated
# from the dict
blocks = df._to_dict_of_blocks()
columns = []
for dtype, b in blocks.items():
columns.extend(b.columns)
asdict = {x: y for x, y in df.items()}
asdict2 = {x: y.values for x, y in df.items()}
# dict of series & dict of ndarrays (have dtype info)
results = []
results.append(DataFrame.from_records(asdict).reindex(columns=df.columns))
results.append(
DataFrame.from_records(asdict, columns=columns).reindex(columns=df.columns)
)
results.append(
DataFrame.from_records(asdict2, columns=columns).reindex(columns=df.columns)
)
for r in results:
tm.assert_frame_equal(r, df)
def test_from_records_with_index_data(self):
df = DataFrame(np.random.randn(10, 3), columns=["A", "B", "C"])
data = np.random.randn(10)
df1 = DataFrame.from_records(df, index=data)
tm.assert_index_equal(df1.index, Index(data))
def test_from_records_bad_index_column(self):
df = DataFrame(np.random.randn(10, 3), columns=["A", "B", "C"])
# should pass
df1 = DataFrame.from_records(df, index=["C"])
tm.assert_index_equal(df1.index, Index(df.C))
df1 = DataFrame.from_records(df, index="C")
tm.assert_index_equal(df1.index, Index(df.C))
# should fail
msg = r"Shape of passed values is \(10, 3\), indices imply \(1, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame.from_records(df, index=[2])
with pytest.raises(KeyError, match=r"^2$"):
DataFrame.from_records(df, index=2)
def test_from_records_non_tuple(self):
class Record:
def __init__(self, *args):
self.args = args
def __getitem__(self, i):
return self.args[i]
def __iter__(self):
return iter(self.args)
recs = [Record(1, 2, 3), Record(4, 5, 6), Record(7, 8, 9)]
tups = [tuple(rec) for rec in recs]
result = DataFrame.from_records(recs)
expected = DataFrame.from_records(tups)
tm.assert_frame_equal(result, expected)
def test_from_records_len0_with_columns(self):
# GH#2633
result = DataFrame.from_records([], index="foo", columns=["foo", "bar"])
expected = Index(["bar"])
assert len(result) == 0
assert result.index.name == "foo"
| tm.assert_index_equal(result.columns, expected) | pandas._testing.assert_index_equal |
import pandas as pd
import numpy as np
from sklearn.cluster import MeanShift, AgglomerativeClustering, estimate_bandwidth
from .azure_api import AzureOCR
TOP_LEFT_X, TOP_LEFT_Y, TOP_RIGHT_X, TOP_RIGHT_Y, \
BOTTOM_RIGHT_X, BOTTOM_RIGHT_Y, BOTTOM_LEFT_X, \
BOTTOM_LEFT_Y, TEXT = 'top_left_x', 'top_left_y', 'top_right_x', \
'top_right_y', 'bottom_right_x', 'bottom_right_y', \
'bottom_left_x', 'bottom_left_y', 'text'
__all__ = ['StructureExtractor']
class StructureExtractor:
def __init__(self, document_filepath=None, endpoint=None, subscription_key=None, operation_url=None, ocr_outputs=None, api_type='azure', api='read'):
self.ocr_outputs = ocr_outputs
self.operation_url = operation_url
if api_type=='azure':
azure_ocr = AzureOCR(
document_filepath=document_filepath,
endpoint=endpoint,
subscription_key=subscription_key,
operation_url = self.operation_url,
ocr_outputs = self.ocr_outputs,
api=api
)
read_api_ocr = azure_ocr.get_api_ocr()
self.word_dataframe = read_api_ocr.word_dataframe
self.line_dataframe = read_api_ocr.line_dataframe
self.is_scanned = read_api_ocr.is_scanned
self.ocr_outputs = read_api_ocr.ocr_outputs
if api.lower()=='read':
self.operation_url = read_api_ocr.operation_url
else:
self.word_dataframe = None
self.line_dataframe = None
self.is_scanned = None
self.document_binaries = None
def calculating_paragraph_and_column_per_page(self, line_dataframe, page_number):
"""
*Author: <NAME>
*Details: Creating paragraph attribute for calculating paragraph number of the text
present in given dataframe using clustering on coordiantes.
"""
MIN_LINE_SPACE = 0.09
line_dataframe = line_dataframe.reset_index(drop=True)
# Operation on page
page_df = line_dataframe[line_dataframe['page']==page_number]
# Calculating vertical text
page_df['x_diff'] = page_df[TOP_RIGHT_X]-page_df[TOP_LEFT_X]
page_df['y_diff'] = page_df[TOP_RIGHT_Y]-page_df[TOP_LEFT_Y]
temp_page_df = page_df[page_df['x_diff']==0]
v_df = pd.DataFrame(index=temp_page_df[TOP_LEFT_X], columns=[TEXT, 'line_number'])
v_df[TEXT] = temp_page_df[TEXT].tolist()
v_df['line_number'] = temp_page_df['line_number'].tolist()
my_line_num_text_dict = v_df.T.to_dict()
page_df.loc[temp_page_df.index, 'vertical_text_lines'] = [my_line_num_text_dict for _ in range(len(temp_page_df))]
line_dataframe.loc[temp_page_df.index, 'vertical_text_lines'] = [my_line_num_text_dict for _ in range(len(temp_page_df))]
dd = pd.DataFrame(index = temp_page_df.index)
dd[TOP_LEFT_X] = temp_page_df[TOP_RIGHT_X].tolist()
dd[TOP_LEFT_Y] = temp_page_df[TOP_RIGHT_Y].tolist()
dd[TOP_RIGHT_X] = temp_page_df[BOTTOM_RIGHT_X].tolist()
dd[TOP_RIGHT_Y] = temp_page_df[BOTTOM_RIGHT_Y].tolist()
dd[BOTTOM_RIGHT_X] = temp_page_df[BOTTOM_LEFT_X].tolist()
dd[BOTTOM_RIGHT_Y] = temp_page_df[BOTTOM_LEFT_Y].tolist()
dd[BOTTOM_LEFT_X] = temp_page_df[TOP_LEFT_X].tolist()
dd[BOTTOM_LEFT_Y] = temp_page_df[TOP_LEFT_Y].tolist()
if not dd.empty:
dd[TOP_LEFT_X] = min(dd[TOP_LEFT_X])
page_df.loc[dd.index, [TOP_LEFT_X, TOP_LEFT_Y, TOP_RIGHT_X, TOP_RIGHT_Y,
BOTTOM_RIGHT_X, BOTTOM_RIGHT_Y, BOTTOM_LEFT_X, BOTTOM_LEFT_Y]] = dd.loc[dd.index, [TOP_LEFT_X, TOP_LEFT_Y, TOP_RIGHT_X, TOP_RIGHT_Y,
BOTTOM_RIGHT_X, BOTTOM_RIGHT_Y, BOTTOM_LEFT_X, BOTTOM_LEFT_Y]]
line_dataframe.loc[dd.index, [TOP_LEFT_X, TOP_LEFT_Y, TOP_RIGHT_X, TOP_RIGHT_Y,
BOTTOM_RIGHT_X, BOTTOM_RIGHT_Y, BOTTOM_LEFT_X, BOTTOM_LEFT_Y]] = dd.loc[dd.index, [TOP_LEFT_X, TOP_LEFT_Y, TOP_RIGHT_X, TOP_RIGHT_Y,
BOTTOM_RIGHT_X, BOTTOM_RIGHT_Y, BOTTOM_LEFT_X, BOTTOM_LEFT_Y]]
# Assigning approprate value for coordinated belonging to same line
for li in sorted(set(page_df.line_number)):
df_li = page_df[page_df['line_number']==li]
page_df.loc[df_li.index, BOTTOM_RIGHT_Y] = max(df_li[BOTTOM_RIGHT_Y])
page_df.loc[df_li.index, TOP_LEFT_Y] = min(df_li[TOP_LEFT_Y])
page_df.loc[df_li.index, BOTTOM_LEFT_Y] = max(df_li[BOTTOM_LEFT_Y])
page_df.loc[df_li.index, TOP_RIGHT_Y] = min(df_li[TOP_RIGHT_Y])
# Calculating y-coordinates space above and below line
page_df['bottom'] = [0] + page_df[BOTTOM_RIGHT_Y].tolist()[:-1]
page_df['up_space'] = page_df[TOP_LEFT_Y] - page_df['bottom']
page_df['down_space'] = page_df['up_space'][1:].tolist()+ [0]
# Assigning approprate value for coordinated belonging to same line
for li in sorted(set(page_df.line_number)):
df_li = page_df[page_df['line_number']==li]
page_df.loc[df_li.index, 'up_space'] = max(df_li['up_space'])
page_df.loc[df_li.index, 'down_space'] = max(df_li['down_space'])
# Filter for eliminating large bottom blank space before clustering
page_df1 = page_df[page_df['up_space'] < 1.8]
page_df2 = page_df[page_df['up_space'] >= 1.8]
if page_df1.empty:
return line_dataframe
# MeanShift Clustering in space between two lines
X = np.array(page_df1.loc[:, ['up_space']])
model = MeanShift(n_jobs=-1)
# fit model and predict clusters
yhat = model.fit_predict(X)
# Adding -1 cluster number for ignored words below large bottom blank space
page_df['yhat'] = list(yhat) + [-1 for _ in range(len(page_df2))]
# Sorting clustering number bases on upper space of line
page_df = page_df.sort_values(by=['up_space'])
# Reordering clustering in ascending order based on height of upper blank space of line
yhat_ascending_sequence = []
count = 0
prev_cluster_no = page_df['yhat'].tolist() and page_df['yhat'].tolist()[0]
for cluster_no in page_df['yhat']:
if prev_cluster_no != cluster_no:
count += 1
yhat_ascending_sequence.append(count)
prev_cluster_no = cluster_no
page_df['yhat'] = yhat_ascending_sequence
page_df = page_df.sort_index()
# Creating paragraph sequence by combining 0 with non-zerp values and lines whose upper space is less than MIN_LINE_SPACE
paragraph_seq = []
count = 0
prev_line = page_df['line_number'].tolist() and page_df['line_number'].tolist()[0]
for y, line, up_space in zip(page_df['yhat'], page_df['line_number'], page_df['up_space']):
if y and line != prev_line:
if up_space > MIN_LINE_SPACE:
count += 1
prev_line = line
paragraph_seq.append(count)
# Adding paragraph number and sorting results
page_df['paragraph'] = paragraph_seq
page_df= page_df.sort_values(by=['line_number', TOP_LEFT_X])
# MeanShift Clustering in top left x coordinates
X = np.array(page_df.loc[:, [TOP_LEFT_X]])
bandwidth = estimate_bandwidth(X, quantile=0.16, n_samples=500, n_jobs=-1)
if bandwidth:
model = MeanShift(bandwidth=bandwidth, n_jobs=-1)
else:
model = MeanShift(n_jobs=-1)
xhat = model.fit_predict(X)
cluster_centers = model.cluster_centers_
page_df['xhat'] = xhat
# Sorting clustering number bases on Top left x of line
page_df = page_df.sort_values(by=[TOP_LEFT_X])
# Reordering clustering in ascending order based on height of upper blank space of line
xhat_ascending_sequence = []
count = 0
prev_cluster_no = page_df['xhat'].tolist() and page_df['xhat'].tolist()[0]
for cluster_no in page_df['xhat']:
if prev_cluster_no != cluster_no:
count += 1
xhat_ascending_sequence.append(count)
prev_cluster_no = cluster_no
page_df['column'] = xhat_ascending_sequence
page_df = page_df.sort_index()
# Assignment of value to line_dataframe
line_dataframe.loc[page_df.index, 'up_space'] = page_df['up_space']
line_dataframe.loc[page_df.index, 'down_space'] = page_df['down_space']
line_dataframe.loc[page_df.index, 'xhat'] = page_df['xhat']
line_dataframe.loc[page_df.index, 'yhat'] = page_df['yhat']
line_dataframe.loc[page_df.index, 'paragraph'] = page_df['paragraph']
line_dataframe.loc[page_df.index, 'column'] = page_df['column']
return line_dataframe
def paragraph_extraction(self, line_dataframe=None):
"""
*Author: <NAME>
*Details: Creating paragraph number in line_dataframe.
"""
if line_dataframe is None:
line_dataframe = self.line_dataframe
line_dataframe ['vertical_text_lines'] = None
for page_number in sorted(set(line_dataframe ['page'])):
line_dataframe = self.calculating_paragraph_and_column_per_page(line_dataframe , page_number)
# Calculating paragraph_number column for complete PDF
paragraph_number = []
count = 0
prev_para_num = line_dataframe['paragraph'].tolist() and line_dataframe['paragraph'].tolist()[0]
for para_num in line_dataframe['paragraph']:
if para_num==prev_para_num or pd.isna(para_num):
pass
else:
count += 1
prev_para_num = para_num
paragraph_number.append(count)
line_dataframe['paragraph_number'] = paragraph_number
return line_dataframe
def structure_extraction(self, line_dataframe=None):
"""
*Author: <NAME>
*Details: Identifying page header, page footer,
table rows (i.e `table_number` attribute) and
table columns (i.e `column` attribute)
"""
if line_dataframe is False:
return line_dataframe
if line_dataframe is None:
line_dataframe = self.line_dataframe
line_dataframe = self.paragraph_extraction(line_dataframe)
# Calculating table identified in a paragraph
for para_num in sorted(set(line_dataframe['paragraph_number'])):
df_para = line_dataframe[line_dataframe['paragraph_number']==para_num]
for col in sorted(set(df_para[~pd.isna(df_para['column'])]['column'])):
col_df = df_para[df_para['column']==col]
col_df['column_up_space'] = col_df[TOP_LEFT_Y].diff().tolist()
df_para.loc[col_df.index, 'column_up_space'] = col_df['column_up_space'].tolist()
line_dataframe.loc[col_df.index, 'column_up_space'] = col_df['column_up_space'].tolist()
df_nan = line_dataframe[pd.isna(line_dataframe['column_up_space'])]
df_nan = df_nan.sort_values(by=['column'])
df_nan["column_up_space"] = df_nan[TOP_LEFT_Y].diff()
df_nan["column_up_space"] = df_nan["column_up_space"].apply(lambda x: abs(x))
line_dataframe.loc[df_nan.index, 'column_up_space'] = df_nan['column_up_space'].tolist()
prev_para_num = sorted(set(line_dataframe['paragraph_number'])) and sorted(set(line_dataframe['paragraph_number']))[0]
for para_num in sorted(set(line_dataframe['paragraph_number'])):
df_para = line_dataframe[line_dataframe['paragraph_number']==para_num]
for line in sorted(set(df_para['line_number'])):
temp_df = df_para[df_para['line_number']==line]
my_sum = 0
for val in temp_df['column_up_space']:
if not pd.isna(val):
my_sum+=val
df_para.loc[temp_df.index, 'sum_of_column_up_space'] = my_sum * len(temp_df) + len(temp_df)*100
line_dataframe.loc[temp_df.index, 'sum_of_column_up_space'] = my_sum * len(temp_df) + len(temp_df)*100
# Identify Table Rows
for page in sorted(set(line_dataframe['page'])):
df_page = line_dataframe[line_dataframe['page']==page]
# min_col = min(df_page['column'])
X = np.array(df_page.loc[:, ['sum_of_column_up_space']])
if len(X) != 1:
model = AgglomerativeClustering(n_clusters=2)
# fit model and predict clusters
yhat = model.fit_predict(X)
if not df_page.empty:
df_t = df_page[df_page['sum_of_column_up_space']==max(df_page['sum_of_column_up_space'])]['sum_of_column_up_space']
max_val = not df_t.empty and df_t.iloc[0,]
for i, s in zip(df_page.index, df_page['sum_of_column_up_space']):
if s == max_val:
break
index_val = True in list(df_page.index==i) and list(df_page.index==i).index(True)
if not yhat[index_val]:
yhat = (yhat==0).astype(int)
df_page['table_identifier'] = yhat
line_dataframe.loc[df_page.index, 'table_identifier'] = yhat
row = []
count = 0
table_starter = df_page[df_page['sum_of_column_up_space']==max(df_page['sum_of_column_up_space'])]['table_identifier'].unique()[0]
first_identifier = df_page['table_identifier'].tolist() and df_page['table_identifier'].tolist()[0]
prev_identifier = df_page['table_identifier'].tolist() and df_page['table_identifier'].tolist()[0]
flag = True
for identifier in df_page['table_identifier']:
if pd.isna(identifier):
row.append(identifier)
continue
if flag :
if identifier == table_starter:
flag = False
count += 1
elif prev_identifier != identifier:
if identifier != first_identifier:
count += 1
prev_identifier = identifier
row.append(count)
df_page['row'] = row
starting_table_identifier = df_page[df_page['sum_of_column_up_space'] == max(df_page['sum_of_column_up_space'])]['table_identifier'].unique()[0]
for r in sorted(set(row)):
df_page_row = df_page[df_page['row'] == r]
# starting_table_identifier = df_page_row[df_page_row['sum_of_column_up_space'] == max(df_page_row['sum_of_column_up_space'])]['table_identifier'].unique()[0]
table_expected_column_table_identifier = set(df_page_row[df_page_row['table_identifier']== starting_table_identifier]['column'])
table_column_checker = df_page_row[df_page_row['table_identifier'] != starting_table_identifier]['column']
vertical_text_lines = df_page_row[df_page_row['table_identifier'] != starting_table_identifier]['vertical_text_lines']
for index, column_no, vertical_text_line in zip(table_column_checker.index, table_column_checker, vertical_text_lines):
if not table_expected_column_table_identifier:
df_page.loc[index, 'row'] = -1
elif column_no not in table_expected_column_table_identifier:
if pd.isna(vertical_text_line):
df_page.loc[index, 'row'] = None
line_dataframe.loc[df_page.index, 'row'] = df_page['row'].tolist()
table_number = []
count = 0
flag = False
prev_row = line_dataframe['row'].tolist() and line_dataframe['row'].tolist()[0]
prev_page = line_dataframe['page'].tolist() and line_dataframe['page'].tolist()[0]
for r, p in zip(line_dataframe['row'], line_dataframe['page']):
if flag:
if pd.isna(r) or (prev_row == r and prev_page == p):
flag = True
table_number.append(None)
else:
flag = False
count += 1
table_number.append(count)
prev_row = r
prev_page = p
continue
if pd.isna(r):
table_number.append(None)
flag = True
continue
if r != prev_row and r != -1:
count += 1
if not pd.isna(r):
prev_row = r
table_number.append(count)
prev_page = p
line_dataframe['table_number']=table_number
# Identifying header and footers by Clustering
header_para = []
footer_para = []
for page in sorted(set(line_dataframe['page'])):
page_df = line_dataframe[line_dataframe['page']==page]
page_numbers = sorted(page_df['paragraph_number'])
if not page_numbers:
continue
elif len(page_numbers)==1:
header_para.append(page_numbers[0])
else:
header_para.append(page_numbers[0])
footer_para.append(page_numbers[-1])
header_df = pd.DataFrame()
for h_para in header_para:
h_df = line_dataframe[line_dataframe['paragraph_number']==h_para]
header_df = header_df.append(h_df)
# MeanShift Clustering in space between two lines
X = np.array(header_df.loc[:, [TOP_LEFT_X, TOP_RIGHT_X, BOTTOM_RIGHT_X, BOTTOM_LEFT_X]])
model = MeanShift(n_jobs=-1)
hhat = model.fit_predict(X)
cluster_centers = model.cluster_centers_
header_df['header_clusters'] = hhat
header_cluster_number_list = header_df['header_clusters'].mode().tolist()
header_cluster_number = header_cluster_number_list and sorted(header_cluster_number_list)[0]
header_df = header_df[header_df["header_clusters"]==header_cluster_number]
line_dataframe['is_header'] = False
line_dataframe.loc[header_df.index, 'is_header'] = True
footer_df = | pd.DataFrame() | pandas.DataFrame |
import os
from .sf import (
getFilename,
extract_task_value,
parseTime,
get_power_users,
percentageVotesForAnswer,
extractTaskValue,
get_task_0_value_counts)
from .helpers import json_parser
from datetime import date
import pandas as pd
import numpy as np
import json
def make_df_classify(workflow, task_indices=[0,1]): # [0,1] are the indices from the classify workflow
"""
Create a dataframe where each contains a single classification, from a Zooniverse .csv file.
@param {str} workflow: one of 'classify', 'onthego' and 'hardcore'
@param {List[Int]} task_indices: list of task indices present in the given workflow
"""
converters = { column_name: json_parser for column_name in ['annotations', 'subject_data', 'metadata'] }
cwd = os.path.dirname(os.path.abspath(__file__))
csv_filenames = {
'classify': 'classify-classifications',
'hardcore': 'classify-hardcore-edition-classifications',
'onthego': 'classify-on-the-go-classifications'
}
pathstring = '../SpaceFluff/zooniverse_exports/{}.csv'.format(csv_filenames[workflow])
loc = os.path.join(cwd, pathstring)
df = pd.read_csv(loc, delimiter=",", converters=converters)
df.insert(0, 'Filename', df['subject_data'].apply(getFilename))
tasks = ['T{}'.format(i) for i in task_indices]
for task in tasks:
df[task] = df['annotations'].apply(lambda x: extractTaskValue(x, task))
df = df[~df['T0'].isnull()] # if user didn't answer T0, the classification is void and can be removed safely
# filter out classifications from beta
df['created_at'] = parseTime(df['created_at'])
end_of_beta = pd.Timestamp(date(2020, 10, 20), tz='utc')
df = df[df['created_at'] > end_of_beta]
try:
df['isRetired'] = df['metadata'].apply(lambda x: x.get('subject_selection_state', {}).get('retired'))
df['alreadySeen'] = df['metadata'].apply(lambda x: x.get('subject_selection_state', {}).get('already_seen'))
# filter alreadySeen or retired rows, and drop obsolete columns from the dataframe altogether
df = df.query('(isRetired == False) & (alreadySeen == False)')
df = df.drop(['isRetired', 'alreadySeen', 'gold_standard'], axis=1)
except:
pass
return df
def make_df_vote_threshold(df, vote_count_threshold):
users_and_votes = get_power_users(df, vote_count_threshold)
usernames = [user['username'] for user in users_and_votes]
df = df[df['user_name'].isin(usernames)]
return df
def make_df_tasks_with_props(df, candidate_names, object_info, onthego=False):
# create a temporary dataframe containing only classifications where 'task0' == 'Galaxy'
df_galaxy = df[df['T0'] == 'Galaxy']
galaxy_names = df_galaxy['Filename']
df_task0 = make_df_task0(df, candidate_names, onthego)
if not onthego:
groupby_name = df_galaxy[['Filename', 'T0', 'T1']].groupby(['Filename'])
galaxy_task1_values = []
for name in set(galaxy_names):
group = groupby_name.get_group(name) # get all classifications of this object from df
rowObj = {
"name": name
}
for answer in ['Fluffy', 'Bright']: # add 'fluffy' and 'bright' columns
rowObj['% {}'.format(answer)] = round(list(group['T1']).count(answer)*100/group.shape[0], 1)
none_count = group[group['T1'].isnull()].shape[0] # also manually add 'None' row since None is parsed to NaN otherwise
rowObj['% None'] = round(none_count*100/group.shape[0], 1)
galaxy_task1_values.append(rowObj) # append rowObj to list
df_task1 = pd.DataFrame(galaxy_task1_values)
df_tasks = df_task1.merge(df_task0, on='name', how='outer')
else:
df_tasks = df_task0
df_tasks_with_props = df_tasks.merge(object_info, how='outer', on='name') # merge properties onto dataframe
df_tasks_with_props = df_tasks_with_props[~df_tasks_with_props['# votes'].isnull()] # filter out objects without actual votes
return df_tasks_with_props
def make_df_task0(df, candidate_names, onthego):
# group df by filename, so that each group contains only rows belonging to that object
gr = df[['Filename', 'T0']].groupby('Filename')
task0Values = [] # create empty list to push results to
for objectName in candidate_names:
# loop over every group created above to accumulate 'task 0' votes ('galaxy'/'group of objects'/'something else')
try:
task0_values = gr.get_group(objectName)['T0']
counts, votes = get_task_0_value_counts(task0_values)
countObj = {
"name": objectName,
"counts": counts,
"# votes": votes
}
task0Values.append(countObj)
except:
continue
df_task0 = | pd.DataFrame(task0Values) | pandas.DataFrame |
# lucid/df.py
__doc__ = """
Functions for exploring dataframes.
"""
#-----------------------------------------------------------------------------
# Logging
#-----------------------------------------------------------------------------
import logging
_l = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports & Options
#-----------------------------------------------------------------------------
# External imports
from functools import reduce
import numpy as np
import pandas as pd
import re
# Lucid imports
from .util import me
#-----------------------------------------------------------------------------
# Globals & Constants
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Data Ingest
#-----------------------------------------------------------------------------
def read_selected_columns(file, exclude, **kwargs) -> pd.DataFrame():
"""Reads a CSV file with the exclusion of specified columns."""
columns = pd.read_csv(file, nrows=0)
usecols = [col for col in columns if col not in exclude]
return pd.read_csv(file, usecols=usecols, **kwargs)
#-----------------------------------------------------------------------------
# Data Overview Functions
#-----------------------------------------------------------------------------
def mem(df, verbose=False):
"""Shows RAM footprint of a datafrane (df.info)."""
return df.info(verbose=verbose, memory_usage='deep')
def vc(df, col, dropna=False):
"""Shortcut to Series.value_counts(dropna=False)."""
return df[col].value_counts(dropna=dropna)
def topseries(series, n=7):
"""Shows top `n` values in Pandas series."""
series_sorted = series.sort_values(ascending=False)
topn = series_sorted.iloc[:n]
topn = topn.append(
pd.Series(series_sorted.iloc[n:].sum(), index=[' other'])
)
return topn
def top_items(df, col, n=1) -> list:
"""Returns cardinality and top `n` items from `col` in a `df`."""
rel = 100 / len(df)
counts = df[col].value_counts(dropna=False)
c = len(counts)
keys = counts.index[:n]
vals = counts.values[:n]
pcts = counts.values[:n] * rel
return c, [[x, y, round(z, 1)] for x, y, z in zip(keys, vals, pcts)]
def ntop(df, n=3) -> pd.DataFrame:
"""Overview of top `n` items in all columns of a `df`."""
df = df.loc[:, ~df.columns.duplicated()]
dftop = pd.DataFrame(
index=df.columns,
columns=['cardinality','top_items','coverage'],
)
for col in df.columns:
top = top_items(df, col, n=n)
dftop.loc[col, 'cardinality'] = top[0]
dftop.loc[col, 'coverage'] = sum([i[2] for i in top[1]])
dftop.loc[col, 'top_items'] = top[1]
return dftop
class Counts:
"""MapReduce implementation for COUNT ... GROUP BY on big data.
Returns CGB and top ``n`` values from every column."""
def __init__(self, file, ddl_file, n_cols=None, n_top=10):
self.file = file
self.columns = self._get_columns_from_ddl(ddl_file)
self.conv = {
'message__timestamp': ts_to_dt,
}
if n_cols:
self.n = min(len(self.columns), n_cols)
else:
self.n = len(self.columns)
# self.n_lines = sum(1 for l in gzip.open(file,'rb'))
self.n_top = n_top
self.result = {}
@staticmethod
def _get_columns_from_ddl(file):
"""Reads column headers from a DDL file derived from
SHOW CREATE TABLE.
"""
with open(file, 'r') as f:
ddl = f.read()
return ddl.split('`')[3::2]
@staticmethod
def _series_add(previous_result: pd.Series, new_result: pd.Series):
"""Reducing function for adding up the results across chunks.
Equivalent to ``lambda a,b: a+b`` except takes advantage of
``fill_value`` in pd.Series.add"""
return previous_result.add(new_result, fill_value=0)
@staticmethod
def _series_ntop(s: pd.Series, n: int, fillna='NULL'):
"""Returns top n values from a Pandas series."""
vc = s.fillna(fillna).value_counts(dropna=False).head(n)
return vc
def count_chunks(self, sep='\t', chunksize=10000):
self.chunks = pd.read_csv(
self.file,
sep=sep,
chunksize=chunksize,
header=None,
low_memory=False,
nrows=5e6,
usecols=[i for i in range(self.n)],
)
# MAP
counts = []
for chunk in self.chunks:
counts.append([self._series_ntop(chunk[c], None) for c in chunk.columns])
_l.info('mapping chunk number {:>4}'.format(len(counts)))
counts = np.array(counts)
# REDUCE
for i in range(self.n):
print('reducing: {}'.format(self.columns[i]) + ' '*40, end='\r')
self.result[self.columns[i]] = reduce(
_series_add, counts[:,i]
).astype(int)
# SORT by column names
self.result = sorted(self.result.items())
def summarize(self):
"""Summarize results in a neat dataframe."""
#initialize result array
result_columns = ['column','n_unique']
for i in range(self.n_top):
result_columns.append('top_%s\nvalue' % (i+1))
result_columns.append('top_%s\ncount' % (i+1))
result_columns.append('top_%s\nrel_count' % (i+1))
result = [result_columns]
#loop over result columns
for col in self.result:
# print('analyzing column: {}'.format(col[0]), end='\r')
col_name = col[0].split('.')[-1]
n_unique = len(col[1])
col_summary = [col_name, n_unique]
vc_abs = col[1].sort_values(ascending=False)
vc_norm = (vc_abs / vc_abs.sum()).round(5) * 100
for i in range(min(self.n_top, n_unique)):
col_summary.append(vc_abs.index[i])
col_summary.append(vc_abs.values[i])
col_summary.append(vc_norm.values[i])
result.append(col_summary)
df = | pd.DataFrame(columns=result[0], data=result[1:]) | pandas.DataFrame |
#for plots
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
from ipywidgets import fixed,interactive,Layout
from preprocess import read_preprocess_file,load_interventions,intervention_dict
import ipywidgets as widgets
from textwrap import wrap
import numpy as np
import pandas as pd
#---------------------------------------------------------------------------
# Monkey patch seaborn to color the error bands with maxmin and iqr options
#---------------------------------------------------------------------------
import matplotlib as mpl
import matplotlib.pyplot as plt
from seaborn import utils
from seaborn.utils import (categorical_order, get_color_cycle, ci_to_errsize,
remove_na, locator_to_legend_entries)
from seaborn.algorithms import bootstrap
from seaborn.palettes import (color_palette, cubehelix_palette,
_parse_cubehelix_args, QUAL_PALETTES)
from seaborn.axisgrid import FacetGrid, _facet_docs
class LinePlotter_custom(sns.relational._RelationalPlotter):
_legend_attributes = ["color", "linewidth", "marker", "dashes"]
_legend_func = "plot"
def __init__(self,
x=None, y=None, hue=None, size=None, style=None, data=None,
palette=None, hue_order=None, hue_norm=None,
sizes=None, size_order=None, size_norm=None,
dashes=None, markers=None, style_order=None,
units=None, estimator=None, ci=None, n_boot=None, seed=None,
sort=True, err_style=None, err_kws=None, legend=None):
plot_data = self.establish_variables(
x, y, hue, size, style, units, data
)
self._default_size_range = (
np.r_[.5, 2] * mpl.rcParams["lines.linewidth"]
)
self.parse_hue(plot_data["hue"], palette, hue_order, hue_norm)
self.parse_size(plot_data["size"], sizes, size_order, size_norm)
self.parse_style(plot_data["style"], markers, dashes, style_order)
self.units = units
self.estimator = estimator
self.ci = ci
self.n_boot = n_boot
self.seed = seed
self.sort = sort
self.err_style = err_style
self.err_kws = {} if err_kws is None else err_kws
self.legend = legend
def aggregate(self, vals, grouper, units=None):
"""Compute an estimate and confidence interval using grouper."""
func = self.estimator
ci = self.ci
n_boot = self.n_boot
seed = self.seed
# Define a "null" CI for when we only have one value
null_ci = pd.Series(index=["low", "high"], dtype=np.float)
# Function to bootstrap in the context of a pandas group by
def bootstrapped_cis(vals):
if len(vals) <= 1:
return null_ci
boots = bootstrap(vals, func=func, n_boot=n_boot, seed=seed)
cis = utils.ci(boots, ci)
return | pd.Series(cis, ["low", "high"]) | pandas.Series |
"""This file contains code for use with "Think Stats" and
"Think Bayes", both by <NAME>, available from greenteapress.com
Copyright 2014 <NAME>
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
"""This file contains class definitions for:
Hist: represents a histogram (map from values to integer frequencies).
Pmf: represents a probability mass function (map from values to probs).
_DictWrapper: private parent class for Hist and Pmf.
Cdf: represents a discrete cumulative distribution function
Pdf: represents a continuous probability density function
"""
import bisect
import copy
import logging
import math
import random
import re
from collections import Counter
from operator import itemgetter
import thinkplot
import numpy as np
import pandas
import scipy
from scipy import stats
from scipy import special
from scipy import ndimage
from io import open
ROOT2 = math.sqrt(2)
def RandomSeed(x):
"""Initialize the random and np.random generators.
x: int seed
"""
random.seed(x)
np.random.seed(x)
def Odds(p):
"""Computes odds for a given probability.
Example: p=0.75 means 75 for and 25 against, or 3:1 odds in favor.
Note: when p=1, the formula for odds divides by zero, which is
normally undefined. But I think it is reasonable to define Odds(1)
to be infinity, so that's what this function does.
p: float 0-1
Returns: float odds
"""
if p == 1:
return float('inf')
return p / (1 - p)
def Probability(o):
"""Computes the probability corresponding to given odds.
Example: o=2 means 2:1 odds in favor, or 2/3 probability
o: float odds, strictly positive
Returns: float probability
"""
return o / (o + 1)
def Probability2(yes, no):
"""Computes the probability corresponding to given odds.
Example: yes=2, no=1 means 2:1 odds in favor, or 2/3 probability.
yes, no: int or float odds in favor
"""
return yes / (yes + no)
class Interpolator(object):
"""Represents a mapping between sorted sequences; performs linear interp.
Attributes:
xs: sorted list
ys: sorted list
"""
def __init__(self, xs, ys):
self.xs = xs
self.ys = ys
def Lookup(self, x):
"""Looks up x and returns the corresponding value of y."""
return self._Bisect(x, self.xs, self.ys)
def Reverse(self, y):
"""Looks up y and returns the corresponding value of x."""
return self._Bisect(y, self.ys, self.xs)
def _Bisect(self, x, xs, ys):
"""Helper function."""
if x <= xs[0]:
return ys[0]
if x >= xs[-1]:
return ys[-1]
i = bisect.bisect(xs, x)
frac = 1.0 * (x - xs[i - 1]) / (xs[i] - xs[i - 1])
y = ys[i - 1] + frac * 1.0 * (ys[i] - ys[i - 1])
return y
class _DictWrapper(object):
"""An object that contains a dictionary."""
def __init__(self, obj=None, label=None):
"""Initializes the distribution.
obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
label: string label
"""
self.label = label if label is not None else '_nolegend_'
self.d = {}
# flag whether the distribution is under a log transform
self.log = False
if obj is None:
return
if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
self.label = label if label is not None else obj.label
if isinstance(obj, dict):
self.d.update(obj.items())
elif isinstance(obj, (_DictWrapper, Cdf, Pdf)):
self.d.update(obj.Items())
elif isinstance(obj, pandas.Series):
self.d.update(obj.value_counts().iteritems())
else:
# finally, treat it like a list
self.d.update(Counter(obj))
if len(self) > 0 and isinstance(self, Pmf):
self.Normalize()
def __hash__(self):
return id(self)
def __str__(self):
cls = self.__class__.__name__
return '%s(%s)' % (cls, str(self.d))
__repr__ = __str__
def __eq__(self, other):
return self.d == other.d
def __len__(self):
return len(self.d)
def __iter__(self):
return iter(self.d)
def iterkeys(self):
"""Returns an iterator over keys."""
return iter(self.d)
def __contains__(self, value):
return value in self.d
def __getitem__(self, value):
return self.d.get(value, 0)
def __setitem__(self, value, prob):
self.d[value] = prob
def __delitem__(self, value):
del self.d[value]
def Copy(self, label=None):
"""Returns a copy.
Make a shallow copy of d. If you want a deep copy of d,
use copy.deepcopy on the whole object.
label: string label for the new Hist
returns: new _DictWrapper with the same type
"""
new = copy.copy(self)
new.d = copy.copy(self.d)
new.label = label if label is not None else self.label
return new
def Scale(self, factor):
"""Multiplies the values by a factor.
factor: what to multiply by
Returns: new object
"""
new = self.Copy()
new.d.clear()
for val, prob in self.Items():
new.Set(val * factor, prob)
return new
def Log(self, m=None):
"""Log transforms the probabilities.
Removes values with probability 0.
Normalizes so that the largest logprob is 0.
"""
if self.log:
raise ValueError("Pmf/Hist already under a log transform")
self.log = True
if m is None:
m = self.MaxLike()
for x, p in self.d.items():
if p:
self.Set(x, math.log(p / m))
else:
self.Remove(x)
def Exp(self, m=None):
"""Exponentiates the probabilities.
m: how much to shift the ps before exponentiating
If m is None, normalizes so that the largest prob is 1.
"""
if not self.log:
raise ValueError("Pmf/Hist not under a log transform")
self.log = False
if m is None:
m = self.MaxLike()
for x, p in self.d.items():
self.Set(x, math.exp(p - m))
def GetDict(self):
"""Gets the dictionary."""
return self.d
def SetDict(self, d):
"""Sets the dictionary."""
self.d = d
def Values(self):
"""Gets an unsorted sequence of values.
Note: one source of confusion is that the keys of this
dictionary are the values of the Hist/Pmf, and the
values of the dictionary are frequencies/probabilities.
"""
return self.d.keys()
def Items(self):
"""Gets an unsorted sequence of (value, freq/prob) pairs."""
return self.d.items()
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
Note: options are ignored
Returns:
tuple of (sorted value sequence, freq/prob sequence)
"""
if min(self.d.keys()) is np.nan:
logging.warning('Hist: contains NaN, may not render correctly.')
return zip(*sorted(self.Items()))
def MakeCdf(self, label=None):
"""Makes a Cdf."""
label = label if label is not None else self.label
return Cdf(self, label=label)
def Print(self):
"""Prints the values and freqs/probs in ascending order."""
for val, prob in sorted(self.d.items()):
print(val, prob)
def Set(self, x, y=0):
"""Sets the freq/prob associated with the value x.
Args:
x: number value
y: number freq or prob
"""
self.d[x] = y
def Incr(self, x, term=1):
"""Increments the freq/prob associated with the value x.
Args:
x: number value
term: how much to increment by
"""
self.d[x] = self.d.get(x, 0) + term
def Mult(self, x, factor):
"""Scales the freq/prob associated with the value x.
Args:
x: number value
factor: how much to multiply by
"""
self.d[x] = self.d.get(x, 0) * factor
def Remove(self, x):
"""Removes a value.
Throws an exception if the value is not there.
Args:
x: value to remove
"""
del self.d[x]
def Total(self):
"""Returns the total of the frequencies/probabilities in the map."""
total = sum(self.d.values())
return total
def MaxLike(self):
"""Returns the largest frequency/probability in the map."""
return max(self.d.values())
def Largest(self, n=10):
"""Returns the largest n values, with frequency/probability.
n: number of items to return
"""
return sorted(self.d.items(), reverse=True)[:n]
def Smallest(self, n=10):
"""Returns the smallest n values, with frequency/probability.
n: number of items to return
"""
return sorted(self.d.items(), reverse=False)[:n]
class Hist(_DictWrapper):
"""Represents a histogram, which is a map from values to frequencies.
Values can be any hashable type; frequencies are integer counters.
"""
def Freq(self, x):
"""Gets the frequency associated with the value x.
Args:
x: number value
Returns:
int frequency
"""
return self.d.get(x, 0)
def Freqs(self, xs):
"""Gets frequencies for a sequence of values."""
return [self.Freq(x) for x in xs]
def IsSubset(self, other):
"""Checks whether the values in this histogram are a subset of
the values in the given histogram."""
for val, freq in self.Items():
if freq > other.Freq(val):
return False
return True
def Subtract(self, other):
"""Subtracts the values in the given histogram from this histogram."""
for val, freq in other.Items():
self.Incr(val, -freq)
class Pmf(_DictWrapper):
"""Represents a probability mass function.
Values can be any hashable type; probabilities are floating-point.
Pmfs are not necessarily normalized.
"""
def Prob(self, x, default=0):
"""Gets the probability associated with the value x.
Args:
x: number value
default: value to return if the key is not there
Returns:
float probability
"""
return self.d.get(x, default)
def Probs(self, xs):
"""Gets probabilities for a sequence of values."""
return [self.Prob(x) for x in xs]
def Percentile(self, percentage):
"""Computes a percentile of a given Pmf.
Note: this is not super efficient. If you are planning
to compute more than a few percentiles, compute the Cdf.
percentage: float 0-100
returns: value from the Pmf
"""
p = percentage / 100.0
total = 0
for val, prob in sorted(self.Items()):
total += prob
if total >= p:
return val
def ProbGreater(self, x):
"""Probability that a sample from this Pmf exceeds x.
x: number
returns: float probability
"""
if isinstance(x, _DictWrapper):
return PmfProbGreater(self, x)
else:
t = [prob for (val, prob) in self.d.items() if val > x]
return sum(t)
def ProbLess(self, x):
"""Probability that a sample from this Pmf is less than x.
x: number
returns: float probability
"""
if isinstance(x, _DictWrapper):
return PmfProbLess(self, x)
else:
t = [prob for (val, prob) in self.d.items() if val < x]
return sum(t)
def __lt__(self, obj):
"""Less than.
obj: number or _DictWrapper
returns: float probability
"""
return self.ProbLess(obj)
def __gt__(self, obj):
"""Greater than.
obj: number or _DictWrapper
returns: float probability
"""
return self.ProbGreater(obj)
def __ge__(self, obj):
"""Greater than or equal.
obj: number or _DictWrapper
returns: float probability
"""
return 1 - (self < obj)
def __le__(self, obj):
"""Less than or equal.
obj: number or _DictWrapper
returns: float probability
"""
return 1 - (self > obj)
def Normalize(self, fraction=1.0):
"""Normalizes this PMF so the sum of all probs is fraction.
Args:
fraction: what the total should be after normalization
Returns: the total probability before normalizing
"""
if self.log:
raise ValueError("Normalize: Pmf is under a log transform")
total = self.Total()
if total == 0.0:
raise ValueError('Normalize: total probability is zero.')
#logging.warning('Normalize: total probability is zero.')
#return total
factor = fraction / total
for x in self.d:
self.d[x] *= factor
return total
def Random(self):
"""Chooses a random element from this PMF.
Note: this is not very efficient. If you plan to call
this more than a few times, consider converting to a CDF.
Returns:
float value from the Pmf
"""
target = random.random()
total = 0.0
for x, p in self.d.items():
total += p
if total >= target:
return x
# we shouldn't get here
raise ValueError('Random: Pmf might not be normalized.')
def Mean(self):
"""Computes the mean of a PMF.
Returns:
float mean
"""
mean = 0.0
for x, p in self.d.items():
mean += p * x
return mean
def Var(self, mu=None):
"""Computes the variance of a PMF.
mu: the point around which the variance is computed;
if omitted, computes the mean
returns: float variance
"""
if mu is None:
mu = self.Mean()
var = 0.0
for x, p in self.d.items():
var += p * (x - mu) ** 2
return var
def Std(self, mu=None):
"""Computes the standard deviation of a PMF.
mu: the point around which the variance is computed;
if omitted, computes the mean
returns: float standard deviation
"""
var = self.Var(mu)
return math.sqrt(var)
def MaximumLikelihood(self):
"""Returns the value with the highest probability.
Returns: float probability
"""
_, val = max((prob, val) for val, prob in self.Items())
return val
def CredibleInterval(self, percentage=90):
"""Computes the central credible interval.
If percentage=90, computes the 90% CI.
Args:
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
cdf = self.MakeCdf()
return cdf.CredibleInterval(percentage)
def __add__(self, other):
"""Computes the Pmf of the sum of values drawn from self and other.
other: another Pmf or a scalar
returns: new Pmf
"""
try:
return self.AddPmf(other)
except AttributeError:
return self.AddConstant(other)
def AddPmf(self, other):
"""Computes the Pmf of the sum of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 + v2, p1 * p2)
return pmf
def AddConstant(self, other):
"""Computes the Pmf of the sum a constant and values from self.
other: a number
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
pmf.Set(v1 + other, p1)
return pmf
def __sub__(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.SubPmf(other)
except AttributeError:
return self.AddConstant(-other)
def SubPmf(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 - v2, p1 * p2)
return pmf
def __mul__(self, other):
"""Computes the Pmf of the product of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.MulPmf(other)
except AttributeError:
return self.MulConstant(other)
def MulPmf(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 * v2, p1 * p2)
return pmf
def MulConstant(self, other):
"""Computes the Pmf of the product of a constant and values from self.
other: a number
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
pmf.Set(v1 * other, p1)
return pmf
def __div__(self, other):
"""Computes the Pmf of the ratio of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.DivPmf(other)
except AttributeError:
return self.MulConstant(1/other)
__truediv__ = __div__
def DivPmf(self, other):
"""Computes the Pmf of the ratio of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 / v2, p1 * p2)
return pmf
def Max(self, k):
"""Computes the CDF of the maximum of k selections from this dist.
k: int
returns: new Cdf
"""
cdf = self.MakeCdf()
return cdf.Max(k)
class Joint(Pmf):
"""Represents a joint distribution.
The values are sequences (usually tuples)
"""
def Marginal(self, i, label=None):
"""Gets the marginal distribution of the indicated variable.
i: index of the variable we want
Returns: Pmf
"""
pmf = Pmf(label=label)
for vs, prob in self.Items():
pmf.Incr(vs[i], prob)
return pmf
def Conditional(self, i, j, val, label=None):
"""Gets the conditional distribution of the indicated variable.
Distribution of vs[i], conditioned on vs[j] = val.
i: index of the variable we want
j: which variable is conditioned on
val: the value the jth variable has to have
Returns: Pmf
"""
pmf = Pmf(label=label)
for vs, prob in self.Items():
if vs[j] != val:
continue
pmf.Incr(vs[i], prob)
pmf.Normalize()
return pmf
def MaxLikeInterval(self, percentage=90):
"""Returns the maximum-likelihood credible interval.
If percentage=90, computes a 90% CI containing the values
with the highest likelihoods.
percentage: float between 0 and 100
Returns: list of values from the suite
"""
interval = []
total = 0
t = [(prob, val) for val, prob in self.Items()]
t.sort(reverse=True)
for prob, val in t:
interval.append(val)
total += prob
if total >= percentage / 100.0:
break
return interval
def MakeJoint(pmf1, pmf2):
"""Joint distribution of values from pmf1 and pmf2.
Assumes that the PMFs represent independent random variables.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
Joint pmf of value pairs
"""
joint = Joint()
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
joint.Set((v1, v2), p1 * p2)
return joint
def MakeHistFromList(t, label=None):
"""Makes a histogram from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this histogram
Returns:
Hist object
"""
return Hist(t, label=label)
def MakeHistFromDict(d, label=None):
"""Makes a histogram from a map from values to frequencies.
Args:
d: dictionary that maps values to frequencies
label: string label for this histogram
Returns:
Hist object
"""
return Hist(d, label)
def MakePmfFromList(t, label=None):
"""Makes a PMF from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(t, label=label)
def MakePmfFromDict(d, label=None):
"""Makes a PMF from a map from values to probabilities.
Args:
d: dictionary that maps values to probabilities
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(d, label=label)
def MakePmfFromItems(t, label=None):
"""Makes a PMF from a sequence of value-probability pairs
Args:
t: sequence of value-probability pairs
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(dict(t), label=label)
def MakePmfFromHist(hist, label=None):
"""Makes a normalized PMF from a Hist object.
Args:
hist: Hist object
label: string label
Returns:
Pmf object
"""
if label is None:
label = hist.label
return Pmf(hist, label=label)
def MakeMixture(metapmf, label='mix'):
"""Make a mixture distribution.
Args:
metapmf: Pmf that maps from Pmfs to probs.
label: string label for the new Pmf.
Returns: Pmf object.
"""
mix = Pmf(label=label)
for pmf, p1 in metapmf.Items():
for x, p2 in pmf.Items():
mix.Incr(x, p1 * p2)
return mix
def MakeUniformPmf(low, high, n):
"""Make a uniform Pmf.
low: lowest value (inclusive)
high: highest value (inclusize)
n: number of values
"""
pmf = Pmf()
for x in np.linspace(low, high, n):
pmf.Set(x, 1)
pmf.Normalize()
return pmf
class Cdf(object):
"""Represents a cumulative distribution function.
Attributes:
xs: sequence of values
ps: sequence of probabilities
label: string used as a graph label.
"""
def __init__(self, obj=None, ps=None, label=None):
"""Initializes.
If ps is provided, obj must be the corresponding list of values.
obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
ps: list of cumulative probabilities
label: string label
"""
self.label = label if label is not None else '_nolegend_'
if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
if not label:
self.label = label if label is not None else obj.label
if obj is None:
# caller does not provide obj, make an empty Cdf
self.xs = np.asarray([])
self.ps = np.asarray([])
if ps is not None:
logging.warning("Cdf: can't pass ps without also passing xs.")
return
else:
# if the caller provides xs and ps, just store them
if ps is not None:
if isinstance(ps, str):
logging.warning("Cdf: ps can't be a string")
self.xs = np.asarray(obj)
self.ps = np.asarray(ps)
return
# caller has provided just obj, not ps
if isinstance(obj, Cdf):
self.xs = copy.copy(obj.xs)
self.ps = copy.copy(obj.ps)
return
if isinstance(obj, _DictWrapper):
dw = obj
else:
dw = Hist(obj)
if len(dw) == 0:
self.xs = np.asarray([])
self.ps = np.asarray([])
return
xs, freqs = zip(*sorted(dw.Items()))
self.xs = np.asarray(xs)
self.ps = np.cumsum(freqs, dtype=np.float)
self.ps /= self.ps[-1]
def __str__(self):
return 'Cdf(%s, %s)' % (str(self.xs), str(self.ps))
__repr__ = __str__
def __len__(self):
return len(self.xs)
def __getitem__(self, x):
return self.Prob(x)
def __setitem__(self):
raise UnimplementedMethodException()
def __delitem__(self):
raise UnimplementedMethodException()
def __eq__(self, other):
return np.all(self.xs == other.xs) and np.all(self.ps == other.ps)
def Copy(self, label=None):
"""Returns a copy of this Cdf.
label: string label for the new Cdf
"""
if label is None:
label = self.label
return Cdf(list(self.xs), list(self.ps), label=label)
def MakePmf(self, label=None):
"""Makes a Pmf."""
if label is None:
label = self.label
return Pmf(self, label=label)
def Values(self):
"""Returns a sorted list of values.
"""
return self.xs
def Items(self):
"""Returns a sorted sequence of (value, probability) pairs.
Note: in Python3, returns an iterator.
"""
a = self.ps
b = np.roll(a, 1)
b[0] = 0
return zip(self.xs, a-b)
def Shift(self, term):
"""Adds a term to the xs.
term: how much to add
"""
new = self.Copy()
# don't use +=, or else an int array + float yields int array
new.xs = new.xs + term
return new
def Scale(self, factor):
"""Multiplies the xs by a factor.
factor: what to multiply by
"""
new = self.Copy()
# don't use *=, or else an int array * float yields int array
new.xs = new.xs * factor
return new
def Prob(self, x):
"""Returns CDF(x), the probability that corresponds to value x.
Args:
x: number
Returns:
float probability
"""
if x < self.xs[0]:
return 0.0
index = bisect.bisect(self.xs, x)
p = self.ps[index-1]
return p
def Probs(self, xs):
"""Gets probabilities for a sequence of values.
xs: any sequence that can be converted to NumPy array
returns: NumPy array of cumulative probabilities
"""
xs = np.asarray(xs)
index = np.searchsorted(self.xs, xs, side='right')
ps = self.ps[index-1]
ps[xs < self.xs[0]] = 0.0
return ps
ProbArray = Probs
def Value(self, p):
"""Returns InverseCDF(p), the value that corresponds to probability p.
Args:
p: number in the range [0, 1]
Returns:
number value
"""
if p < 0 or p > 1:
raise ValueError('Probability p must be in range [0, 1]')
index = bisect.bisect_left(self.ps, p)
return self.xs[index]
def ValueArray(self, ps):
"""Returns InverseCDF(p), the value that corresponds to probability p.
Args:
ps: NumPy array of numbers in the range [0, 1]
Returns:
NumPy array of values
"""
ps = np.asarray(ps)
if np.any(ps < 0) or np.any(ps > 1):
raise ValueError('Probability p must be in range [0, 1]')
index = np.searchsorted(self.ps, ps, side='left')
return self.xs[index]
def Percentile(self, p):
"""Returns the value that corresponds to percentile p.
Args:
p: number in the range [0, 100]
Returns:
number value
"""
return self.Value(p / 100.0)
def PercentileRank(self, x):
"""Returns the percentile rank of the value x.
x: potential value in the CDF
returns: percentile rank in the range 0 to 100
"""
return self.Prob(x) * 100.0
def Random(self):
"""Chooses a random value from this distribution."""
return self.Value(random.random())
def Sample(self, n):
"""Generates a random sample from this distribution.
n: int length of the sample
returns: NumPy array
"""
ps = np.random.random(n)
return self.ValueArray(ps)
def Mean(self):
"""Computes the mean of a CDF.
Returns:
float mean
"""
old_p = 0
total = 0.0
for x, new_p in zip(self.xs, self.ps):
p = new_p - old_p
total += p * x
old_p = new_p
return total
def CredibleInterval(self, percentage=90):
"""Computes the central credible interval.
If percentage=90, computes the 90% CI.
Args:
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
prob = (1 - percentage / 100.0) / 2
interval = self.Value(prob), self.Value(1 - prob)
return interval
ConfidenceInterval = CredibleInterval
def _Round(self, multiplier=1000.0):
"""
An entry is added to the cdf only if the percentile differs
from the previous value in a significant digit, where the number
of significant digits is determined by multiplier. The
default is 1000, which keeps log10(1000) = 3 significant digits.
"""
# TODO(write this method)
raise UnimplementedMethodException()
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
An empirical CDF is a step function; linear interpolation
can be misleading.
Note: options are ignored
Returns:
tuple of (xs, ps)
"""
def interleave(a, b):
c = np.empty(a.shape[0] + b.shape[0])
c[::2] = a
c[1::2] = b
return c
a = np.array(self.xs)
xs = interleave(a, a)
shift_ps = np.roll(self.ps, 1)
shift_ps[0] = 0
ps = interleave(shift_ps, self.ps)
return xs, ps
def Max(self, k):
"""Computes the CDF of the maximum of k selections from this dist.
k: int
returns: new Cdf
"""
cdf = self.Copy()
cdf.ps **= k
return cdf
def MakeCdfFromItems(items, label=None):
"""Makes a cdf from an unsorted sequence of (value, frequency) pairs.
Args:
items: unsorted sequence of (value, frequency) pairs
label: string label for this CDF
Returns:
cdf: list of (value, fraction) pairs
"""
return Cdf(dict(items), label=label)
def MakeCdfFromDict(d, label=None):
"""Makes a CDF from a dictionary that maps values to frequencies.
Args:
d: dictionary that maps values to frequencies.
label: string label for the data.
Returns:
Cdf object
"""
return Cdf(d, label=label)
def MakeCdfFromList(seq, label=None):
"""Creates a CDF from an unsorted sequence.
Args:
seq: unsorted sequence of sortable values
label: string label for the cdf
Returns:
Cdf object
"""
return Cdf(seq, label=label)
def MakeCdfFromHist(hist, label=None):
"""Makes a CDF from a Hist object.
Args:
hist: Pmf.Hist object
label: string label for the data.
Returns:
Cdf object
"""
if label is None:
label = hist.label
return Cdf(hist, label=label)
def MakeCdfFromPmf(pmf, label=None):
"""Makes a CDF from a Pmf object.
Args:
pmf: Pmf.Pmf object
label: string label for the data.
Returns:
Cdf object
"""
if label is None:
label = pmf.label
return Cdf(pmf, label=label)
class UnimplementedMethodException(Exception):
"""Exception if someone calls a method that should be overridden."""
class Suite(Pmf):
"""Represents a suite of hypotheses and their probabilities."""
def Update(self, data):
"""Updates each hypothesis based on the data.
data: any representation of the data
returns: the normalizing constant
"""
for hypo in self.Values():
like = self.Likelihood(data, hypo)
self.Mult(hypo, like)
return self.Normalize()
def LogUpdate(self, data):
"""Updates a suite of hypotheses based on new data.
Modifies the suite directly; if you want to keep the original, make
a copy.
Note: unlike Update, LogUpdate does not normalize.
Args:
data: any representation of the data
"""
for hypo in self.Values():
like = self.LogLikelihood(data, hypo)
self.Incr(hypo, like)
def UpdateSet(self, dataset):
"""Updates each hypothesis based on the dataset.
This is more efficient than calling Update repeatedly because
it waits until the end to Normalize.
Modifies the suite directly; if you want to keep the original, make
a copy.
dataset: a sequence of data
returns: the normalizing constant
"""
for data in dataset:
for hypo in self.Values():
like = self.Likelihood(data, hypo)
self.Mult(hypo, like)
return self.Normalize()
def LogUpdateSet(self, dataset):
"""Updates each hypothesis based on the dataset.
Modifies the suite directly; if you want to keep the original, make
a copy.
dataset: a sequence of data
returns: None
"""
for data in dataset:
self.LogUpdate(data)
def Likelihood(self, data, hypo):
"""Computes the likelihood of the data under the hypothesis.
hypo: some representation of the hypothesis
data: some representation of the data
"""
raise UnimplementedMethodException()
def LogLikelihood(self, data, hypo):
"""Computes the log likelihood of the data under the hypothesis.
hypo: some representation of the hypothesis
data: some representation of the data
"""
raise UnimplementedMethodException()
def Print(self):
"""Prints the hypotheses and their probabilities."""
for hypo, prob in sorted(self.Items()):
print(hypo, prob)
def MakeOdds(self):
"""Transforms from probabilities to odds.
Values with prob=0 are removed.
"""
for hypo, prob in self.Items():
if prob:
self.Set(hypo, Odds(prob))
else:
self.Remove(hypo)
def MakeProbs(self):
"""Transforms from odds to probabilities."""
for hypo, odds in self.Items():
self.Set(hypo, Probability(odds))
def MakeSuiteFromList(t, label=None):
"""Makes a suite from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this suite
Returns:
Suite object
"""
hist = MakeHistFromList(t, label=label)
d = hist.GetDict()
return MakeSuiteFromDict(d)
def MakeSuiteFromHist(hist, label=None):
"""Makes a normalized suite from a Hist object.
Args:
hist: Hist object
label: string label
Returns:
Suite object
"""
if label is None:
label = hist.label
# make a copy of the dictionary
d = dict(hist.GetDict())
return MakeSuiteFromDict(d, label)
def MakeSuiteFromDict(d, label=None):
"""Makes a suite from a map from values to probabilities.
Args:
d: dictionary that maps values to probabilities
label: string label for this suite
Returns:
Suite object
"""
suite = Suite(label=label)
suite.SetDict(d)
suite.Normalize()
return suite
class Pdf(object):
"""Represents a probability density function (PDF)."""
def Density(self, x):
"""Evaluates this Pdf at x.
Returns: float or NumPy array of probability density
"""
raise UnimplementedMethodException()
def GetLinspace(self):
"""Get a linspace for plotting.
Not all subclasses of Pdf implement this.
Returns: numpy array
"""
raise UnimplementedMethodException()
def MakePmf(self, **options):
"""Makes a discrete version of this Pdf.
options can include
label: string
low: low end of range
high: high end of range
n: number of places to evaluate
Returns: new Pmf
"""
label = options.pop('label', '')
xs, ds = self.Render(**options)
return Pmf(dict(zip(xs, ds)), label=label)
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
If options includes low and high, it must also include n;
in that case the density is evaluated an n locations between
low and high, including both.
If options includes xs, the density is evaluate at those location.
Otherwise, self.GetLinspace is invoked to provide the locations.
Returns:
tuple of (xs, densities)
"""
low, high = options.pop('low', None), options.pop('high', None)
if low is not None and high is not None:
n = options.pop('n', 101)
xs = np.linspace(low, high, n)
else:
xs = options.pop('xs', None)
if xs is None:
xs = self.GetLinspace()
ds = self.Density(xs)
return xs, ds
def Items(self):
"""Generates a sequence of (value, probability) pairs.
"""
return zip(*self.Render())
class NormalPdf(Pdf):
"""Represents the PDF of a Normal distribution."""
def __init__(self, mu=0, sigma=1, label=None):
"""Constructs a Normal Pdf with given mu and sigma.
mu: mean
sigma: standard deviation
label: string
"""
self.mu = mu
self.sigma = sigma
self.label = label if label is not None else '_nolegend_'
def __str__(self):
return 'NormalPdf(%f, %f)' % (self.mu, self.sigma)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
low, high = self.mu-3*self.sigma, self.mu+3*self.sigma
return np.linspace(low, high, 101)
def Density(self, xs):
"""Evaluates this Pdf at xs.
xs: scalar or sequence of floats
returns: float or NumPy array of probability density
"""
return stats.norm.pdf(xs, self.mu, self.sigma)
class ExponentialPdf(Pdf):
"""Represents the PDF of an exponential distribution."""
def __init__(self, lam=1, label=None):
"""Constructs an exponential Pdf with given parameter.
lam: rate parameter
label: string
"""
self.lam = lam
self.label = label if label is not None else '_nolegend_'
def __str__(self):
return 'ExponentialPdf(%f)' % (self.lam)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
low, high = 0, 5.0/self.lam
return np.linspace(low, high, 101)
def Density(self, xs):
"""Evaluates this Pdf at xs.
xs: scalar or sequence of floats
returns: float or NumPy array of probability density
"""
return stats.expon.pdf(xs, scale=1.0/self.lam)
class EstimatedPdf(Pdf):
"""Represents a PDF estimated by KDE."""
def __init__(self, sample, label=None):
"""Estimates the density function based on a sample.
sample: sequence of data
label: string
"""
self.label = label if label is not None else '_nolegend_'
self.kde = stats.gaussian_kde(sample)
low = min(sample)
high = max(sample)
self.linspace = np.linspace(low, high, 101)
def __str__(self):
return 'EstimatedPdf(label=%s)' % str(self.label)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
return self.linspace
def Density(self, xs):
"""Evaluates this Pdf at xs.
returns: float or NumPy array of probability density
"""
return self.kde.evaluate(xs)
def Sample(self, n):
"""Generates a random sample from the estimated Pdf.
n: size of sample
"""
# NOTE: we have to flatten because resample returns a 2-D
# array for some reason.
return self.kde.resample(n).flatten()
def CredibleInterval(pmf, percentage=90):
"""Computes a credible interval for a given distribution.
If percentage=90, computes the 90% CI.
Args:
pmf: Pmf object representing a posterior distribution
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
cdf = pmf.MakeCdf()
prob = (1 - percentage / 100.0) / 2
interval = cdf.Value(prob), cdf.Value(1 - prob)
return interval
def PmfProbLess(pmf1, pmf2):
"""Probability that a value from pmf1 is less than a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0.0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 < v2:
total += p1 * p2
return total
def PmfProbGreater(pmf1, pmf2):
"""Probability that a value from pmf1 is less than a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0.0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 > v2:
total += p1 * p2
return total
def PmfProbEqual(pmf1, pmf2):
"""Probability that a value from pmf1 equals a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0.0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 == v2:
total += p1 * p2
return total
def RandomSum(dists):
"""Chooses a random value from each dist and returns the sum.
dists: sequence of Pmf or Cdf objects
returns: numerical sum
"""
total = sum(dist.Random() for dist in dists)
return total
def SampleSum(dists, n):
"""Draws a sample of sums from a list of distributions.
dists: sequence of Pmf or Cdf objects
n: sample size
returns: new Pmf of sums
"""
pmf = Pmf(RandomSum(dists) for i in range(n))
return pmf
def EvalNormalPdf(x, mu, sigma):
"""Computes the unnormalized PDF of the normal distribution.
x: value
mu: mean
sigma: standard deviation
returns: float probability density
"""
return stats.norm.pdf(x, mu, sigma)
def MakeNormalPmf(mu, sigma, num_sigmas, n=201):
"""Makes a PMF discrete approx to a Normal distribution.
mu: float mean
sigma: float standard deviation
num_sigmas: how many sigmas to extend in each direction
n: number of values in the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
low = mu - num_sigmas * sigma
high = mu + num_sigmas * sigma
for x in np.linspace(low, high, n):
p = EvalNormalPdf(x, mu, sigma)
pmf.Set(x, p)
pmf.Normalize()
return pmf
def EvalBinomialPmf(k, n, p):
"""Evaluates the binomial PMF.
Returns the probabily of k successes in n trials with probability p.
"""
return stats.binom.pmf(k, n, p)
def MakeBinomialPmf(n, p):
"""Evaluates the binomial PMF.
Returns the distribution of successes in n trials with probability p.
"""
pmf = Pmf()
for k in range(n+1):
pmf[k] = stats.binom.pmf(k, n, p)
return pmf
def EvalHypergeomPmf(k, N, K, n):
"""Evaluates the hypergeometric PMF.
Returns the probabily of k successes in n trials from a population
N with K successes in it.
"""
return stats.hypergeom.pmf(k, N, K, n)
def EvalPoissonPmf(k, lam):
"""Computes the Poisson PMF.
k: number of events
lam: parameter lambda in events per unit time
returns: float probability
"""
# don't use the scipy function (yet). for lam=0 it returns NaN;
# should be 0.0
# return stats.poisson.pmf(k, lam)
return lam ** k * math.exp(-lam) / special.gamma(k+1)
def MakePoissonPmf(lam, high, step=1):
"""Makes a PMF discrete approx to a Poisson distribution.
lam: parameter lambda in events per unit time
high: upper bound of the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
for k in range(0, high + 1, step):
p = EvalPoissonPmf(k, lam)
pmf.Set(k, p)
pmf.Normalize()
return pmf
def EvalExponentialPdf(x, lam):
"""Computes the exponential PDF.
x: value
lam: parameter lambda in events per unit time
returns: float probability density
"""
return lam * math.exp(-lam * x)
def EvalExponentialCdf(x, lam):
"""Evaluates CDF of the exponential distribution with parameter lam."""
return 1 - math.exp(-lam * x)
def MakeExponentialPmf(lam, high, n=200):
"""Makes a PMF discrete approx to an exponential distribution.
lam: parameter lambda in events per unit time
high: upper bound
n: number of values in the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
for x in np.linspace(0, high, n):
p = EvalExponentialPdf(x, lam)
pmf.Set(x, p)
pmf.Normalize()
return pmf
def StandardNormalCdf(x):
"""Evaluates the CDF of the standard Normal distribution.
See http://en.wikipedia.org/wiki/Normal_distribution
#Cumulative_distribution_function
Args:
x: float
Returns:
float
"""
return (math.erf(x / ROOT2) + 1) / 2
def EvalNormalCdf(x, mu=0, sigma=1):
"""Evaluates the CDF of the normal distribution.
Args:
x: float
mu: mean parameter
sigma: standard deviation parameter
Returns:
float
"""
return stats.norm.cdf(x, loc=mu, scale=sigma)
def EvalNormalCdfInverse(p, mu=0, sigma=1):
"""Evaluates the inverse CDF of the normal distribution.
See http://en.wikipedia.org/wiki/Normal_distribution#Quantile_function
Args:
p: float
mu: mean parameter
sigma: standard deviation parameter
Returns:
float
"""
return stats.norm.ppf(p, loc=mu, scale=sigma)
def EvalLognormalCdf(x, mu=0, sigma=1):
"""Evaluates the CDF of the lognormal distribution.
x: float or sequence
mu: mean parameter
sigma: standard deviation parameter
Returns: float or sequence
"""
return stats.lognorm.cdf(x, loc=mu, scale=sigma)
def RenderExpoCdf(lam, low, high, n=101):
"""Generates sequences of xs and ps for an exponential CDF.
lam: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
xs = np.linspace(low, high, n)
ps = 1 - np.exp(-lam * xs)
#ps = stats.expon.cdf(xs, scale=1.0/lam)
return xs, ps
def RenderNormalCdf(mu, sigma, low, high, n=101):
"""Generates sequences of xs and ps for a Normal CDF.
mu: parameter
sigma: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
xs = np.linspace(low, high, n)
ps = stats.norm.cdf(xs, mu, sigma)
return xs, ps
def RenderParetoCdf(xmin, alpha, low, high, n=50):
"""Generates sequences of xs and ps for a Pareto CDF.
xmin: parameter
alpha: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
if low < xmin:
low = xmin
xs = np.linspace(low, high, n)
ps = 1 - (xs / xmin) ** -alpha
#ps = stats.pareto.cdf(xs, scale=xmin, b=alpha)
return xs, ps
class Beta(object):
"""Represents a Beta distribution.
See http://en.wikipedia.org/wiki/Beta_distribution
"""
def __init__(self, alpha=1, beta=1, label=None):
"""Initializes a Beta distribution."""
self.alpha = alpha
self.beta = beta
self.label = label if label is not None else '_nolegend_'
def Update(self, data):
"""Updates a Beta distribution.
data: pair of int (heads, tails)
"""
heads, tails = data
self.alpha += heads
self.beta += tails
def Mean(self):
"""Computes the mean of this distribution."""
return self.alpha / (self.alpha + self.beta)
def Random(self):
"""Generates a random variate from this distribution."""
return random.betavariate(self.alpha, self.beta)
def Sample(self, n):
"""Generates a random sample from this distribution.
n: int sample size
"""
size = n,
return np.random.beta(self.alpha, self.beta, size)
def EvalPdf(self, x):
"""Evaluates the PDF at x."""
return x ** (self.alpha - 1) * (1 - x) ** (self.beta - 1)
def MakePmf(self, steps=101, label=None):
"""Returns a Pmf of this distribution.
Note: Normally, we just evaluate the PDF at a sequence
of points and treat the probability density as a probability
mass.
But if alpha or beta is less than one, we have to be
more careful because the PDF goes to infinity at x=0
and x=1. In that case we evaluate the CDF and compute
differences.
"""
if self.alpha < 1 or self.beta < 1:
cdf = self.MakeCdf()
pmf = cdf.MakePmf()
return pmf
xs = [i / (steps - 1.0) for i in range(steps)]
probs = [self.EvalPdf(x) for x in xs]
pmf = Pmf(dict(zip(xs, probs)), label=label)
return pmf
def MakeCdf(self, steps=101):
"""Returns the CDF of this distribution."""
xs = [i / (steps - 1.0) for i in range(steps)]
ps = [special.betainc(self.alpha, self.beta, x) for x in xs]
cdf = Cdf(xs, ps)
return cdf
class Dirichlet(object):
"""Represents a Dirichlet distribution.
See http://en.wikipedia.org/wiki/Dirichlet_distribution
"""
def __init__(self, n, conc=1, label=None):
"""Initializes a Dirichlet distribution.
n: number of dimensions
conc: concentration parameter (smaller yields more concentration)
label: string label
"""
if n < 2:
raise ValueError('A Dirichlet distribution with '
'n<2 makes no sense')
self.n = n
self.params = np.ones(n, dtype=np.float) * conc
self.label = label if label is not None else '_nolegend_'
def Update(self, data):
"""Updates a Dirichlet distribution.
data: sequence of observations, in order corresponding to params
"""
m = len(data)
self.params[:m] += data
def Random(self):
"""Generates a random variate from this distribution.
Returns: normalized vector of fractions
"""
p = np.random.gamma(self.params)
return p / p.sum()
def Likelihood(self, data):
"""Computes the likelihood of the data.
Selects a random vector of probabilities from this distribution.
Returns: float probability
"""
m = len(data)
if self.n < m:
return 0
x = data
p = self.Random()
q = p[:m] ** x
return q.prod()
def LogLikelihood(self, data):
"""Computes the log likelihood of the data.
Selects a random vector of probabilities from this distribution.
Returns: float log probability
"""
m = len(data)
if self.n < m:
return float('-inf')
x = self.Random()
y = np.log(x[:m]) * data
return y.sum()
def MarginalBeta(self, i):
"""Computes the marginal distribution of the ith element.
See http://en.wikipedia.org/wiki/Dirichlet_distribution
#Marginal_distributions
i: int
Returns: Beta object
"""
alpha0 = self.params.sum()
alpha = self.params[i]
return Beta(alpha, alpha0 - alpha)
def PredictivePmf(self, xs, label=None):
"""Makes a predictive distribution.
xs: values to go into the Pmf
Returns: Pmf that maps from x to the mean prevalence of x
"""
alpha0 = self.params.sum()
ps = self.params / alpha0
return Pmf(zip(xs, ps), label=label)
def BinomialCoef(n, k):
"""Compute the binomial coefficient "n choose k".
n: number of trials
k: number of successes
Returns: float
"""
return scipy.misc.comb(n, k)
def LogBinomialCoef(n, k):
"""Computes the log of the binomial coefficient.
http://math.stackexchange.com/questions/64716/
approximating-the-logarithm-of-the-binomial-coefficient
n: number of trials
k: number of successes
Returns: float
"""
return n * math.log(n) - k * math.log(k) - (n - k) * math.log(n - k)
def NormalProbability(ys, jitter=0.0):
"""Generates data for a normal probability plot.
ys: sequence of values
jitter: float magnitude of jitter added to the ys
returns: numpy arrays xs, ys
"""
n = len(ys)
xs = np.random.normal(0, 1, n)
xs.sort()
if jitter:
ys = Jitter(ys, jitter)
else:
ys = np.array(ys)
ys.sort()
return xs, ys
def Jitter(values, jitter=0.5):
"""Jitters the values by adding a uniform variate in (-jitter, jitter).
values: sequence
jitter: scalar magnitude of jitter
returns: new numpy array
"""
n = len(values)
return np.random.normal(0, jitter, n) + values
def NormalProbabilityPlot(sample, fit_color='0.8', **options):
"""Makes a normal probability plot with a fitted line.
sample: sequence of numbers
fit_color: color string for the fitted line
options: passed along to Plot
"""
xs, ys = NormalProbability(sample)
mean, var = MeanVar(sample)
std = math.sqrt(var)
fit = FitLine(xs, mean, std)
thinkplot.Plot(*fit, color=fit_color, label='model')
xs, ys = NormalProbability(sample)
thinkplot.Plot(xs, ys, **options)
def Mean(xs):
"""Computes mean.
xs: sequence of values
returns: float mean
"""
return np.mean(xs)
def Var(xs, mu=None, ddof=0):
"""Computes variance.
xs: sequence of values
mu: option known mean
ddof: delta degrees of freedom
returns: float
"""
xs = np.asarray(xs)
if mu is None:
mu = xs.mean()
ds = xs - mu
return np.dot(ds, ds) / (len(xs) - ddof)
def Std(xs, mu=None, ddof=0):
"""Computes standard deviation.
xs: sequence of values
mu: option known mean
ddof: delta degrees of freedom
returns: float
"""
var = Var(xs, mu, ddof)
return math.sqrt(var)
def MeanVar(xs, ddof=0):
"""Computes mean and variance.
Based on http://stackoverflow.com/questions/19391149/
numpy-mean-and-variance-from-single-function
xs: sequence of values
ddof: delta degrees of freedom
returns: pair of float, mean and var
"""
xs = np.asarray(xs)
mean = xs.mean()
s2 = Var(xs, mean, ddof)
return mean, s2
def Trim(t, p=0.01):
"""Trims the largest and smallest elements of t.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
sequence of values
"""
n = int(p * len(t))
t = sorted(t)[n:-n]
return t
def TrimmedMean(t, p=0.01):
"""Computes the trimmed mean of a sequence of numbers.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
float
"""
t = Trim(t, p)
return Mean(t)
def TrimmedMeanVar(t, p=0.01):
"""Computes the trimmed mean and variance of a sequence of numbers.
Side effect: sorts the list.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
float
"""
t = Trim(t, p)
mu, var = MeanVar(t)
return mu, var
def CohenEffectSize(group1, group2):
"""Compute Cohen's d.
group1: Series or NumPy array
group2: Series or NumPy array
returns: float
"""
diff = group1.mean() - group2.mean()
n1, n2 = len(group1), len(group2)
var1 = group1.var()
var2 = group2.var()
pooled_var = (n1 * var1 + n2 * var2) / (n1 + n2)
d = diff / math.sqrt(pooled_var)
return d
def Cov(xs, ys, meanx=None, meany=None):
"""Computes Cov(X, Y).
Args:
xs: sequence of values
ys: sequence of values
meanx: optional float mean of xs
meany: optional float mean of ys
Returns:
Cov(X, Y)
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
if meanx is None:
meanx = np.mean(xs)
if meany is None:
meany = np.mean(ys)
cov = np.dot(xs-meanx, ys-meany) / len(xs)
return cov
def Corr(xs, ys):
"""Computes Corr(X, Y).
Args:
xs: sequence of values
ys: sequence of values
Returns:
Corr(X, Y)
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
meanx, varx = MeanVar(xs)
meany, vary = MeanVar(ys)
corr = Cov(xs, ys, meanx, meany) / math.sqrt(varx * vary)
return corr
def SerialCorr(series, lag=1):
"""Computes the serial correlation of a series.
series: Series
lag: integer number of intervals to shift
returns: float correlation
"""
xs = series[lag:]
ys = series.shift(lag)[lag:]
corr = Corr(xs, ys)
return corr
def SpearmanCorr(xs, ys):
"""Computes Spearman's rank correlation.
Args:
xs: sequence of values
ys: sequence of values
Returns:
float Spearman's correlation
"""
xranks = | pandas.Series(xs) | pandas.Series |
import os
import logging
import json
import itertools
import collections
import yaml
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import scipy
import LCTM.metrics
from kinemparse import decode
from mathtools import utils # , metrics
logger = logging.getLogger(__name__)
def loadPartInfo(
event_attr_fn, connection_attr_fn, assembly_attr_fn,
background_action='', assembly_vocab=None):
def gen_assembly_vocab(final_assemblies, part_categories, ref_vocab=None):
def get_parts(joints):
return frozenset(p for joint in joints for p in joint)
def remove_part(assembly, part):
removed = frozenset(joint for joint in assembly if part not in joint)
# Renumber identical parts using part_to_class
part_names = tuple(sorted(get_parts(removed)))
part_classes = tuple(part_to_class.get(p, p) for p in part_names)
rename = {}
class_to_parts = collections.defaultdict(list)
for part_name, part_class in zip(part_names, part_classes):
class_to_parts[part_class].append(part_name)
if part_class in part_categories:
i = len(class_to_parts[part_class]) - 1
new_part_name = part_categories[part_class][i]
else:
new_part_name = part_name
rename[part_name] = new_part_name
removed = frozenset(
frozenset(rename[part] for part in joint)
for joint in removed
)
return removed
def is_possible(assembly):
def shelf_with_tabletop(assembly):
def replace(joint, replacee, replacer):
replaced = set(joint)
replaced.remove(replacee)
replaced.add(replacer)
return frozenset(replaced)
for joint in assembly:
if 'shelf' in joint and replace(joint, 'shelf', 'table') not in joint:
return False
return True
predicates = (shelf_with_tabletop,)
return all(x(assembly) for x in predicates)
part_to_class = {
part: class_name
for class_name, parts in part_categories.items()
for part in parts
}
final_assemblies = tuple(
frozenset(frozenset(j) for j in joints)
for joints in final_assemblies
)
stack = list(final_assemblies)
assembly_vocab = set(final_assemblies)
while stack:
assembly = stack.pop()
for part in get_parts(assembly):
child = remove_part(assembly, part)
if is_possible(child) and child not in assembly_vocab:
assembly_vocab.add(child)
stack.append(child)
assembly_vocab = tuple(
tuple(sorted(tuple(sorted(j)) for j in joints))
for joints in sorted(assembly_vocab, key=len)
)
if ref_vocab is not None:
v = list(ref_vocab)
for a in assembly_vocab:
utils.getIndex(a, v)
assembly_vocab = tuple(v)
return assembly_vocab
with open(assembly_attr_fn, 'rt') as file_:
data = json.load(file_)
part_vocab = tuple(data['part_vocab'])
part_categories = data['part_categories']
joint_vocab = tuple(tuple(sorted(joint)) for joint in data['joint_vocab'])
final_assembly_attrs = tuple(
tuple(sorted(tuple(sorted(joint)) for joint in joints))
for joints in data['final_assemblies']
)
assembly_attrs = gen_assembly_vocab(
final_assembly_attrs, part_categories, ref_vocab=assembly_vocab
)
connection_probs, action_vocab, connection_vocab = connection_attrs_to_probs(
pd.read_csv(connection_attr_fn, index_col=False, keep_default_na=False),
# normalize=True
)
with open(event_attr_fn, 'rt') as file_:
event_probs, event_vocab = event_attrs_to_probs(
# pd.read_csv(event_attr_fn, index_col=False, keep_default_na=False),
json.load(file_),
part_categories, action_vocab, joint_vocab,
background_action=background_action,
# normalize=True
)
assembly_probs, assembly_vocab = assembly_attrs_to_probs(
assembly_attrs, joint_vocab, connection_vocab,
# normalize=True
)
num_assemblies = len(assembly_attrs)
transition_probs = np.zeros((num_assemblies + 1, num_assemblies + 1), dtype=float)
transition_probs[0, :-1] = prior_probs(assembly_attrs)
transition_probs[1:, -1] = final_probs(assembly_attrs, final_assembly_attrs)
transition_probs[1:, :-1] = assembly_transition_probs(
assembly_attrs,
allow_self_transitions=True
)
probs = (event_probs, connection_probs, assembly_probs, transition_probs)
vocabs = {
'event_vocab': event_vocab,
'part_vocab': part_vocab,
'action_vocab': action_vocab,
'joint_vocab': joint_vocab,
'connection_vocab': connection_vocab,
'assembly_vocab': assembly_vocab
}
return probs, vocabs
def connection_attrs_to_probs(action_attrs, normalize=False):
"""
Parameters
----------
action_attrs :
Returns
-------
scores :
action_vocab :
connecion_vocab :
"""
# Log-domain values for zero and one
# zero = -np.inf
# one = 0
zero = 0
tx_sep = '->'
tx_cols = [name for name in action_attrs.columns if tx_sep in name]
tx_vocab = tuple(
tuple(int(x) for x in col_name.split(tx_sep))
for col_name in tx_cols
)
connection_vocab = tuple(
sorted(frozenset().union(*[frozenset(t) for t in tx_vocab]))
)
action_vocab = tuple(action_attrs['action'].to_list())
num_actions = len(action_vocab)
num_connections = len(connection_vocab)
scores = np.full((num_actions, num_connections, num_connections), zero, dtype=float)
action_attrs = action_attrs.set_index('action')
for i_action, action_name in enumerate(action_vocab):
tx_weights = tuple(action_attrs.loc[action_name][c] for c in tx_cols)
for i_edge, tx_weight in enumerate(tx_weights):
i_conn_cur, i_conn_next = tx_vocab[i_edge]
scores[i_action, i_conn_cur, i_conn_next] = tx_weight
if normalize:
# Compute P(c_cur, c_next | action)
action_counts = scores.reshape((scores.shape[0], -1)).sum(axis=1)
scores /= action_counts[:, None, None]
return scores, action_vocab, connection_vocab
def event_attrs_to_probs(
event_attrs, part_categories, action_vocab, joint_vocab,
background_action='', normalize=False):
def expand_part_categories(and_rules):
def expand(parts):
if isinstance(parts, str):
parts = [parts]
parts = set(x for name in parts for x in part_categories.get(name, [name]))
return sorted(parts)
def to_or_rules(joint):
new_joint = [expand(parts) for parts in joint]
joints_set = frozenset([frozenset(prod) for prod in itertools.product(*new_joint)])
joints_tup = tuple(tuple(sorted(x)) for x in joints_set)
return joints_tup
and_or_rules = tuple(to_or_rules(joint) for joint in and_rules if len(joint) > 1)
or_and_rules = tuple(itertools.product(*and_or_rules))
return or_and_rules
# Log-domain values for zero and one
# zero = -np.inf
# one = 0
zero = 0
one = 1
# event index --> all data
event_vocab = tuple(event_attrs.keys())
action_integerizer = {x: i for i, x in enumerate(action_vocab)}
num_actions = len(action_vocab)
num_joints = len(joint_vocab)
scores = []
for i_event, event_name in enumerate(event_vocab):
event_data = event_attrs[event_name]
action_name = event_data['action']
and_rules = event_data['parts']
or_and_rules = expand_part_categories(and_rules)
num_modes = len(or_and_rules)
event_scores = np.full((num_modes, num_actions, num_joints), zero, dtype=float)
for i_mode, active_joints in enumerate(or_and_rules):
for i_joint, joint in enumerate(joint_vocab):
action = action_name if joint in active_joints else background_action
i_action = action_integerizer[action]
event_scores[i_mode, i_action, i_joint] = one
scores.append(event_scores)
if normalize:
# Compute P(action, joint | event)
# event_counts = scores.reshape((scores.shape[0], -1)).sum(axis=1)
# scores /= event_counts[:, None, None]
raise NotImplementedError()
return scores, event_vocab
def assembly_attrs_to_probs(
assembly_attrs, joint_vocab, connection_vocab,
disconnected_val=0, connected_val=1, normalize=False):
"""
Parameters
----------
assembly_attrs :
joint_vocab :
connection_vocab :
Returns
-------
scores :
assembly_vocab :
"""
# Log-domain values for zero and one
# zero = -np.inf
# one = 0
zero = 0
one = 1
assembly_vocab = tuple(
tuple(sorted(set(part for joint in a for part in joint)))
for i, a in enumerate(assembly_attrs)
)
num_assemblies = len(assembly_vocab)
num_joints = len(joint_vocab)
num_connections = len(connection_vocab)
joint_integerizer = {x: i for i, x in enumerate(joint_vocab)}
connection_integerizer = {x: i for i, x in enumerate(connection_vocab)}
disconnected_index = connection_integerizer[disconnected_val]
connected_index = connection_integerizer[connected_val]
scores = np.full((num_assemblies, num_joints, num_connections), zero, dtype=float)
for i_assembly, _ in enumerate(assembly_vocab):
joints = assembly_attrs[i_assembly]
connection_indices = np.full((num_joints,), disconnected_index, dtype=int)
for joint in joints:
i_joint = joint_integerizer[joint]
connection_indices[i_joint] = connected_index
for i_joint, i_connection in enumerate(connection_indices):
scores[i_assembly, i_joint, i_connection] = one
if normalize:
# Compute P(assembly | joint, connection)
event_counts = scores.sum(axis=0)
scores /= event_counts[None, :, :]
return scores, assembly_vocab
def prior_probs(assembly_attrs):
zero = 0
one = 1
def score(joints):
if joints:
return zero
return one
num_assemblies = len(assembly_attrs)
scores = np.full((num_assemblies,), zero, dtype=float)
for i, joints in enumerate(assembly_attrs):
scores[i] = score(joints)
return scores
def final_probs(assembly_attrs, final_assembly_attrs):
zero = 0
one = 1
def score(joints):
if joints in final_assembly_attrs:
return one
return zero
num_assemblies = len(assembly_attrs)
scores = np.full((num_assemblies,), zero, dtype=float)
for i, joints in enumerate(assembly_attrs):
scores[i] = score(joints)
return scores
def assembly_transition_probs(assembly_attrs, allow_self_transitions=False):
"""
"""
zero = 0
one = 1
def score(cur_joints, next_joints):
def num_diff(lhs, rhs):
return sum(x not in rhs for x in lhs)
def get_parts(joints):
return frozenset(p for joint in joints for p in joint)
if allow_self_transitions and cur_joints == next_joints:
return one
if num_diff(cur_joints, next_joints) != 0:
return zero
if not cur_joints and len(next_joints) == 1:
return one
cur_parts = get_parts(cur_joints)
next_parts = get_parts(next_joints)
if num_diff(next_parts, cur_parts) != 1:
return zero
return one
num_assemblies = len(assembly_attrs)
scores = np.full((num_assemblies, num_assemblies), zero, dtype=float)
for i_cur, cur_joints in enumerate(assembly_attrs):
for i_next, next_joints in enumerate(assembly_attrs):
scores[i_cur, i_next] = score(cur_joints, next_joints)
return scores
def event_to_assembly_scores(event_probs, connection_probs, assembly_probs, support_only=True):
def make_score(p_connection, p_cur_assembly, p_next_assembly, p_action):
joint_probs = np.stack(
tuple(
np.einsum(
'aij,ik,jk,ak->k',
p_connection, p_cur_assembly, p_next_assembly, p
)
for p in p_action
), axis=0
)
score = scipy.special.logsumexp(np.log(joint_probs).sum(axis=1))
return score
num_assemblies = assembly_probs.shape[0]
num_events = len(event_probs)
# Log-domain values for zero and one
zero = -np.inf
# one = 0
scores = np.full((num_events, num_assemblies, num_assemblies), zero, dtype=float)
for i_event in range(num_events):
for i_cur in range(num_assemblies):
for i_next in range(num_assemblies):
score = make_score(
connection_probs,
assembly_probs[i_cur].T, assembly_probs[i_next].T,
event_probs[i_event]
)
scores[i_event, i_cur, i_next] = score
if support_only:
scores[~np.isinf(scores)] = 0
return scores
def count_priors(label_seqs, num_classes, stride=None, approx_upto=None, support_only=False):
dur_counts = {}
class_counts = {}
for label_seq in label_seqs:
for label, dur in zip(*utils.computeSegments(label_seq[::stride])):
class_counts[label] = class_counts.get(label, 0) + 1
dur_counts[label, dur] = dur_counts.get((label, dur), 0) + 1
class_priors = np.zeros((num_classes))
for label, count in class_counts.items():
class_priors[label] = count
class_priors /= class_priors.sum()
max_dur = max(dur for label, dur in dur_counts.keys())
dur_priors = np.zeros((num_classes, max_dur))
for (label, dur), count in dur_counts.items():
assert dur
dur_priors[label, dur - 1] = count
dur_priors /= dur_priors.sum(axis=1, keepdims=True)
if approx_upto is not None:
cdf = dur_priors.cumsum(axis=1)
approx_bounds = (cdf >= approx_upto).argmax(axis=1)
dur_priors = dur_priors[:, :approx_bounds.max()]
if support_only:
dur_priors = (dur_priors > 0).astype(float)
return class_priors, dur_priors
def viz_priors(fn, class_priors, dur_priors):
fig, axes = plt.subplots(3)
axes[0].matshow(dur_priors)
axes[1].stem(class_priors)
plt.tight_layout()
plt.savefig(fn)
plt.close()
def viz_transition_probs(fig_dir, transitions, vocab):
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
for i, transition_arr in enumerate(transitions):
plt.matshow(transition_arr)
plt.title(vocab[i])
plt.savefig(os.path.join(fig_dir, f"action={i:02d}_{vocab[i].replace(' ', '-')}"))
plt.close()
def write_transition_probs(fig_dir, transitions, event_vocab, assembly_vocab):
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
for i, transition_arr in enumerate(transitions):
fn = os.path.join(fig_dir, f"action={i:02d}_{event_vocab[i].replace(' ', '-')}.txt")
rows, cols = transition_arr.nonzero()
strs = tuple(
f'{assembly_vocab[r]} -> {assembly_vocab[c]}: {transition_arr[r, c]:.2f}'
for r, c in zip(rows, cols)
)
with open(fn, 'wt') as file_:
file_.write('\n'.join(strs))
def write_labels(fn, label_seq, vocab):
seg_label_idxs, seg_durs = utils.computeSegments(label_seq)
seg_durs = np.array(seg_durs)
seg_ends = np.cumsum(seg_durs) - 1
seg_starts = np.array([0] + (seg_ends + 1)[:-1].tolist())
seg_labels = tuple(vocab[i] for i in seg_label_idxs)
d = {
'start': seg_starts,
'end': seg_ends,
'label': seg_labels
}
| pd.DataFrame(d) | pandas.DataFrame |
import numpy as np
import pandas as pd
import h5py
filename = 'LSTM_all_activations.csv'
resultant_filename = 'states.hdf5'
csv_df = pd.read_csv(filename)
# Creates matrices for capturing the state values
layer1_hidden_states_mx = []
layer1_cell_states_mx = []
layer2_hidden_states_mx = []
layer2_cell_states_mx = []
for (columnName, columnData) in csv_df.iteritems():
if('lstm_1/while/Exit_3' in columnName):
layer1_hidden_states_mx.append(csv_df[columnName])
if('lstm_1/while/Exit_4' in columnName):
layer1_cell_states_mx.append(csv_df[columnName])
if('lstm_2/while/Exit_3' in columnName):
layer2_hidden_states_mx.append(csv_df[columnName])
if('lstm_2/while/Exit_4' in columnName):
layer2_cell_states_mx.append(csv_df[columnName])
# Creates a dataframe to write the state information to.
layer1_hidden_states_df = pd.DataFrame(data=layer1_hidden_states_mx)
layer1_cell_states_df = pd.DataFrame(data=layer1_cell_states_mx)
layer2_hidden_states_df = | pd.DataFrame(data=layer2_hidden_states_mx) | pandas.DataFrame |
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series, concat
from pandas.core.base import DataError
from pandas.util import testing as tm
def test_rank_apply():
lev1 = tm.rands_array(10, 100)
lev2 = tm.rands_array(10, 130)
lab1 = np.random.randint(0, 100, size=500)
lab2 = np.random.randint(0, 130, size=500)
df = DataFrame(
{
"value": np.random.randn(500),
"key1": lev1.take(lab1),
"key2": lev2.take(lab2),
}
)
result = df.groupby(["key1", "key2"]).value.rank()
expected = [piece.value.rank() for key, piece in df.groupby(["key1", "key2"])]
expected = concat(expected, axis=0)
expected = expected.reindex(result.index)
tm.assert_series_equal(result, expected)
result = df.groupby(["key1", "key2"]).value.rank(pct=True)
expected = [
piece.value.rank(pct=True) for key, piece in df.groupby(["key1", "key2"])
]
expected = concat(expected, axis=0)
expected = expected.reindex(result.index)
| tm.assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
__version__ = '0.1.3'
__maintainer__ = '<NAME>'
__contributors__ = '<NAME>, <NAME>, <NAME>'
__email__ = '<EMAIL>'
__birthdate__ = '30.09.2020'
__status__ = 'prod' # options are: dev, test, prod
__license__ = 'BSD-3-Clause'
#----- imports & packages ------
if __package__ is None or __package__ == '':
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.dirname(__file__))))
from pathlib import Path
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from vencopy.scripts.globalFunctions import createFileString
class GridModeler:
def __init__(self, configDict: dict, datasetID: str):
"""
Class for modeling individual vehicle connection options dependent on parking purposes. Configurations on
charging station availabilities can be parametrized in gridConfig. globalConfig and datasetID are needed for
reading the input files.
:param configDict: A dictionary containing multiple yaml config files
:param datasetID: String, used for referencing the purpose input file
"""
self.globalConfig = configDict['globalConfig']
self.gridConfig = configDict['gridConfig']
self.flexConfig = configDict['flexConfig']
self.inputFileName = createFileString(globalConfig=self.globalConfig, fileKey='purposesProcessed',
datasetID=datasetID)
self.inputFilePath = Path(__file__).parent / self.globalConfig['pathRelative']['diaryOutput'] / self.inputFileName
self.inputDriveProfilesName = createFileString(globalConfig=self.globalConfig, fileKey='inputDataDriveProfiles',
datasetID=datasetID)
self.inputDriveProfilesPath = Path(__file__).parent / self.globalConfig['pathRelative']['diaryOutput'] / self.inputDriveProfilesName
self.scalarsPath = self.flexConfig['inputDataScalars'][datasetID]
self.gridMappings = self.gridConfig['chargingInfrastructureMappings']
self.gridProbability = self.gridConfig['gridAvailabilityProbability']
self.gridDistribution = self.gridConfig['gridAvailabilityDistribution']
self.gridFastCharging = self.gridConfig['gridAvailabilityFastCharging']
self.gridFastChargingThreshold = self.gridConfig['fastChargingThreshold']
self.outputFileName = createFileString(globalConfig=self.globalConfig, fileKey='inputDataPlugProfiles',
datasetID=datasetID)
self.outputFilePath = Path(__file__).parent / self.globalConfig['pathRelative']['gridOutput'] / self.outputFileName
self.purposeData = pd.read_csv(self.inputFilePath, keep_default_na=False)
self.driveData = pd.read_csv(self.inputDriveProfilesPath, keep_default_na=False)
self.chargeAvailability = None
def assignSimpleGridViaPurposes(self):
"""
Method to translate hourly purpose profiles into hourly profiles of true/false giving the charging station
availability in each hour for each individual vehicle.
:return: None
"""
print(f'Starting with charge connection replacement of location purposes')
self.chargeAvailability = self.purposeData.replace(self.gridMappings)
self.chargeAvailability.set_index(['genericID'], inplace=True)
self.chargeAvailability = (~(self.chargeAvailability != True))
print('Grid connection assignment complete')
def getFastChargingList(self):
'''
Returns a list of household trips having consumption greater than 80% (40 kWh) of battery capacity (50 kWh)
'''
driveProfiles = self.driveData.set_index(['genericID'])
driveProfiles = driveProfiles.loc[:].sum(axis=1)
driveProfiles = driveProfiles * self.scalarsPath['Electric_consumption_corr'] / 100
driveProfiles = np.where(driveProfiles > (self.gridFastChargingThreshold * (self.scalarsPath['Battery_capacity'])), driveProfiles, 0)
driveProfiles = pd.DataFrame(driveProfiles)
driveProfiles.set_index(self.driveData['genericID'], inplace=True)
driveProfiles = driveProfiles.replace(0, np.nan)
driveProfiles = driveProfiles.dropna(how='all', axis=0)
driveProfiles.reset_index(inplace=True)
drive = pd.Series(driveProfiles['genericID'])
fastChargingHHID = []
for i, item in drive.items():
fastChargingHHID.append(item)
return fastChargingHHID
def assignGridViaProbabilities(self, model: str, fastChargingHHID):
'''
:param model: Input for assigning probability according to models presented in gridConfig
:param fastChargingHHID: List of household trips for fast charging
:return: Returns a dataFrame holding charging capacity for each trip assigned with probability distribution
'''
self.chargeAvailability = self.purposeData.copy()
self.chargeAvailability.set_index(['genericID'], inplace=True)
print('Starting with charge connection replacement ')
print('There are ' + str(len(fastChargingHHID)) + ' trips having consumption greater than ' + str(self.gridFastChargingThreshold) + '% of battery capacity')
np.random.seed(42)
for hhPersonID, row in self.chargeAvailability.iterrows():
activity = row.copy(deep=True)
activity
# if None:
# # if hhPersonID in fastChargingHHID:
# for iHour in range(0, len(row)):
# if iHour == 0:
# if model == 'probability':
# row[iHour] = self.getRandomNumberForModel1(activity[iHour])
# elif model == 'distribution':
# row[iHour] = self.getRandomNumberForModel3(activity[iHour])
# # print(row[iHour], activity[iHour], hhPersonID)
# elif iHour > 0:
# if activity[iHour] == activity[iHour - 1]:
# # print(row[j-1])
# row[iHour] = row[iHour - 1]
# elif activity[iHour] == 'HOME' and (activity[iHour] in activity[range(0, iHour)].values):
# selector = activity[activity == 'HOME']
# selectorindex = selector.index[0]
# row[iHour] = row[selectorindex]
# elif model == 'probability':
# row[iHour] = self.getRandomNumberForModel1(activity[iHour])
# elif model == 'distribution':
# row[iHour] = self.getRandomNumberForModel3(activity[iHour])
# else:
for iHour in range(0, len(row)):
if iHour == 0:
if model == 'probability':
row[iHour] = self.getRandomNumberForModel1(activity[iHour])
elif model == 'distribution':
row[iHour] = self.getRandomNumberForModel2(activity[iHour])
# print(f'Power: {row[iHour]}, Activity: {activity[iHour]},householdPersonID: {hhPersonID}')
elif iHour > 0:
if activity[iHour] == activity[iHour - 1]:
row[iHour] = row[iHour - 1]
elif activity[iHour] == 'HOME' and (activity[iHour] in activity[range(0, iHour)].values):
selector = activity[activity == 'HOME']
selectorindex = selector.index[0]
row[iHour] = row[selectorindex]
elif model == 'probability':
row[iHour] = self.getRandomNumberForModel1(activity[iHour])
elif model == 'distribution':
row[iHour] = self.getRandomNumberForModel2(activity[iHour])
# print(f'Power: {row[iHour]}, Activity: {activity[iHour]}, householdPersonID: {hhPersonID}')
self.chargeAvailability.loc[hhPersonID] = row
print('Grid connection assignment complete')
def getRandomNumberForModel1(self, purpose):
'''
Assigns a random number between 0 and 1 for all the purposes, and allots a charging station according to the
probability distribution
:param purpose: Purpose of each hour of a trip
:return: Returns a charging capacity for a purpose based on probability distribution 1
'''
if purpose == "HOME":
rnd = np.random.random_sample()
if rnd <= 1:
rnd = self.gridProbability['HOME'][1]
else:
rnd = 0.0
elif purpose == "WORK":
rnd = np.random.random_sample()
if rnd <= 1:
rnd = self.gridProbability['WORK'][1]
else:
rnd = 0.0
elif purpose == "DRIVING":
rnd = 0
if rnd == 0:
rnd = self.gridProbability['DRIVING'][1]
elif purpose == "LEISURE":
rnd = np.random.random_sample()
if rnd <= 1:
rnd = self.gridProbability['LEISURE'][1]
else:
rnd = 0.0
elif purpose == "SHOPPING":
rnd = np.random.random_sample()
if rnd <= 1:
rnd = self.gridProbability['SHOPPING'][1]
else:
rnd = 0.0
elif purpose == "SCHOOL":
rnd = np.random.random_sample()
if rnd <= 1:
rnd = self.gridProbability['SCHOOL'][1]
else:
rnd = 0.0
elif purpose == "OTHER":
rnd = np.random.random_sample()
if rnd <= 1:
rnd = self.gridProbability['OTHER'][1]
else:
rnd = 0.0
else:
rnd = 0
if rnd == 0:
rnd = self.gridProbability['NA'][1]
return rnd
def getRandomNumberForModel2(self, purpose):
'''
Assigns a random number between 0 and 1 for all the purposes, and allots a charging station according to the
probability distribution
:param purpose: Purpose of each hour of a trip
:return: Returns a charging capacity for a purpose based on probability distribution model 2
'''
if purpose == 'DRIVING':
rnd = 0
else:
rnd = np.random.random_sample()
keys = list(self.gridDistribution[purpose].keys())
range_dict = {}
prob_min = 0
for index, (key, value) in enumerate(self.gridDistribution[purpose].items()):
prob_max = prob_min + value
range_dict.update({index: {'min_range': prob_min, 'max_range': prob_max}})
prob_min = prob_max
for dictIndex, rangeValue in range_dict.items():
if rangeValue['min_range'] <= rnd <= rangeValue['max_range']:
power = keys[dictIndex]
break
return power
def getRandomNumberForModel3(self, purpose):
'''
Assigns a random number between 0 and 1 for all the purposes, and allots a charging station according to the
probability distribution
:param purpose: Purpose of each hour of a trip
:return: Returns a charging capacity for a purpose based on probability distribution model 3 (fast charging)
'''
if purpose == 'DRIVING':
rnd = 0
else:
rnd = np.random.random_sample()
keys = list(self.gridFastCharging[purpose].keys())
range_dict_fast = {}
prob_min = 0
for index, (key, value) in enumerate(self.gridFastCharging[purpose].items()):
prob_max = prob_min + value
range_dict_fast.update({index: {'min_range': prob_min, 'max_range': prob_max}})
prob_min = prob_max
for dictIndex, rangeValue in range_dict_fast.items():
if rangeValue['min_range'] <= rnd <= rangeValue['max_range']:
power = keys[dictIndex]
break
return power
def writeOutGridAvailability(self):
"""
Function to write out the boolean charging station availability for each vehicle in each hour to the output
file path.
:return: None
"""
self.chargeAvailability.to_csv(self.outputFilePath)
def stackPlot(self):
'''
:return: Plots charging station assigned to each trip and EV's parking area/trip purposes during a time span of
24 hours
'''
keys = []
for key, value in self.gridDistribution.items():
for nestedKey, nestedValue in value.items():
keys.append(nestedKey)
capacityList = keys
capacityList = list(set(capacityList))
capacityList.sort()
capacity = self.chargeAvailability.transpose()
totalChargingStation = pd.DataFrame()
for i in range(0, len(capacityList)):
total = capacity.where(capacity.loc[:] == capacityList[i]).count(axis=1)
totalChargingStation = pd.concat([totalChargingStation, total], ignore_index=True, axis=1)
totalChargingStation.columns = totalChargingStation.columns[:-len(capacityList)].tolist() + capacityList
totalChargingStation.index = np.arange(1, len(totalChargingStation)+1)
totalChargingStation.plot(kind='area', title='Vehicles connected to different charging station over 24 hours',
figsize=(10, 8), colormap='Paired')
capacityListStr = [str(x) for x in capacityList]
appendStr = ' kW'
capacityListStr = [sub + appendStr for sub in capacityListStr]
plt.xlim(1, 24)
plt.xlabel('Time (hours)')
plt.ylabel('Number of vehicles')
plt.legend(capacityListStr, loc='upper center', ncol=len(capacityList))
plt.show()
purposeList = list(self.gridDistribution)
purposes = self.purposeData.copy()
purposes = purposes.set_index(['genericID']).transpose()
totalTripPurpose = | pd.DataFrame() | pandas.DataFrame |
# general imports
from pathlib import Path
import os
import re
import argparse
from time import time
import multiprocessing as mp
from functools import partial
from collections import Counter
# processing imports
import numpy as np
import pandas as pd
from tqdm import tqdm
from collections import OrderedDict
from difflib import SequenceMatcher
import os
# pdfminer imports
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfpage import PDFPage
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LTPage, LTChar, LTAnno, LAParams, LTTextBox, LTTextLine
# local imports
import rse_watch.sententizer as sententizer
def get_list_of_pdfs_filenames(dirName):
"""
For the given path, get the List of all files in the directory tree
"""
paths = []
for path, subdirs, files in os.walk(dirName):
for name in files:
if (name.lower().endswith(".pdf")):
paths.append((Path(path + "/" + name)))
return paths
def get_companies_metadata_dict(config):
""" Read companies metadata from config and turn it into dictionnary"""
companies_metadata_dict = pd.read_csv(config.annotations_file,
sep=";",
encoding='utf-8-sig').set_index("project_denomination").T.to_dict()
return companies_metadata_dict
def clean_child_str(child_str):
child_str = ' '.join(child_str.split()).strip()
# dealing with hyphens:
# 1. Replace words separators in row by a different char than hyphen (i.e. longer hyphen)
child_str = re.sub("[A-Za-z] - [A-Za-z]", lambda x: x.group(0).replace(' - ', ' – '), child_str)
# 2. Attach the negative term to the following number, # TODO: inutile ? Enlever ?
child_str = re.sub("(- )([0-9])", r"-\2", child_str)
return child_str
class PDFPageDetailedAggregator(PDFPageAggregator):
"""
Custom class to parse pdf and keep position of parsed text lines.
"""
def __init__(self, rsrcmgr, pageno=1, laparams=None):
PDFPageAggregator.__init__(self, rsrcmgr, pageno=pageno, laparams=laparams)
self.rows = []
self.page_number = 0
self.result = ""
def receive_layout(self, ltpage):
def render(item, page_number):
if isinstance(item, LTPage) or isinstance(item, LTTextBox):
for child in item:
render(child, page_number)
elif isinstance(item, LTTextLine):
child_str = ''
for child in item:
if isinstance(child, (LTChar, LTAnno)):
child_str += child.get_text()
child_str = clean_child_str(child_str)
if child_str:
# bbox == (pagenb, x1, y1, x2, y2, text)
row = (page_number, item.bbox[0], item.bbox[1], item.bbox[2], item.bbox[3], child_str)
self.rows.append(row)
for child in item:
render(child, page_number)
return
render(ltpage, self.page_number)
self.page_number += 1
self.rows = sorted(self.rows, key=lambda x: (x[0], -x[2]))
self.result = ltpage
def get_raw_content_from_pdf(input_file, rse_range=None):
"""
Parse pdf file, within rse range of pages if needed, and return list of rows with all metadata
:param input_file: PDF filename
:param rse_range: (nb_first_page_rse:int, nb_last_page_rse:int) tuple, starting at 1
:return: list of rows with (pagenb, x1, y1, x2, y2, text) and page_nb starts at 0!
"""
assert input_file.name.endswith(".pdf")
fp = open(input_file, 'rb')
parser = PDFParser(fp)
doc = PDFDocument(parser)
rsrcmgr = PDFResourceManager()
laparams = LAParams()
device = PDFPageDetailedAggregator(rsrcmgr, laparams=laparams)
interpreter = PDFPageInterpreter(rsrcmgr, device)
if rse_range is not None and rse_range != "":
# start at zero to match real index of pages
pages_selection = range(rse_range[0] - 1, (rse_range[1] - 1) + 1)
else:
pages_selection = range(0, 10000)
first_page_nb = pages_selection[0] + 1 # to start indexation at 1
# Checked: only useful pages are actually parsed.
for nb_page_parsed, page in enumerate(PDFPage.create_pages(doc)):
if nb_page_parsed in pages_selection:
interpreter.process_page(page)
device.get_result()
return device, first_page_nb
def clean_paragraph(p):
""" Curate paragraph object before save, in particular deal with hyphen and spaces """
# Attach together words (>= 2 char to avoid things like A minus, B minus...)
# that may have been split at end of row like géographie = "géo - graphie"
# real separator have been turned into longer hyphen during parsing to avoid confusion with those.
# Accents accepted thks to https://stackoverflow.com/a/24676780/8086033
w_expr = "(?i)(?:(?![×Þß÷þø])[-'a-zÀ-ÿ]){2,}"
p["paragraph"] = re.sub("{} - {}".format(w_expr, w_expr),
lambda x: x.group(0).replace(' - ', ''),
p["paragraph"])
# reattach words that were split, like Fort-Cros = "Fort- Cros"
p["paragraph"] = re.sub("{}- {}".format(w_expr, w_expr),
lambda x: x.group(0).replace('- ', '-'),
p["paragraph"])
return p
def get_paragraphs_from_raw_content(device, idx_first_page):
"""
From parsed data with positional information, aggregate into paragraphs using simple rationale
:param device:
:param idx_first_page:
:param p: size of next gap needs to be smaller than previous min size of letters (among two last rows) times p
:return:
"""
# GROUPING BY COLUMN
column_text_dict = OrderedDict() # keep order of identification in the document.
APPROXIMATION_FACTOR = 10 # to allow for slight shifts at beg of aligned text
N_MOST_COMMON = 4 # e.g. nb max of columns of text that can be considered
LEFT_SECURITY_SHIFT = 20 # to include way more shifted text of previous column
counter = Counter()
item_holder = []
item_index = 0
it_was_last_item = False
while "There are unchecked items in device.rows":
# add the item to the list of the page
try:
(page_id, x_min, _, _, _, _) = device.rows[item_index]
except e:
print("Wrong index {} for device.rows of len {}".format(item_index, len(device.rows)))
print("was that last page ? : {]".format(it_was_last_item))
raise
item_holder.append(device.rows[item_index])
# increment the count of x_min
counter[(x_min // APPROXIMATION_FACTOR) * APPROXIMATION_FACTOR] += 1
# go to next item
it_was_last_item = item_index == (len(device.rows) - 1)
if not it_was_last_item:
item_index += 1
(next_page_id, _, _, _, _, _) = device.rows[item_index]
changing_page = (next_page_id > page_id)
if changing_page or it_was_last_item: # approximate next page
top_n_x_min_approx = counter.most_common(N_MOST_COMMON)
df = pd.DataFrame(top_n_x_min_approx, columns=["x_min_approx", "freq"])
df = df[df["freq"] > df["freq"].sum() * (1 / (N_MOST_COMMON + 1))].sort_values(by="x_min_approx")
x_min_approx = (df["x_min_approx"] - LEFT_SECURITY_SHIFT).values
x_min_approx = x_min_approx * (x_min_approx > 0)
left_x_min_suport = np.hstack([x_min_approx,
[10000]])
def x_grouper(x_min):
delta = left_x_min_suport - x_min
x_group = left_x_min_suport[np.argmin(delta < 0) * 1 - 1]
return x_group
# iter on x_group and add items
page_nb = idx_first_page + page_id
column_text_dict[page_nb] = {}
for item in item_holder:
(page_id, x_min, y_min, x_max, y_max, text) = item
page_nb = idx_first_page + page_id
x_group = x_grouper(x_min)
if x_group in column_text_dict[page_nb].keys():
column_text_dict[page_nb][x_group].append((y_min, y_max, text))
else:
column_text_dict[page_nb][x_group] = [(y_min, y_max, text)]
if it_was_last_item:
break
else:
# restart from zero for next page
counter = Counter()
item_holder = []
# CREATE THE PARAGRAPHS IN EACH COLUMN
# define minimal conditions to define a change of paragraph:
# Being spaced by more than the size of each line (min if different to account for titles)
pararaphs_list = []
paragraph_index = 0
for page_nb, x_groups_dict in column_text_dict.items():
for x_group_name, x_groups_data in x_groups_dict.items():
x_groups_data = sorted(x_groups_data, key=lambda x: x[0],
reverse=True) # sort vertically, higher y = before
x_groups_data_paragraphs = []
p = {"y_min": x_groups_data[0][0],
"y_max": x_groups_data[0][1],
"paragraph": x_groups_data[0][2]}
previous_height = p["y_max"] - p["y_min"]
previous_y_min = p["y_min"]
for y_min, y_max, paragraph in x_groups_data[1:]:
current_height = y_max - y_min
current_y_min = y_min
max_height = max(previous_height, current_height)
relative_var_in_height = (current_height - previous_height) / float(
current_height) # Was min before ???
relative_var_in_y_min = abs(current_y_min - previous_y_min) / float(current_height)
positive_change_in_font_size = (relative_var_in_height > 0.05)
change_in_font_size = abs(relative_var_in_height) > 0.05
different_row = (relative_var_in_y_min > 0.7)
large_gap = (relative_var_in_y_min > 1.2)
artefact_to_ignore = (len(paragraph) <= 2) # single "P" broke row parsing in auchan dpef
if not artefact_to_ignore:
if (positive_change_in_font_size and different_row) or large_gap: # always break
# break paragraph, start new one
# print("break",relative_var_in_height, relative_var_in_y_min, paragraph)
p = clean_paragraph(p)
x_groups_data_paragraphs.append(p)
p = {"y_min": y_min,
"y_max": y_max,
"paragraph": paragraph}
else:
# if change_in_font_size: # to separate titles
# paragraph = paragraph + ".\n"
# paragraph continues
p["y_min"] = y_min
p["paragraph"] = p["paragraph"] + " " + paragraph
previous_height = current_height
previous_y_min = current_y_min
# add the last paragraph of column
p = clean_paragraph(p)
x_groups_data_paragraphs.append(p)
# structure the output
for p in x_groups_data_paragraphs:
pararaphs_list.append({"paragraph_id": paragraph_index,
"page_nb": page_nb,
"x_group": x_group_name,
"y_min_paragraph": round(p["y_min"]),
"y_max_paragraph": round(p["y_max"]),
"paragraph": p["paragraph"]})
paragraph_index += 1
df_par = pd.DataFrame(data=pararaphs_list,
columns=["paragraph_id",
"page_nb",
"paragraph",
"x_group",
"y_min_paragraph",
"y_max_paragraph"])
return df_par
def parse_paragraphs_from_pdf(input_file, rse_ranges=None):
"""
From filename, parse pdf and output structured paragraphs with filter on rse_ranges uif present.
:param input_file: filename ending with ".pdf" or ".PDF".
:param rse_ranges: "(start, end)|(start, end)"
:return: df[[page_nb, page_text]] dataframe
"""
rse_ranges_list = list(map(eval, rse_ranges.split("|")))
df_paragraphs_list = []
for rse_range in rse_ranges_list:
df_par, idx_first_page = get_raw_content_from_pdf(input_file, rse_range=rse_range)
df_par = get_paragraphs_from_raw_content(df_par, idx_first_page)
df_paragraphs_list.append(df_par)
df_par = | pd.concat(df_paragraphs_list, axis=0, ignore_index=True) | pandas.concat |
#
# Copyright 2020 Logical Clocks AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
import numpy as np
# in case importing in %%local
try:
from pyspark.sql import SparkSession, DataFrame
from pyspark.rdd import RDD
except ModuleNotFoundError:
pass
from hsfs import feature
from hsfs.storage_connector import StorageConnector
from hsfs.client.exceptions import FeatureStoreException
class Engine:
HIVE_FORMAT = "hive"
JDBC_FORMAT = "jdbc"
def __init__(self):
self._spark_session = SparkSession.builder.getOrCreate()
self._spark_context = self._spark_session.sparkContext
self._spark_session.conf.set("hive.exec.dynamic.partition", "true")
self._spark_session.conf.set("hive.exec.dynamic.partition.mode", "nonstrict")
def sql(self, sql_query, feature_store, online_conn, dataframe_type):
if not online_conn:
result_df = self._sql_offline(sql_query, feature_store)
else:
result_df = self._sql_online(sql_query, online_conn)
self.set_job_group("", "")
return self._return_dataframe_type(result_df, dataframe_type)
def _sql_offline(self, sql_query, feature_store):
# set feature store
self._spark_session.sql("USE {}".format(feature_store))
return self._spark_session.sql(sql_query)
def _sql_online(self, sql_query, online_conn):
options = online_conn.spark_options()
options["query"] = sql_query
return (
self._spark_session.read.format(self.JDBC_FORMAT).options(**options).load()
)
def show(self, sql_query, feature_store, n, online_conn):
return self.sql(sql_query, feature_store, online_conn, "default").show(n)
def set_job_group(self, group_id, description):
self._spark_session.sparkContext.setJobGroup(group_id, description)
def _return_dataframe_type(self, dataframe, dataframe_type):
if dataframe_type.lower() in ["default", "spark"]:
return dataframe
if dataframe_type.lower() == "pandas":
return dataframe.toPandas()
if dataframe_type.lower() == "numpy":
return dataframe.toPandas().values
if dataframe_type == "python":
return dataframe.toPandas().values.tolist()
raise TypeError(
"Dataframe type `{}` not supported on this platform.".format(dataframe_type)
)
def convert_to_default_dataframe(self, dataframe):
if isinstance(dataframe, pd.DataFrame):
return self._spark_session.createDataFrame(dataframe)
if isinstance(dataframe, list):
dataframe = np.array(dataframe)
if isinstance(dataframe, np.ndarray):
if dataframe.ndim != 2:
raise TypeError(
"Cannot convert numpy array that do not have two dimensions to a dataframe. "
"The number of dimensions are: {}".format(dataframe.ndim)
)
num_cols = dataframe.shape[1]
dataframe_dict = {}
for n_col in list(range(num_cols)):
col_name = "col_" + str(n_col)
dataframe_dict[col_name] = dataframe[:, n_col]
pandas_df = | pd.DataFrame(dataframe_dict) | pandas.DataFrame |
from datetime import datetime
from decimal import Decimal
from io import StringIO
import numpy as np
import pytest
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, read_csv
import pandas._testing as tm
from pandas.core.base import SpecificationError
import pandas.core.common as com
def test_repr():
# GH18203
result = repr(pd.Grouper(key="A", level="B"))
expected = "Grouper(key='A', level='B', axis=0, sort=False)"
assert result == expected
@pytest.mark.parametrize("dtype", ["int64", "int32", "float64", "float32"])
def test_basic(dtype):
data = Series(np.arange(9) // 3, index=np.arange(9), dtype=dtype)
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
for k, v in grouped:
assert len(v) == 3
agged = grouped.aggregate(np.mean)
assert agged[1] == 1
tm.assert_series_equal(agged, grouped.agg(np.mean)) # shorthand
tm.assert_series_equal(agged, grouped.mean())
tm.assert_series_equal(grouped.agg(np.sum), grouped.sum())
expected = grouped.apply(lambda x: x * x.sum())
transformed = grouped.transform(lambda x: x * x.sum())
assert transformed[7] == 12
tm.assert_series_equal(transformed, expected)
value_grouped = data.groupby(data)
tm.assert_series_equal(
value_grouped.aggregate(np.mean), agged, check_index_type=False
)
# complex agg
agged = grouped.aggregate([np.mean, np.std])
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped.aggregate({"one": np.mean, "two": np.std})
group_constants = {0: 10, 1: 20, 2: 30}
agged = grouped.agg(lambda x: group_constants[x.name] + x.mean())
assert agged[1] == 21
# corner cases
msg = "Must produce aggregated value"
# exception raised is type Exception
with pytest.raises(Exception, match=msg):
grouped.aggregate(lambda x: x * 2)
def test_groupby_nonobject_dtype(mframe, df_mixed_floats):
key = mframe.index.codes[0]
grouped = mframe.groupby(key)
result = grouped.sum()
expected = mframe.groupby(key.astype("O")).sum()
tm.assert_frame_equal(result, expected)
# GH 3911, mixed frame non-conversion
df = df_mixed_floats.copy()
df["value"] = range(len(df))
def max_value(group):
return group.loc[group["value"].idxmax()]
applied = df.groupby("A").apply(max_value)
result = applied.dtypes
expected = Series(
[np.dtype("object")] * 2 + [np.dtype("float64")] * 2 + [np.dtype("int64")],
index=["A", "B", "C", "D", "value"],
)
tm.assert_series_equal(result, expected)
def test_groupby_return_type():
# GH2893, return a reduced type
df1 = DataFrame(
[
{"val1": 1, "val2": 20},
{"val1": 1, "val2": 19},
{"val1": 2, "val2": 27},
{"val1": 2, "val2": 12},
]
)
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
with tm.assert_produces_warning(FutureWarning):
result = df1.groupby("val1", squeeze=True).apply(func)
assert isinstance(result, Series)
df2 = DataFrame(
[
{"val1": 1, "val2": 20},
{"val1": 1, "val2": 19},
{"val1": 1, "val2": 27},
{"val1": 1, "val2": 12},
]
)
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
with tm.assert_produces_warning(FutureWarning):
result = df2.groupby("val1", squeeze=True).apply(func)
assert isinstance(result, Series)
# GH3596, return a consistent type (regression in 0.11 from 0.10.1)
df = DataFrame([[1, 1], [1, 1]], columns=["X", "Y"])
with tm.assert_produces_warning(FutureWarning):
result = df.groupby("X", squeeze=False).count()
assert isinstance(result, DataFrame)
def test_inconsistent_return_type():
# GH5592
# inconsistent return type
df = DataFrame(
dict(
A=["Tiger", "Tiger", "Tiger", "Lamb", "Lamb", "Pony", "Pony"],
B=Series(np.arange(7), dtype="int64"),
C=date_range("20130101", periods=7),
)
)
def f(grp):
return grp.iloc[0]
expected = df.groupby("A").first()[["B"]]
result = df.groupby("A").apply(f)[["B"]]
tm.assert_frame_equal(result, expected)
def f(grp):
if grp.name == "Tiger":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["B"]]
e = expected.copy()
e.loc["Tiger"] = np.nan
tm.assert_frame_equal(result, e)
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["B"]]
e = expected.copy()
e.loc["Pony"] = np.nan
tm.assert_frame_equal(result, e)
# 5592 revisited, with datetimes
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["C"]]
e = df.groupby("A").first()[["C"]]
e.loc["Pony"] = pd.NaT
tm.assert_frame_equal(result, e)
# scalar outputs
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0].loc["C"]
result = df.groupby("A").apply(f)
e = df.groupby("A").first()["C"].copy()
e.loc["Pony"] = np.nan
e.name = None
tm.assert_series_equal(result, e)
def test_pass_args_kwargs(ts, tsframe):
def f(x, q=None, axis=0):
return np.percentile(x, q, axis=axis)
g = lambda x: np.percentile(x, 80, axis=0)
# Series
ts_grouped = ts.groupby(lambda x: x.month)
agg_result = ts_grouped.agg(np.percentile, 80, axis=0)
apply_result = ts_grouped.apply(np.percentile, 80, axis=0)
trans_result = ts_grouped.transform(np.percentile, 80, axis=0)
agg_expected = ts_grouped.quantile(0.8)
trans_expected = ts_grouped.transform(g)
tm.assert_series_equal(apply_result, agg_expected)
tm.assert_series_equal(agg_result, agg_expected)
tm.assert_series_equal(trans_result, trans_expected)
agg_result = ts_grouped.agg(f, q=80)
apply_result = ts_grouped.apply(f, q=80)
trans_result = ts_grouped.transform(f, q=80)
tm.assert_series_equal(agg_result, agg_expected)
tm.assert_series_equal(apply_result, agg_expected)
tm.assert_series_equal(trans_result, trans_expected)
# DataFrame
df_grouped = tsframe.groupby(lambda x: x.month)
agg_result = df_grouped.agg(np.percentile, 80, axis=0)
apply_result = df_grouped.apply(DataFrame.quantile, 0.8)
expected = df_grouped.quantile(0.8)
tm.assert_frame_equal(apply_result, expected, check_names=False)
tm.assert_frame_equal(agg_result, expected)
agg_result = df_grouped.agg(f, q=80)
apply_result = df_grouped.apply(DataFrame.quantile, q=0.8)
tm.assert_frame_equal(agg_result, expected)
tm.assert_frame_equal(apply_result, expected, check_names=False)
def test_len():
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])
assert len(grouped) == len(df)
grouped = df.groupby([lambda x: x.year, lambda x: x.month])
expected = len({(x.year, x.month) for x in df.index})
assert len(grouped) == expected
# issue 11016
df = pd.DataFrame(dict(a=[np.nan] * 3, b=[1, 2, 3]))
assert len(df.groupby(("a"))) == 0
assert len(df.groupby(("b"))) == 3
assert len(df.groupby(["a", "b"])) == 3
def test_basic_regression():
# regression
result = Series([1.0 * x for x in list(range(1, 10)) * 10])
data = np.random.random(1100) * 10.0
groupings = Series(data)
grouped = result.groupby(groupings)
grouped.mean()
@pytest.mark.parametrize(
"dtype", ["float64", "float32", "int64", "int32", "int16", "int8"]
)
def test_with_na_groups(dtype):
index = Index(np.arange(10))
values = Series(np.ones(10), index, dtype=dtype)
labels = Series(
[np.nan, "foo", "bar", "bar", np.nan, np.nan, "bar", "bar", np.nan, "foo"],
index=index,
)
# this SHOULD be an int
grouped = values.groupby(labels)
agged = grouped.agg(len)
expected = Series([4, 2], index=["bar", "foo"])
tm.assert_series_equal(agged, expected, check_dtype=False)
# assert issubclass(agged.dtype.type, np.integer)
# explicitly return a float from my function
def f(x):
return float(len(x))
agged = grouped.agg(f)
expected = Series([4, 2], index=["bar", "foo"])
tm.assert_series_equal(agged, expected, check_dtype=False)
assert issubclass(agged.dtype.type, np.dtype(dtype).type)
def test_indices_concatenation_order():
# GH 2808
def f1(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
multiindex = MultiIndex(levels=[[]] * 2, codes=[[]] * 2, names=["b", "c"])
res = DataFrame(columns=["a"], index=multiindex)
return res
else:
y = y.set_index(["b", "c"])
return y
def f2(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
return DataFrame()
else:
y = y.set_index(["b", "c"])
return y
def f3(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
multiindex = MultiIndex(
levels=[[]] * 2, codes=[[]] * 2, names=["foo", "bar"]
)
res = DataFrame(columns=["a", "b"], index=multiindex)
return res
else:
return y
df = DataFrame({"a": [1, 2, 2, 2], "b": range(4), "c": range(5, 9)})
df2 = DataFrame({"a": [3, 2, 2, 2], "b": range(4), "c": range(5, 9)})
# correct result
result1 = df.groupby("a").apply(f1)
result2 = df2.groupby("a").apply(f1)
tm.assert_frame_equal(result1, result2)
# should fail (not the same number of levels)
msg = "Cannot concat indices that do not have the same number of levels"
with pytest.raises(AssertionError, match=msg):
df.groupby("a").apply(f2)
with pytest.raises(AssertionError, match=msg):
df2.groupby("a").apply(f2)
# should fail (incorrect shape)
with pytest.raises(AssertionError, match=msg):
df.groupby("a").apply(f3)
with pytest.raises(AssertionError, match=msg):
df2.groupby("a").apply(f3)
def test_attr_wrapper(ts):
grouped = ts.groupby(lambda x: x.weekday())
result = grouped.std()
expected = grouped.agg(lambda x: np.std(x, ddof=1))
tm.assert_series_equal(result, expected)
# this is pretty cool
result = grouped.describe()
expected = {name: gp.describe() for name, gp in grouped}
expected = DataFrame(expected).T
tm.assert_frame_equal(result, expected)
# get attribute
result = grouped.dtype
expected = grouped.agg(lambda x: x.dtype)
# make sure raises error
msg = "'SeriesGroupBy' object has no attribute 'foo'"
with pytest.raises(AttributeError, match=msg):
getattr(grouped, "foo")
def test_frame_groupby(tsframe):
grouped = tsframe.groupby(lambda x: x.weekday())
# aggregate
aggregated = grouped.aggregate(np.mean)
assert len(aggregated) == 5
assert len(aggregated.columns) == 4
# by string
tscopy = tsframe.copy()
tscopy["weekday"] = [x.weekday() for x in tscopy.index]
stragged = tscopy.groupby("weekday").aggregate(np.mean)
tm.assert_frame_equal(stragged, aggregated, check_names=False)
# transform
grouped = tsframe.head(30).groupby(lambda x: x.weekday())
transformed = grouped.transform(lambda x: x - x.mean())
assert len(transformed) == 30
assert len(transformed.columns) == 4
# transform propagate
transformed = grouped.transform(lambda x: x.mean())
for name, group in grouped:
mean = group.mean()
for idx in group.index:
tm.assert_series_equal(transformed.xs(idx), mean, check_names=False)
# iterate
for weekday, group in grouped:
assert group.index[0].weekday() == weekday
# groups / group_indices
groups = grouped.groups
indices = grouped.indices
for k, v in groups.items():
samething = tsframe.index.take(indices[k])
assert (samething == v).all()
def test_frame_groupby_columns(tsframe):
mapping = {"A": 0, "B": 0, "C": 1, "D": 1}
grouped = tsframe.groupby(mapping, axis=1)
# aggregate
aggregated = grouped.aggregate(np.mean)
assert len(aggregated) == len(tsframe)
assert len(aggregated.columns) == 2
# transform
tf = lambda x: x - x.mean()
groupedT = tsframe.T.groupby(mapping, axis=0)
tm.assert_frame_equal(groupedT.transform(tf).T, grouped.transform(tf))
# iterate
for k, v in grouped:
assert len(v.columns) == 2
def test_frame_set_name_single(df):
grouped = df.groupby("A")
result = grouped.mean()
assert result.index.name == "A"
result = df.groupby("A", as_index=False).mean()
assert result.index.name != "A"
result = grouped.agg(np.mean)
assert result.index.name == "A"
result = grouped.agg({"C": np.mean, "D": np.std})
assert result.index.name == "A"
result = grouped["C"].mean()
assert result.index.name == "A"
result = grouped["C"].agg(np.mean)
assert result.index.name == "A"
result = grouped["C"].agg([np.mean, np.std])
assert result.index.name == "A"
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped["C"].agg({"foo": np.mean, "bar": np.std})
def test_multi_func(df):
col1 = df["A"]
col2 = df["B"]
grouped = df.groupby([col1.get, col2.get])
agged = grouped.mean()
expected = df.groupby(["A", "B"]).mean()
# TODO groupby get drops names
tm.assert_frame_equal(
agged.loc[:, ["C", "D"]], expected.loc[:, ["C", "D"]], check_names=False
)
# some "groups" with no data
df = DataFrame(
{
"v1": np.random.randn(6),
"v2": np.random.randn(6),
"k1": np.array(["b", "b", "b", "a", "a", "a"]),
"k2": np.array(["1", "1", "1", "2", "2", "2"]),
},
index=["one", "two", "three", "four", "five", "six"],
)
# only verify that it works for now
grouped = df.groupby(["k1", "k2"])
grouped.agg(np.sum)
def test_multi_key_multiple_functions(df):
grouped = df.groupby(["A", "B"])["C"]
agged = grouped.agg([np.mean, np.std])
expected = DataFrame({"mean": grouped.agg(np.mean), "std": grouped.agg(np.std)})
tm.assert_frame_equal(agged, expected)
def test_frame_multi_key_function_list():
data = DataFrame(
{
"A": [
"foo",
"foo",
"foo",
"foo",
"bar",
"bar",
"bar",
"bar",
"foo",
"foo",
"foo",
],
"B": [
"one",
"one",
"one",
"two",
"one",
"one",
"one",
"two",
"two",
"two",
"one",
],
"C": [
"dull",
"dull",
"shiny",
"dull",
"dull",
"shiny",
"shiny",
"dull",
"shiny",
"shiny",
"shiny",
],
"D": np.random.randn(11),
"E": np.random.randn(11),
"F": np.random.randn(11),
}
)
grouped = data.groupby(["A", "B"])
funcs = [np.mean, np.std]
agged = grouped.agg(funcs)
expected = pd.concat(
[grouped["D"].agg(funcs), grouped["E"].agg(funcs), grouped["F"].agg(funcs)],
keys=["D", "E", "F"],
axis=1,
)
assert isinstance(agged.index, MultiIndex)
assert isinstance(expected.index, MultiIndex)
tm.assert_frame_equal(agged, expected)
@pytest.mark.parametrize("op", [lambda x: x.sum(), lambda x: x.mean()])
def test_groupby_multiple_columns(df, op):
data = df
grouped = data.groupby(["A", "B"])
result1 = op(grouped)
keys = []
values = []
for n1, gp1 in data.groupby("A"):
for n2, gp2 in gp1.groupby("B"):
keys.append((n1, n2))
values.append(op(gp2.loc[:, ["C", "D"]]))
mi = MultiIndex.from_tuples(keys, names=["A", "B"])
expected = pd.concat(values, axis=1).T
expected.index = mi
# a little bit crude
for col in ["C", "D"]:
result_col = op(grouped[col])
pivoted = result1[col]
exp = expected[col]
tm.assert_series_equal(result_col, exp)
tm.assert_series_equal(pivoted, exp)
# test single series works the same
result = data["C"].groupby([data["A"], data["B"]]).mean()
expected = data.groupby(["A", "B"]).mean()["C"]
| tm.assert_series_equal(result, expected) | pandas._testing.assert_series_equal |
# -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = 'Only length-1 decimal markers supported'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), decimal='')
def test_bad_stream_exception(self):
# Issue 13652:
# This test validates that both python engine
# and C engine will raise UnicodeDecodeError instead of
# c engine raising ParserError and swallowing exception
# that caused read to fail.
handle = open(self.csv_shiftjs, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
# stream must be binary UTF8
stream = codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter)
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
with tm.assert_raises_regex(UnicodeDecodeError, msg):
self.read_csv(stream)
stream.close()
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# see gh-8217
# Series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
assert not result._is_view
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read(3)
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read()
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# skipfooter
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#',
skipfooter=1)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa
pytest.raises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
assert len(df) == 3
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
tm.assert_index_equal(df.columns,
Index(['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4']))
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
expected = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D']))
assert df.index.name == 'index'
assert isinstance(
df.index[0], (datetime, np.datetime64, Timestamp))
assert df.values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns,
pd.Index(['A', 'B', 'C', 'D', 'E']))
assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp))
assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None)
assert isinstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
pytest.raises(ValueError, self.read_csv, StringIO(data))
def test_read_duplicate_index_explicit(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
result = self.read_table(StringIO(data), sep=',', index_col=0)
expected = self.read_table(StringIO(data), sep=',', ).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# make sure an error isn't thrown
self.read_csv(StringIO(data))
self.read_table(StringIO(data), sep=',')
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
assert data['A'].dtype == np.bool_
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.float64
assert data['B'].dtype == np.int64
def test_read_nrows(self):
expected = self.read_csv(StringIO(self.data1))[:3]
df = self.read_csv(StringIO(self.data1), nrows=3)
tm.assert_frame_equal(df, expected)
# see gh-10476
df = self.read_csv(StringIO(self.data1), nrows=3.0)
tm.assert_frame_equal(df, expected)
msg = r"'nrows' must be an integer >=0"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=1.2)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=-1)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
| tm.assert_frame_equal(chunks[2], df[4:]) | pandas.util.testing.assert_frame_equal |
from context import dero
import pandas as pd
from pandas.util.testing import assert_frame_equal
from pandas import Timestamp
from numpy import nan
import numpy
class DataFrameTest:
df = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_duplicate_row = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/3/2000', 1.03), #this is a duplicated row
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_weight = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0),
(10516, 'a', '1/2/2000', 1.02, 1),
(10516, 'a', '1/3/2000', 1.03, 1),
(10516, 'a', '1/4/2000', 1.04, 0),
(10516, 'b', '1/1/2000', 1.05, 1),
(10516, 'b', '1/2/2000', 1.06, 1),
(10516, 'b', '1/3/2000', 1.07, 1),
(10516, 'b', '1/4/2000', 1.08, 1),
(10517, 'a', '1/1/2000', 1.09, 0),
(10517, 'a', '1/2/2000', 1.1, 0),
(10517, 'a', '1/3/2000', 1.11, 0),
(10517, 'a', '1/4/2000', 1.12, 1),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight'])
df_nan_byvar = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', 3),
('b', 4),
], columns = ['byvar', 'val'])
df_nan_byvar_and_val = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', nan),
('b', 4),
], columns = ['byvar', 'val'])
single_ticker_df = pd.DataFrame(data = [
('a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['byvar', 'Date', 'TICKER'])
df_datetime = df.copy()
df_datetime['Date'] = pd.to_datetime(df_datetime['Date'])
df_datetime_no_ret = df_datetime.copy()
df_datetime_no_ret.drop('RET', axis=1, inplace=True)
df_gvkey_str = pd.DataFrame([
('001076','3/1/1995'),
('001076','4/1/1995'),
('001722','1/1/2012'),
('001722','7/1/2012'),
('001722', nan),
(nan ,'1/1/2012')
], columns=['GVKEY','Date'])
df_gvkey_str['Date'] = pd.to_datetime(df_gvkey_str['Date'])
df_gvkey_num = df_gvkey_str.copy()
df_gvkey_num['GVKEY'] = df_gvkey_num['GVKEY'].astype('float64')
df_gvkey_str2 = pd.DataFrame([
('001076','2/1/1995'),
('001076','3/2/1995'),
('001722','11/1/2011'),
('001722','10/1/2011'),
('001722', nan),
(nan ,'1/1/2012')
], columns=['GVKEY','Date'])
df_gvkey_str2['Date'] = pd.to_datetime(df_gvkey_str2['Date'])
df_fill_data = pd.DataFrame(
data=[
(4, 'c', nan, 'a'),
(1, 'd', 3, 'a'),
(10, 'e', 100, 'a'),
(2, nan, 6, 'b'),
(5, 'f', 8, 'b'),
(11, 'g', 150, 'b'),
],
columns=['y', 'x1', 'x2', 'group']
)
class TestCumulate(DataFrameTest):
expect_between_1_3 = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1.01),
(10516, 'a', '1/2/2000', 1.02, 1.02),
(10516, 'a', '1/3/2000', 1.03, 1.0506),
(10516, 'a', '1/4/2000', 1.04, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.05),
(10516, 'b', '1/2/2000', 1.06, 1.06),
(10516, 'b', '1/3/2000', 1.07, 1.1342),
(10516, 'b', '1/4/2000', 1.08, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.09),
(10517, 'a', '1/2/2000', 1.1, 1.1),
(10517, 'a', '1/3/2000', 1.11, 1.2210000000000003),
(10517, 'a', '1/4/2000', 1.12, 1.12),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'cum_RET'])
expect_first = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01, 1.01),
(10516, 'a', '1/2/2000', 1.02, 1.02),
(10516, 'a', '1/3/2000', 1.03, 1.0506),
(10516, 'a', '1/4/2000', 1.04, 1.092624),
(10516, 'b', '1/1/2000', 1.05, 1.05),
(10516, 'b', '1/2/2000', 1.06, 1.06),
(10516, 'b', '1/3/2000', 1.07, 1.1342),
(10516, 'b', '1/4/2000', 1.08, 1.224936),
(10517, 'a', '1/1/2000', 1.09, 1.09),
(10517, 'a', '1/2/2000', 1.10, 1.10),
(10517, 'a', '1/3/2000', 1.11, 1.221),
(10517, 'a', '1/4/2000', 1.12, 1.36752),
], columns = ['PERMNO','byvar','Date', 'RET', 'cum_RET'])
def test_method_between_1_3(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[1,3])
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_method_between_m2_0(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[-2,0])
#Actually same result as [1,3]
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_shifted_index(self):
df = self.df.copy()
df.index = df.index + 10
cum_df = dero.pandas.cumulate(df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[-2,0])
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_method_first(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'first', periodvar='Date',
byvars=['PERMNO','byvar'])
assert_frame_equal(self.expect_first, cum_df, check_dtype=False)
def test_grossify(self):
df = self.df.copy() #don't overwrite original
df['RET'] -= 1 #ungrossify
expect_first_grossify = self.expect_first.copy()
expect_first_grossify['cum_RET'] -= 1
expect_first_grossify['RET'] -= 1
cum_df = dero.pandas.cumulate(df, 'RET', 'first', periodvar='Date',
byvars=['PERMNO','byvar'], grossify=True)
assert_frame_equal(expect_first_grossify, cum_df, check_dtype=False)
class TestGroupbyMerge(DataFrameTest):
def test_subset_max(self):
byvars = ['PERMNO','byvar']
out = dero.pandas.groupby_merge(self.df, byvars, 'max', subset='RET')
expect_df = pd.DataFrame(
[(10516, 'a', '1/1/2000', 1.01, 1.04),
(10516, 'a', '1/2/2000', 1.02, 1.04),
(10516, 'a', '1/3/2000', 1.03, 1.04),
(10516, 'a', '1/4/2000', 1.04, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.08),
(10516, 'b', '1/2/2000', 1.06, 1.08),
(10516, 'b', '1/3/2000', 1.07, 1.08),
(10516, 'b', '1/4/2000', 1.08, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.12),
(10517, 'a', '1/2/2000', 1.10, 1.12),
(10517, 'a', '1/3/2000', 1.11, 1.12),
(10517, 'a', '1/4/2000', 1.12, 1.12)],
columns = ['PERMNO','byvar','Date', 'RET', 'RET_max'])
assert_frame_equal(expect_df, out)
def test_subset_std(self):
byvars = ['PERMNO','byvar']
out = dero.pandas.groupby_merge(self.df, byvars, 'std', subset='RET')
expect_df = pd.DataFrame(
[(10516, 'a', '1/1/2000', 1.01, 0.012909944487358068),
(10516, 'a', '1/2/2000', 1.02, 0.012909944487358068),
(10516, 'a', '1/3/2000', 1.03, 0.012909944487358068),
(10516, 'a', '1/4/2000', 1.04, 0.012909944487358068),
(10516, 'b', '1/1/2000', 1.05, 0.012909944487358068),
(10516, 'b', '1/2/2000', 1.06, 0.012909944487358068),
(10516, 'b', '1/3/2000', 1.07, 0.012909944487358068),
(10516, 'b', '1/4/2000', 1.08, 0.012909944487358068),
(10517, 'a', '1/1/2000', 1.09, 0.012909944487358068),
(10517, 'a', '1/2/2000', 1.10, 0.012909944487358068),
(10517, 'a', '1/3/2000', 1.11, 0.012909944487358068),
(10517, 'a', '1/4/2000', 1.12, 0.012909944487358068)],
columns = ['PERMNO','byvar','Date', 'RET', 'RET_std'])
assert_frame_equal(expect_df, out)
def test_nan_byvar_transform(self):
expect_df = self.df_nan_byvar.copy()
expect_df['val_transform'] = expect_df['val']
out = dero.pandas.groupby_merge(self.df_nan_byvar, 'byvar', 'transform', (lambda x: x))
assert_frame_equal(expect_df, out)
def test_nan_byvar_and_nan_val_transform_numeric(self):
non_standard_index = self.df_nan_byvar_and_val.copy()
non_standard_index.index = [5,6,7,8]
expect_df = self.df_nan_byvar_and_val.copy()
expect_df['val_transform'] = expect_df['val'] + 1
expect_df.index = [5,6,7,8]
out = dero.pandas.groupby_merge(non_standard_index, 'byvar', 'transform', (lambda x: x + 1))
assert_frame_equal(expect_df, out)
def test_nan_byvar_and_nan_val_and_nonstandard_index_transform_numeric(self):
expect_df = self.df_nan_byvar_and_val.copy()
expect_df['val_transform'] = expect_df['val'] + 1
def test_nan_byvar_sum(self):
expect_df = pd.DataFrame(data = [
('a', 1, 1.0),
(nan, 2, nan),
('b', 3, 7.0),
('b', 4, 7.0),
], columns = ['byvar', 'val', 'val_sum'])
out = dero.pandas.groupby_merge(self.df_nan_byvar, 'byvar', 'sum')
assert_frame_equal(expect_df, out)
class TestLongToWide:
expect_df_with_colindex = pd.DataFrame(data = [
(10516, 'a', 1.01, 1.02, 1.03, 1.04),
(10516, 'b', 1.05, 1.06, 1.07, 1.08),
(10517, 'a', 1.09, 1.1, 1.11, 1.12),
], columns = ['PERMNO', 'byvar',
'RET1/1/2000', 'RET1/2/2000',
'RET1/3/2000', 'RET1/4/2000'])
expect_df_no_colindex = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/2/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/3/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/4/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/2/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/3/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/4/2000', 1.05, 1.06, 1.07, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/2/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/3/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/4/2000', 1.09, 1.1, 1.11, 1.12),
], columns = ['PERMNO', 'byvar', 'Date', 'RET0',
'RET1', 'RET2', 'RET3'])
input_data = DataFrameTest()
ltw_no_dup_colindex = dero.pandas.long_to_wide(input_data.df,
['PERMNO', 'byvar'], 'RET', colindex='Date')
ltw_dup_colindex = dero.pandas.long_to_wide(input_data.df_duplicate_row,
['PERMNO', 'byvar'], 'RET', colindex='Date')
ltw_no_dup_no_colindex = dero.pandas.long_to_wide(input_data.df,
['PERMNO', 'byvar'], 'RET')
ltw_dup_no_colindex = dero.pandas.long_to_wide(input_data.df_duplicate_row,
['PERMNO', 'byvar'], 'RET')
df_list = [ltw_no_dup_colindex, ltw_dup_colindex,
ltw_no_dup_no_colindex, ltw_dup_no_colindex]
def test_no_duplicates_with_colindex(self):
assert_frame_equal(self.expect_df_with_colindex, self.ltw_no_dup_colindex)
def test_duplicates_with_colindex(self):
assert_frame_equal(self.expect_df_with_colindex, self.ltw_dup_colindex)
def test_no_duplicates_no_colindex(self):
assert_frame_equal(self.expect_df_no_colindex, self.ltw_no_dup_no_colindex)
def test_duplicates_no_colindex(self):
assert_frame_equal(self.expect_df_no_colindex, self.ltw_dup_no_colindex)
def test_no_extra_vars(self):
for df in self.df_list:
assert ('__idx__','__key__') not in df.columns
class TestPortfolioAverages:
input_data = DataFrameTest()
expect_avgs_no_wt = pd.DataFrame(data = [
(1, 'a', 1.0250000000000001),
(1, 'b', 1.0550000000000002),
(2, 'a', 1.1050000000000002),
(2, 'b', 1.0750000000000002),
], columns = ['portfolio', 'byvar', 'RET'])
expect_avgs_wt = pd.DataFrame(data = [
(1, 'a', 1.0250000000000001, 1.025),
(1, 'b', 1.0550000000000002, 1.0550000000000002),
(2, 'a', 1.1050000000000002, 1.12),
(2, 'b', 1.0750000000000002, 1.0750000000000002),
], columns = ['portfolio', 'byvar', 'RET', 'RET_wavg'])
expect_ports = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0, 1),
(10516, 'a', '1/2/2000', 1.02, 1, 1),
(10516, 'a', '1/3/2000', 1.03, 1, 1),
(10516, 'a', '1/4/2000', 1.04, 0, 1),
(10516, 'b', '1/1/2000', 1.05, 1, 1),
(10516, 'b', '1/2/2000', 1.06, 1, 1),
(10516, 'b', '1/3/2000', 1.07, 1, 2),
(10516, 'b', '1/4/2000', 1.08, 1, 2),
(10517, 'a', '1/1/2000', 1.09, 0, 2),
(10517, 'a', '1/2/2000', 1.1, 0, 2),
(10517, 'a', '1/3/2000', 1.11, 0, 2),
(10517, 'a', '1/4/2000', 1.12, 1, 2),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight', 'portfolio'])
avgs, ports = dero.pandas.portfolio_averages(input_data.df_weight, 'RET', 'RET', ngroups=2,
byvars='byvar')
w_avgs, w_ports = dero.pandas.portfolio_averages(input_data.df_weight, 'RET', 'RET', ngroups=2,
byvars='byvar', wtvar='weight')
def test_simple_averages(self):
assert_frame_equal(self.expect_avgs_no_wt, self.avgs, check_dtype=False)
def test_weighted_averages(self):
assert_frame_equal(self.expect_avgs_wt, self.w_avgs, check_dtype=False)
def test_portfolio_construction(self):
print(self.ports)
assert_frame_equal(self.expect_ports, self.ports, check_dtype=False)
assert_frame_equal(self.expect_ports, self.w_ports, check_dtype=False)
class TestWinsorize(DataFrameTest):
def test_winsor_40_subset_byvars(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.022624),
(10516, 'a', '1/2/2000', 1.022624),
(10516, 'a', '1/3/2000', 1.02672),
(10516, 'a', '1/4/2000', 1.02672),
(10516, 'b', '1/1/2000', 1.062624),
(10516, 'b', '1/2/2000', 1.062624),
(10516, 'b', '1/3/2000', 1.06672),
(10516, 'b', '1/4/2000', 1.06672),
(10517, 'a', '1/1/2000', 1.102624),
(10517, 'a', '1/2/2000', 1.102624),
(10517, 'a', '1/3/2000', 1.10672),
(10517, 'a', '1/4/2000', 1.10672),
], columns = ['PERMNO', 'byvar', 'Date', 'RET'])
wins = dero.pandas.winsorize(self.df, .4, subset='RET', byvars=['PERMNO','byvar'])
assert_frame_equal(expect_df, wins, check_less_precise=True)
class TestRegBy(DataFrameTest):
def create_indf(self):
indf = self.df_weight.copy()
indf['key'] = indf['PERMNO'].astype(str) + '_' + indf['byvar']
return indf
def test_regby_nocons(self):
indf = self.create_indf()
expect_df = pd.DataFrame(data = [
(0.48774684748988806, '10516_a'),
(0.9388636664168903, '10516_b'),
(0.22929206076239614, '10517_a'),
], columns = ['coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key', cons=False)
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
def test_regby_cons(self):
indf = self.create_indf()
expect_df = pd.DataFrame(data = [
(0.49999999999999645, 5.329070518200751e-15, '10516_a'),
(0.9999999999999893, 1.0658141036401503e-14, '10516_b'),
(-32.89999999999997, 29.999999999999982, '10517_a'),
], columns = ['const', 'coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key')
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
def test_regby_cons_low_obs(self):
indf = self.create_indf().loc[:8,:] #makes it so that one byvar only has one obs
expect_df = pd.DataFrame(data = [
(0.49999999999999645, 5.329070518200751e-15, '10516_a'),
(0.9999999999999893, 1.0658141036401503e-14, '10516_b'),
(nan, nan, '10517_a'),
], columns = ['const', 'coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key')
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
class TestExpandMonths(DataFrameTest):
def test_expand_months_tradedays(self):
expect_df = pd.DataFrame(data = [
(Timestamp('2000-01-03 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-04 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-05 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-06 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-07 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-10 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-11 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-12 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-13 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-14 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-18 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-19 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-20 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-21 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-24 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-25 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-26 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-27 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-28 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-31 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['Daily Date', 'byvar', 'Date', 'TICKER'])
em = dero.pandas.expand_months(self.single_ticker_df)
assert_frame_equal(expect_df.sort_index(axis=1), em.sort_index(axis=1))
def test_expand_months_calendardays(self):
expect_df = pd.DataFrame(data = [
(Timestamp('2000-01-01 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-02 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-03 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-04 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-05 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-06 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-07 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-08 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-09 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-10 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-11 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-12 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-13 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-14 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-15 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-16 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-17 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-18 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-19 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-20 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-21 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-22 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
( | Timestamp('2000-01-23 00:00:00') | pandas.Timestamp |
import pandas as pd
import time
def patient(rdb):
""" Returns list of patients """
patients = """SELECT "Name" FROM patient ORDER BY index"""
try:
patients = pd.read_sql(patients, rdb)
patients = patients["Name"].values.tolist()
except:
patients = ['Patient']
return patients
def label(rdb):
""" Returns list of parameter for linear and bar drop down """
sql = """SELECT type FROM name WHERE type IN ('Heart Rate','Heart Rate Variability SDNN', 'Resting Heart Rate',
'VO2 Max','Walking Heart Rate Average')"""
sql2 = """SELECT type FROM name WHERE type NOT IN ('Heart Rate','Heart Rate Variability SDNN',
'Resting Heart Rate','VO2 Max','Walking Heart Rate Average')"""
try:
df, df2 = pd.read_sql(sql, rdb), pd.read_sql(sql2, rdb)
label_linear, label_bar = df["type"].values.tolist(), df2["type"].values.tolist()
except:
label_linear, label_bar = [], []
return label_linear, label_bar
def month(rdb, patient):
""" Returns list of months in database for selected patient """
sql = """SELECT DISTINCT TO_CHAR("Date",'YYYY-MM') AS month
FROM applewatch_numeric
WHERE "Name"='{}'
AND type ='Resting Heart Rate'
ORDER BY month""".format(patient)
try:
df = pd.read_sql(sql, rdb)
months = df['month'].to_list()
except:
months = []
return months
def week(rdb, patient):
""" Returns list of weeks in database for selected patient """
sql = """SELECT DISTINCT TO_CHAR("Date", 'IYYY/IW') AS week
FROM applewatch_numeric
WHERE "Name"='{}'
AND type ='Resting Heart Rate'
ORDER BY week """.format(patient)
try:
df = pd.read_sql(sql, rdb)
weeks = df['week'].to_list()
except:
weeks = []
return weeks
def min_max_date(rdb, patient):
""" Returns min and max date for selected patient """
sql = """SELECT min_date,max_date FROM patient WHERE "Name"='{}'""".format(patient)
try:
df = pd.read_sql(sql, rdb)
min_date, max_date = df['min_date'].iloc[0].date(), df['max_date'].iloc[0].date()
except:
min_date, max_date = '', ''
return min_date, max_date
def age_sex(rdb, patient):
""" Returns age and gender for selected patient"""
sql = """SELECT "Age","Sex" from patient where "Name"='{}' """.format(patient)
try:
df = pd.read_sql(sql, rdb)
age, sex = df['Age'][0], df['Sex'][0]
except:
age, sex = '', ''
return age, sex
def classification_ecg(rdb, patient):
""" Returns ecg classification for patient information card """
sql = """SELECT "Classification",count(*) FROM ecg WHERE "Patient"='{}' GROUP BY "Classification" """.format(patient)
try:
df = pd.read_sql(sql, rdb)
except:
df = pd.DataFrame()
return df
def number_of_days_more_6(rdb, patient):
""" Returns number of days the patient had the Apple Watch on their hand for more than 6 hours"""
sql = """SELECT count (*)
FROM (SELECT "Date"::date
FROM applewatch_categorical
WHERE "Name" = '{}'
AND "type" = 'Apple Stand Hour'
GROUP BY "Date"::date
HAVING count("Date"::date) > 6) days """.format(patient)
try:
df = pd.read_sql(sql, rdb)
df = df.iloc[0]['count']
except:
df = '0'
return df
def card(rdb, patient, group, date, value):
""" Returns DataFrame with resting, working, mean hear rate, step count, exercise time, activity for the cards """
if group == 'M':
to_char = """ TO_CHAR("Date",'YYYY-MM') """
group_by = "month"
elif group == 'W':
to_char = """ TO_CHAR("Date", 'IYYY/IW') """
group_by = "week"
elif group == 'DOW':
to_char = """ TRIM(TO_CHAR("Date", 'Day')) """
group_by = "DOW"
else:
to_char = """ "Date"::date """
group_by = "date"
value = date
sql = """SELECT {0} AS {3},type,
CASE
WHEN type in ('Active Energy Burned','Step Count','Apple Exercise Time') THEN SUM("Value")
WHEN type in ('Heart Rate','Walking Heart Rate Average','Resting Heart Rate') THEN AVG("Value")
END AS "Value"
FROM applewatch_numeric
WHERE "Name" = '{1}'
AND type in ('Active Energy Burned','Step Count','Apple Exercise Time','Heart Rate',
'Walking Heart Rate Average','Resting Heart Rate')
AND {0}='{2}'
GROUP BY {3},type""".format(to_char, patient, value, group_by)
try:
df = pd.read_sql(sql, rdb)
df["Value"] = df["Value"].round(2)
except:
df = pd.DataFrame()
return df
def table(rdb, patient, group, linear, bar):
""" Returns a table with the patient and parameters that were selected from drop downs """
if isinstance(linear, list):
linear = "'" + "','".join(linear) + "'"
else:
linear = "'" + linear + "'"
if group == 'M':
to_char = """ TO_CHAR("Date",'YYYY-MM')"""
group_by = "month"
elif group == 'W':
to_char = """ TO_CHAR("Date", 'IYYY/IW') """
group_by = "week"
elif group == 'DOW':
to_char = """ TRIM(TO_CHAR("Date",'Day')) """
group_by = ' "DOW" '
else:
to_char = """ "Date"::date """
group_by = "date"
sql = """SELECT {0} as {4},"type",
CASE WHEN type IN ('Heart Rate','Heart Rate Variability SDNN','Resting Heart Rate','VO2 Max',
'Walking Heart Rate Average') THEN AVG("Value") ELSE SUM("Value")
END AS "Value"
FROM applewatch_numeric
WHERE "Name" = '{1}'
AND "type" in ({2},'{3}')
GROUP BY {0},type
ORDER BY "type",{4} """.format(to_char, patient, linear, bar, group_by)
try:
df = pd.read_sql(sql, rdb)
if group == 'DOW':
cats = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
df['DOW'] = | pd.Categorical(df['DOW'], categories=cats, ordered=True) | pandas.Categorical |
#python librairies
# Data manipulation
import numpy as np
import pandas as pd
# Plotting
import matplotlib.pyplot as plt
import seaborn
import matplotlib.mlab as mlab
# Statistical calculation
from scipy.stats import norm
# Tabular data output
from tabulate import tabulate
from pandas_datareader import data as web
from tabulate import tabulate
from datetime import datetime
import plotly.graph_objs as go
import matplotlib.pyplot as plt
import yfinance as yf
import statsmodels.api as sm
from statsmodels import regression
plt.style.use('fivethirtyeight')
# ------------------------------------------------------------------------------------------
def graph_close(stock, start_date, end_date):
"""
Source and plot Close prices from yahoo for any given stock/s & period
Parameters
----------
stock : str,list
Either a single stock ticker or list of tickers.
start_date : str
Date in yyyy-mm-dd format
end_date : str
Date in yyyy-mm-dd format
"""
df = web.DataReader(stock, data_source='yahoo', start = start_date, end= end_date)['Close']
df = pd.DataFrame(df)
plt.figure(figsize=(20,10))
plt.plot(df.index, df[stock])
plt.xlabel("Date")
plt.ylabel("$ price")
plt.title(" Close Price from "+start_date + " to "+ end_date)
# ------------------------------------------------------------------------------------------
def graph_open(stock, start_date, end_date):
df = web.DataReader(stock, data_source='yahoo', start = start_date, end= end_date)['Open']
df = pd.DataFrame(df)
plt.figure(figsize=(20,10))
plt.plot(df.index, df[stock])
plt.xlabel("Date")
plt.ylabel("$ price")
plt.title(" Open Price from "+start_date + " to "+ end_date)
# ------------------------------------------------------------------------------------------
def graph_volume(stock, start_date, end_date):
df = web.DataReader(stock, data_source='yahoo', start = start_date, end= end_date)['Volume']
df = pd.DataFrame(df)
plt.figure(figsize=(20,10))
plt.plot(df.index, df[stock])
plt.xlabel("Date")
plt.ylabel("$ price")
plt.title(" Close Price from "+start_date + " to "+ end_date)
# ------------------------------------------------------------------------------------------
def graph_adj_close(stock, start_date, end_date):
df = web.DataReader(stock, data_source='yahoo', start = start_date, end= end_date)['Adj Close']
df = pd.DataFrame(df)
plt.figure(figsize=(20,10))
plt.plot(df.index, df[stock])
plt.xlabel("Date")
plt.ylabel("$ price")
plt.title(" Close Price from "+start_date + " to "+ end_date)
# ------------------------------------------------------------------------------------------
def close(stock, start_date, end_date):
df = web.DataReader(stock, data_source='yahoo', start = start_date, end= end_date)['Close']
df = pd.DataFrame(df)
return df
# ------------------------------------------------------------------------------------------
def open(stock, start_date, end_date):
df = web.DataReader(stock, data_source='yahoo', start = start_date, end= end_date)['Open']
df = pd.DataFrame(df)
return df
# ------------------------------------------------------------------------------------------
def adj_close(stock, start_date, end_date):
df = web.DataReader(stock, data_source='yahoo', start = start_date, end= end_date)['Adj Close']
df = pd.DataFrame(df)
return df
# ------------------------------------------------------------------------------------------
def volume(stock, start_date, end_date):
df = web.DataReader(stock, data_source='yahoo', start = start_date, end= end_date)['Volume']
df = pd.DataFrame(df)
return df
# ------------------------------------------------------------------------------------------
def returns(stocks, start_date, end_date):
df = web.DataReader(stocks, data_source='yahoo', start = start_date, end= end_date)['Close']
df = pd.DataFrame(df)
returns = df.pct_change()
return returns
# ------------------------------------------------------------------------------------------
def returns_graph(stock, start_date, end_date):
df = web.DataReader(stock, data_source='yahoo', start = start_date, end= end_date)['Close']
df = pd.DataFrame(df)
returns = df.pct_change()
plt.figure(figsize=(20,10))
plt.plot(returns.index, returns['Close'])
plt.xlabel("Date")
plt.ylabel("$ price")
plt.title(stock + " Adj Revenues from "+start_date + " to "+ end_date)
# ------------------------------------------------------------------------------------------
def covariance(stocks, start_date, end_date, days):
df = web.DataReader(stocks, data_source='yahoo', start = start_date, end= end_date )['Close']
df = pd.DataFrame(df)
returns = df.pct_change()
cov_matrix_annual = returns.cov()*days
return cov_matrix_annual
# ------------------------------------------------------------------------------------------
def correlation(stocks, start_date, end_date, method='pearson'):
df = web.DataReader(stocks, data_source='yahoo', start = start_date, end= end_date )['Close']
df = | pd.DataFrame(df) | pandas.DataFrame |
import numpy as np
import pandas as pd
import pytest
from dku_timeseries import IntervalRestrictor
from recipe_config_loading import get_interval_restriction_params
@pytest.fixture
def columns():
class COLUMNS:
date = "Date"
category = "categorical"
data = "value1"
return COLUMNS
@pytest.fixture
def config(columns):
config = {u'max_threshold': 400, u'min_threshold': 300, u'datetime_column': columns.date, u'advanced_activated': False, u'time_unit': u'days',
u'min_deviation_duration_value': 0, u'value_column': columns.data, u'min_valid_values_duration_value': 0}
return config
@pytest.fixture
def threshold_dict(config):
min_threshold = config.get('min_threshold')
max_threshold = config.get('max_threshold')
value_column = config.get('value_column')
threshold_dict = {value_column: (min_threshold, max_threshold)}
return threshold_dict
class TestIntervalFrequencies:
def test_day(self, config, threshold_dict, columns):
config["time_unit"] = "days"
params = get_interval_restriction_params(config)
df = get_df_DST("W", columns)
interval_restrictor = IntervalRestrictor(params)
output_df = interval_restrictor.compute(df, columns.date, threshold_dict)
expected_dates = pd.DatetimeIndex(['2019-02-03T00:59:00.000000000', '2019-02-10T00:59:00.000000000',
'2019-02-17T00:59:00.000000000', '2019-02-24T00:59:00.000000000'])
np.testing.assert_array_equal(expected_dates, output_df[columns.date].values)
def test_hours(self, config, threshold_dict, columns):
config["time_unit"] = "hours"
params = get_interval_restriction_params(config)
df = get_df_DST("H", columns)
interval_restrictor = IntervalRestrictor(params)
output_df = interval_restrictor.compute(df, columns.date, threshold_dict)
expected_dates = pd.DatetimeIndex(['2019-01-31T00:59:00.000000000', '2019-01-31T01:59:00.000000000',
'2019-01-31T02:59:00.000000000', '2019-01-31T03:59:00.000000000'])
np.testing.assert_array_equal(expected_dates, output_df[columns.date].values)
assert np.all(output_df["interval_id"].values == "0")
def test_minutes(self, config, threshold_dict, columns):
config["time_unit"] = "minutes"
params = get_interval_restriction_params(config)
df = get_df_DST("T", columns)
interval_restrictor = IntervalRestrictor(params)
output_df = interval_restrictor.compute(df, columns.date, threshold_dict)
expected_dates = pd.DatetimeIndex(['2019-01-31T00:59:00.000000000', '2019-01-31T01:00:00.000000000',
'2019-01-31T01:01:00.000000000', '2019-01-31T01:02:00.000000000'])
np.testing.assert_array_equal(expected_dates, output_df[columns.date].values)
assert np.all(output_df["interval_id"].values == "0")
def test_seconds(self, config, threshold_dict, columns):
config["time_unit"] = "seconds"
params = get_interval_restriction_params(config)
df = get_df_DST("S", columns)
interval_restrictor = IntervalRestrictor(params)
output_df = interval_restrictor.compute(df, columns.date, threshold_dict)
expected_dates = pd.DatetimeIndex(['2019-01-31T00:59:00.000000000', '2019-01-31T00:59:01.000000000',
'2019-01-31T00:59:02.000000000', '2019-01-31T00:59:03.000000000'])
np.testing.assert_array_equal(expected_dates, output_df[columns.date].values)
assert np.all(output_df["interval_id"].values == "0")
def test_milliseconds(self, config, threshold_dict, columns):
config["time_unit"] = "milliseconds"
params = get_interval_restriction_params(config)
df = get_df_DST("L", columns)
interval_restrictor = IntervalRestrictor(params)
output_df = interval_restrictor.compute(df, columns.date, threshold_dict)
expected_dates = pd.DatetimeIndex(['2019-01-31T00:59:00.000000000', '2019-01-31T00:59:00.001000000',
'2019-01-31T00:59:00.002000000', '2019-01-31T00:59:00.003000000'])
np.testing.assert_array_equal(expected_dates, output_df[columns.date].values)
assert np.all(output_df["interval_id"].values == "0")
def test_microseconds(self, config, threshold_dict, columns):
config["time_unit"] = "microseconds"
params = get_interval_restriction_params(config)
df = get_df_DST("U", columns)
interval_restrictor = IntervalRestrictor(params)
output_df = interval_restrictor.compute(df, columns.date, threshold_dict)
expected_dates = pd.DatetimeIndex(['2019-01-31T00:59:00.000000000', '2019-01-31T00:59:00.000001000',
'2019-01-31T00:59:00.000002000', '2019-01-31T00:59:00.000003000'])
np.testing.assert_array_equal(expected_dates, output_df[columns.date].values)
assert np.all(output_df["interval_id"].values == "0")
def test_nanoseconds(self, config, threshold_dict, columns):
config["time_unit"] = "nanoseconds"
params = get_interval_restriction_params(config)
df = get_df_DST("N", columns)
interval_restrictor = IntervalRestrictor(params)
output_df = interval_restrictor.compute(df, columns.date, threshold_dict)
expected_dates = pd.DatetimeIndex(['2019-01-31T00:59:00.000000000', '2019-01-31T00:59:00.000000001',
'2019-01-31T00:59:00.000000002', '2019-01-31T00:59:00.000000003'])
np.testing.assert_array_equal(expected_dates, output_df[columns.date].values)
assert np.all(output_df["interval_id"].values == "0")
def get_df_DST(frequency, columns):
JUST_BEFORE_SPRING_DST = pd.Timestamp('20190131 01:59:0000000').tz_localize('CET')
co2 = [315.58, 316.39, 316.79, 316.2, 666, 888]
co = [315.58, 77, 316.79, 66, 666, 888]
categorical = ["first", "first", "second", "second", "second", "second"]
time_index = | pd.date_range(JUST_BEFORE_SPRING_DST, periods=6, freq=frequency) | pandas.date_range |
"""
Tools to clean Balancing area data.
A data cleaning step is performed by an object that subclasses
the `BaDataCleaner` class.
"""
import os
import logging
import time
import re
from gridemissions.load import BaData
from gridemissions.eia_api import SRC, KEYS
import pandas as pd
import numpy as np
from collections import defaultdict
import cvxpy as cp
import dask
A = 1e4 # MWh
GAMMA = 10 # MWh
EPSILON = 1 # MWh
def na_stats(data, title, cols):
"""
Print NA statistics for a subset of a dataframe.
"""
print(
"%s:\t%.2f%%"
% (
title,
(
data.df.loc[:, cols].isna().sum().sum()
/ len(data.df)
/ len(data.df.loc[:, cols].columns)
* 100
),
)
)
class BaDataCleaner(object):
"""
Template class for data cleaning.
This is mostly just a shell to show how cleaning classes should operate.
"""
def __init__(self, ba_data):
"""
Parameters
----------
ba_data : BaData object
"""
self.d = ba_data
self.logger = logging.getLogger("clean")
def process(self):
pass
class BaDataBasicCleaner(BaDataCleaner):
"""
Basic data cleaning class.
We run this as the first step of the cleaning process.
"""
def process(self):
self.logger.info("Running BaDataBasicCleaner")
start = time.time()
data = self.d
missing_D_cols = [col for col in data.NG_cols if col not in data.D_cols]
self.logger.info("Adding demand columns for %d bas" % len(missing_D_cols))
for ba in missing_D_cols:
data.df.loc[:, data.KEY["D"] % ba] = 1.0
data.df.loc[:, data.KEY["NG"] % ba] -= 1.0
data.df.loc[:, data.KEY["TI"] % ba] -= 1.0
# AVRN only exports to BPAT - this is missing for now
if "AVRN" not in data.ID_cols:
self.logger.info("Adding trade columns for AVRN")
ba = "AVRN"
ba2 = "BPAT"
data.df.loc[:, data.KEY["ID"] % (ba, ba2)] = (
data.df.loc[:, data.KEY["NG"] % ba] - 1.0
)
data.df.loc[:, data.KEY["ID"] % (ba2, ba)] = (
-data.df.loc[:, data.KEY["NG"] % ba] + 1.0
)
# Add columns for biomass and geothermal for CISO
# We are assuming constant generation for each of these sources
# based on historical data. Before updating this, need to
# contact the EIA API maintainers to understand why this isn't
# reported and where to find it
self.logger.info("Adding GEO and BIO columns for CISO")
data.df.loc[:, "EBA.CISO-ALL.NG.GEO.H"] = 900.0
data.df.loc[:, "EBA.CISO-ALL.NG.BIO.H"] = 600.0
# data.df.loc[:, "EBA.CISO-ALL.NG.H"] += 600.0 + 900.0
# Add columns for the BAs that are outside of the US
foreign_bas = list(
set([col for col in data.ID_cols2 if col not in data.NG_cols])
)
self.logger.info(
"Adding demand, generation and TI columns for %d foreign bas"
% len(foreign_bas)
)
for ba in foreign_bas:
trade_cols = [col for col in data.df.columns if "%s.ID.H" % ba in col]
TI = -data.df.loc[:, trade_cols].sum(axis=1)
data.df.loc[:, data.KEY["TI"] % ba] = TI
exports = TI.apply(lambda x: max(x, 0))
imports = TI.apply(lambda x: min(x, 0))
data.df.loc[:, data.KEY["D"] % ba] = -imports
data.df.loc[:, data.KEY["NG"] % ba] = exports
if ba in ["BCHA", "HQT", "MHEB"]:
# Assume for these Canadian BAs generation is hydro
data.df.loc[:, data.KEY["SRC_WAT"] % ba] = exports
else:
# And all others are OTH (other)
data.df.loc[:, data.KEY["SRC_OTH"] % ba] = exports
for col in trade_cols:
ba2 = re.split(r"\.|-|_", col)[1]
data.df.loc[:, data.KEY["ID"] % (ba, ba2)] = -data.df.loc[:, col]
# Make sure that trade columns exist both ways
for col in data.get_cols(field="ID"):
ba = re.split(r"\.|-|_", col)[1]
ba2 = re.split(r"\.|-|_", col)[2]
othercol = data.KEY["ID"] % (ba2, ba)
if othercol not in data.df.columns:
self.logger.info("Adding %s" % othercol)
data.df.loc[:, othercol] = -data.df.loc[:, col]
# Filter unrealistic values using self.reject_dict
self._create_reject_dict()
cols = (
data.get_cols(field="D")
+ data.get_cols(field="NG")
+ data.get_cols(field="TI")
+ data.get_cols(field="ID")
)
for col in cols:
s = data.df.loc[:, col]
data.df.loc[:, col] = s.where(
(s >= self.reject_dict[col][0]) & (s <= self.reject_dict[col][1])
)
# Do the same for the generation by source columns
# If there is no generation by source, add one that is OTH
# Edge case for solar:
# There are a lot of values at -50 MWh or so during the night. We want
# to set those to 0, but consider that very negative values (below
# -1GW) are rejected
for ba in data.regions:
missing = True
for src in SRC:
col = data.KEY["SRC_%s" % src] % ba
if col in data.df.columns:
missing = False
s = data.df.loc[:, col]
if src == "SUN":
self.reject_dict[col] = (-1e3, 200e3)
data.df.loc[:, col] = s.where(
(s >= self.reject_dict[col][0])
& (s <= self.reject_dict[col][1])
)
if src == "SUN":
data.df.loc[:, col] = data.df.loc[:, col].apply(
lambda x: max(x, 0)
)
if missing:
data.df.loc[:, data.KEY["SRC_OTH"] % ba] = data.df.loc[
:, data.KEY["NG"] % ba
]
# Reinitialize fields
self.logger.info("Reinitializing fields")
data = BaData(df=data.df)
self.r = data
self.logger.info("Basic cleaning took %.2f seconds" % (time.time() - start))
def _create_reject_dict(self):
"""
Create a defaultdict to store ranges outside of which values are
considered unrealistic.
The default range is (-1., 200e3) MW. Manual ranges can be set for
specific columns here if that range is not strict enough.
"""
reject_dict = defaultdict(lambda: (-1.0, 200e3))
for col in self.d.get_cols(field="TI"):
reject_dict[col] = (-100e3, 100e3)
for col in self.d.get_cols(field="ID"):
reject_dict[col] = (-100e3, 100e3)
reject_dict["EBA.AZPS-ALL.D.H"] = (1.0, 30e3)
reject_dict["EBA.BANC-ALL.D.H"] = (1.0, 6.5e3)
reject_dict["EBA.BANC-ALL.TI.H"] = (-5e3, 5e3)
reject_dict["EBA.CISO-ALL.NG.H"] = (5e3, 60e3)
self.reject_dict = reject_dict
def rolling_window_filter(
df,
offset=10 * 24,
min_periods=100,
center=True,
replace_nan_with_mean=True,
return_mean=False,
):
"""
Apply a rolling window filter to a dataframe.
Filter using dynamic bounds: reject points that are farther than 4 standard
deviations from the mean, using a rolling window to compute the mean and
standard deviation.
Parameters
----------
df : pd.DataFrame
Dataframe to filter
offset : int
Passed on to pandas' rolling function
min_periods : int
Passed on to pandas' rolling function
center : bool
Passed on to pandas' rolling function
replace_nan_with_mean : bool
Whether to replace NaNs with the mean of the timeseries at the end of
the procedure
Notes
-----
Keeps at least 200 MWh around the mean as an acceptance range.
"""
for col in df.columns:
rolling_ = df[col].rolling(offset, min_periods=min_periods, center=center)
mean_ = rolling_.mean()
std_ = rolling_.std().apply(lambda x: max(100, x))
ub = mean_ + 4 * std_
lb = mean_ - 4 * std_
idx_reject = (df[col] >= ub) | (df[col] <= lb)
df.loc[idx_reject, col] = np.nan
if replace_nan_with_mean:
# First try interpolating linearly, but only for up to 3 hours
df.loc[:, col] = df.loc[:, col].interpolate(limit=3)
# If there is more than 3 hours of missing data, use rolling mean
df.loc[df[col].isnull(), col] = mean_.loc[df[col].isnull()]
if return_mean:
mean_ = df.rolling(offset, min_periods=min_periods, center=center).mean()
return (df, mean_)
return df
class BaDataRollingCleaner(BaDataCleaner):
"""
Rolling window cleaning.
This applies the `rolling_window_filter` function to the dataset. In order
to apply this properly to the beginning of the dataset, we load past data
that will be used for the cleaning - that is then dropped.
"""
def process(self, file_name="", folder_hist="", nruns=2):
"""
Processor function for the cleaner object.
Parameters
----------
file_name : str
Base name of the file from which to read historical data.
Data is read from "%s_basic.csv" % file_name
folder_hist : str
Folder from which to read historical data
nruns : int
Number of times to apply the rolling window procedure
Notes
-----
If we are not processing a large amount of data at a time, we may not
have enough data to appropriately estimate the rolling mean and
standard deviation for the rolling window procedure. If values are
given for `file_name` and `folder_hist`, data will be read from a
historical dataset to estimate the rolling mean and standard deviation.
If there are very large outliers, they can 'mask' smaller outliers.
Running the rolling window procedure a couple of times helps with this
issue.
"""
self.logger.info("Running BaDataRollingCleaner (%d runs)" % nruns)
start = time.time()
data = self.d
# Remember what part we are cleaning
idx_cleaning = data.df.index
try:
# Load the data we already have in memory
df_hist = pd.read_csv(
os.path.join(folder_hist, "%s_basic.csv" % file_name),
index_col=0,
parse_dates=True,
)
# Only take the last 1,000 rows
# Note that if df_hist has less than 1,000 rows,
# pandas knows to select df_hist without raising an error.
df_hist = df_hist.iloc[-1000:]
# Overwrite with the new data
old_rows = df_hist.index.difference(data.df.index)
df_hist = data.df.append(df_hist.loc[old_rows, :], sort=True)
df_hist.sort_index(inplace=True)
except FileNotFoundError:
self.logger.info("No history file")
df_hist = data.df
# Apply rolling horizon threshold procedure
# 20200206 update: don't try replacing NaNs anymore, leave that to the
# next step
for _ in range(nruns):
df_hist = rolling_window_filter(df_hist, replace_nan_with_mean=False)
# Deal with NaNs
# First deal with NaNs by taking the average of the previous day and
# next day. In general we observe strong daily patterns so this seems
# to work well. Limit the filling procedure to one day at a time. If
# there are multiple missing days, this makes for a smoother transition
# between the two valid days. If we had to do this more than 4 times,
# give up and use forward and backward fills without limits
for col in df_hist.columns:
npasses = 0
while (df_hist.loc[:, col].isna().sum() > 0) and (npasses < 4):
npasses += 1
df_hist.loc[:, col] = pd.concat(
[
df_hist.loc[:, col].groupby(df_hist.index.hour).ffill(limit=1),
df_hist.loc[:, col].groupby(df_hist.index.hour).bfill(limit=1),
],
axis=1,
).mean(axis=1)
if npasses == 4:
self.logger.debug("A lot of bad data for %s" % col)
df_hist.loc[:, col] = pd.concat(
[
df_hist.loc[:, col].groupby(df_hist.index.hour).ffill(),
df_hist.loc[:, col].groupby(df_hist.index.hour).bfill(),
],
axis=1,
).mean(axis=1)
# All bad data columns
if df_hist.loc[:, col].isna().sum() == len(df_hist):
df_hist.loc[:, col] = 0.0
# Some NaNs will still remain - try using the rolling mean average
df_hist, mean_ = rolling_window_filter(
df_hist, replace_nan_with_mean=True, return_mean=True
)
if df_hist.isna().sum().sum() > 0:
self.logger.warning("There are still some NaNs. Unexpected")
# Just keep the indices we are working on currently
data = BaData(df=df_hist.loc[idx_cleaning, :])
self.r = data
self.weights = mean_.loc[idx_cleaning, :].applymap(
lambda x: A / max(GAMMA, abs(x))
)
self.logger.info(
"Rolling window cleaning took %.2f seconds" % (time.time() - start)
)
class BaDataPyoCleaningModel(object):
"""
Create an AbstractModel() for the cleaning problem.
No data is passed into this model at this point, it is
simply written in algebraic form.
"""
def __init__(self):
m = pyo.AbstractModel()
# Sets
m.regions = pyo.Set()
m.srcs = pyo.Set()
m.regions2 = pyo.Set(within=m.regions * m.regions)
m.regions_srcs = pyo.Set(within=m.regions * m.srcs)
# Parameters
m.D = pyo.Param(m.regions, within=pyo.Reals)
m.NG = pyo.Param(m.regions, within=pyo.Reals)
m.TI = pyo.Param(m.regions, within=pyo.Reals)
m.ID = pyo.Param(m.regions2, within=pyo.Reals)
m.NG_SRC = pyo.Param(m.regions_srcs, within=pyo.Reals)
m.D_W = pyo.Param(m.regions, default=1.0, within=pyo.Reals)
m.NG_W = pyo.Param(m.regions, default=1.0, within=pyo.Reals)
m.TI_W = pyo.Param(m.regions, default=1.0, within=pyo.Reals)
m.ID_W = pyo.Param(m.regions2, default=1.0, within=pyo.Reals)
m.NG_SRC_W = pyo.Param(m.regions_srcs, default=1.0, within=pyo.Reals)
# Variables
# delta_NG_aux are aux variable for the case where there
# are no SRC data. In that case, the NG_sum constraint would
# only have: m.NG + m.delta_NG = 0.
m.delta_D = pyo.Var(m.regions, within=pyo.Reals)
m.delta_NG = pyo.Var(m.regions, within=pyo.Reals)
m.delta_TI = pyo.Var(m.regions, within=pyo.Reals)
m.delta_ID = pyo.Var(m.regions2, within=pyo.Reals)
m.delta_NG_SRC = pyo.Var(m.regions_srcs, within=pyo.Reals)
# m.delta_NG_aux = pyo.Var(m.regions, within=pyo.Reals)
# Constraints
m.D_positive = pyo.Constraint(m.regions, rule=self.D_positive)
m.NG_positive = pyo.Constraint(m.regions, rule=self.NG_positive)
m.NG_SRC_positive = pyo.Constraint(m.regions_srcs, rule=self.NG_SRC_positive)
m.energy_balance = pyo.Constraint(m.regions, rule=self.energy_balance)
m.antisymmetry = pyo.Constraint(m.regions2, rule=self.antisymmetry)
m.trade_sum = pyo.Constraint(m.regions, rule=self.trade_sum)
m.NG_sum = pyo.Constraint(m.regions, rule=self.NG_sum)
# Objective
m.total_penalty = pyo.Objective(rule=self.total_penalty, sense=pyo.minimize)
self.m = m
def D_positive(self, model, i):
return (model.D[i] + model.delta_D[i]) >= EPSILON
def NG_positive(self, model, i):
return (model.NG[i] + model.delta_NG[i]) >= EPSILON
def NG_SRC_positive(self, model, k, s):
return model.NG_SRC[k, s] + model.delta_NG_SRC[k, s] >= EPSILON
def energy_balance(self, model, i):
return (
model.D[i]
+ model.delta_D[i]
+ model.TI[i]
+ model.delta_TI[i]
- model.NG[i]
- model.delta_NG[i]
) == 0.0
def antisymmetry(self, model, i, j):
return (
model.ID[i, j]
+ model.delta_ID[i, j]
+ model.ID[j, i]
+ model.delta_ID[j, i]
== 0.0
)
def trade_sum(self, model, i):
return (
model.TI[i]
+ model.delta_TI[i]
- sum(
model.ID[k, l] + model.delta_ID[k, l]
for (k, l) in model.regions2
if k == i
)
) == 0.0
def NG_sum(self, model, i):
return (
model.NG[i]
+ model.delta_NG[i] # + model.delta_NG_aux[i]
- sum(
model.NG_SRC[k, s] + model.delta_NG_SRC[k, s]
for (k, s) in model.regions_srcs
if k == i
)
) == 0.0
def total_penalty(self, model):
return (
sum(
(
model.D_W[i] * model.delta_D[i] ** 2
+ model.NG_W[i] * model.delta_NG[i] ** 2
# + model.delta_NG_aux[i]**2
+ model.TI_W[i] * model.delta_TI[i] ** 2
)
for i in model.regions
)
+ sum(
model.ID_W[i, j] * model.delta_ID[i, j] ** 2
for (i, j) in model.regions2
)
+ sum(
model.NG_SRC_W[i, s] * model.delta_NG_SRC[i, s] ** 2
for (i, s) in model.regions_srcs
)
)
class BaDataPyoCleaner(BaDataCleaner):
"""
Optimization-based cleaning class.
Uses pyomo to build the model and Gurobi as the default solver.
"""
def __init__(self, ba_data, weights=None, solver="gurobi"):
super().__init__(ba_data)
import pyomo.environ as pyo
from pyomo.opt import SolverFactory
self.m = BaDataPyoCleaningModel().m
self.opt = SolverFactory(solver)
self.weights = weights
if weights is not None:
self.d.df = pd.concat(
[self.d.df, weights.rename(lambda x: x + "_W", axis=1)], axis=1
)
def process(self, debug=False):
start = time.time()
self.logger.info("Running BaDataPyoCleaner for %d rows" % len(self.d.df))
self.d.df = self.d.df.fillna(0)
if not debug:
self.r = self.d.df.apply(self._process, axis=1)
else:
r_list = []
delta_list = []
for idx, row in self.d.df.iterrows():
_, r, deltas = self._process(row, debug=True)
r_list.append(r)
delta_list.append(deltas)
self.r = pd.concat(r_list, axis=1).transpose()
self.deltas = pd.concat(delta_list, axis=1).transpose()
self.deltas.index = self.d.df.index
self.r.index = self.d.df.index
# Make sure the cleaning step performed as expected
self.r = BaData(df=self.r)
self.logger.info("Checking BAs...")
for ba in self.r.regions:
self.r.checkBA(ba)
self.logger.info("Execution took %.2f seconds" % (time.time() - start))
def _process(self, row, debug=False):
if row.isna().sum() > 0:
raise ValueError("Cannot call this method on data with NaNs")
i = self._create_instance(row)
self.opt.solve(i)
r = pd.concat(
[
pd.Series(
{
self.d.KEY["NG"] % k: (i.NG[k] + pyo.value(i.delta_NG[k]))
for k in i.regions
}
),
pd.Series(
{
self.d.KEY["D"] % k: (i.D[k] + pyo.value(i.delta_D[k]))
for k in i.regions
}
),
pd.Series(
{
self.d.KEY["TI"] % k: (i.TI[k] + pyo.value(i.delta_TI[k]))
for k in i.regions
}
),
pd.Series(
{
self.d.KEY["ID"]
% (k1, k2): (i.ID[k1, k2] + pyo.value(i.delta_ID[k1, k2]))
for (k1, k2) in i.regions2
}
),
pd.Series(
{
self.d.KEY["SRC_%s" % s]
% k: (i.NG_SRC[k, s] + pyo.value(i.delta_NG_SRC[k, s]))
for (k, s) in i.regions_srcs
}
),
]
)
deltas = pd.concat(
[
pd.Series(
{
self.d.KEY["NG"] % k: (pyo.value(i.delta_NG[k]))
for k in i.regions
}
),
pd.Series(
{self.d.KEY["D"] % k: (pyo.value(i.delta_D[k])) for k in i.regions}
),
pd.Series(
{
self.d.KEY["TI"] % k: (pyo.value(i.delta_TI[k]))
for k in i.regions
}
),
pd.Series(
{
self.d.KEY["ID"] % (k1, k2): (pyo.value(i.delta_ID[k1, k2]))
for (k1, k2) in i.regions2
}
),
pd.Series(
{
self.d.KEY["SRC_%s" % s] % k: (pyo.value(i.delta_NG_SRC[k, s]))
for (k, s) in i.regions_srcs
}
),
]
)
if not debug:
return r
return i, r, deltas
def _create_instance(self, row):
def append_W(x):
return [c + "_W" for c in x]
NG_SRC_data = self._get_ng_src(row)
NG_SRC_data_W = self._get_ng_src(row, weights=True)
opt_data = {
None: {
"regions": {None: self.d.regions},
"srcs": {None: SRC},
"regions2": {
None: list(
set(
[
(re.split(r"\.|-|_", el)[1], re.split(r"\.|-|_", el)[2])
for el in self.d.df.columns
if "ID" in re.split(r"\.|-|_", el)
]
)
)
},
"regions_srcs": {None: list(NG_SRC_data.keys())},
"D": self._reduce_cols(row.loc[self.d.get_cols(field="D")].to_dict()),
"NG": self._reduce_cols(row.loc[self.d.get_cols(field="NG")].to_dict()),
"TI": self._reduce_cols(row.loc[self.d.get_cols(field="TI")].to_dict()),
"ID": self._reduce_cols(
row.loc[self.d.get_cols(field="ID")].to_dict(), nfields=2
),
"NG_SRC": NG_SRC_data,
}
}
if self.weights is not None:
opt_data[None]["D_W"] = self._reduce_cols(
row.loc[append_W(self.d.get_cols(field="D"))].to_dict()
)
opt_data[None]["NG_W"] = self._reduce_cols(
row.loc[append_W(self.d.get_cols(field="NG"))].to_dict()
)
opt_data[None]["TI_W"] = self._reduce_cols(
row.loc[append_W(self.d.get_cols(field="TI"))].to_dict()
)
opt_data[None]["ID_W"] = self._reduce_cols(
row.loc[append_W(self.d.get_cols(field="ID"))].to_dict(), nfields=2
)
opt_data[None]["NG_SRC_W"] = NG_SRC_data_W
instance = self.m.create_instance(opt_data)
return instance
def _reduce_cols(self, mydict, nfields=1):
"""
Helper function to simplify the names in a dictionary
"""
newdict = {}
for k in mydict:
if nfields == 1:
newk = re.split(r"\.|-|_", k)[1]
elif nfields == 2:
newk = (re.split(r"\.|-|_", k)[1], re.split(r"\.|-|_", k)[2])
else:
raise ValueError("Unexpected argument")
newdict[newk] = mydict[k]
return newdict
def _get_ng_src(self, r, weights=False):
"""
Helper function to get the NG_SRC data.
"""
mydict = {}
for ba in self.d.regions:
for src in SRC:
col = self.d.KEY["SRC_%s" % src] % ba
if weights:
col += "_W"
if col in self.d.df.columns:
mydict[(ba, src)] = r[col]
return mydict
class BaDataCvxCleaner(BaDataCleaner):
"""
Optimization-based cleaning class.
Uses cvxpy.
"""
def __init__(self, ba_data, weights=None):
super().__init__(ba_data)
self.weights = weights
if weights is not None:
self.d.df = pd.concat(
[self.d.df, weights.rename(lambda x: x + "_W", axis=1)], axis=1
)
def process(self, debug=False, with_ng_src=True):
start = time.time()
self.logger.info("Running BaDataCvxCleaner for %d rows" % len(self.d.df))
self.d.df = self.d.df.fillna(0)
results = []
def cvx_solve(row, regions, debug=False):
if row.isna().sum() > 0:
raise ValueError("Cannot call this method on data with NaNs")
n_regions = len(regions)
D = row[[KEYS["E"]["D"] % r for r in regions]].values
D_W = [
el ** 0.5
for el in row[[KEYS["E"]["D"] % r + "_W" for r in regions]].values
]
NG = row[[KEYS["E"]["NG"] % r for r in regions]].values
NG_W = [
el ** 0.5
for el in row[[KEYS["E"]["NG"] % r + "_W" for r in regions]].values
]
TI = row[[KEYS["E"]["TI"] % r for r in regions]].values
TI_W = [
el ** 0.5
for el in row[[KEYS["E"]["TI"] % r + "_W" for r in regions]].values
]
delta_D = cp.Variable(n_regions, name="delta_D")
delta_NG = cp.Variable(n_regions, name="delta_NG")
delta_TI = cp.Variable(n_regions, name="delta_TI")
obj = (
cp.sum_squares(cp.multiply(D_W, delta_D))
+ cp.sum_squares(cp.multiply(NG_W, delta_NG))
+ cp.sum_squares(cp.multiply(TI_W, delta_TI))
)
ID = {}
ID_W = {}
for i, ri in enumerate(regions):
for j, rj in enumerate(regions):
if KEYS["E"]["ID"] % (ri, rj) in row.index:
ID[(ri, rj)] = row[KEYS["E"]["ID"] % (ri, rj)]
ID_W[(ri, rj)] = row[KEYS["E"]["ID"] % (ri, rj) + "_W"]
delta_ID = {k: cp.Variable(name=f"{k}") for k in ID}
constraints = [
D + delta_D >= 1.0,
NG + delta_NG >= 1.0,
D + delta_D + TI + delta_TI - NG - delta_NG == 0.0,
]
if with_ng_src:
NG_SRC = {}
NG_SRC_W = {}
for i, src in enumerate(SRC):
for j, r in enumerate(regions):
if KEYS["E"][f"SRC_{src}"] % r in row.index:
NG_SRC[(src, r)] = row[KEYS["E"][f"SRC_{src}"] % r]
NG_SRC_W[(src, r)] = row[KEYS["E"][f"SRC_{src}"] % r + "_W"]
delta_NG_SRC = {k: cp.Variable(name=f"{k}") for k in NG_SRC}
for k in NG_SRC:
constraints += [NG_SRC[k] + delta_NG_SRC[k] >= 1.0]
obj += NG_SRC_W[k] * delta_NG_SRC[k] ** 2
# Add the antisymmetry constraints twice is less efficient but not a huge deal.
for ri, rj in ID: # then (rj, ri) must also be in ID
constraints += [
ID[(ri, rj)]
+ delta_ID[(ri, rj)]
+ ID[(rj, ri)]
+ delta_ID[(rj, ri)]
== 0.0
]
obj += ID_W[(ri, rj)] * delta_ID[(ri, rj)] ** 2
for i, ri in enumerate(regions):
if with_ng_src:
constraints += [
NG[i]
+ delta_NG[i]
- cp.sum(
[
NG_SRC[(src, ri)] + delta_NG_SRC[(src, ri)]
for src in SRC
if (src, ri) in NG_SRC
]
)
== 0.0
]
constraints += [
TI[i]
+ delta_TI[i]
- cp.sum(
[
ID[(ri, rj)] + delta_ID[(ri, rj)]
for rj in regions
if (ri, rj) in ID
]
)
== 0.0
]
objective = cp.Minimize(obj)
prob = cp.Problem(objective, constraints)
prob.solve()
if with_ng_src:
r = pd.concat(
[
pd.Series(
NG + delta_NG.value,
index=[KEYS["E"]["NG"] % r for r in regions],
),
pd.Series(
D + delta_D.value,
index=[KEYS["E"]["D"] % r for r in regions],
),
pd.Series(
TI + delta_TI.value,
index=[KEYS["E"]["TI"] % r for r in regions],
),
pd.Series(
{KEYS["E"]["ID"] % k: ID[k] + delta_ID[k].value for k in ID}
),
pd.Series(
{
KEYS["E"][f"SRC_{s}"] % r: NG_SRC[(s, r)]
+ delta_NG_SRC[(s, r)].value
for (s, r) in NG_SRC
}
),
pd.Series({"CleaningObjective": prob.value})
]
)
else:
r = pd.concat(
[
pd.Series(
NG + delta_NG.value,
index=[KEYS["E"]["NG"] % r for r in regions],
),
pd.Series(
D + delta_D.value,
index=[KEYS["E"]["D"] % r for r in regions],
),
pd.Series(
TI + delta_TI.value,
index=[KEYS["E"]["TI"] % r for r in regions],
),
pd.Series(
{KEYS["E"]["ID"] % k: ID[k] + delta_ID[k].value for k in ID}
),
pd.Series({"CleaningObjective": prob.value})
]
)
if not debug:
return r
if with_ng_src:
deltas = pd.concat(
[
pd.Series(
delta_NG.value, index=[KEYS["E"]["NG"] % r for r in regions]
),
pd.Series(
delta_D.value, index=[KEYS["E"]["D"] % r for r in regions]
),
pd.Series(
delta_TI.value, index=[KEYS["E"]["TI"] % r for r in regions]
),
pd.Series({KEYS["E"]["ID"] % k: delta_ID[k].value for k in ID}),
pd.Series(
{
KEYS["E"][f"SRC_{s}"] % r: delta_NG_SRC[(s, r)].value
for (s, r) in NG_SRC
}
),
]
)
else:
deltas = pd.concat(
[
pd.Series(
delta_NG.value, index=[KEYS["E"]["NG"] % r for r in regions]
),
pd.Series(
delta_D.value, index=[KEYS["E"]["D"] % r for r in regions]
),
pd.Series(
delta_TI.value, index=[KEYS["E"]["TI"] % r for r in regions]
),
pd.Series({KEYS["E"]["ID"] % k: delta_ID[k].value for k in ID}),
]
)
return pd.concat([r, deltas.rename(lambda x: x + "_Delta")])
cvx_solve = dask.delayed(cvx_solve)
for idx, row in self.d.df.iterrows():
results.append(cvx_solve(row, self.d.regions, debug=debug))
results = dask.compute(*results, scheduler="processes")
df = | pd.DataFrame(results, index=self.d.df.index) | pandas.DataFrame |
# coding: utf-8
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from scipy.sparse import *
from scipy.sparse.linalg import svds
import math
from recsys.preprocess import *
from recsys.utility import *
import sys
import os
import math
RANDOM_STATE = 2342
np.random.seed(RANDOM_STATE)
print("Loading data")
train = pd.read_csv('data/train_final.csv', delimiter='\t')
playlists = pd.read_csv('data/playlists_final.csv', delimiter='\t')
target_playlists = | pd.read_csv('data/target_playlists.csv', delimiter='\t') | pandas.read_csv |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import joypy, matplotlib
import os, sys
'''
Run Instructions:
python3 boxplot_layout.py <block/latency> <violin/joy> <bin/no>
'''
block_or_lat = str(sys.argv[1])
plot_type = str(sys.argv[2])
bin_or_not = str(sys.argv[3])
round_dig = 2
plot_y_offset = 3500
plot_x_offset = -0.3
if bin_or_not == 'bin':
layout_names = ['binbfs', 'bindfs', 'binweighted\n dfs', 'binblock\n weighted\n dfs']
layout_list = [
'_binbfsthreads_1intertwine_2.csv',
'_bindfsthreads_1intertwine_2.csv',
'_binstatdfsthreads_1intertwine_2.csv',
'_binblockstatthreads_1intertwine_2.csv',
]
else:
layout_names = ['bfs', 'dfs', 'weighted\n dfs', 'block\n weighted\n dfs']
layout_list = [
'_bfsthreads_1intertwine_0.csv',
'_dfsthreads_1intertwine_0.csv',
'_statdfsthreads_1intertwine_0.csv',
'_statblockthreads_1intertwine_0.csv',
]
save_dir = '/home/ubuntu/pacset/plots'
head_dir = '/home/ubuntu/pacset/scripts/paper_plot_scripts/logs_embedded'
layout_filepath_list = [str(block_or_lat + layout_name) for layout_name in layout_list]
paths = [os.path.join(head_dir, fpath) for fpath in layout_filepath_list]
csv_list = [np.genfromtxt(path, delimiter=',') for path in paths]
csv_list_clean = [item[~np.isnan(item)] for item in csv_list]
data_dict = {}
mean_list = []
y_pos_list = []
for count, arr in enumerate(csv_list_clean):
print (layout_names[count], end=': ')
mean = round(np.mean(arr), round_dig)
mean_list.append(mean)
y_pos_list.append(np.max(arr) + plot_y_offset)
print(mean)
for key, value in zip(layout_names, csv_list_clean):
data_dict[key] = value
boxplot_df = | pd.DataFrame(data_dict) | pandas.DataFrame |
from .constraints import *
from .modifiers import *
from .querybuilder import CB
from .snippets import SNIPPETS as S, refresh_snippet_offsets
import sys
sys.path.append('/arrow/python')
import multiprocessing
import pandas as pd
import subprocess
import hashlib
import time
import gzip
import glob
import json
import io
import os
class Relation:
_ridx = 1
def __init__(self, evaluator, kind):
self.evaluator = evaluator
self.kind = kind
self.arguments = []
self.count_of = None
self.ridx = Relation._ridx
Relation._ridx += 1
def args(self, *args):
self.arguments = list(args)
return self
def process_arg(self, arg):
if arg is None:
return "_"
return arg
def get_args(self):
return ", ".join([ self.process_arg(x) for x in self.arguments ])
def make_count(self, label):
self.count_of = label
def __str__(self):
if self.kind == '$eq':
assert len(self.arguments) == 2
return self.process_arg(self.arguments[0]) + ' = ' + self.process_arg(self.arguments[1])
temp = self.kind + '(' + self.get_args() + ')'
if self.count_of is not None:
return self.count_of + ' = count : { ' + temp + ' }'
return temp
class IndexFilter:
def __init__(self, the_words):
self.the_words = the_words
def __call__(self, file):
with gzip.open(file, 'rb') as fh:
as_json = json.load(fh)
matches = []
for word in self.the_words:
matches.append(set(as_json[word] if word in as_json else []))
return matches[0].intersection(*matches[1:])
class Evaluator:
_dataset = '/data/test-1k'
@staticmethod
def use_ds_test_1k():
Evaluator._dataset = '/data/test-1k'
@staticmethod
def use_ds_gh_2017():
Evaluator._dataset = '/data/gh-2017'
@staticmethod
def use_ds_gh_2019():
Evaluator._dataset = '/data/gh-2019'
@staticmethod
def use_ds_gh_2020():
Evaluator._dataset = '/data/gh-2020'
def __init__(self, query, should_debug=False, prefilter_files=None):
query.idify()
self.inid = 1
self.worklist = [ query ]
self.debug = should_debug
self.outputs = []
self.outputs_typed = []
self.query = []
self.inputs = ''
self.dataset = Evaluator._dataset
self.labels = []
self.pre_filter_words = []
self.all_files = None
self.query_dl = None
self.files_prefilters = []
if prefilter_files is not None:
self.files_prefilters.append({
f.replace('/cb-target/', self.dataset + '/processed/')
for f in prefilter_files
})
self.query.append((str(
Relation(self, "file_info").args("fid", "fpath")
) + ',\n', 1))
self.set_output("fpath")
def build_query(self, compiled):
last = None
while len(self.worklist) > 0:
root = self.worklist[-1]
if len(root.children) == 0 or last is not None and (last in root.children):
self.visit(root)
self.worklist.pop()
last = root
else:
for child in root.children[::-1]:
child.parent = root
self.worklist.append(child)
header = self.get_header(compiled)
body = self.get_body()
footer = self.get_footer()
if self.debug:
print(header + body + footer)
return False
extra_to_hash = ""
prelude_prefix = "/app/applications/jupyter-extension/nteract_on_jupyter/notebooks/codebookold/python/snippets/"
with open(prelude_prefix + "/prelude.dl", "r") as fh:
extra_to_hash += fh.read()
with open(prelude_prefix + "/utils.dl", "r") as fh:
extra_to_hash += fh.read()
query_hash = hashlib.sha256(
(header + body + footer + extra_to_hash).encode('utf-8')
).hexdigest()
self.query_dl = '/tmp/old/queries/{}.dl'.format(query_hash)
self.query_prof_dl = '/tmp/old/queries/{}.prof.dl'.format(query_hash)
with open(self.query_dl, 'w') as fh:
fh.write(header + body + footer)
with open(self.query_prof_dl, 'w') as fh:
fh.write(self.get_header(False) + body + footer)
return True
def get_files_from_word_index(self, words):
files =[ set(pd.read_parquet(
'{}/indices/text-to-files'.format(self.dataset),
filters=[('text', '=', word)],
columns=['file']
).file.unique()) for word in words ]
return set.intersection(*files)
def compile_query(self):
if os.path.isfile('{}.cpp'.format(self.query_dl[:-3])):
print(' + Query already compiled (cached) `{}`'.format(self.query_dl))
return True
start = time.perf_counter()
profile_result = subprocess.run([
'/build/oldsouffle/src/souffle',
'-p',
'{}'.format(self.query_prof_dl.replace('.dl', '')),
'--profile-frequency',
'-F',
'/data/for-profiling',
'{}'.format(self.query_prof_dl),
], capture_output=True)
elapsed_time = time.perf_counter() - start
print(f" + Profile time: {elapsed_time:.4f}s")
start = time.perf_counter()
compile_result = subprocess.run([
'/build/oldsouffle/src/souffle',
'-PSIPS:profile-use',
'-u',
'{}'.format(self.query_prof_dl.replace('.dl', '')),
'-g-',
'{}'.format(self.query_dl)
], capture_output=True)
# FIXUP
the_program = compile_result.stdout.decode('utf-8')
the_program = the_program.replace('return 0;\n', '\nobj.dumpOutputs();\nreturn 0;\n')
with open('{}.cpp'.format(self.query_dl[:-3]), 'w') as fh:
fh.write(the_program)
compile_result = subprocess.run([
'/build/oldsouffle/src/souffle-compile',
'{}.cpp'.format(self.query_dl[:-3])
], capture_output=True)
elapsed_time = time.perf_counter() - start
if compile_result.stderr != b'' and b'error' in compile_result.stderr:
print("Souffle compile error")
print(compile_result.stderr)
return False
print(f" + Compile time: {elapsed_time:.4f}s")
return True
def select_files(self, limit=None):
start = time.perf_counter()
files = []
if len(self.pre_filter_words) > 0:
files = self.get_files_from_word_index(
self.pre_filter_words
)
if self.debug:
print("Retrived files from index (words pre-filter)...")
print(" + Found {} files".format(len(files)))
else:
self.all_files = []
with open('{}/indices/all-files.txt'.format(self.dataset), 'r') as fh:
text = fh.read()
for line in text.split('\n'):
if len(line.strip()) > 0:
self.all_files.append(line.strip())
self.all_files = set(self.all_files)
files = self.all_files
if self.debug:
print("Retrived all files (no index)...")
print(" + Found {} files".format(len(files)))
if len(self.files_prefilters) > 0:
allowable = set.intersection(*self.files_prefilters)
print(" + Had only {} allowable files (pre-filter files)".format(len(allowable)))
files = set.intersection(files, allowable)
if self.debug and limit is not None:
print("Limiting to {} files".format(limit))
elapsed_time = time.perf_counter() - start
print(f" + File select time: {elapsed_time:.4f}s")
print(" + Found {} matching files".format(len(files)))
if limit is not None:
return files[:limit]
return files
def eval(self, compile=False):
def divide_chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
refresh_snippet_offsets()
total_start = time.perf_counter()
# Build the query first
if not self.build_query(compile):
return | pd.DataFrame() | pandas.DataFrame |
# app/robo_advisor.py file
import requests
import os
import datetime
import json
import csv
from pandas import read_csv
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
# this function is used to convert from normal number to a currency (as to say)
def to_usd(my_price):
"""
Converts a numeric value to usd-formatted string, for printing and display purposes.
Param: my_price (int or float) like 4000.444444
Example: to_usd(4000.444444)
Returns: $4,000.44
"""
return f"${my_price:,.2f}"
run_time_date = datetime.datetime.now()
csv_file_path = os.path.join(os.path.dirname(__file__), "..", "data", "prices.csv")
def analysis(prices_dict):
latest_day = prices_dict["Meta Data"]["3. Last Refreshed"]
tsd = prices_dict["Time Series (Daily)"]
all_dates = list(tsd.keys())
sorted_dates = sorted(all_dates, reverse=True)
matching_date = sorted_dates[0] #since first item in the list is the most recent date
latest_close_price = tsd[matching_date]["4. close"]
high_prices = []
low_prices = []
#for loop to find the recent high and recent low
for date in all_dates:
high_price = tsd[date]["2. high"]
low_price = tsd[date]["3. low"]
high_prices.append(float(high_price))
low_prices.append(float(low_price))
recent_high = max(high_prices)
recent_low = min(low_prices)
results = {"Latest Price": latest_close_price,
"Recent High": recent_high,
"Recent Low": recent_low,
"Latest day": latest_day}
csv_file_path = os.path.join(os.path.dirname(__file__), "..", "data", "prices.csv")
csv_headers = ["timestamp", "open", "high", "low", "close", "volume"]
with open(csv_file_path, "w") as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=csv_headers)
writer.writeheader()
for date in all_dates:
daily_prices = tsd[date]
writer.writerow({
"timestamp": date,
"open": daily_prices["1. open"],
"high": daily_prices["2. high"],
"low": daily_prices["3. low"],
"close": daily_prices["4. close"],
"volume": daily_prices["5. volume"]
})
return results
while True:
user_ticker = input("Please input the stock ticker you would like to analyze: ")
# data validation (prelim) - if an invalid input then no HTTP request
if user_ticker.isalpha() or 1<= len(user_ticker) >=5:
ALPHAVANTAGE_API_KEY = os.getenv("ALPHAVANTAGE_API_KEY", default = "IBM")
request_url = "https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol="+user_ticker+"&apikey="+ALPHAVANTAGE_API_KEY
response = requests.get(request_url)
parsed_response = json.loads(response.text)
# valid input, however the ticker does not exist
if list(parsed_response.keys())[0] == "Error Message":
print("OOPS, the stock ticker you input does not exist. Please try again! ")
exit
else:
analytics = analysis(parsed_response)
# initializing a string in order to print it later on (for the recommendation)
# worked on the recommendation part with Susanna
recommendation = ""
recommendation_reason = ""
risk_acceptable = input("Please enter a valid risk threshold for you investment where the value must be between 1 and 10: ")
percentage_risk = float(risk_acceptable)/20
if (float(analytics["Latest Price"])-float(analytics["Recent Low"]))/float(analytics["Recent Low"]) > percentage_risk:
recommendation+= "NO BUY!"
recommendation_reason+= "This stock's risk is greater than your risk threshold."
else:
recommendation+= "BUY!"
recommendation_reason+= "This stock's risk lies within your desired risk threshold."
break
else:
print("OOPS, this is an invalid input. Make sure to input a valid stock ticker.")
exit
# second part of input validation is making sure - will be nested inside the else statement
print("-------------------------")
print("SELECTED TICKER: "+user_ticker.upper())
print("-------------------------")
print("REQUESTING STOCK MARKET DATA...")
print("REQUEST AT: "+run_time_date.strftime("%I:%M %p")+" on",run_time_date.strftime("%B %d")+", "+run_time_date.strftime("%Y"))
print("-------------------------")
print("LATEST DAY: ",analytics["Latest day"])
print("LATEST CLOSE: ",to_usd(float(analytics["Latest Price"])))
print("RECENT HIGH: ",to_usd(analytics["Recent High"]))
print("RECENT LOW: ",to_usd(analytics["Recent Low"]))
print("-------------------------")
print("RECOMMENDATION:",recommendation)
print("RECOMMENDATION REASON:",recommendation_reason)
print("-------------------------")
print("HAPPY INVESTING!")
print("-------------------------")
# whether user wants to see lineplot of prices
data_vis_in = input("Would you like to view a line graph that plots the prices of your desired stock over time? [y/n]")
if data_vis_in == "y":
prices_df = pd.read_csv(csv_file_path)
prices_df["timestamp"] = | pd.to_datetime(prices_df["timestamp"], format="%Y-%m-%d") | pandas.to_datetime |
import numpy as np
import pandas as pd
import os
import glob
import shutil
from IPython.display import clear_output
import seaborn as sns
import matplotlib.pyplot as plt
def get_hit_metrics(job_dir, iter_max=10, task_col='pcba-aid624173', cluster_col='BT_0.4 ID'):
def _get_hits_helper(iter_df):
hits = iter_df[task_col].sum()
return hits
des_cols = ['iter_num',
'exploitation_hits', 'exploration_hits', 'total_hits',
'total_unique_hits',
'exploitation_batch_size', 'exploration_batch_size', 'total_batch_size']
iter_results = []
iter_dfs = [pd.read_csv(job_dir + "/training_data/iter_0.csv")]
for iter_i in range(iter_max):
iter_dir = job_dir + "/iter_{}/".format(iter_i)
exploit_csv = iter_dir + 'exploitation.csv'
explore_csv = iter_dir + 'exploration.csv'
exploit_hits, exploit_unique_hits, exploit_batch_size = 0,0,0
explore_hits, explore_unique_hits, explore_batch_size = 0,0,0
curr_iter_dfs = []
if os.path.exists(exploit_csv):
exploit_df = pd.read_csv(exploit_csv)
exploit_hits = _get_hits_helper(exploit_df)
exploit_batch_size = exploit_df.shape[0]
curr_iter_dfs.append(exploit_df)
if os.path.exists(explore_csv):
explore_df = pd.read_csv(explore_csv)
explore_hits = _get_hits_helper(explore_df)
explore_batch_size = explore_df.shape[0]
curr_iter_dfs.append(explore_df)
total_hits = exploit_hits + explore_hits
total_batch_size = exploit_batch_size + explore_batch_size
curr_iter_df = pd.concat(curr_iter_dfs)
iter_hits = curr_iter_df[curr_iter_df[task_col] == 1]
# unique hits are those that belong to a cluster for which we have not found a hit in previous iters
train_df = pd.concat(iter_dfs)
train_hits = train_df[train_df[task_col] == 1]
total_unique_hits = iter_hits[~iter_hits[cluster_col].isin(train_hits[cluster_col])]
total_unique_hits = total_unique_hits[cluster_col].unique().shape[0]
iter_results.append([iter_i,
exploit_hits, explore_hits, total_hits,
total_unique_hits,
exploit_batch_size, explore_batch_size, total_batch_size])
iter_dfs.extend(curr_iter_dfs)
job_df = pd.DataFrame(iter_results,
columns=des_cols)
total_iters = job_df['iter_num'].max()
iter_sums = [10, 20, 30, 40, 50]
sums_list = []
for i in iter_sums:
job_slice = job_df[job_df['iter_num'] < i]
sum_df = job_slice.sum().to_frame().T
sums_list.append(sum_df)
sums_df = pd.concat(sums_list)
final_df = pd.concat([job_df, sums_df])
iter_sums = [9000+i for i in iter_sums]
final_df['iter_num'] = list(np.arange(iter_max)) + iter_sums
final_df['max_iter'] = total_iters
return final_df
def get_results(results_dir, iter_max=10, task_col='pcba-aid624173', cluster_col='BT_0.4 ID', run_count_threshold=5,
check_failure=True):
successful_jobs = []
failed_jobs = []
all_96 = []
all_384 = []
all_1536 = []
for i, rdir in enumerate(results_dir):
#clear_output()
#print('{}/{}'.format(i, len(results_dir)))
config_file = rdir+'config.csv'
# get job identifiers
rd_splits = rdir.split('\\')
hs_group = rd_splits[1]
hs_id = rd_splits[2]
task_col = rd_splits[3]
rf_id = rd_splits[4]
batch_size = rd_splits[5]
# check that the job completed succesfully:
# - exactly iter_max*batch_size cpds were selected and that they have unique Index ID
batch_cpds = glob.glob(rdir+'iter_*/expl*.csv')
if len(batch_cpds) > 0:
cpd_df = pd.concat([pd.read_csv(x) for x in batch_cpds])
if cpd_df['Index ID'].unique().shape[0] < iter_max*int(batch_size.split('_')[-1]):
print('Failed to reach 50 iters {}_{}_{}'.format(hs_id, rf_id, task_col))
if cpd_df['Index ID'].unique().shape[0] != cpd_df.shape[0]:
print('Failed to uniqueness condition {}_{}_{}'.format(hs_id, rf_id, task_col))
cpd_df.to_csv('./failed.csv')
assert False
if check_failure:
if cpd_df.shape[0] == iter_max*int(batch_size.split('_')[-1]):
successful_jobs.append('{}_{}_{}'.format(hs_id, rf_id, task_col))
assert cpd_df['Index ID'].unique().shape[0] == iter_max*int(batch_size.split('_')[-1])
else:
failed_jobs.append('{}_{}_{}'.format(hs_id, rf_id, task_col))
continue
else:
if cpd_df['Index ID'].unique().shape[0] == cpd_df.shape[0]:
successful_jobs.append('{}_{}_{}'.format(hs_id, rf_id, task_col))
else:
failed_jobs.append('{}_{}_{}'.format(hs_id, rf_id, task_col))
continue
else:
failed_jobs.append('{}_{}_{}'.format(hs_id, rf_id, task_col))
continue
hs_id = hs_id.replace('ClusterBasedWCSelector', 'CBWS')
hs_id = hs_id.replace('InstanceBasedWCSelector', 'InstanceBWS')
job_df = get_hit_metrics(rdir, len(glob.glob(rdir+'iter_*/')), task_col, cluster_col)
job_df['rf_id'] = rf_id
job_df['hs_id'] = hs_id
job_df['hs_group'] = hs_group
job_df['config_file'] = config_file
job_df['task_col'] = task_col
if int(batch_size.split('_')[-1]) == 96:
all_96.append(job_df)
elif int(batch_size.split('_')[-1]) == 384:
all_384.append(job_df)
else:
all_1536.append(job_df)
if len(all_96) > 0:
all_96 = pd.concat(all_96)
else:
all_96 = None
if len(all_384) > 0:
all_384 = pd.concat(all_384)
else:
all_384 = None
if len(all_1536) > 0:
all_1536 = pd.concat(all_1536)
else:
all_1536 = None
all_df = pd.concat([all_96, all_384, all_1536])
return all_96, all_384, all_1536, all_df, successful_jobs, failed_jobs
def helper_agg(col):
if col.name in ['rf_id', 'task_col']:
return '-'
elif col.name in ['hs_id', 'hs_group']:
return col.unique()[0]
else:
if '_std' in col.name:
return col.std()
else:
return col.mean()
def get_all_failures(results_df, iter_max):
rf_ids = results_df['rf_id'].unique().tolist()
task_cols = results_df['task_col'].unique().tolist()
hs_ids = ['CBWS_341', 'CBWS_55', 'CBWS_609',
'MABSelector_2', 'MABSelector_exploitive', 'CBWS_custom_1']
summary_df = results_df[results_df['iter_num']==iter_max]
cbrandom = summary_df[summary_df['hs_id'] == 'ClusterBasedRandom']
fail_success_counts = np.zeros(shape=(len(hs_ids),4))
for task in task_cols:
for rf_id in rf_ids:
for i, hs_id in enumerate(hs_ids):
temp_random = cbrandom[(cbrandom['rf_id'] == rf_id) & (cbrandom['task_col'] == task)]
rhits, runiquehits = temp_random['total_hits'].iloc[0], temp_random['total_unique_hits'].iloc[0]
temp_df = summary_df[(summary_df['rf_id'] == rf_id) & (summary_df['task_col'] == task) & (summary_df['hs_id'] == hs_id)]
mhits, muniquehits = temp_df['total_hits'].iloc[0], temp_df['total_unique_hits'].iloc[0]
hit_limit, unique_hit_limit = temp_df['hit_limit'].iloc[0], temp_df['unique_hit_limit'].iloc[0]
if (mhits <= rhits) or (muniquehits <= runiquehits):
fail_success_counts[i,0] += 1
if (mhits / hit_limit) >= 0.1:
fail_success_counts[i,1] += 1
if (mhits / hit_limit) >= 0.25:
fail_success_counts[i,2] += 1
if (mhits / hit_limit) >= 0.5:
fail_success_counts[i,3] += 1
fail_success_counts = pd.DataFrame(data=fail_success_counts,
columns=['# failures', '# >= 0.1', '# >= 0.25', '# >= 0.5'])
fail_success_counts['hs_id'] = hs_ids
return fail_success_counts
def get_last_iter_summary(results_df, iter_max, group_cols = ['hs_id', 'rf_id'],
add_fail_success_counts=False):
des_cols = ['hs_id', 'rf_id', 'max_iter', 'exploitation_hits', 'exploration_hits', 'total_hits',
'total_unique_hits', 'total_batch_size', 'hs_group', 'task_col']
sdf1 = results_df[results_df['iter_num']==iter_max][des_cols]
sdf1 = sdf1.groupby(group_cols).agg(helper_agg).sort_values('total_hits', ascending=False)
sorted_hid_list = sdf1.index.tolist()
sdf2 = results_df[results_df['iter_num']==iter_max][des_cols]
sdf2 = sdf2[[c for c in sdf2.columns if ('_hits' in c or 'hs_id' in c or 'rf_id' in c)]]
sdf2.columns = [c.replace('hits', 'std') for c in sdf2.columns]
sdf2 = sdf2.groupby(group_cols).agg(helper_agg).loc[sorted_hid_list]
sdf = pd.concat([sdf1, sdf2], axis=1)
if add_fail_success_counts:
fail_success_counts = get_all_failures(results_df, iter_max)
new_fs_cols = fail_success_counts.drop(['hs_id'], axis=1).columns.tolist()
for col in new_fs_cols:
sdf[col] = 0
sdf.loc[fail_success_counts['hs_id'].values, new_fs_cols] = fail_success_counts[new_fs_cols].values
return sdf
"""
for exp 3.1
"""
def get_stat_test_dict_exp3(results_df, iter_max, metric='total_hits'):
des_cols = ['hs_id', 'rf_id', 'max_iter', 'exploitation_hits', 'exploration_hits', 'total_hits',
'total_unique_hits', 'total_batch_size', 'hs_group', 'task_col']
results_df = results_df[results_df['iter_num']==iter_max][des_cols]
tasks = results_df['task_col'].unique()
rf_ids = results_df['rf_id'].unique()
hs_ids = results_df['hs_id'].unique()
task_data_df_dict = {}
for task_col in tasks:
data_df = pd.DataFrame(data=np.zeros((len(rf_ids),len(hs_ids))),
columns=hs_ids, index=rf_ids)
task_df = results_df[results_df['task_col'] == task_col]
for hs_id in hs_ids:
for rf_id in rf_ids:
tmp_df = task_df[(task_df['hs_id'] == hs_id) & (task_df['rf_id'] == rf_id)]
# for (strategy, rf_id) that don't exist, we set it to mean of strategy runs that do exist
metric_val = tmp_df[metric].iloc[0]
data_df.loc[rf_id, hs_id] = metric_val
task_data_df_dict[task_col] = data_df
return task_data_df_dict
"""
Computes contrast estimation based on medians in 4 steps as described in:
Garcia et al. 2010 https://sci2s.ugr.es/sites/default/files/files/TematicWebSites/sicidm/2010-Garcia-INS.pdf
see pages 6-8
exp 3.1
"""
def compute_custom_cem_exp3(results_df, iter_max, metric='total_hits'):
# get data in dataset (rows) vs strategy (columns) format
task_data_df_dict = get_stat_test_dict_exp3(results_df, iter_max, metric)
def custom_cem_helper(data_df):
# perform steps 1 and 2 of computing Zuv matrix
num_algorithms = data_df.columns.shape[0]
algorithm_names = data_df.columns.tolist()
Zuv_matrix = pd.DataFrame(data=np.zeros(shape=(num_algorithms, num_algorithms)),
columns=algorithm_names,
index=algorithm_names)
for u_idx in range(num_algorithms):
for v_idx in range(u_idx+1, num_algorithms):
u = algorithm_names[u_idx]
v = algorithm_names[v_idx]
tmp_df = data_df[[u, v]].copy()
tmp_df = tmp_df.dropna(axis=0)
u_arr = tmp_df[u].values
v_arr = tmp_df[v].values
# get difference vector of strategies u and v
perf_diff = u_arr - v_arr
# get median differences
median_diff = np.median(perf_diff)
# save to Zuv matrix
Zuv_matrix.loc[u,v] = median_diff
Zuv_matrix.loc[v,u] = -median_diff
# step 3 compute mean of median differens
mean_medians_diff = Zuv_matrix.mean(axis=1)
# step 4 compute difference of strategy u and v
cem_matrix = pd.DataFrame(data=np.zeros(shape=(num_algorithms, num_algorithms)),
columns=algorithm_names,
index=algorithm_names)
for u_idx in range(num_algorithms):
for v_idx in range(u_idx+1, num_algorithms):
u = algorithm_names[u_idx]
v = algorithm_names[v_idx]
u_val = mean_medians_diff.loc[u]
v_val = mean_medians_diff.loc[v]
# save to Zuv matrix
cem_matrix.loc[u,v] = u_val - v_val
cem_matrix.loc[v,u] = v_val - u_val
return cem_matrix
cem_task_dict = {}
for task_col in task_data_df_dict:
cem_data_df = task_data_df_dict[task_col]
cem_res = custom_cem_helper(cem_data_df)
cem_df = pd.DataFrame(cem_res, columns=cem_data_df.columns, index=cem_data_df.columns)
cem_task_dict[task_col] = cem_df
return task_data_df_dict, cem_task_dict
"""
cem for exp 3.1
"""
def compute_scmamp_cem_exp3(results_df, iter_max, metric='total_hits'):
import rpy2.robjects as robjects
import rpy2.robjects.packages as rpackages
from stat_analysis import setup_scmamp_rpy2
setup_scmamp_rpy2()
scmamp = rpackages.importr('scmamp')
task_data_df_dict = get_stat_test_dict_exp3(results_df, iter_max, metric)
cem_task_dict = {}
for task_col in task_data_df_dict:
data_df = task_data_df_dict[task_col]
cem_res = scmamp.contrastEstimationMatrix(data_df)
cem_df = pd.DataFrame(cem_res, columns=data_df.columns, index=data_df.columns)
cem_task_dict[task_col] = cem_df
return task_data_df_dict, cem_task_dict
"""
Failure of a (strategy, task, rf_id) combo is defined by having total_hits or total_unique_hits
not exceed that of ClusterBasedRandom or InstanceBasedRandom.
"""
def get_task_failures_dict(results_df, iter_max):
rf_ids = results_df['rf_id'].unique().tolist()
task_cols = results_df['task_col'].unique().tolist()
hs_ids = ['CBWS_341', 'CBWS_55', 'CBWS_609',
'MABSelector_2', 'MABSelector_exploitive', 'CBWS_custom_1']
summary_df = results_df[results_df['iter_num']==iter_max]
cbrandom = summary_df[summary_df['hs_id'] == 'ClusterBasedRandom']
ibrandom = summary_df[summary_df['hs_id'] == 'InstanceBasedRandom']
fail_success_counts_dict = {}
for task in task_cols:
fail_success_counts = np.zeros(shape=(len(hs_ids),4))
for rf_id in rf_ids:
for i, hs_id in enumerate(hs_ids):
temp_cbrandom = cbrandom[(cbrandom['rf_id'] == rf_id) & (cbrandom['task_col'] == task)]
temp_ibrandom = ibrandom[(ibrandom['rf_id'] == rf_id) & (ibrandom['task_col'] == task)]
rcbhits, rcbuniquehits = temp_cbrandom['total_hits'].iloc[0], temp_cbrandom['total_unique_hits'].iloc[0]
ribhits, ribuniquehits = temp_ibrandom['total_hits'].iloc[0], temp_ibrandom['total_unique_hits'].iloc[0]
temp_df = summary_df[(summary_df['rf_id'] == rf_id) & (summary_df['task_col'] == task) & (summary_df['hs_id'] == hs_id)]
mhits, muniquehits = temp_df['total_hits'].iloc[0], temp_df['total_unique_hits'].iloc[0]
hit_limit, unique_hit_limit = temp_df['hit_limit'].iloc[0], temp_df['unique_hit_limit'].iloc[0]
if (mhits <= rcbhits) or (muniquehits <= rcbuniquehits) or (mhits <= ribhits) or (muniquehits <= ribuniquehits):
fail_success_counts[i,0] += 1
if (mhits / hit_limit) >= 0.1:
fail_success_counts[i,1] += 1
if (mhits / hit_limit) >= 0.25:
fail_success_counts[i,2] += 1
if (mhits / hit_limit) >= 0.5:
fail_success_counts[i,3] += 1
fail_success_counts = pd.DataFrame(data=fail_success_counts,
columns=['# failures', '# >= 0.1', '# >= 0.25', '# >= 0.5'])
fail_success_counts['hs_id'] = hs_ids
fail_success_counts_dict[task] = fail_success_counts
return fail_success_counts_dict
"""
for exp 3.1
"""
def plot_cem_heatmap_exp3(cem_df, title, figsize=(16, 16), fail_success_counts=None):
from matplotlib.collections import QuadMesh
from matplotlib.text import Text
add_fail_success_counts = False
if fail_success_counts is not None:
add_fail_success_counts = True
heatmap_df = cem_df.copy()
heatmap_df[' '] = 0
heatmap_df['Total Wins'] = (cem_df > 0).sum(axis=1)
heatmap_df = heatmap_df.sort_values('Total Wins', ascending=False)
ordered_wins_hs_ids = heatmap_df['Total Wins'].index.tolist()
heatmap_df = heatmap_df[ordered_wins_hs_ids + [' ', 'Total Wins']]
facecolor_limit = 3
shrink_factor = 0.6
if add_fail_success_counts:
heatmap_df['# Failures (out of 10)'] = np.nan
heatmap_df['# >= 10%'] = np.nan
heatmap_df['# >= 25%'] = np.nan
heatmap_df['# >= 50%'] = np.nan
facecolor_limit=7
shrink_factor = 0.5
for hs_id in fail_success_counts['hs_id'].unique():
tmp_df = fail_success_counts[fail_success_counts['hs_id'] == hs_id]
failures_cnt = tmp_df['# failures'].iloc[0]
a, b, c = tmp_df['# >= 0.1'].iloc[0], tmp_df['# >= 0.25'].iloc[0], tmp_df['# >= 0.5'].iloc[0]
heatmap_df.loc[hs_id, '# Failures (out of 10)'] = failures_cnt
heatmap_df.loc[hs_id, '# >= 10%'] = a
heatmap_df.loc[hs_id, '# >= 25%'] = b
heatmap_df.loc[hs_id, '# >= 50%'] = c
labels = []
for i, row in heatmap_df.iterrows():
x = row['Total Wins']
addendum_labels = ['', '{}'.format(x)]
if add_fail_success_counts:
f, a, b, c = row['# Failures (out of 10)'], row['# >= 10%'], row['# >= 25%'], row['# >= 50%']
addendum_labels += ['{}'.format(f), '{}'.format(a), '{}'.format(b), '{}'.format(c)]
tmp = ['' for _ in range(heatmap_df.shape[0])] + addendum_labels
labels.append(tmp)
labels = np.array(labels)
fig, ax = plt.subplots(1, 1, figsize=figsize)
sns.heatmap(heatmap_df, annot=labels, linewidths=1, linecolor='grey',
fmt='', square=True, cbar_kws={"shrink": shrink_factor})
# find your QuadMesh object and get array of colors
quadmesh = ax.findobj(QuadMesh)[0]
facecolors = quadmesh.get_facecolors()
# make colors of the last column white
# set modified colors
quadmesh.set_facecolors = facecolors
for i in range(1, facecolor_limit):
facecolors[np.arange(heatmap_df.shape[1]-i, heatmap_df.shape[0]*heatmap_df.shape[1],
heatmap_df.shape[1])] = np.array([1,1,1,1])
# set color of all text to black
for i in ax.findobj(Text):
i.set_color('black')
plt.title(title)
plt.show()
cem_wins_df = heatmap_df['Total Wins']
return cem_wins_df
def plot_cem_heatmap_all_tasks_exp3(cem_task_dict, task_info, fail_success_counts_dict,
title, figsize=(16, 16), add_fail_success_counts=True,
tasks_per_row=10, shrink_factor=0.1, fontsize=35, metric='Total Hits',
save_fmt='./exp3/cem/', title_y=0.55, hspace=0.2, wspace=0.2):
from matplotlib.collections import QuadMesh
from matplotlib.text import Text
hs_ids_order = ['CBWS_341', 'CBWS_55', 'CBWS_609',
'MABSelector_2', 'MABSelector_exploitive', 'CBWS_custom_1',
'ClusterBasedRandom', 'InstanceBasedRandom']
task_info = task_info.sort_values('active_ratio')
tasks = task_info['task_col'].tolist()
task_labels = ['{}\n{} cpds\n{} hits\n{}% hits'.format(row['task_col'].replace('pcba-', ''),
row['cpd_count'], row['hit_limit'],
row['active_ratio']) for i, row in task_info.iterrows()]
cem_wins_dict = {}
total_iters = int(np.ceil(len(tasks)/tasks_per_row))
latex_lines = []
for task_batch in range(total_iters):
tasks_subset = tasks[task_batch*tasks_per_row:(task_batch+1)*tasks_per_row]
curr_tasks_per_row = len(tasks_subset)
if task_batch != (total_iters-1):
fig, axes = plt.subplots(2, curr_tasks_per_row//2, figsize=figsize)
axes = axes.flatten()
else:
fig, axes = plt.subplots(1, 2, figsize=(50, 20))
axes = axes.flatten()[:2]
for axes_i, task_col in enumerate(tasks_subset):
task_hit_limit = task_info[task_info['task_col'] == task_col]['hit_limit'].iloc[0]
task_title='Task: {}. Hit limit: {}.'.format(task_col, task_hit_limit)
cem_df = cem_task_dict[task_col]
if add_fail_success_counts:
fail_success_counts = fail_success_counts_dict[task_col]
heatmap_df = cem_df.copy()
heatmap_df[' '] = 0
heatmap_df['Total Wins'] = (cem_df > 0).sum(axis=1)
heatmap_df = heatmap_df.loc[hs_ids_order]
heatmap_df = heatmap_df[hs_ids_order + [' ', 'Total Wins']]
#heatmap_df = heatmap_df.sort_values('Total Wins', ascending=False)
#ordered_wins_hs_ids = heatmap_df['Total Wins'].index.tolist()
#heatmap_df = heatmap_df[ordered_wins_hs_ids + [' ', 'Total Wins']]
facecolor_limit = 3
if add_fail_success_counts:
heatmap_df['# Failures (out of 10)'] = np.nan
heatmap_df['# >= 10%'] = np.nan
heatmap_df['# >= 25%'] = np.nan
heatmap_df['# >= 50%'] = np.nan
facecolor_limit=7
for hs_id in fail_success_counts['hs_id'].unique():
tmp_df = fail_success_counts[fail_success_counts['hs_id'] == hs_id]
failures_cnt = tmp_df['# failures'].iloc[0]
a, b, c = tmp_df['# >= 0.1'].iloc[0], tmp_df['# >= 0.25'].iloc[0], tmp_df['# >= 0.5'].iloc[0]
heatmap_df.loc[hs_id, '# Failures (out of 10)'] = failures_cnt
heatmap_df.loc[hs_id, '# >= 10%'] = a
heatmap_df.loc[hs_id, '# >= 25%'] = b
heatmap_df.loc[hs_id, '# >= 50%'] = c
labels = []
for i, row in heatmap_df.iterrows():
x = int(row['Total Wins'])
addendum_labels = ['', '{}'.format(x)]
if add_fail_success_counts:
f, a, b, c = row['# Failures (out of 10)'], row['# >= 10%'], row['# >= 25%'], row['# >= 50%']
if not np.isnan(f):
f = int(f)
if not np.isnan(a):
a = int(a)
if not np.isnan(b):
b = int(b)
if not np.isnan(c):
c = int(c)
addendum_labels += ['{}'.format(f), '{}'.format(a), '{}'.format(b), '{}'.format(c)]
tmp = ['' for _ in range(heatmap_df.shape[0])] + addendum_labels
labels.append(tmp)
labels = np.array(labels)
cmap = plt.get_cmap("RdYlGn")
sns.heatmap(heatmap_df, annot=labels, linewidths=1, linecolor='grey', cmap=cmap,
fmt='', square=True, cbar_kws={"shrink": shrink_factor}, ax=axes[axes_i])
# find your QuadMesh object and get array of colors
quadmesh = axes[axes_i].findobj(QuadMesh)[0]
facecolors = quadmesh.get_facecolors()
# make colors of the last column white
# set modified colors
quadmesh.set_facecolors = facecolors
for i in range(1, facecolor_limit):
facecolors[np.arange(heatmap_df.shape[1]-i, heatmap_df.shape[0]*heatmap_df.shape[1],
heatmap_df.shape[1])] = np.array([1,1,1,1])
locs = axes[axes_i].get_xticks()
locs = [i+0.35 for i in locs]
axes[axes_i].set_xticks(locs)
# set color of all text to black
for i in axes[axes_i].findobj(Text):
i.set_color('black')
axes[axes_i].set_title(task_title, y=1.06, fontsize=fontsize)
if axes_i%2 > 0:
axes[axes_i].set_yticks([])
if (axes_i//2 > 0) or (task_batch == (total_iters-1)):
axes[axes_i].set_xticklabels(axes[axes_i].get_xticklabels(), rotation=70, ha='right')
else:
if task_batch != (total_iters-1):
axes[axes_i].set_xticks([])
cem_wins_df = heatmap_df['Total Wins']
cem_wins_dict[task_col] = cem_wins_df
fig.tight_layout()
plt.suptitle(title, fontsize=fontsize, y=title_y)
fig.subplots_adjust(hspace=hspace, wspace=wspace)
if save_fmt is not None:
plt.savefig(save_fmt+'{}_{}.png'.format(metric.replace(' ', '_'), task_batch+1), bbox_inches='tight');
plt.show()
latex_lines.append('\\vspace*{\\fill}')
latex_lines.append('\\begin{figure}[H]\\ContinuedFloat')
latex_lines.append('\\centering')
latex_lines.append('\\includegraphics[width=\\textwidth]{project_al/experiments/exp3/cem/'+'{}_{}'.format(metric.replace(' ', '_'), task_batch+1)+'.png}')
cont_line = '\\emph{('+ '{} of {} cont.'.format(task_batch+1, total_iters) +')}}'
latex_lines.append('\\caption[]{Experiment 3.1 per-task contrast estimation based on medians (CEM) heatmaps for \\textbf{'+metric+'} after 50 iterations along with extra columns denoting counts for various conditions. '+cont_line)
latex_lines.append("\\end{figure}")
latex_lines.append("\\vspace*{\\fill}")
with open(save_fmt+"/latex_{}.txt".format(metric), 'w') as f:
for line in latex_lines:
f.write("{}\n".format(line))
return cem_wins_dict
def plot_boxplots_simple_exp3(results_df, iter_max, task_info,
figsize=(16, 12), metric='total_hits',
title='', xlabel='', ylabel='', save_fmt=None,
fontsize=35, labelpad=20, tasks_per_plot=10, legendfontsize=25):
hue_order = ['CBWS_341', 'CBWS_55', 'CBWS_609',
'MABSelector_2', 'MABSelector_exploitive', 'CBWS_custom_1',
'ClusterBasedRandom', 'InstanceBasedRandom']
results_df = results_df[results_df['iter_num']==iter_max]
task_info = task_info.sort_values('active_ratio')
tasks = task_info['task_col'].tolist()
task_labels = ['{}\n{} cpds\n{} hits\n{}% hits'.format(row['task_col'].replace('pcba-', ''),
row['cpd_count'], row['hit_limit'],
row['active_ratio']) for i, row in task_info.iterrows()]
latex_lines = []
total_iters = int(np.ceil(len(tasks)/tasks_per_plot))
for task_batch in range(total_iters):
tasks_subset = tasks[task_batch*tasks_per_plot:(task_batch+1)*tasks_per_plot]
xtick_labels = task_labels[task_batch*tasks_per_plot:(task_batch+1)*tasks_per_plot]
trimmed_results_df = results_df[results_df['task_col'].isin(tasks_subset)]
fig, ax = plt.subplots(figsize=figsize)
sns.boxplot(x="task_col", y=metric, hue="hs_id", data=trimmed_results_df,
order=tasks_subset, hue_order=hue_order)
locs, _ = plt.xticks()
locs = [i-0.4 for i in locs]
plt.xticks(locs, xtick_labels, ha='left')
plt.xlabel(xlabel, fontsize=fontsize, labelpad=labelpad)
plt.ylabel(ylabel, fontsize=fontsize, labelpad=labelpad)
plt.title(title + ' (plot {} of {})'.format(task_batch+1, total_iters), fontsize=fontsize, y=1.05)
[plt.axvline(x+0.5, color='r', linestyle='--') for x in range(tasks_per_plot-1)] # from:https://stackoverflow.com/a/60375919
ax.legend(title='Hyperparameter ID:', title_fontsize=legendfontsize, fontsize=legendfontsize)
if save_fmt is not None:
plt.savefig(save_fmt+'boxplots_{}_{}.png'.format(metric, task_batch+1), bbox_inches='tight');
plt.show()
latex_lines.append('\\vspace*{\\fill}')
latex_lines.append('\\begin{figure}[H]\\ContinuedFloat')
latex_lines.append('\\centering')
latex_lines.append('\\includegraphics[width=\\textwidth]{project_al/experiments/exp3/boxplots/boxplots_'+metric+'_'+str(task_batch+1)+'.png}')
latex_lines.append('\\caption[]{Experiment 3.1 per-task \\textbf{Total Hits} boxplots after 50 iterations (102 tasks). ')
latex_lines.append("The x-tick labels for each task include number of compounds, number of hits, and hit \\%. \\emph{(cont.)} }")
latex_lines.append("\\end{figure}")
latex_lines.append("\\vspace*{\\fill}")
latex_lines.append("\\newpage")
with open(save_fmt+"/latex_{}.txt".format(metric), 'w') as f:
for line in latex_lines:
f.write("{}\n".format(line))
def get_win_summary_df(task_info, cem_all_iters_metric_dict):
tasks = task_info.sort_values('active_ratio')['task_col'].unique()
hs_ids_order = ['CBWS_341', 'CBWS_55', 'CBWS_609',
'MABSelector_2', 'MABSelector_exploitive', 'CBWS_custom_1',
'ClusterBasedRandom', 'InstanceBasedRandom']
iter_max_dict = {9010: 10, 9020: 20, 9030: 30, 9040: 40, 9050: 50}
metric_dict = {'total_hits': 'Total Hits', 'total_unique_hits': 'Total Unique Hits'}
data_1 = []
for metric in metric_dict:
for iter_max in iter_max_dict:
cem_task_dict, fail_success_counts_dict = cem_all_iters_metric_dict['{}_{}'.format(metric, iter_max)]
for task_col in tasks:
cem_df = cem_task_dict[task_col]
fail_success_df = fail_success_counts_dict[task_col]
cem_wins_df = (cem_df > 0).sum(axis=1)
top_strategy = cem_wins_df[cem_wins_df == cem_wins_df.max()]
top_strategy = "|".join(top_strategy.index.tolist())
data_1.append([metric_dict[metric], iter_max_dict[iter_max], task_col, top_strategy])
metric_task_top_strats_df = | pd.DataFrame(data=data_1, columns=['Metric', '# Iterations', 'Task', 'Best Strategy (Ties)']) | pandas.DataFrame |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
from unittest import TestCase, skipUnless
import numpy as np
import pandas as pd
from fbprophet import Prophet
DATA = pd.read_csv(
os.path.join(os.path.dirname(__file__), 'data.csv'),
parse_dates=['ds'],
)
DATA2 = pd.read_csv(
os.path.join(os.path.dirname(__file__), 'data2.csv'),
parse_dates=['ds'],
)
class TestProphet(TestCase):
@staticmethod
def rmse(predictions, targets):
return np.sqrt(np.mean((predictions - targets) ** 2))
def test_fit_predict(self):
days = 30
N = DATA.shape[0]
train = DATA.head(N - days)
test = DATA.tail(days)
test.reset_index(inplace=True)
forecaster = Prophet()
forecaster.fit(train, seed=1237861298)
np.random.seed(876543987)
future = forecaster.make_future_dataframe(days, include_history=False)
future = forecaster.predict(future)
# this gives ~ 10.64
res = self.rmse(future['yhat'], test['y'])
self.assertTrue(15 > res > 5, msg="backend: {}".format(forecaster.stan_backend))
def test_fit_predict_newton(self):
days = 30
N = DATA.shape[0]
train = DATA.head(N - days)
test = DATA.tail(days)
test.reset_index(inplace=True)
forecaster = Prophet()
forecaster.fit(train, seed=1237861298, algorithm='Newton')
np.random.seed(876543987)
future = forecaster.make_future_dataframe(days, include_history=False)
future = forecaster.predict(future)
# this gives ~ 10.64
res = self.rmse(future['yhat'], test['y'])
self.assertAlmostEqual(res, 23.44, places=2, msg="backend: {}".format(forecaster.stan_backend))
@skipUnless("--test-slow" in sys.argv, "Skipped due to the lack of '--test-slow' argument")
def test_fit_sampling_predict(self):
days = 30
N = DATA.shape[0]
train = DATA.head(N - days)
test = DATA.tail(days)
test.reset_index(inplace=True)
forecaster = Prophet(mcmc_samples=500)
forecaster.fit(train, seed=1237861298, chains=4)
np.random.seed(876543987)
future = forecaster.make_future_dataframe(days, include_history=False)
future = forecaster.predict(future)
# this gives ~ 215.77
res = self.rmse(future['yhat'], test['y'])
self.assertTrue(236 > res > 193, msg="backend: {}".format(forecaster.stan_backend))
def test_fit_predict_no_seasons(self):
N = DATA.shape[0]
train = DATA.head(N // 2)
future = DATA.tail(N // 2)
forecaster = Prophet(weekly_seasonality=False,
yearly_seasonality=False)
forecaster.fit(train)
forecaster.predict(future)
def test_fit_predict_no_changepoints(self):
N = DATA.shape[0]
train = DATA.head(N // 2)
future = DATA.tail(N // 2)
forecaster = Prophet(n_changepoints=0)
forecaster.fit(train)
forecaster.predict(future)
forecaster = Prophet(n_changepoints=0, mcmc_samples=100)
forecaster.fit(train)
forecaster.predict(future)
def test_fit_changepoint_not_in_history(self):
train = DATA[(DATA['ds'] < '2013-01-01') | (DATA['ds'] > '2014-01-01')]
future = pd.DataFrame({'ds': DATA['ds']})
prophet = Prophet(changepoints=['2013-06-06'])
forecaster = prophet
forecaster.fit(train)
forecaster.predict(future)
def test_fit_predict_duplicates(self):
N = DATA.shape[0]
train1 = DATA.head(N // 2).copy()
train2 = DATA.head(N // 2).copy()
train2['y'] += 10
train = train1.append(train2)
future = pd.DataFrame({'ds': DATA['ds'].tail(N // 2)})
forecaster = Prophet()
forecaster.fit(train)
forecaster.predict(future)
def test_fit_predict_constant_history(self):
N = DATA.shape[0]
train = DATA.head(N // 2).copy()
train['y'] = 20
future = pd.DataFrame({'ds': DATA['ds'].tail(N // 2)})
m = Prophet()
m.fit(train)
fcst = m.predict(future)
self.assertEqual(fcst['yhat'].values[-1], 20)
train['y'] = 0
future = pd.DataFrame({'ds': DATA['ds'].tail(N // 2)})
m = Prophet()
m.fit(train)
fcst = m.predict(future)
self.assertEqual(fcst['yhat'].values[-1], 0)
def test_fit_predict_uncertainty_disabled(self):
N = DATA.shape[0]
train = DATA.head(N // 2)
future = DATA.tail(N // 2)
for uncertainty in [0, False]:
m = Prophet(uncertainty_samples=uncertainty)
m.fit(train)
fcst = m.predict(future)
expected_cols = ['ds', 'trend', 'additive_terms',
'multiplicative_terms', 'weekly', 'yhat']
self.assertTrue(all(col in expected_cols
for col in fcst.columns.tolist()))
def test_setup_dataframe(self):
m = Prophet()
N = DATA.shape[0]
history = DATA.head(N // 2).copy()
history = m.setup_dataframe(history, initialize_scales=True)
self.assertTrue('t' in history)
self.assertEqual(history['t'].min(), 0.0)
self.assertEqual(history['t'].max(), 1.0)
self.assertTrue('y_scaled' in history)
self.assertEqual(history['y_scaled'].max(), 1.0)
def test_setup_dataframe_ds_column(self):
"Test case where 'ds' exists as an index name and column"
df = DATA.copy()
df.index = df.loc[:, 'ds']
m = Prophet()
m.fit(df)
def test_logistic_floor(self):
m = Prophet(growth='logistic')
N = DATA.shape[0]
history = DATA.head(N // 2).copy()
history['floor'] = 10.
history['cap'] = 80.
future = DATA.tail(N // 2).copy()
future['cap'] = 80.
future['floor'] = 10.
m.fit(history, algorithm='Newton')
self.assertTrue(m.logistic_floor)
self.assertTrue('floor' in m.history)
self.assertAlmostEqual(m.history['y_scaled'][0], 1.)
self.assertEqual(m.fit_kwargs, {'algorithm': 'Newton'})
fcst1 = m.predict(future)
m2 = Prophet(growth='logistic')
history2 = history.copy()
history2['y'] += 10.
history2['floor'] += 10.
history2['cap'] += 10.
future['cap'] += 10.
future['floor'] += 10.
m2.fit(history2, algorithm='Newton')
self.assertAlmostEqual(m2.history['y_scaled'][0], 1.)
fcst2 = m2.predict(future)
fcst2['yhat'] -= 10.
# Check for approximate shift invariance
self.assertTrue((np.abs(fcst1['yhat'] - fcst2['yhat']) < 1).all())
def test_flat_growth(self):
m = Prophet(growth='flat')
N = DATA.shape[0]
history = DATA.head(N // 2).copy()
m.fit(history)
future = m.make_future_dataframe(N // 2, include_history=False)
fcst = m.predict(future)
m_ = m.params['m']
k = m.params['k']
self.assertEqual(k[0, 0], 0)
self.assertEqual(fcst['trend'].unique(), m_*m.y_scale)
self.assertEqual(np.round(m_[0,0]*m.y_scale), 26)
def test_invalid_growth_input(self):
msg = 'Parameter "growth" should be "linear", ' \
'"logistic" or "flat".'
with self.assertRaisesRegex(ValueError, msg):
Prophet(growth="constant")
def test_get_changepoints(self):
m = Prophet()
N = DATA.shape[0]
history = DATA.head(N // 2).copy()
history = m.setup_dataframe(history, initialize_scales=True)
m.history = history
m.set_changepoints()
cp = m.changepoints_t
self.assertEqual(cp.shape[0], m.n_changepoints)
self.assertEqual(len(cp.shape), 1)
self.assertTrue(cp.min() > 0)
cp_indx = int(np.ceil(0.8 * history.shape[0]))
self.assertTrue(cp.max() <= history['t'].values[cp_indx])
def test_set_changepoint_range(self):
m = Prophet(changepoint_range=0.4)
N = DATA.shape[0]
history = DATA.head(N // 2).copy()
history = m.setup_dataframe(history, initialize_scales=True)
m.history = history
m.set_changepoints()
cp = m.changepoints_t
self.assertEqual(cp.shape[0], m.n_changepoints)
self.assertEqual(len(cp.shape), 1)
self.assertTrue(cp.min() > 0)
cp_indx = int(np.ceil(0.4 * history.shape[0]))
self.assertTrue(cp.max() <= history['t'].values[cp_indx])
with self.assertRaises(ValueError):
m = Prophet(changepoint_range=-0.1)
with self.assertRaises(ValueError):
m = Prophet(changepoint_range=2)
def test_get_zero_changepoints(self):
m = Prophet(n_changepoints=0)
N = DATA.shape[0]
history = DATA.head(N // 2).copy()
history = m.setup_dataframe(history, initialize_scales=True)
m.history = history
m.set_changepoints()
cp = m.changepoints_t
self.assertEqual(cp.shape[0], 1)
self.assertEqual(cp[0], 0)
def test_override_n_changepoints(self):
m = Prophet()
history = DATA.head(20).copy()
history = m.setup_dataframe(history, initialize_scales=True)
m.history = history
m.set_changepoints()
self.assertEqual(m.n_changepoints, 15)
cp = m.changepoints_t
self.assertEqual(cp.shape[0], 15)
def test_fourier_series_weekly(self):
mat = Prophet.fourier_series(DATA['ds'], 7, 3)
# These are from the R forecast package directly.
true_values = np.array([
0.7818315, 0.6234898, 0.9749279, -0.2225209, 0.4338837, -0.9009689
])
self.assertAlmostEqual(np.sum((mat[0] - true_values) ** 2), 0.0)
def test_fourier_series_yearly(self):
mat = Prophet.fourier_series(DATA['ds'], 365.25, 3)
# These are from the R forecast package directly.
true_values = np.array([
0.7006152, -0.7135393, -0.9998330, 0.01827656, 0.7262249, 0.6874572
])
self.assertAlmostEqual(np.sum((mat[0] - true_values) ** 2), 0.0)
def test_growth_init(self):
model = Prophet(growth='logistic')
history = DATA.iloc[:468].copy()
history['cap'] = history['y'].max()
history = model.setup_dataframe(history, initialize_scales=True)
k, m = model.linear_growth_init(history)
self.assertAlmostEqual(k, 0.3055671)
self.assertAlmostEqual(m, 0.5307511)
k, m = model.logistic_growth_init(history)
self.assertAlmostEqual(k, 1.507925, places=4)
self.assertAlmostEqual(m, -0.08167497, places=4)
k,m = model.flat_growth_init(history)
self.assertEqual(k, 0)
self.assertAlmostEqual(m, 0.49335657, places=4)
def test_piecewise_linear(self):
model = Prophet()
t = np.arange(11.)
m = 0
k = 1.0
deltas = np.array([0.5])
changepoint_ts = np.array([5])
y = model.piecewise_linear(t, deltas, k, m, changepoint_ts)
y_true = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0,
6.5, 8.0, 9.5, 11.0, 12.5])
self.assertEqual((y - y_true).sum(), 0.0)
t = t[8:]
y_true = y_true[8:]
y = model.piecewise_linear(t, deltas, k, m, changepoint_ts)
self.assertEqual((y - y_true).sum(), 0.0)
def test_piecewise_logistic(self):
model = Prophet()
t = np.arange(11.)
cap = np.ones(11) * 10
m = 0
k = 1.0
deltas = np.array([0.5])
changepoint_ts = np.array([5])
y = model.piecewise_logistic(t, cap, deltas, k, m, changepoint_ts)
y_true = np.array([5.000000, 7.310586, 8.807971, 9.525741, 9.820138,
9.933071, 9.984988, 9.996646, 9.999252, 9.999833,
9.999963])
self.assertAlmostEqual((y - y_true).sum(), 0.0, places=5)
t = t[8:]
y_true = y_true[8:]
cap = cap[8:]
y = model.piecewise_logistic(t, cap, deltas, k, m, changepoint_ts)
self.assertAlmostEqual((y - y_true).sum(), 0.0, places=5)
def test_flat_trend(self):
model = Prophet()
t = np.arange(11)
m = 0.5
y = model.flat_trend(t, m)
y_true = np.array([0.5]*11)
self.assertEqual((y - y_true).sum(), 0)
t = t[8:]
y_true = y_true[8:]
y = model.flat_trend(t, m)
self.assertEqual((y - y_true).sum(), 0)
def test_holidays(self):
holidays = pd.DataFrame({
'ds': pd.to_datetime(['2016-12-25']),
'holiday': ['xmas'],
'lower_window': [-1],
'upper_window': [0],
})
model = Prophet(holidays=holidays)
df = pd.DataFrame({
'ds': pd.date_range('2016-12-20', '2016-12-31')
})
feats, priors, names = model.make_holiday_features(df['ds'], model.holidays)
# 11 columns generated even though only 8 overlap
self.assertEqual(feats.shape, (df.shape[0], 2))
self.assertEqual((feats.sum(0) - np.array([1.0, 1.0])).sum(), 0)
self.assertEqual(priors, [10., 10.]) # Default prior
self.assertEqual(names, ['xmas'])
holidays = pd.DataFrame({
'ds': pd.to_datetime(['2016-12-25']),
'holiday': ['xmas'],
'lower_window': [-1],
'upper_window': [10],
})
m = Prophet(holidays=holidays)
feats, priors, names = m.make_holiday_features(df['ds'], m.holidays)
# 12 columns generated even though only 8 overlap
self.assertEqual(feats.shape, (df.shape[0], 12))
self.assertEqual(priors, list(10. * np.ones(12)))
self.assertEqual(names, ['xmas'])
# Check prior specifications
holidays = pd.DataFrame({
'ds': pd.to_datetime(['2016-12-25', '2017-12-25']),
'holiday': ['xmas', 'xmas'],
'lower_window': [-1, -1],
'upper_window': [0, 0],
'prior_scale': [5., 5.],
})
m = Prophet(holidays=holidays)
feats, priors, names = m.make_holiday_features(df['ds'], m.holidays)
self.assertEqual(priors, [5., 5.])
self.assertEqual(names, ['xmas'])
# 2 different priors
holidays2 = pd.DataFrame({
'ds': pd.to_datetime(['2012-06-06', '2013-06-06']),
'holiday': ['seans-bday'] * 2,
'lower_window': [0] * 2,
'upper_window': [1] * 2,
'prior_scale': [8] * 2,
})
holidays2 = pd.concat((holidays, holidays2), sort=True)
m = Prophet(holidays=holidays2)
feats, priors, names = m.make_holiday_features(df['ds'], m.holidays)
pn = zip(priors, [s.split('_delim_')[0] for s in feats.columns])
for t in pn:
self.assertIn(t, [(8., 'seans-bday'), (5., 'xmas')])
holidays2 = pd.DataFrame({
'ds': pd.to_datetime(['2012-06-06', '2013-06-06']),
'holiday': ['seans-bday'] * 2,
'lower_window': [0] * 2,
'upper_window': [1] * 2,
})
holidays2 = pd.concat((holidays, holidays2), sort=True)
feats, priors, names = Prophet(
holidays=holidays2, holidays_prior_scale=4
).make_holiday_features(df['ds'], holidays2)
self.assertEqual(set(priors), {4., 5.})
# Check incompatible priors
holidays = pd.DataFrame({
'ds': pd.to_datetime(['2016-12-25', '2016-12-27']),
'holiday': ['xmasish', 'xmasish'],
'lower_window': [-1, -1],
'upper_window': [0, 0],
'prior_scale': [5., 6.],
})
with self.assertRaises(ValueError):
Prophet(holidays=holidays).make_holiday_features(df['ds'], holidays)
def test_fit_with_holidays(self):
holidays = pd.DataFrame({
'ds': pd.to_datetime(['2012-06-06', '2013-06-06']),
'holiday': ['seans-bday'] * 2,
'lower_window': [0] * 2,
'upper_window': [1] * 2,
})
model = Prophet(holidays=holidays, uncertainty_samples=0)
model.fit(DATA).predict()
def test_fit_predict_with_country_holidays(self):
holidays = pd.DataFrame({
'ds': | pd.to_datetime(['2012-06-06', '2013-06-06']) | pandas.to_datetime |
import pandas as pd
df_list = []
for year in range(2007, 2020):
for month in range(1, 13):
file_path_stub = ('/work/akilby/npi/raw/ptaxcode%s%s' % (year, month))
try:
file_path = '%s.csv' % file_path_stub
df = pd.read_csv(file_path)
df['month'] = pd.to_datetime('%s-%s' % (year, month))
df = df.query('ptaxcode=="390200000X"')
df_list.append(df)
print('Combining Files: %s-%s' % (year, month))
except FileNotFoundError:
print('Warning: data does not exist')
try:
file_path2 = '%s.dta' % file_path_stub
df2 = pd.read_stata(file_path2)
df2['month'] = | pd.to_datetime('%s-%s' % (year, month)) | pandas.to_datetime |
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import operator
from collections import OrderedDict
from datetime import datetime
from itertools import chain
import warnings
import numpy as np
from pandas import (notna, DataFrame, Series, MultiIndex, date_range,
Timestamp, compat)
import pandas as pd
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.apply import frame_apply
from pandas.util.testing import (assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.conftest import _get_cython_table_params
from pandas.tests.frame.common import TestData
class TestDataFrameApply(TestData):
def test_apply(self):
with np.errstate(all='ignore'):
# ufunc
applied = self.frame.apply(np.sqrt)
tm.assert_series_equal(np.sqrt(self.frame['A']), applied['A'])
# aggregator
applied = self.frame.apply(np.mean)
assert applied['A'] == np.mean(self.frame['A'])
d = self.frame.index[0]
applied = self.frame.apply(np.mean, axis=1)
assert applied[d] == np.mean(self.frame.xs(d))
assert applied.index is self.frame.index # want this
# invalid axis
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
pytest.raises(ValueError, df.apply, lambda x: x, 2)
# see gh-9573
df = DataFrame({'c0': ['A', 'A', 'B', 'B'],
'c1': ['C', 'C', 'D', 'D']})
df = df.apply(lambda ts: ts.astype('category'))
assert df.shape == (4, 2)
assert isinstance(df['c0'].dtype, CategoricalDtype)
assert isinstance(df['c1'].dtype, CategoricalDtype)
def test_apply_mixed_datetimelike(self):
# mixed datetimelike
# GH 7778
df = DataFrame({'A': date_range('20130101', periods=3),
'B': pd.to_timedelta(np.arange(3), unit='s')})
result = df.apply(lambda x: x, axis=1)
assert_frame_equal(result, df)
def test_apply_empty(self):
# empty
applied = self.empty.apply(np.sqrt)
assert applied.empty
applied = self.empty.apply(np.mean)
assert applied.empty
no_rows = self.frame[:0]
result = no_rows.apply(lambda x: x.mean())
expected = | Series(np.nan, index=self.frame.columns) | pandas.Series |
import glob
import os
import shutil
import pandas as pd
import netCDF4 as nc
import numpy as np
from .prep import scaffold_workdir
from ._workflow import analyze_region
from ._vocab import mid_col
from ._vocab import gid_col
from ._vocab import metric_nc_name_list
from ._vocab import cal_nc_name
def sample_gauges(workdir: str, overwrite: bool = False) -> None:
"""
Prepares working directories in the validation_runs directory and populates the data_processed and gis_inputs
folders with data from the master working directory. The gauges tables placed in the validation runs are randomly
chosen samples of the master gauge table.
Args:
workdir: the project working directory
overwrite: delete the old validation directory before sampling
Returns:
None
"""
vr_path = os.path.join(workdir, 'validation_runs')
if overwrite:
if os.path.exists(vr_path):
shutil.rmtree(vr_path)
os.mkdir(vr_path)
gt = pd.read_csv(os.path.join(workdir, 'gis_inputs', 'gauge_table.csv'))
initial_row_count = gt.shape[0]
rows_to_drop = round(gt.shape[0] / 10)
for i in range(5):
# some math related to which iteration of filtering we're on
n = initial_row_count - rows_to_drop * (i + 1)
pct_remain = 100 - 10 * (i + 1)
subpath = os.path.join(vr_path, f'{pct_remain}')
# create the new project working directory
os.mkdir(subpath)
scaffold_workdir(subpath, include_validation=False)
# overwrite the processed data directory so we don't need to redo this each time
shutil.copytree(
os.path.join(workdir, 'data_processed'),
os.path.join(subpath, 'data_processed'),
dirs_exist_ok=True
)
# sample the gauge table
gt = gt.sample(n=n)
gt.to_csv(os.path.join(subpath, 'gis_inputs', 'gauge_table.csv'), index=False)
shutil.copyfile(os.path.join(workdir, 'gis_inputs', 'drain_table.csv'),
os.path.join(subpath, 'gis_inputs', 'drain_table.csv'))
# filter the copied processed data to only contain the gauges included in this filtered step
processed_sim_data = glob.glob(os.path.join(subpath, 'data_processed', 'obs-*.csv'))
for f in processed_sim_data:
a = pd.read_csv(f, index_col=0)
a = a.filter(items=gt[gid_col].astype(str))
a.to_csv(f)
return
def run_series(workdir: str, drain_shape: str, obs_data_dir: str = None) -> None:
"""
Runs saber.analyze_region on each project in the validation_runs directory
Args:
workdir: the project working directory
drain_shape: path to the drainage line gis file
obs_data_dir: path to the directory containing the observed data
Returns:
None
"""
gauge_table = pd.read_csv(os.path.join(workdir, 'gis_inputs', 'gauge_table.csv'))
val_workdirs = [i for i in glob.glob(os.path.join(workdir, 'validation_runs', '*')) if os.path.isdir(i)]
for val_workdir in val_workdirs:
print(f'\n\n\t\t\tworking on {val_workdir}\n\n')
analyze_region(val_workdir, drain_shape, gauge_table, obs_data_dir)
return
def gen_val_table(workdir: str) -> pd.DataFrame:
"""
Prepares the validation summary table that contains the list of gauged rivers plus their statistics computed in
each validation run. Used to create gis files for mapping the results.
Args:
workdir: the project working directory
Returns:
pandas.DataFrame
"""
df = pd.read_csv(os.path.join(workdir, 'gis_inputs', 'gauge_table.csv'))
df['100'] = 1
stats_df = {}
a = nc.Dataset(os.path.join(workdir, cal_nc_name))
stats_df[mid_col] = np.asarray(a[mid_col][:])
for metric in metric_nc_name_list:
arr = np.asarray(a[metric][:])
stats_df[f'{metric}_raw'] = arr[:, 0]
stats_df[f'{metric}_adj'] = arr[:, 1]
a.close()
for d in sorted(
[a for a in glob.glob(os.path.join(workdir, 'validation_runs', '*')) if os.path.isdir(a)],
reverse=True
):
val_percent = os.path.basename(d)
valset_gids = pd.read_csv(os.path.join(d, 'gis_inputs', 'gauge_table.csv'))[gid_col].values.tolist()
# mark a column indicating the gauges included in the validation set
df[val_percent] = 0
df.loc[df[gid_col].isin(valset_gids), val_percent] = 1
# add columns for the metrics of all gauges during this validation set
a = nc.Dataset(os.path.join(d, cal_nc_name))
for metric in metric_nc_name_list:
stats_df[f'{metric}_{val_percent}'] = np.asarray(a[metric][:])[:, 1]
a.close()
# merge gauge_table with the stats, save and return
df = df.merge( | pd.DataFrame(stats_df) | pandas.DataFrame |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %%
import pandas as pd
import pandas.util.testing as tu
# %% [markdown]
# ## Setup
#
# Multiindex is used to highlight specific behavior.
# %%
d1 = pd.DataFrame({
'a': [1, 1, 3],
'b': [11, 22, 22],
'c': [4, 9, 9],
'd': [5, 4, 3]}
).set_index(['a', 'b', 'c'])
d1
# %%
d2 = d1.copy()
d2.loc[(1, 22, 8), :] = 4
d2 = d2.drop((1, 22, 9)).sort_index()
d2
# %% [markdown]
# ## Quickly show distinct items
#
# Removes all rows which are found in both dataframes, set equivalent of: (a | b) - (a & b)
# %%
# drop_duplicates() does not take index into account!!!
pd.concat([d1, d2]).drop_duplicates(keep=False)
# %%
pd.concat([d1.reset_index(), d2.reset_index()]).drop_duplicates(keep=False)
# %% [markdown]
# ## Set difference on index
# %%
pd.DataFrame(
index=d1.index.difference(d2.index)
)
# %%
pd.DataFrame(
index=d2.index.difference(d1.index)
)
# %% [markdown]
# ## Test for equality
# %%
try:
| tu.assert_frame_equal(d1, d2) | pandas.util.testing.assert_frame_equal |
import pandas as pd
import matplotlib.pyplot as plt
from utils.constants import Teams
# Pandas options for better printing
pd.set_option('display.max_columns', 500)
| pd.set_option('display.max_rows', 1000) | pandas.set_option |
#!/usr/bin/env python
# coding: utf-8
# # <font color='yellow'>How can we predict not just the hourly PM2.5 concentration at the site of one EPA sensor, but predict the hourly PM2.5 concentration anywhere?</font>
#
# Here, you build a new model for any given hour on any given day. This will leverage readings across all ~120 EPA sensors, as well as weather data, traffic data, purpleair data, and maybe beacon data to create a model that predicts the PM2.5 value at that location.
# In[1]:
import json
import csv
import pandas as pd
from pandas.io.json import json_normalize
import numpy as np
import geopandas as gpd
import shapely
from shapely.geometry import Point, MultiPoint, Polygon, MultiPolygon
from shapely.affinity import scale
import matplotlib.pyplot as plt
import glob
import os
import datetime
from datetime import timezone
import zipfile
import pickle
pd.set_option('display.max_columns', 500)
# ## <font color='yellow'>Loading data</font>
#
# We'll load EPA data, weather data, truck traffic data, Beacon data, and purpleair data
# In[2]:
df_epa = pd.read_csv("EPA_Data_MultiPointModel.csv")
df_epa.head(1)
# In[3]:
df_beac = pd.read_csv("Beacon_Data_MultiPointModel.csv")
df_beac.head(1)
# In[5]:
df_noaa = pd.read_csv("NOAA_Data_MultiPointModel.csv")
df_noaa.head(10)
# In[7]:
df_truck = pd.read_csv("Truck_Data_MultiPointModel.csv")
df_truck.drop(columns=['Unnamed: 0','Unnamed: 0.1'], inplace=True)
df_truck = df_truck.rename(columns={'0':'latitude','1':'longitude'})
df_truck['datetime'] = 'none'
df_truck['name'] = 'none'
cols = ['datetime', 'latitude', 'longitude', 'name', '100mAADT12', '100mFAF12', '100mNONFAF12',
'100mYKTON12', '100mKTONMILE12', '1000mAADT12', '1000mFAF12',
'1000mNONFAF12', '1000mYKTON12', '1000mKTONMILE12', '5000mAADT12',
'5000mFAF12', '5000mNONFAF12', '5000mYKTON12', '5000mKTONMILE12']
df_truck = df_truck[cols]
df_truck.head(1)
# In[]
df_pa = pd.read_csv("pa_full.csv")
df_pa = df_pa.rename(columns={"lat":"latitude","lon":"longitude"})
df_pa['name'] = 'none'
cols = ['datetime', 'latitude', 'longitude','name','PM1.0 (ATM)','PM2.5 (ATM)','PM10.0 (ATM)','PM2.5 (CF=1)','id']
df_pa = df_pa[cols]
df_pa.head(10)
#In[]
#Find most common hours throughout all data sets
#EPA
group_epa = df_epa.groupby("datetime")
epa_counts = group_epa["datetime"].value_counts()
EPA = epa_counts.loc[epa_counts==max(epa_counts)]
print(EPA)
# In[]
#NOAA
group_noaa = df_noaa.groupby("datetime")
noaa_counts = group_noaa["datetime"].value_counts()
NOAA = noaa_counts.loc[noaa_counts==max(noaa_counts)]
print(NOAA)
# In[]
#BEACON
group_beac = df_beac.groupby("datetime")
group_beac.head()
beac_counts = group_beac["datetime"].value_counts()
beac_counts.head(5)
BEAC = beac_counts.loc[beac_counts==max(beac_counts)]
print(BEAC)
# In[]
#PA
group_pa = df_pa.groupby("datetime")
pa_counts = group_pa["datetime"].value_counts()
PA = pa_counts.loc[pa_counts==max(pa_counts)]
print(PA)
# ## <font color='yellow'>Selecting a date and time</font>
#
# This function subsets a dataframe to only contain data from a certain date and hour.
# In[8]:
def select_datetime(df, month, day, hour, year=2018):
"""
Inputs: dataframe with a column called 'datetime' containing *UTC* strings formatted as datetime objects;
month, day, and hour desired (year is assumed 2018)
Outputs: rows of the original dataframe that match that date/hour
"""
if | pd.to_datetime(df['datetime'][0]) | pandas.to_datetime |
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex,
InvalidIndexError, NumericIndex)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.series import Series
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
assert_copy)
from pandas import compat
from pandas.compat import long
import pandas.util.testing as tm
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.lib import Timestamp
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
def verify_pickle(self,index):
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
self.assertRaises(TypeError, self._holder)
def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : idx * 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : 1 * idx)
div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : idx / 1)
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : 1 / idx)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : idx // 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : 1 // idx)
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_ndarray_compat_properties(self):
idx = self.create_index()
self.assertTrue(idx.T.equals(idx))
self.assertTrue(idx.transpose().equals(idx))
values = idx.values
for prop in self._compat_props:
self.assertEqual(getattr(idx, prop), getattr(values, prop))
# test for validity
idx.nbytes
idx.values.nbytes
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(
unicodeIndex = tm.makeUnicodeIndex(100),
strIndex = tm.makeStringIndex(100),
dateIndex = tm.makeDateIndex(100),
intIndex = tm.makeIntIndex(100),
floatIndex = tm.makeFloatIndex(100),
boolIndex = Index([True,False]),
empty = Index([]),
tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
[1, 2, 3]))
)
for name, ind in self.indices.items():
setattr(self, name, ind)
def create_index(self):
return Index(list('abcde'))
def test_wrong_number_names(self):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
for ind in self.indices.values():
assertRaisesRegexp(ValueError, "^Length", testit, ind)
def test_set_name_methods(self):
new_name = "This is the new name for this index"
indices = (self.dateIndex, self.intIndex, self.unicodeIndex,
self.empty)
for ind in indices:
original_name = ind.name
new_ind = ind.set_names([new_name])
self.assertEqual(new_ind.name, new_name)
self.assertEqual(ind.name, original_name)
res = ind.rename(new_name, inplace=True)
# should return None
self.assertIsNone(res)
self.assertEqual(ind.name, new_name)
self.assertEqual(ind.names, [new_name])
#with assertRaisesRegexp(TypeError, "list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with assertRaisesRegexp(ValueError, "Level must be None"):
ind.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ('A', 'B')
ind = self.intIndex
ind.rename(name, inplace=True)
self.assertEqual(ind.name, name)
self.assertEqual(ind.names, [name])
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.strIndex).__name__):
hash(self.strIndex)
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
tm.assert_isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
from copy import copy, deepcopy
for func in (copy, deepcopy):
idx_copy = func(self.strIndex)
self.assertIsNot(idx_copy, self.strIndex)
self.assertTrue(idx_copy.equals(self.strIndex))
new_copy = self.strIndex.copy(deep=True, name="banana")
self.assertEqual(new_copy.name, "banana")
new_copy2 = self.intIndex.copy(dtype=int)
self.assertEqual(new_copy2.dtype.kind, 'i')
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertFalse(idx.is_unique)
def test_sort(self):
self.assertRaises(TypeError, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(TypeError, self.strIndex.__setitem__, 0, 'foo')
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
self.assert_numpy_array_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assert_isinstance(index, Index)
self.assertEqual(index.name, 'name')
assert_array_equal(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
s = Series([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
result = Index(s)
self.assertTrue(result.equals(expected))
result = DatetimeIndex(s)
self.assertTrue(result.equals(expected))
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'],freq='MS')
self.assertTrue(result.equals(expected))
df = pd.DataFrame(np.random.rand(5,3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
self.assertEqual(result,'MS')
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5),
np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
self.assertTrue(result.equals(expected))
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
assert_array_equal(rs, xp)
tm.assert_isinstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
self.assertTrue(result.equals(idx))
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
self.assertTrue(result.equals(idx))
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
self.assertTrue(result.equals(idx))
def test_copy(self):
i = Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_legacy_pickle_identity(self):
# GH 8431
pth = tm.get_data_path()
s1 = pd.read_pickle(os.path.join(pth,'s1-0.12.0.pickle'))
s2 = pd.read_pickle(os.path.join(pth,'s2-0.12.0.pickle'))
self.assertFalse(s1.index.identical(s2.index))
self.assertFalse(s1.index.equals(s2.index))
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
self.assertEqual(casted.name, 'foobar')
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
#test 0th element
self.assertTrue(Index(['a', 'b', 'c', 'd']).equals(
result.insert(0, 'a')))
#test Nth element that follows Python list behavior
self.assertTrue(Index(['b', 'c', 'e', 'd']).equals(
result.insert(-1, 'e')))
#test loc +/- neq (0, -1)
self.assertTrue(result.insert(1, 'z').equals(
result.insert(-2, 'z')))
#test empty
null_index = Index([])
self.assertTrue(Index(['a']).equals(
null_index.insert(0, 'a')))
def test_delete(self):
idx = Index(['a', 'b', 'c', 'd'], name='idx')
expected = Index(['b', 'c', 'd'], name='idx')
result = idx.delete(0)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
expected = Index(['a', 'b', 'c'], name='idx')
result = idx.delete(-1)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
with tm.assertRaises((IndexError, ValueError)):
# either depeidnig on numpy version
result = idx.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
self.assertTrue(i1.identical(i2))
i1 = i1.rename('foo')
self.assertTrue(i1.equals(i2))
self.assertFalse(i1.identical(i2))
i2 = i2.rename('foo')
self.assertTrue(i1.identical(i2))
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
self.assertFalse(i3.identical(i4))
def test_is_(self):
ind = Index(range(10))
self.assertTrue(ind.is_(ind))
self.assertTrue(ind.is_(ind.view().view().view().view()))
self.assertFalse(ind.is_(Index(range(10))))
self.assertFalse(ind.is_(ind.copy()))
self.assertFalse(ind.is_(ind.copy(deep=False)))
self.assertFalse(ind.is_(ind[:]))
self.assertFalse(ind.is_(ind.view(np.ndarray).view(Index)))
self.assertFalse(ind.is_(np.array(range(10))))
# quasi-implementation dependent
self.assertTrue(ind.is_(ind.view()))
ind2 = ind.view()
ind2.name = 'bob'
self.assertTrue(ind.is_(ind2))
self.assertTrue(ind2.is_(ind))
# doesn't matter if Indices are *actually* views of underlying data,
self.assertFalse(ind.is_(Index(ind.values)))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
self.assertFalse(ind1.is_(ind2))
def test_asof(self):
d = self.dateIndex[0]
self.assertIs(self.dateIndex.asof(d), d)
self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1))))
d = self.dateIndex[-1]
self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)
d = self.dateIndex[0].to_datetime()
tm.assert_isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-01-31')
result = idx.asof('2010-02')
self.assertEqual(result, expected)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
#self.assertEqual(first_value, x['2013-01-01 00:00:00.000000050+0000'])
self.assertEqual(first_value, x[Timestamp(np.datetime64('2013-01-01 00:00:00.000000050+0000', 'ns'))])
def test_argsort(self):
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
self.assert_numpy_array_equal(result, expected)
def test_comparators(self):
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assertIsInstance(index_result, np.ndarray)
self.assert_numpy_array_equal(arr_result, index_result)
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
subIndex = self.strIndex[list(boolIdx)]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
self.assertEqual(i, sl[sl.get_loc(i)])
def test_empty_fancy(self):
empty_farr = np.array([], dtype=np.float_)
empty_iarr = np.array([], dtype=np.int_)
empty_barr = np.array([], dtype=np.bool_)
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
values = idx.values
self.assertTrue(idx[[]].identical(empty_idx))
self.assertTrue(idx[empty_iarr].identical(empty_idx))
self.assertTrue(idx[empty_barr].identical(empty_idx))
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
self.assertRaises(IndexError, idx.__getitem__, empty_farr)
def test_getitem(self):
arr = np.array(self.dateIndex)
exp = self.dateIndex[5]
exp = _to_m8(exp)
self.assertEqual(exp, arr[5])
def test_shift(self):
shifted = self.dateIndex.shift(0, timedelta(1))
self.assertIs(shifted, self.dateIndex)
shifted = self.dateIndex.shift(5, timedelta(1))
self.assert_numpy_array_equal(shifted, self.dateIndex + timedelta(5))
shifted = self.dateIndex.shift(1, 'B')
self.assert_numpy_array_equal(shifted, self.dateIndex + offsets.BDay())
shifted.name = 'shifted'
self.assertEqual(shifted.name, shifted.shift(1, 'D').name)
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# Corner cases
inter = first.intersection(first)
self.assertIs(inter, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.intersection, 0.5)
idx1 = Index([1, 2, 3, 4, 5], name='idx')
# if target has the same name, it is preserved
idx2 = Index([3, 4, 5, 6, 7], name='idx')
expected2 = Index([3, 4, 5], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(result2.equals(expected2))
self.assertEqual(result2.name, expected2.name)
# if target name is different, it will be reset
idx3 = Index([3, 4, 5, 6, 7], name='other')
expected3 = Index([3, 4, 5], name=None)
result3 = idx1.intersection(idx3)
self.assertTrue(result3.equals(expected3))
self.assertEqual(result3.name, expected3.name)
# non monotonic
idx1 = Index([5, 3, 2, 4, 1], name='idx')
idx2 = Index([4, 7, 6, 5, 3], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(tm.equalContents(result2, expected2))
self.assertEqual(result2.name, expected2.name)
idx3 = Index([4, 7, 6, 5, 3], name='other')
result3 = idx1.intersection(idx3)
self.assertTrue(tm.equalContents(result3, expected3))
self.assertEqual(result3.name, expected3.name)
# non-monotonic non-unique
idx1 = Index(['A','B','A','C'])
idx2 = Index(['B','D'])
expected = Index(['B'], dtype='object')
result = idx1.intersection(idx2)
self.assertTrue(result.equals(expected))
def test_union(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# Corner cases
union = first.union(first)
self.assertIs(union, first)
union = first.union([])
self.assertIs(union, first)
union = Index([]).union(first)
self.assertIs(union, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.union, 0.5)
# preserve names
first.name = 'A'
second.name = 'A'
union = first.union(second)
self.assertEqual(union.name, 'A')
second.name = 'B'
union = first.union(second)
self.assertIsNone(union.name)
def test_add(self):
# - API change GH 8226
with tm.assert_produces_warning():
self.strIndex + self.strIndex
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
self.assertTrue(tm.equalContents(firstCat, appended))
self.assertTrue(tm.equalContents(secondCat, self.strIndex))
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
self.assertTrue(result.equals(index))
# empty
result = index.append([])
self.assertTrue(result.equals(index))
def test_append_empty_preserve_name(self):
left = Index([], name='foo')
right = Index([1, 2, 3], name='foo')
result = left.append(right)
self.assertEqual(result.name, 'foo')
left = Index([], name='foo')
right = Index([1, 2, 3], name='bar')
result = left.append(right)
self.assertIsNone(result.name)
def test_add_string(self):
# from bug report
index = Index(['a', 'b', 'c'])
index2 = index + 'foo'
self.assertNotIn('a', index2)
self.assertIn('afoo', index2)
def test_iadd_string(self):
index = pd.Index(['a', 'b', 'c'])
# doesn't fail test unless there is a check before `+=`
self.assertIn('a', index)
index += '_x'
self.assertIn('a_x', index)
def test_difference(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
# different names
result = first.difference(second)
self.assertTrue(tm.equalContents(result, answer))
self.assertEqual(result.name, None)
# same names
second.name = 'name'
result = first.difference(second)
self.assertEqual(result.name, 'name')
# with empty
result = first.difference([])
self.assertTrue(tm.equalContents(result, first))
self.assertEqual(result.name, first.name)
# with everythin
result = first.difference(first)
self.assertEqual(len(result), 0)
self.assertEqual(result.name, first.name)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.diff, 0.5)
def test_symmetric_diff(self):
# smoke
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = Index([2, 3, 4, 5])
result = idx1.sym_diff(idx2)
expected = Index([1, 5])
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# __xor__ syntax
expected = idx1 ^ idx2
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# multiIndex
idx1 = MultiIndex.from_tuples(self.tuples)
idx2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = idx1.sym_diff(idx2)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
self.assertTrue(tm.equalContents(result, expected))
# nans:
# GH #6444, sorting of nans. Make sure the number of nans is right
# and the correct non-nan values are there. punt on sorting.
idx1 = Index([1, 2, 3, np.nan])
idx2 = Index([0, 1, np.nan])
result = idx1.sym_diff(idx2)
# expected = Index([0.0, np.nan, 2.0, 3.0, np.nan])
nans = pd.isnull(result)
self.assertEqual(nans.sum(), 2)
self.assertEqual((~nans).sum(), 3)
[self.assertIn(x, result) for x in [0.0, 2.0, 3.0]]
# other not an Index:
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = idx1.sym_diff(idx2)
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'idx1')
result = idx1.sym_diff(idx2, result_name='new_name')
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'new_name')
# other isn't iterable
with tm.assertRaises(TypeError):
Index(idx1,dtype='object') - 1
def test_pickle(self):
self.verify_pickle(self.strIndex)
self.strIndex.name = 'foo'
self.verify_pickle(self.strIndex)
self.verify_pickle(self.dateIndex)
def test_is_numeric(self):
self.assertFalse(self.dateIndex.is_numeric())
self.assertFalse(self.strIndex.is_numeric())
self.assertTrue(self.intIndex.is_numeric())
self.assertTrue(self.floatIndex.is_numeric())
def test_is_object(self):
self.assertTrue(self.strIndex.is_object())
self.assertTrue(self.boolIndex.is_object())
self.assertFalse(self.intIndex.is_object())
self.assertFalse(self.dateIndex.is_object())
self.assertFalse(self.floatIndex.is_object())
def test_is_all_dates(self):
self.assertTrue(self.dateIndex.is_all_dates)
self.assertFalse(self.strIndex.is_all_dates)
self.assertFalse(self.intIndex.is_all_dates)
def test_summary(self):
self._check_method_works(Index.summary)
# GH3869
ind = Index(['{other}%s', "~:{range}:0"], name='A')
result = ind.summary()
# shouldn't be formatted accidentally.
self.assertIn('~:{range}:0', result)
self.assertIn('{other}%s', result)
def test_format(self):
self._check_method_works(Index.format)
index = Index([datetime.now()])
formatted = index.format()
expected = [str(index[0])]
self.assertEqual(formatted, expected)
# 2845
index = Index([1, 2.0+3.0j, np.nan])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
# is this really allowed?
index = Index([1, 2.0+3.0j, None])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
self.strIndex[:0].format()
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
inc = timedelta(hours=4)
dates = Index([dt + inc for dt in self.dateIndex], name='something')
formatted = dates.format(name=True)
self.assertEqual(formatted[0], 'something')
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00']
self.assertEqual(len(result), 2)
self.assertEqual(result, expected)
def test_format_none(self):
values = ['a', 'b', 'c', None]
idx = Index(values)
idx.format()
self.assertIsNone(idx[3])
def test_take(self):
indexer = [4, 3, 0, 2]
result = self.dateIndex.take(indexer)
expected = self.dateIndex[indexer]
self.assertTrue(result.equals(expected))
def _check_method_works(self, method):
method(self.empty)
method(self.dateIndex)
method(self.unicodeIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
def test_get_indexer(self):
idx1 = Index([1, 2, 3, 4, 5])
idx2 = Index([2, 4, 6])
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, [1, 3, -1])
r1 = idx2.get_indexer(idx1, method='pad')
assert_almost_equal(r1, [-1, 0, 0, 1, 1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
assert_almost_equal(r1, [0, 0, 1, 1, 2])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
def test_slice_locs(self):
for dtype in [int, float]:
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(idx)
self.assertEqual(idx.slice_locs(start=2), (2, n))
self.assertEqual(idx.slice_locs(start=3), (3, n))
self.assertEqual(idx.slice_locs(3, 8), (3, 6))
self.assertEqual(idx.slice_locs(5, 10), (3, n))
self.assertEqual(idx.slice_locs(5.0, 10.0), (3, n))
self.assertEqual(idx.slice_locs(4.5, 10.5), (3, 8))
self.assertEqual(idx.slice_locs(end=8), (0, 6))
self.assertEqual(idx.slice_locs(end=9), (0, 7))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(8, 2), (2, 6))
self.assertEqual(idx2.slice_locs(8.5, 1.5), (2, 6))
self.assertEqual(idx2.slice_locs(7, 3), (2, 5))
self.assertEqual(idx2.slice_locs(10.5, -1), (0, n))
def test_slice_locs_dup(self):
idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
self.assertEqual(idx.slice_locs('a', 'd'), (0, 6))
self.assertEqual(idx.slice_locs(end='d'), (0, 6))
self.assertEqual(idx.slice_locs('a', 'c'), (0, 4))
self.assertEqual(idx.slice_locs('b', 'd'), (2, 6))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs('d', 'a'), (0, 6))
self.assertEqual(idx2.slice_locs(end='a'), (0, 6))
self.assertEqual(idx2.slice_locs('d', 'b'), (0, 4))
self.assertEqual(idx2.slice_locs('c', 'a'), (2, 6))
for dtype in [int, float]:
idx = Index(np.array([10, 12, 12, 14], dtype=dtype))
self.assertEqual(idx.slice_locs(12, 12), (1, 3))
self.assertEqual(idx.slice_locs(11, 13), (1, 3))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(12, 12), (1, 3))
self.assertEqual(idx2.slice_locs(13, 11), (1, 3))
def test_slice_locs_na(self):
idx = Index([np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, start=1.5)
self.assertRaises(KeyError, idx.slice_locs, end=1.5)
self.assertEqual(idx.slice_locs(1), (1, 3))
self.assertEqual(idx.slice_locs(np.nan), (0, 3))
idx = Index([np.nan, np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_drop(self):
n = len(self.strIndex)
dropped = self.strIndex.drop(self.strIndex[lrange(5, 10)])
expected = self.strIndex[lrange(5) + lrange(10, n)]
self.assertTrue(dropped.equals(expected))
self.assertRaises(ValueError, self.strIndex.drop, ['foo', 'bar'])
dropped = self.strIndex.drop(self.strIndex[0])
expected = self.strIndex[1:]
self.assertTrue(dropped.equals(expected))
ser = Index([1, 2, 3])
dropped = ser.drop(1)
expected = Index([2, 3])
self.assertTrue(dropped.equals(expected))
def test_tuple_union_bug(self):
import pandas
import numpy as np
aidx1 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')])
aidx2 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B'), (1, 'C'), (2,
'C')], dtype=[('num', int), ('let', 'a1')])
idx1 = pandas.Index(aidx1)
idx2 = pandas.Index(aidx2)
# intersection broken?
int_idx = idx1.intersection(idx2)
# needs to be 1d like idx1 and idx2
expected = idx1[:4] # pandas.Index(sorted(set(idx1) & set(idx2)))
self.assertEqual(int_idx.ndim, 1)
self.assertTrue(int_idx.equals(expected))
# union broken
union_idx = idx1.union(idx2)
expected = idx2
self.assertEqual(union_idx.ndim, 1)
self.assertTrue(union_idx.equals(expected))
def test_is_monotonic_incomparable(self):
index = Index([5, datetime.now(), 7])
self.assertFalse(index.is_monotonic)
self.assertFalse(index.is_monotonic_decreasing)
def test_get_set_value(self):
values = np.random.randn(100)
date = self.dateIndex[67]
assert_almost_equal(self.dateIndex.get_value(values, date),
values[67])
self.dateIndex.set_value(values, date, 10)
self.assertEqual(values[67], 10)
def test_isin(self):
values = ['foo', 'bar', 'quux']
idx = Index(['qux', 'baz', 'foo', 'bar'])
result = idx.isin(values)
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = Index([])
result = idx.isin(values)
self.assertEqual(len(result), 0)
self.assertEqual(result.dtype, np.bool_)
def test_isin_nan(self):
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Index(['a', pd.NaT]).isin([pd.NaT]), [False, True])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([float('nan')]), [False, False])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([pd.NaT]), [False, False])
# Float64Index overrides isin, so must be checked separately
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([float('nan')]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([pd.NaT]), [False, True])
def test_isin_level_kwarg(self):
def check_idx(idx):
values = idx.tolist()[-2:] + ['nonexisting']
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(expected, idx.isin(values, level=0))
self.assert_numpy_array_equal(expected, idx.isin(values, level=-1))
self.assertRaises(IndexError, idx.isin, values, level=1)
self.assertRaises(IndexError, idx.isin, values, level=10)
self.assertRaises(IndexError, idx.isin, values, level=-2)
self.assertRaises(KeyError, idx.isin, values, level=1.0)
self.assertRaises(KeyError, idx.isin, values, level='foobar')
idx.name = 'foobar'
self.assert_numpy_array_equal(expected,
idx.isin(values, level='foobar'))
self.assertRaises(KeyError, idx.isin, values, level='xyzzy')
self.assertRaises(KeyError, idx.isin, values, level=np.nan)
check_idx(Index(['qux', 'baz', 'foo', 'bar']))
# Float64Index overrides isin, so must be checked separately
check_idx(Float64Index([1.0, 2.0, 3.0, 4.0]))
def test_boolean_cmp(self):
values = [1, 2, 3, 4]
idx = Index(values)
res = (idx == values)
self.assert_numpy_array_equal(res,np.array([True,True,True,True],dtype=bool))
def test_get_level_values(self):
result = self.strIndex.get_level_values(0)
self.assertTrue(result.equals(self.strIndex))
def test_slice_keep_name(self):
idx = Index(['a', 'b'], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
def test_join_self(self):
# instance attributes of the form self.<name>Index
indices = 'unicode', 'str', 'date', 'int', 'float'
kinds = 'outer', 'inner', 'left', 'right'
for index_kind in indices:
res = getattr(self, '{0}Index'.format(index_kind))
for kind in kinds:
joined = res.join(res, how=kind)
self.assertIs(res, joined)
def test_indexing_doesnt_change_class(self):
idx = Index([1, 2, 3, 'a', 'b', 'c'])
self.assertTrue(idx[1:3].identical(
pd.Index([2, 3], dtype=np.object_)))
self.assertTrue(idx[[0,1]].identical(
pd.Index([1, 2], dtype=np.object_)))
def test_outer_join_sort(self):
left_idx = Index(np.random.permutation(15))
right_idx = tm.makeDateIndex(10)
with tm.assert_produces_warning(RuntimeWarning):
joined = left_idx.join(right_idx, how='outer')
# right_idx in this case because DatetimeIndex has join precedence over
# Int64Index
expected = right_idx.astype(object).union(left_idx.astype(object))
tm.assert_index_equal(joined, expected)
def test_nan_first_take_datetime(self):
idx = Index([pd.NaT, Timestamp('20130101'), Timestamp('20130102')])
res = idx.take([-1, 0, 1])
exp = Index([idx[-1], idx[0], idx[1]])
tm.assert_index_equal(res, exp)
def test_reindex_preserves_name_if_target_is_list_or_ndarray(self):
# GH6552
idx = pd.Index([0, 1, 2])
dt_idx = pd.date_range('20130101', periods=3)
idx.name = None
self.assertEqual(idx.reindex([])[0].name, None)
self.assertEqual(idx.reindex(np.array([]))[0].name, None)
self.assertEqual(idx.reindex(idx.tolist())[0].name, None)
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, None)
self.assertEqual(idx.reindex(idx.values)[0].name, None)
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, None)
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, None)
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, None)
idx.name = 'foobar'
self.assertEqual(idx.reindex([])[0].name, 'foobar')
self.assertEqual(idx.reindex(np.array([]))[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist())[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, 'foobar')
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, 'foobar')
def test_reindex_preserves_type_if_target_is_empty_list_or_array(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type([]), np.object_)
self.assertEqual(get_reindex_type(np.array([])), np.object_)
self.assertEqual(get_reindex_type(np.array([], dtype=np.int64)),
np.object_)
def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type(pd.Int64Index([])), np.int64)
self.assertEqual(get_reindex_type(pd.Float64Index([])), np.float64)
self.assertEqual(get_reindex_type(pd.DatetimeIndex([])), np.datetime64)
reindexed = idx.reindex(pd.MultiIndex([pd.Int64Index([]),
pd.Float64Index([])],
[[], []]))[0]
self.assertEqual(reindexed.levels[0].dtype.type, np.int64)
self.assertEqual(reindexed.levels[1].dtype.type, np.float64)
class Numeric(Base):
def test_numeric_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
didx = self._holder(np.arange(5,dtype='int64')**2
)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx * idx
tm.assert_index_equal(result, didx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5,dtype='int64')
tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
result = idx * np.arange(5,dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='float64')+0.1)
tm.assert_index_equal(result,
Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
# invalid
self.assertRaises(TypeError, lambda : idx * date_range('20130101',periods=5))
self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
def test_explicit_conversions(self):
# GH 8608
# add/sub are overriden explicity for Float/Int Index
idx = self._holder(np.arange(5,dtype='int64'))
# float conversions
arr = np.arange(5,dtype='int64')*3.2
expected = Float64Index(arr)
fidx = idx * 3.2
tm.assert_index_equal(fidx,expected)
fidx = 3.2 * idx
tm.assert_index_equal(fidx,expected)
# interops with numpy arrays
expected = Float64Index(arr)
a = np.zeros(5,dtype='float64')
result = fidx - a
tm.assert_index_equal(result,expected)
expected = Float64Index(-arr)
a = np.zeros(5,dtype='float64')
result = a - fidx
tm.assert_index_equal(result,expected)
def test_ufunc_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
result = np.sin(idx)
expected = Float64Index(np.sin(np.arange(5,dtype='int64')))
tm.assert_index_equal(result, expected)
class TestFloat64Index(Numeric, tm.TestCase):
_holder = Float64Index
_multiprocess_can_split_ = True
def setUp(self):
self.mixed = Float64Index([1.5, 2, 3, 4, 5])
self.float = Float64Index(np.arange(5) * 2.5)
def create_index(self):
return Float64Index(np.arange(5,dtype='float64'))
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.float).__name__):
hash(self.float)
def test_repr_roundtrip(self):
for ind in (self.mixed, self.float):
tm.assert_index_equal(eval(repr(ind)), ind)
def check_is_index(self, i):
self.assertIsInstance(i, Index)
self.assertNotIsInstance(i, Float64Index)
def check_coerce(self, a, b, is_float_index=True):
self.assertTrue(a.equals(b))
if is_float_index:
self.assertIsInstance(b, Float64Index)
else:
self.check_is_index(b)
def test_constructor(self):
# explicit construction
index = Float64Index([1,2,3,4,5])
self.assertIsInstance(index, Float64Index)
self.assertTrue((index.values == np.array([1,2,3,4,5],dtype='float64')).all())
index = Float64Index(np.array([1,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
index = Float64Index([1.,2,3,4,5])
self.assertIsInstance(index, Float64Index)
index = Float64Index(np.array([1.,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, float)
index = Float64Index(np.array([1.,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
index = Float64Index(np.array([1,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
# nan handling
result = Float64Index([np.nan, np.nan])
self.assertTrue(pd.isnull(result.values).all())
result = Float64Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
result = Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
def test_constructor_invalid(self):
# invalid
self.assertRaises(TypeError, Float64Index, 0.)
self.assertRaises(TypeError, Float64Index, ['a','b',0.])
self.assertRaises(TypeError, Float64Index, [Timestamp('20130101')])
def test_constructor_coerce(self):
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5]))
self.check_coerce(self.float,Index(np.arange(5) * 2.5))
self.check_coerce(self.float,Index(np.array(np.arange(5) * 2.5, dtype=object)))
def test_constructor_explicit(self):
# these don't auto convert
self.check_coerce(self.float,Index((np.arange(5) * 2.5), dtype=object),
is_float_index=False)
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5],dtype=object),
is_float_index=False)
def test_astype(self):
result = self.float.astype(object)
self.assertTrue(result.equals(self.float))
self.assertTrue(self.float.equals(result))
self.check_is_index(result)
i = self.mixed.copy()
i.name = 'foo'
result = i.astype(object)
self.assertTrue(result.equals(i))
self.assertTrue(i.equals(result))
self.check_is_index(result)
def test_equals(self):
i = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i2))
i = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i2))
def test_get_loc_na(self):
idx = Float64Index([np.nan, 1, 2])
self.assertEqual(idx.get_loc(1), 1)
self.assertEqual(idx.get_loc(np.nan), 0)
idx = Float64Index([np.nan, 1, np.nan])
self.assertEqual(idx.get_loc(1), 1)
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_contains_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(np.nan in i)
def test_contains_not_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(1.0 in i)
def test_doesnt_contain_all_the_things(self):
i = Float64Index([np.nan])
self.assertFalse(i.isin([0]).item())
self.assertFalse(i.isin([1]).item())
self.assertTrue(i.isin([np.nan]).item())
def test_nan_multiple_containment(self):
i = Float64Index([1.0, np.nan])
np.testing.assert_array_equal(i.isin([1.0]), np.array([True, False]))
np.testing.assert_array_equal(i.isin([2.0, np.pi]),
np.array([False, False]))
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, True]))
np.testing.assert_array_equal(i.isin([1.0, np.nan]),
np.array([True, True]))
i = Float64Index([1.0, 2.0])
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, False]))
def test_astype_from_object(self):
index = Index([1.0, np.nan, 0.2], dtype='object')
result = index.astype(float)
expected = Float64Index([1.0, np.nan, 0.2])
tm.assert_equal(result.dtype, expected.dtype)
tm.assert_index_equal(result, expected)
class TestInt64Index(Numeric, tm.TestCase):
_holder = Int64Index
_multiprocess_can_split_ = True
def setUp(self):
self.index = Int64Index(np.arange(0, 20, 2))
def create_index(self):
return Int64Index(np.arange(5,dtype='int64'))
def test_too_many_names(self):
def testit():
self.index.names = ["roger", "harold"]
assertRaisesRegexp(ValueError, "^Length", testit)
def test_constructor(self):
# pass list, coerce fine
index = Int64Index([-5, 0, 1, 2])
expected = np.array([-5, 0, 1, 2], dtype=np.int64)
self.assert_numpy_array_equal(index, expected)
# from iterable
index = Int64Index(iter([-5, 0, 1, 2]))
self.assert_numpy_array_equal(index, expected)
# scalar raise Exception
self.assertRaises(TypeError, Int64Index, 5)
# copy
arr = self.index.values
new_index = Int64Index(arr, copy=True)
self.assert_numpy_array_equal(new_index, self.index)
val = arr[0] + 3000
# this should not change index
arr[0] = val
self.assertNotEqual(new_index[0], val)
def test_constructor_corner(self):
arr = np.array([1, 2, 3, 4], dtype=object)
index = Int64Index(arr)
self.assertEqual(index.values.dtype, np.int64)
self.assertTrue(index.equals(arr))
# preventing casting
arr = np.array([1, '2', 3, '4'], dtype=object)
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr)
arr_with_floats = [0, 2, 3, 4, 5, 1.25, 3, -1]
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr_with_floats)
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.index).__name__):
hash(self.index)
def test_copy(self):
i = Int64Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Int64Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_coerce_list(self):
# coerce things
arr = Index([1, 2, 3, 4])
tm.assert_isinstance(arr, Int64Index)
# but not if explicit dtype passed
arr = Index([1, 2, 3, 4], dtype=object)
tm.assert_isinstance(arr, Index)
def test_dtype(self):
self.assertEqual(self.index.dtype, np.int64)
def test_is_monotonic(self):
self.assertTrue(self.index.is_monotonic)
self.assertTrue(self.index.is_monotonic_increasing)
self.assertFalse(self.index.is_monotonic_decreasing)
index = Int64Index([4, 3, 2, 1])
self.assertFalse(index.is_monotonic)
self.assertTrue(index.is_monotonic_decreasing)
index = Int64Index([1])
self.assertTrue(index.is_monotonic)
self.assertTrue(index.is_monotonic_increasing)
self.assertTrue(index.is_monotonic_decreasing)
def test_is_monotonic_na(self):
examples = [Index([np.nan]),
Index([np.nan, 1]),
Index([1, 2, np.nan]),
Index(['a', 'b', np.nan]),
pd.to_datetime(['NaT']),
pd.to_datetime(['NaT', '2000-01-01']),
pd.to_datetime(['2000-01-01', 'NaT', '2000-01-02']),
pd.to_timedelta(['1 day', 'NaT']),
]
for index in examples:
self.assertFalse(index.is_monotonic_increasing)
self.assertFalse(index.is_monotonic_decreasing)
def test_equals(self):
same_values = Index(self.index, dtype=object)
self.assertTrue(self.index.equals(same_values))
self.assertTrue(same_values.equals(self.index))
def test_identical(self):
i = Index(self.index.copy())
self.assertTrue(i.identical(self.index))
same_values_different_type = Index(i, dtype=object)
self.assertFalse(i.identical(same_values_different_type))
i = self.index.copy(dtype=object)
i = i.rename('foo')
same_values = Index(i, dtype=object)
self.assertTrue(same_values.identical(self.index.copy(dtype=object)))
self.assertFalse(i.identical(self.index))
self.assertTrue(Index(same_values, name='foo', dtype=object
).identical(i))
self.assertFalse(
self.index.copy(dtype=object)
.identical(self.index.copy(dtype='int64')))
def test_get_indexer(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target)
expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1])
self.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_pad(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='pad')
expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
self.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_backfill(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='backfill')
expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5])
self.assert_numpy_array_equal(indexer, expected)
def test_join_outer(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
# guarantee of sortedness
res, lidx, ridx = self.index.join(other, how='outer',
return_indexers=True)
noidx_res = self.index.join(other, how='outer')
self.assertTrue(res.equals(noidx_res))
eres = Int64Index([0, 1, 2, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 25])
elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1],
dtype=np.int64)
eridx = np.array([-1, 3, 4, -1, 5, -1, 0, -1, -1, 1, -1, -1, -1, 2],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='outer',
return_indexers=True)
noidx_res = self.index.join(other_mono, how='outer')
self.assertTrue(res.equals(noidx_res))
eridx = np.array([-1, 0, 1, -1, 2, -1, 3, -1, -1, 4, -1, -1, -1, 5],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
def test_join_inner(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='inner',
return_indexers=True)
# no guarantee of sortedness, so sort for comparison purposes
ind = res.argsort()
res = res.take(ind)
lidx = lidx.take(ind)
ridx = ridx.take(ind)
eres = Int64Index([2, 12])
elidx = np.array([1, 6])
eridx = np.array([4, 1])
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='inner',
return_indexers=True)
res2 = self.index.intersection(other_mono)
self.assertTrue(res.equals(res2))
eridx = np.array([1, 4])
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
def test_join_left(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='left',
return_indexers=True)
eres = self.index
eridx = np.array([-1, 4, -1, -1, -1, -1, 1, -1, -1, -1],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assertIsNone(lidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='left',
return_indexers=True)
eridx = np.array([-1, 1, -1, -1, -1, -1, 4, -1, -1, -1],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assertIsNone(lidx)
self.assert_numpy_array_equal(ridx, eridx)
# non-unique
"""
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,7,9])
res, lidx, ridx = idx2.join(idx, how='left', return_indexers=True)
eres = idx2
eridx = np.array([0, 2, 3, -1, -1])
elidx = np.array([0, 1, 2, 3, 4])
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
"""
def test_join_right(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='right',
return_indexers=True)
eres = other
elidx = np.array([-1, 6, -1, -1, 1, -1],
dtype=np.int64)
tm.assert_isinstance(other, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assertIsNone(ridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='right',
return_indexers=True)
eres = other_mono
elidx = np.array([-1, 1, -1, -1, 6, -1],
dtype=np.int64)
tm.assert_isinstance(other, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assertIsNone(ridx)
# non-unique
"""
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,7,9])
res, lidx, ridx = idx.join(idx2, how='right', return_indexers=True)
eres = idx2
elidx = np.array([0, 2, 3, -1, -1])
eridx = np.array([0, 1, 2, 3, 4])
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,9,7])
res = idx.join(idx2, how='right', return_indexers=False)
eres = idx2
self.assert(res.equals(eres))
"""
def test_join_non_int_index(self):
other = Index([3, 6, 7, 8, 10], dtype=object)
outer = self.index.join(other, how='outer')
outer2 = other.join(self.index, how='outer')
expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14,
16, 18], dtype=object)
self.assertTrue(outer.equals(outer2))
self.assertTrue(outer.equals(expected))
inner = self.index.join(other, how='inner')
inner2 = other.join(self.index, how='inner')
expected = Index([6, 8, 10], dtype=object)
self.assertTrue(inner.equals(inner2))
self.assertTrue(inner.equals(expected))
left = self.index.join(other, how='left')
self.assertTrue(left.equals(self.index))
left2 = other.join(self.index, how='left')
self.assertTrue(left2.equals(other))
right = self.index.join(other, how='right')
self.assertTrue(right.equals(other))
right2 = other.join(self.index, how='right')
self.assertTrue(right2.equals(self.index))
def test_join_non_unique(self):
left = Index([4, 4, 3, 3])
joined, lidx, ridx = left.join(left, return_indexers=True)
exp_joined = Index([3, 3, 3, 3, 4, 4, 4, 4])
self.assertTrue(joined.equals(exp_joined))
exp_lidx = np.array([2, 2, 3, 3, 0, 0, 1, 1], dtype=np.int64)
self.assert_numpy_array_equal(lidx, exp_lidx)
exp_ridx = np.array([2, 3, 2, 3, 0, 1, 0, 1], dtype=np.int64)
self.assert_numpy_array_equal(ridx, exp_ridx)
def test_join_self(self):
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = self.index.join(self.index, how=kind)
self.assertIs(self.index, joined)
def test_intersection(self):
other = Index([1, 2, 3, 4, 5])
result = self.index.intersection(other)
expected = np.sort(np.intersect1d(self.index.values, other.values))
self.assert_numpy_array_equal(result, expected)
result = other.intersection(self.index)
expected = np.sort(np.asarray(np.intersect1d(self.index.values,
other.values)))
self.assert_numpy_array_equal(result, expected)
def test_intersect_str_dates(self):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
i1 = Index(dt_dates, dtype=object)
i2 = Index(['aa'], dtype=object)
res = i2.intersection(i1)
self.assertEqual(len(res), 0)
def test_union_noncomparable(self):
from datetime import datetime, timedelta
# corner case, non-Int64Index
now = datetime.now()
other = Index([now + timedelta(i) for i in range(4)], dtype=object)
result = self.index.union(other)
expected = np.concatenate((self.index, other))
self.assert_numpy_array_equal(result, expected)
result = other.union(self.index)
expected = np.concatenate((other, self.index))
self.assert_numpy_array_equal(result, expected)
def test_cant_or_shouldnt_cast(self):
# can't
data = ['foo', 'bar', 'baz']
self.assertRaises(TypeError, Int64Index, data)
# shouldn't
data = ['0', '1', '2']
self.assertRaises(TypeError, Int64Index, data)
def test_view_Index(self):
self.index.view(Index)
def test_prevent_casting(self):
result = self.index.astype('O')
self.assertEqual(result.dtype, np.object_)
def test_take_preserve_name(self):
index = Int64Index([1, 2, 3, 4], name='foo')
taken = index.take([3, 0, 1])
self.assertEqual(index.name, taken.name)
def test_int_name_format(self):
from pandas import Series, DataFrame
index = Index(['a', 'b', 'c'], name=0)
s = Series(lrange(3), index)
df = DataFrame(lrange(3), index=index)
repr(s)
repr(df)
def test_print_unicode_columns(self):
df = pd.DataFrame(
{u("\u05d0"): [1, 2, 3], "\u05d1": [4, 5, 6], "c": [7, 8, 9]})
repr(df.columns) # should not raise UnicodeDecodeError
def test_repr_summary(self):
with cf.option_context('display.max_seq_items', 10):
r = repr(pd.Index(np.arange(1000)))
self.assertTrue(len(r) < 100)
self.assertTrue("..." in r)
def test_repr_roundtrip(self):
tm.assert_index_equal(eval(repr(self.index)), self.index)
def test_unicode_string_with_unicode(self):
idx = Index(lrange(1000))
if compat.PY3:
str(idx)
else:
compat.text_type(idx)
def test_bytestring_with_unicode(self):
idx = Index(lrange(1000))
if compat.PY3:
bytes(idx)
else:
str(idx)
def test_slice_keep_name(self):
idx = Int64Index([1, 2], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
class TestDatetimeIndex(Base, tm.TestCase):
_holder = DatetimeIndex
_multiprocess_can_split_ = True
def create_index(self):
return date_range('20130101',periods=5)
def test_pickle_compat_construction(self):
pass
def test_numeric_compat(self):
super(TestDatetimeIndex, self).test_numeric_compat()
if not compat.PY3_2:
for f in [lambda : np.timedelta64(1, 'D').astype('m8[ns]') * pd.date_range('2000-01-01', periods=3),
lambda : pd.date_range('2000-01-01', periods=3) * np.timedelta64(1, 'D').astype('m8[ns]') ]:
self.assertRaises(TypeError, f)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index=date_range('20130101',periods=3,tz='US/Eastern',name='foo')
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):
# GH7774
index = date_range('20130101', periods=3, tz='US/Eastern')
self.assertEqual(str(index.reindex([])[0].tz), 'US/Eastern')
self.assertEqual(str(index.reindex(np.array([]))[0].tz), 'US/Eastern')
class TestPeriodIndex(Base, tm.TestCase):
_holder = PeriodIndex
_multiprocess_can_split_ = True
def create_index(self):
return period_range('20130101',periods=5,freq='D')
def test_pickle_compat_construction(self):
pass
class TestTimedeltaIndex(Base, tm.TestCase):
_holder = TimedeltaIndex
_multiprocess_can_split_ = True
def create_index(self):
return pd.to_timedelta(range(5),unit='d') + pd.offsets.Hour(1)
def test_numeric_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
didx = self._holder(np.arange(5,dtype='int64')**2
)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5,dtype='int64')
tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
result = idx * np.arange(5,dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='float64')+0.1)
tm.assert_index_equal(result,
Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
# invalid
self.assertRaises(TypeError, lambda : idx * idx)
self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
def test_pickle_compat_construction(self):
pass
class TestMultiIndex(Base, tm.TestCase):
_holder = MultiIndex
_multiprocess_can_split_ = True
_compat_props = ['shape', 'ndim', 'size', 'itemsize']
def setUp(self):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=self.index_names, verify_integrity=False)
def create_index(self):
return self.index
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
self.assertTrue(i.labels[0].dtype == 'int8')
self.assertTrue(i.labels[1].dtype == 'int8')
i = MultiIndex.from_product([['a'],range(40)])
self.assertTrue(i.labels[1].dtype == 'int8')
i = MultiIndex.from_product([['a'],range(400)])
self.assertTrue(i.labels[1].dtype == 'int16')
i = MultiIndex.from_product([['a'],range(40000)])
self.assertTrue(i.labels[1].dtype == 'int32')
i = pd.MultiIndex.from_product([['a'],range(1000)])
self.assertTrue((i.labels[0]>=0).all())
self.assertTrue((i.labels[1]>=0).all())
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.index).__name__):
hash(self.index)
def test_set_names_and_rename(self):
# so long as these are synonyms, we don't need to test set_names
self.assertEqual(self.index.rename, self.index.set_names)
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, new_names)
with assertRaisesRegexp(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, new_names2)
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, [new_names[0], self.index_names[1]])
res = ind.set_names(new_names2[0], level=0, inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, [new_names2[0], self.index_names[1]])
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, new_names)
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, new_names2)
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels, labels = self.index.levels, self.index.labels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
self.assertEqual(len(actual), len(expected))
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
assert_almost_equal(act, exp)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1], inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels, labels = self.index.levels, self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
self.assertEqual(len(actual), len(expected))
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
assert_almost_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1], inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assertRaisesRegexp(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assertRaisesRegexp(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assertRaisesRegexp(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assertRaisesRegexp(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with assertRaisesRegexp(TypeError, mutable_regex):
levels[0] = levels[0]
with assertRaisesRegexp(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with assertRaisesRegexp(TypeError, mutable_regex):
labels[0] = labels[0]
with assertRaisesRegexp(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with assertRaisesRegexp(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
self.assertIsNotNone(mi1._tuples)
# make sure level setting works
new_vals = mi1.set_levels(levels2).values
assert_almost_equal(vals2, new_vals)
# non-inplace doesn't kill _tuples [implementation detail]
assert_almost_equal(mi1._tuples, vals)
# and values is still same too
assert_almost_equal(mi1.values, vals)
# inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
assert_almost_equal(mi1.values, vals2)
# make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.array([(long(1), 'a')] * 6, dtype=object)
new_values = mi2.set_labels(labels2).values
# not inplace shouldn't change
assert_almost_equal(mi2._tuples, vals2)
# should have correct values
assert_almost_equal(exp_values, new_values)
# and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
self.assertEqual(mi.labels[0][0], val)
labels[0] = 15
self.assertEqual(mi.labels[0][0], val)
val = levels[0]
levels[0] = "PANDA"
self.assertEqual(mi.levels[0][0], val)
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays(
[lev1, lev2],
names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sortlevel()
self.assertIsNone(df.is_copy)
self.assertEqual(df.index.names, ('Name', 'Number'))
df = df.set_value(('grethe', '4'), 'one', 99.34)
self.assertIsNone(df.is_copy)
self.assertEqual(df.index.names, ('Name', 'Number'))
def test_names(self):
# names are assigned in __init__
names = self.index_names
level_names = [level.name for level in self.index.levels]
self.assertEqual(names, level_names)
# setting bad names on existing
index = self.index
assertRaisesRegexp(ValueError, "^Length of names", setattr, index,
"names", list(index.names) + ["third"])
assertRaisesRegexp(ValueError, "^Length of names", setattr, index,
"names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
assertRaisesRegexp(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
assertRaisesRegexp(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
self.assertEqual(ind_names, level_names)
def test_reference_duplicate_name(self):
idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')], names=['x', 'x'])
self.assertTrue(idx._reference_duplicate_name('x'))
idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')], names=['x', 'y'])
self.assertFalse(idx._reference_duplicate_name('x'))
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with assertRaisesRegexp(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
def test_constructor_single_level(self):
single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
tm.assert_isinstance(single_level, Index)
self.assertNotIsInstance(single_level, MultiIndex)
self.assertEqual(single_level.name, 'first')
single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]])
self.assertIsNone(single_level.name)
def test_constructor_no_levels(self):
assertRaisesRegexp(ValueError, "non-zero number of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assertRaisesRegexp(TypeError, both_re):
MultiIndex(levels=[])
with tm.assertRaisesRegexp(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
assertRaisesRegexp(ValueError, "Length of levels and labels must be"
" the same", MultiIndex, levels=levels,
labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assertRaisesRegexp(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assertRaisesRegexp(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assertRaisesRegexp(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assertRaisesRegexp(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
# deprecated properties
with warnings.catch_warnings():
warnings.simplefilter('ignore')
with tm.assertRaisesRegexp(ValueError, length_error):
self.index.copy().levels = [['a'], ['b']]
with tm.assertRaisesRegexp(ValueError, label_error):
self.index.copy().labels = [[0, 0, 0, 0], [0, 0]]
def assert_multiindex_copied(self, copy, original):
# levels shoudl be (at least, shallow copied)
assert_copy(copy.levels, original.levels)
assert_almost_equal(copy.labels, original.labels)
# labels doesn't matter which way copied
assert_almost_equal(copy.labels, original.labels)
self.assertIsNot(copy.labels, original.labels)
# names doesn't matter which way copied
self.assertEqual(copy.names, original.names)
self.assertIsNot(copy.names, original.names)
# sort order should be copied
self.assertEqual(copy.sortorder, original.sortorder)
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
self.assertEqual([level.name for level in index.levels], list(names))
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_duplicate_names(self):
self.index.names = ['foo', 'foo']
assertRaisesRegexp(KeyError, 'Level foo not found',
self.index._get_level_number, 'foo')
def test_get_level_number_integer(self):
self.index.names = [1, 0]
self.assertEqual(self.index._get_level_number(1), 0)
self.assertEqual(self.index._get_level_number(0), 1)
self.assertRaises(IndexError, self.index._get_level_number, 2)
assertRaisesRegexp(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
result = MultiIndex.from_arrays(arrays)
self.assertEqual(list(result), list(self.index))
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')], ['a', 'b']])
self.assertTrue(result.levels[0].equals(Index([Timestamp('20130101')])))
self.assertTrue(result.levels[1].equals(Index(['a','b'])))
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'),
('bar', 'a'), ('bar', 'b'), ('bar', 'c'),
('buz', 'a'), ('buz', 'b'), ('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
assert_array_equal(result, expected)
self.assertEqual(result.names, names)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = pd.lib.list_to_object_array([(1, pd.Timestamp('2000-01-01')),
(1, pd.Timestamp('2000-01-02')),
(2, pd.Timestamp('2000-01-01')),
(2, pd.Timestamp('2000-01-02'))])
assert_array_equal(mi.values, etalon)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')),
(2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
mi = pd.MultiIndex.from_tuples(tuples)
assert_array_equal(mi.values, pd.lib.list_to_object_array(tuples))
# Check that code branches for boxed values produce identical results
assert_array_equal(mi.values[:4], mi[:4].values)
def test_append(self):
result = self.index[:3].append(self.index[3:])
self.assertTrue(result.equals(self.index))
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
self.assertTrue(result.equals(self.index))
# empty
result = self.index.append([])
self.assertTrue(result.equals(self.index))
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = ['foo', 'foo', 'bar', 'baz', 'qux', 'qux']
self.assert_numpy_array_equal(result, expected)
self.assertEqual(result.name, 'first')
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
self.assert_numpy_array_equal(result, expected)
def test_get_level_values_na(self):
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(1)
expected = [1, np.nan, 2]
assert_array_equal(values.values.astype(float), expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(1)
expected = [np.nan, np.nan, 2]
assert_array_equal(values.values.astype(float), expected)
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(0)
expected = [np.nan, np.nan, np.nan]
assert_array_equal(values.values.astype(float), expected)
values = index.get_level_values(1)
expected = np.array(['a', np.nan, 1],dtype=object)
assert_array_equal(values.values, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
assert_array_equal(values.values, expected.values)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(0)
self.assertEqual(values.shape, (0,))
def test_reorder_levels(self):
# this blows up
assertRaisesRegexp(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
self.assertEqual(self.index.nlevels, 2)
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
self.assertEqual(result, expected)
def test_legacy_pickle(self):
if compat.PY3:
raise nose.SkipTest("testing for legacy pickles not support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
self.assertTrue(obj.equals(obj2))
res = obj.get_indexer(obj)
exp = np.arange(len(obj))
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
# 0.7.3 -> 0.8.0 format manage
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
self.assertTrue(obj.equals(obj2))
res = obj.get_indexer(obj)
exp = np.arange(len(obj))
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index=MultiIndex.from_product([[1,2],['a','b'],date_range('20130101',periods=3,tz='US/Eastern')],names=['one','two','three'])
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equal_levels(unpickled))
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
self.assertTrue((result.values == self.index.values).all())
def test_contains(self):
self.assertIn(('foo', 'two'), self.index)
self.assertNotIn(('bar', 'two'), self.index)
self.assertNotIn(None, self.index)
def test_is_all_dates(self):
self.assertFalse(self.index.is_all_dates)
def test_is_numeric(self):
# MultiIndex is never numeric
self.assertFalse(self.index.is_numeric())
def test_getitem(self):
# scalar
self.assertEqual(self.index[2], ('bar', 'one'))
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
self.assertTrue(result.equals(expected))
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
self.assertTrue(result.equals(expected))
self.assertTrue(result2.equals(expected))
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
self.assertEqual(sorted_idx.get_loc('baz'), slice(3, 4))
self.assertEqual(sorted_idx.get_loc('foo'), slice(0, 2))
def test_get_loc(self):
self.assertEqual(self.index.get_loc(('foo', 'two')), 1)
self.assertEqual(self.index.get_loc(('baz', 'two')), 3)
self.assertRaises(KeyError, self.index.get_loc, ('bar', 'two'))
self.assertRaises(KeyError, self.index.get_loc, 'quux')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)),
Index(lrange(4)),
Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0])])
self.assertRaises(KeyError, index.get_loc, (1, 1))
self.assertEqual(index.get_loc((2, 0)), slice(3, 5))
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
self.assertEqual(result, expected)
# self.assertRaises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert(rs == xp)
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)),
Index(lrange(4)),
Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
self.assertEqual(loc, expected)
self.assertTrue(new_index.equals(exp_index))
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
self.assertEqual(loc, expected)
self.assertIsNone(new_index)
self.assertRaises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)],
labels=[np.array([0, 0, 0, 0]),
np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
self.assertEqual(result, expected)
self.assertTrue(new_index.equals(index.droplevel(0)))
def test_slice_locs(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
assertRaisesRegexp(TypeError, '^Level type mismatch', idx.slice_locs,
(1, 3))
assertRaisesRegexp(TypeError, '^Level type mismatch', idx.slice_locs,
df.index[5] + timedelta(seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with assertRaisesRegexp(TypeError, '^Level type mismatch'):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with assertRaisesRegexp(TypeError, '^Level type mismatch'):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
index = MultiIndex(levels=[Index(lrange(4)),
Index(lrange(4)),
Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0])])
assertRaisesRegexp(KeyError, "[Kk]ey length.*greater than MultiIndex"
" lexsort depth", index.slice_locs, (1, 0, 1),
(2, 1, 0))
# works
sorted_index, _ = index.sortlevel(0)
# should there be a test case here???
sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
def test_slice_locs_partial(self):
sorted_idx, _ = self.index.sortlevel(0)
result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))
self.assertEqual(result, (1, 5))
result = sorted_idx.slice_locs(None, ('qux', 'one'))
self.assertEqual(result, (0, 5))
result = sorted_idx.slice_locs(('foo', 'two'), None)
self.assertEqual(result, (1, len(sorted_idx)))
result = sorted_idx.slice_locs('bar', 'baz')
self.assertEqual(result, (2, 4))
def test_slice_locs_not_contained(self):
# some searchsorted action
index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],
labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],
[0, 1, 2, 1, 2, 2, 0, 1, 2]],
sortorder=0)
result = index.slice_locs((1, 0), (5, 2))
self.assertEqual(result, (3, 6))
result = index.slice_locs(1, 5)
self.assertEqual(result, (3, 6))
result = index.slice_locs((2, 2), (5, 2))
self.assertEqual(result, (3, 6))
result = index.slice_locs(2, 5)
self.assertEqual(result, (3, 6))
result = index.slice_locs((1, 0), (6, 3))
self.assertEqual(result, (3, 8))
result = index.slice_locs(-1, 10)
self.assertEqual(result, (0, len(index)))
def test_consistency(self):
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
# the fact that is works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
self.assertFalse(index.is_unique)
def test_truncate(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
result = index.truncate(before=1)
self.assertNotIn('foo', result.levels[0])
self.assertIn(1, result.levels[0])
result = index.truncate(after=1)
self.assertNotIn(2, result.levels[0])
self.assertIn(1, result.levels[0])
result = index.truncate(before=1, after=2)
self.assertEqual(len(result.levels[0]), 2)
# after < before
self.assertRaises(ValueError, index.truncate, 3, 1)
def test_get_indexer(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, [1, 3, -1])
r1 = idx2.get_indexer(idx1, method='pad')
assert_almost_equal(r1, [-1, 0, 0, 1, 1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
assert_almost_equal(r1, [0, 0, 1, 1, 2])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
# pass non-MultiIndex
r1 = idx1.get_indexer(idx2._tuple_index)
rexp1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
self.assertTrue((r1 == [-1, -1, -1]).all())
# create index with duplicates
idx1 = Index(lrange(10) + lrange(10))
idx2 = Index(lrange(20))
assertRaisesRegexp(InvalidIndexError, "Reindexing only valid with"
" uniquely valued Index objects",
idx1.get_indexer, idx2)
def test_format(self):
self.index.format()
self.index[:0].format()
def test_format_integer_names(self):
index = MultiIndex(levels=[[0, 1], [0, 1]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[0, 1])
index.format(names=True)
def test_format_sparse_display(self):
index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]],
labels=[[0, 0, 0, 1, 1, 1],
[0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0]])
result = index.format()
self.assertEqual(result[3], '1 0 0 0')
def test_format_sparse_config(self):
warn_filters = warnings.filters
warnings.filterwarnings('ignore',
category=FutureWarning,
module=".*format")
# GH1538
pd.set_option('display.multi_sparse', False)
result = self.index.format()
self.assertEqual(result[1], 'foo two')
self.reset_display_options()
warnings.filters = warn_filters
def test_to_hierarchical(self):
index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'),
(2, 'one'), (2, 'two')])
result = index.to_hierarchical(3)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
tm.assert_index_equal(result, expected)
self.assertEqual(result.names, index.names)
# K > 1
result = index.to_hierarchical(3, 2)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
tm.assert_index_equal(result, expected)
self.assertEqual(result.names, index.names)
# non-sorted
index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'),
(2, 'a'), (2, 'b')],
names=['N1', 'N2'])
result = index.to_hierarchical(2)
expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'), (1, 'b'),
(2, 'a'), (2, 'a'), (2, 'b'), (2, 'b')],
names=['N1', 'N2'])
tm.assert_index_equal(result, expected)
self.assertEqual(result.names, index.names)
def test_bounds(self):
self.index._bounds
def test_equals(self):
self.assertTrue(self.index.equals(self.index))
self.assertTrue(self.index.equal_levels(self.index))
self.assertFalse(self.index.equals(self.index[:-1]))
self.assertTrue(self.index.equals(self.index._tuple_index))
# different number of levels
index = MultiIndex(levels=[Index(lrange(4)),
Index(lrange(4)),
Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0])])
index2 = MultiIndex(levels=index.levels[:-1],
labels=index.labels[:-1])
self.assertFalse(index.equals(index2))
self.assertFalse(index.equal_levels(index2))
# levels are different
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
self.assertFalse(self.index.equals(index))
self.assertFalse(self.index.equal_levels(index))
# some of the labels are different
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
self.assertFalse(self.index.equals(index))
def test_identical(self):
mi = self.index.copy()
mi2 = self.index.copy()
self.assertTrue(mi.identical(mi2))
mi = mi.set_names(['new1', 'new2'])
self.assertTrue(mi.equals(mi2))
self.assertFalse(mi.identical(mi2))
mi2 = mi2.set_names(['new1', 'new2'])
self.assertTrue(mi.identical(mi2))
mi3 = Index(mi.tolist(), names=mi.names)
mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)
self.assertTrue(mi.identical(mi3))
self.assertFalse(mi.identical(mi4))
self.assertTrue(mi.equals(mi4))
def test_is_(self):
mi = MultiIndex.from_tuples(lzip(range(10), range(10)))
self.assertTrue(mi.is_(mi))
self.assertTrue(mi.is_(mi.view()))
self.assertTrue(mi.is_(mi.view().view().view().view()))
mi2 = mi.view()
# names are metadata, they don't change id
mi2.names = ["A", "B"]
self.assertTrue(mi2.is_(mi))
self.assertTrue(mi.is_(mi2))
self.assertTrue(mi.is_(mi.set_names(["C", "D"])))
mi2 = mi.view()
mi2.set_names(["E", "F"], inplace=True)
self.assertTrue(mi.is_(mi2))
# levels are inherent properties, they change identity
mi3 = mi2.set_levels([lrange(10), lrange(10)])
self.assertFalse(mi3.is_(mi2))
# shouldn't change
self.assertTrue(mi2.is_(mi))
mi4 = mi3.view()
mi4.set_levels([[1 for _ in range(10)], lrange(10)], inplace=True)
self.assertFalse(mi4.is_(mi3))
mi5 = mi.view()
mi5.set_levels(mi5.levels, inplace=True)
self.assertFalse(mi5.is_(mi))
def test_union(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_union = piece1 | piece2
tups = sorted(self.index._tuple_index)
expected = MultiIndex.from_tuples(tups)
self.assertTrue(the_union.equals(expected))
# corner case, pass self or empty thing:
the_union = self.index.union(self.index)
self.assertIs(the_union, self.index)
the_union = self.index.union(self.index[:0])
self.assertIs(the_union, self.index)
# won't work in python 3
# tuples = self.index._tuple_index
# result = self.index[:4] | tuples[4:]
# self.assertTrue(result.equals(tuples))
# not valid for python 3
# def test_union_with_regular_index(self):
# other = Index(['A', 'B', 'C'])
# result = other.union(self.index)
# self.assertIn(('foo', 'one'), result)
# self.assertIn('B', result)
# result2 = self.index.union(other)
# self.assertTrue(result.equals(result2))
def test_intersection(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_int = piece1 & piece2
tups = sorted(self.index[3:5]._tuple_index)
expected = MultiIndex.from_tuples(tups)
self.assertTrue(the_int.equals(expected))
# corner case, pass self
the_int = self.index.intersection(self.index)
self.assertIs(the_int, self.index)
# empty intersection: disjoint
empty = self.index[:2] & self.index[2:]
expected = self.index[:0]
self.assertTrue(empty.equals(expected))
# can't do in python 3
# tuples = self.index._tuple_index
# result = self.index & tuples
# self.assertTrue(result.equals(tuples))
def test_difference(self):
first = self.index
result = first.difference(self.index[-3:])
# - API change GH 8226
with tm.assert_produces_warning():
first - self.index[-3:]
expected = MultiIndex.from_tuples(sorted(self.index[:-3].values),
sortorder=0,
names=self.index.names)
tm.assert_isinstance(result, MultiIndex)
self.assertTrue(result.equals(expected))
self.assertEqual(result.names, self.index.names)
# empty difference: reflexive
result = self.index.difference(self.index)
expected = self.index[:0]
self.assertTrue(result.equals(expected))
self.assertEqual(result.names, self.index.names)
# empty difference: superset
result = self.index[-3:].difference(self.index)
expected = self.index[:0]
self.assertTrue(result.equals(expected))
self.assertEqual(result.names, self.index.names)
# empty difference: degenerate
result = self.index[:0].difference(self.index)
expected = self.index[:0]
self.assertTrue(result.equals(expected))
self.assertEqual(result.names, self.index.names)
# names not the same
chunklet = self.index[-3:]
chunklet.names = ['foo', 'baz']
result = first.difference(chunklet)
self.assertEqual(result.names, (None, None))
# empty, but non-equal
result = self.index.difference(self.index.sortlevel(1)[0])
self.assertEqual(len(result), 0)
# raise Exception called with non-MultiIndex
result = first.difference(first._tuple_index)
self.assertTrue(result.equals(first[:0]))
# name from empty array
result = first.difference([])
self.assertTrue(first.equals(result))
self.assertEqual(first.names, result.names)
# name from non-empty array
result = first.difference([('foo', 'one')])
expected = pd.MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'),
('foo', 'two'), ('qux', 'one'),
('qux', 'two')])
expected.names = first.names
self.assertEqual(first.names, result.names)
assertRaisesRegexp(TypeError, "other must be a MultiIndex or a list"
" of tuples", first.difference, [1, 2, 3, 4, 5])
def test_from_tuples(self):
assertRaisesRegexp(TypeError, 'Cannot infer number of levels from'
' empty list', MultiIndex.from_tuples, [])
idx = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])
self.assertEqual(len(idx), 2)
def test_argsort(self):
result = self.index.argsort()
expected = self.index._tuple_index.argsort()
self.assert_numpy_array_equal(result, expected)
def test_sortlevel(self):
import random
tuples = list(self.index)
random.shuffle(tuples)
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
self.assertTrue(sorted_idx.equals(expected))
sorted_idx, _ = index.sortlevel(0, ascending=False)
self.assertTrue(sorted_idx.equals(expected[::-1]))
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
self.assertTrue(sorted_idx.equals(expected))
sorted_idx, _ = index.sortlevel(1, ascending=False)
self.assertTrue(sorted_idx.equals(expected[::-1]))
def test_sortlevel_not_sort_remaining(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
sorted_idx, _ = mi.sortlevel('A', sort_remaining=False)
self.assertTrue(sorted_idx.equals(mi))
def test_sortlevel_deterministic(self):
tuples = [('bar', 'one'), ('foo', 'two'), ('qux', 'two'),
('foo', 'one'), ('baz', 'two'), ('qux', 'one')]
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
self.assertTrue(sorted_idx.equals(expected))
sorted_idx, _ = index.sortlevel(0, ascending=False)
self.assertTrue(sorted_idx.equals(expected[::-1]))
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
self.assertTrue(sorted_idx.equals(expected))
sorted_idx, _ = index.sortlevel(1, ascending=False)
self.assertTrue(sorted_idx.equals(expected[::-1]))
def test_dims(self):
pass
def test_drop(self):
dropped = self.index.drop([('foo', 'two'), ('qux', 'one')])
index = MultiIndex.from_tuples([('foo', 'two'), ('qux', 'one')])
dropped2 = self.index.drop(index)
expected = self.index[[0, 2, 3, 5]]
self.assertTrue(dropped.equals(expected))
self.assertTrue(dropped2.equals(expected))
dropped = self.index.drop(['bar'])
expected = self.index[[0, 1, 3, 4, 5]]
self.assertTrue(dropped.equals(expected))
index = | MultiIndex.from_tuples([('bar', 'two')]) | pandas.core.index.MultiIndex.from_tuples |
from albumentations import augmentations
from data.transforms import train_image_augmentation, valid_image_augmentation
from params import SPLIT_SIZE, BATCH_SIZE, NUM_WORKERS, DATA_PATH, SEED
from pytorch_lightning import LightningDataModule
import pandas as pd
import os
from sklearn import model_selection
from torchvision import transforms
from torch.utils.data import DataLoader
from data.dataset import CassavaDataset
# TODO : to complete
class LitDataClass(LightningDataModule):
def __init__(self,
fold: int = 0,
subset: float = 1.0,
batch_size: int = BATCH_SIZE,
config: dict = {'img_size': 128},
train_val_split: float = SPLIT_SIZE,
use_extra_data: bool = False):
super().__init__(LitDataClass, self)
self.fold = fold
self.subset = subset
self.config = config
self.val_data = None
self.test_data = None
self.train_data = None
self.batch_size = batch_size
self.use_extra_data = use_extra_data
self.train_val_split = train_val_split
self.transform = transforms.Compose([transforms.ToTensor()])
def prepare_data(self):
# just download the data
# since the data is locally
# we ignore this step
pass
def setup(self, na=1):
# defining folders
folder_current = 'cassava-leaf-disease-classification'
folder_old = 'cassava-disease'
# reading the data into a csv
train_csv = pd.read_csv(
f"{DATA_PATH}/{folder_current}/train.csv")
# adding a column for image location
train_csv['path'] = train_csv['image_id'].map(
lambda x: f"{DATA_PATH}/{folder_current}/train_images/{x}")
# shuffling and reset index
train_csv.drop('image_id', axis=1, inplace=True)
# add extra data if use_extra_data = True
if self.use_extra_data:
olddir = f'{DATA_PATH}/{folder_old}/train'
folder_to_label_mapper = {
"cbb": 0,
'cbsd': 1,
'cgm': 2,
'cmd': 3,
'healthy': 4
}
paths = []
labels = []
for label in os.listdir(f'{olddir}'):
pths = [
f'{olddir}/{label}/{x}'
for x in os.listdir(f'{olddir}/{label}')]
labels += [folder_to_label_mapper[label]]*len(pths)
paths += pths
dico = {'label': labels, 'path': paths}
train_extra_data = | pd.DataFrame(data=dico) | pandas.DataFrame |
# packages to store and manipulate data
#utility
import os
# package to clean text
import re
# plotting packages
import matplotlib.pyplot as plt
#nltk for data cleaning
import nltk
import numpy as np
import pandas as pd
import seaborn as sns
# model building package
import sklearn
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
files = os.listdir('data')
files.remove('.DS_Store')
for file in files:
print(file)
current_file = file.split('.')[0]
filename = 'data/' + current_file + '.csv'
df = pd.read_csv(filename)
def remove_links(text):
'''Takes a string and removes web links from it'''
text = re.sub(r'http\S+', '', text) # remove http links
text = re.sub(r'bit.ly/\S+', '', text) # remove bitly links
text = text.strip('[link]') # remove [links]
return text
def remove_users(text):
'''Takes a string and removes retweet and @user information'''
text = re.sub('(RT\s@[A-Za-z]+[A-Za-z0-9-_]+)', '', text) # remove retweet
text = re.sub('(@[A-Za-z]+[A-Za-z0-9-_]+)', '', text) # remove tweeted at
return text
def deEmojify(inputString):
return inputString.encode('ascii', 'ignore').decode('ascii')
my_stopwords = nltk.corpus.stopwords.words('english')
my_es_stopwords = nltk.corpus.stopwords.words('spanish')
my_custom_stopwords = ['et','w','b','pt','st','I','l']
word_rooter = nltk.stem.snowball.PorterStemmer(ignore_stopwords=False).stem
my_punctuation = '!"$%&\'()*+,-./:;<=>?[\\]^_`{|}~•@\’”–…'
# cleaning master function
def clean_text(text, bigrams=False):
text = remove_users(text)
text = remove_links(text)
text = deEmojify(text)
text = text.lower() # lower case
text = re.sub('&', '', text) #remove 'amp'
text = re.sub('['+my_punctuation + ']+', ' ', text) # strip punctuation
text = re.sub('\s+', ' ', text) #remove double spacing
text = re.sub('([0-9]+)', '', text) # remove numbers
text_token_list = [word for word in text.split(' ') if word not in my_custom_stopwords] # remove custom stopwords
text_token_list = [word for word in text_token_list if word not in my_stopwords] # remove stopwords
text_token_list = [word for word in text_token_list if word not in my_es_stopwords] # remove spanish stopwords
#text_token_list = [word_rooter(word) if '#' not in word else word for word in text_token_list] # apply word rooter
if bigrams:
text_token_list = text_token_list+[text_token_list[i]+'_'+text_token_list[i+1] for i in range(len(text_token_list)-1)]
text = ' '.join(text_token_list)
return text
df['clean_text'] = df.text.apply(clean_text)
from sklearn.feature_extraction.text import CountVectorizer
# the vectorizer object will be used to transform text to vector form
vectorizer = CountVectorizer(max_df=0.9, min_df=25, token_pattern='\w+|\$[\d\.]+|\S+')
# apply transformation
tf = vectorizer.fit_transform(df['clean_text']).toarray()
# tf_feature_names tells us what word each column in the matrix represents
tf_feature_names = vectorizer.get_feature_names()
from sklearn.decomposition import LatentDirichletAllocation
number_of_topics = 10
model = LatentDirichletAllocation(n_components=number_of_topics, random_state=0, max_iter=100)
model.fit(tf)
def display_topics(model, feature_names, no_top_words):
topic_dict = {}
for topic_idx, topic in enumerate(model.components_):
topic_dict["Topic %d words" % (topic_idx)]= ['{}'.format(feature_names[i]) for i in topic.argsort()[:-no_top_words - 1:-1]]
topic_dict["Topic %d weights" % (topic_idx)]= ['{:.1f}'.format(topic[i]) for i in topic.argsort()[:-no_top_words - 1:-1]]
#Store topics in file
with open('topics' + '/' + current_file, 'w') as storage:
i = 0
while i < number_of_topics:
temp_topic = 'topic' + str(i) + '|'
for item in topic_dict['Topic %d words' % (i)]:
temp_topic += (item + ',')
#Includes Kludge to make trailing comma go away and add new line
temp_topic = temp_topic[:-1]
temp_topic += '\n'
storage.write(temp_topic)
i+=1
storage.close()
#Store It
pd.DataFrame(topic_dict).to_csv('output' + '/' + 'topics' + '/' + current_file + '.csv', index = False)
return | pd.DataFrame(topic_dict) | pandas.DataFrame |
#Autre test pour le filtre des musées sur les villes, qui vérifie la correspondance de manière plus précise.
import sys
import os
from pathlib import Path
scriptpath = Path(os.path.dirname(os.path.abspath(__file__))).parent
sys.path.insert(0,str(scriptpath))
import pandas as pd
from data_extraction.filtre_base_de_donnees import filtre_par_villes
from pandas import isnull
def test_filtre_par_villes():
df = pd.read_excel(r"tests\tests.xlsx")
assert df[df.VILLE=="ALISE-SAINTE-REINE"].applymap(lambda x: {} if | isnull(x) | pandas.isnull |
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime, timedelta
from scipy.special import gamma,gammainc,gammaincc
from scipy.stats import norm
from scipy.optimize import minimize,root_scalar
import networkx as nx
from operator import itemgetter
ep = 1e-80 #For preventing overflow errors in norm.cdf
tref = pd.to_datetime('2020-01-01') #Reference time for converting dates to numbers
################# FORMATTING ########################
def format_JH(url,drop_list,columns):
data = pd.read_csv(url)
if len(columns) == 2:
data[columns[1]] = data[columns[1]].fillna(value='NaN')
data = data.T.drop(drop_list).T.set_index(columns).T
data.index = pd.to_datetime(data.index,format='%m/%d/%y')
return data
def format_kaggle(folder,metric):
data_full = pd.read_csv(folder+'train.csv')
data = data_full.pivot_table(index='Date',columns=['Country_Region','Province_State'],values=metric)
data.index = pd.to_datetime(data.index,format='%Y-%m-%d')
return data
def format_predictions(path):
pred = pd.read_csv(path).fillna(value='NaN').set_index(['Country/Region','Province/State'])
for item in ['Nmax','Nmax_low','Nmax_high','sigma','sigma_low','sigma_high']:
pred[item] = pd.to_numeric(pred[item])
for item in ['th','th_low','th_high']:
pred[item] = pd.to_datetime(pred[item],format='%Y-%m-%d')
return pred
def load_sim(path):
data = pd.read_csv(path,index_col=0,header=[0,1])
data.index = pd.to_datetime(data.index,format='%Y-%m-%d')
for item in data.keys():
data[item] = pd.to_numeric(data[item])
return data
################# ESTIMATING PARAMETER VALUES ###############
def cbarr(t):
return 1/(np.sqrt(2*np.pi)*(1-norm.cdf(t)+ep))
def tmean(tf,params):
th,sigma = params
tau = (tf-th)/sigma
return -sigma*cbarr(-tau)*np.exp(-tau**2/2)+th
def tvar(tf,params):
th,sigma = params
tau = (tf-th)/sigma
return sigma**2*cbarr(-tau)*(np.sqrt(np.pi/2)*(1+np.sign(tau)*gammaincc(3/2,tau**2/2))-cbarr(-tau)*np.exp(-tau**2/2))
def cost_init(params,data,tf):
th,sigma = params
tmean_sample = (data.index.values*data.values).sum()/data.values.sum()
tvar_sample = (((data.index.values-tmean_sample)**2)*data.values).sum()/data.values.sum()
return (tmean_sample-tmean(tf,params))**2 + (tvar_sample-tvar(tf,params))**2
################### COST FUNCTIONs #################
def cost_p(params,data,prior):
th,logK,sigma = params
t = data.index.values
tau = (t-th)/sigma
if prior is not None:
mean_sigma, var_sigma = prior
penalty = (sigma-mean_sigma)**2/(2*var_sigma)
else:
penalty = 0
prediction = logK+np.log((norm.cdf(tau)+ep))
return ((np.log(data.values)-prediction)**2).sum()/2 + penalty
def jac_p(params,data,prior):
th,logK,sigma = params
t = data.index.values
tau = (t-th)/sigma
if prior is not None:
mean_sigma, var_sigma = prior
dpenalty = (sigma-mean_sigma)/var_sigma
else:
dpenalty = 0
prediction = logK+np.log((norm.cdf(tau)+ep))
err = np.log(data.values)-prediction
dlogNdt = np.exp(-tau**2/2)/(np.sqrt(2*np.pi)*sigma*(norm.cdf(tau)+ep))
return np.asarray([(dlogNdt*err).sum(),-err.sum(),(tau*dlogNdt*err).sum()])+np.asarray([0,0,dpenalty])
def hess_p(params,data,prior):
th,logK,sigma = params
t = data.index.values
tau = (t-th)/sigma
if prior is not None:
mean_sigma, var_sigma = prior
d2penalty = 1/var_sigma
else:
d2penalty = 0
prediction = logK+np.log((norm.cdf(tau)+ep))
err = np.log(data.values)-prediction
dlogNdt_s = np.exp(-tau**2/2)/(np.sqrt(2*np.pi)*(norm.cdf(tau)+ep))
dlogNdth = -dlogNdt_s/sigma
dlogNdlogK = np.ones(len(t))
dlogNdsig = -tau*dlogNdt_s/sigma
d2Ndth2_N = -tau*dlogNdt_s/sigma**2
d2Ndsig2_N = 2*tau*(1-tau**2/2)*dlogNdt_s/(sigma**2)
d2Ndsigdth_N = (1-2*tau**2/2)*dlogNdt_s/sigma**2
term1 = np.asarray([[((-d2Ndth2_N+dlogNdth**2)*err).sum(), 0, ((-d2Ndsigdth_N+dlogNdth*dlogNdsig)*err).sum()],
[0, 0, 0],
[((-d2Ndsigdth_N+dlogNdth*dlogNdsig)*err).sum(), 0, ((-d2Ndsig2_N+dlogNdsig**2)*err).sum()]])
term2 = np.asarray([[(dlogNdth**2).sum(), (dlogNdth*dlogNdlogK).sum(), (dlogNdth*dlogNdsig).sum()],
[(dlogNdth*dlogNdlogK).sum(), (dlogNdlogK**2).sum(), (dlogNdsig*dlogNdlogK).sum()],
[(dlogNdth*dlogNdsig).sum(), (dlogNdsig*dlogNdlogK).sum(), (dlogNdsig**2).sum()]])
term3 = np.zeros((3,3))
term3[2,2] = d2penalty
return term1 + term2+ term3
def th_err(th,data,sigma,tf):
tmean_sample = (data.index.values*data.values).sum()/data.values.sum()
tau = (tf-th)/sigma
tmean = -sigma*cbarr(-tau)*np.exp(-tau**2/2)+th
return tmean_sample-tmean
def cost_p_sig(params,data,sigma):
th,logK = params
t = data.index.values
tau = (t-th)/sigma
prediction = logK+np.log((norm.cdf(tau)+ep))
return 0.5*((np.log(data.values)-prediction)**2).sum()
def jac_p_sig(params,data,sigma):
th,logK = params
t = data.index.values
tau = (t-th)/sigma
prediction = logK+np.log((norm.cdf(tau)+ep))
err = np.log(data.values)-prediction
dlogNdt = np.exp(-tau**2/2)/(np.sqrt(np.pi*2)*sigma*(norm.cdf(tau)+ep))
return np.asarray([(dlogNdt*err).sum(),
-err.sum()])
################## FITTING #####################
def fit_erf_sig(data,p0=5e2,sigma=7):
#Get initial conditions
train = data.loc[data>0].diff().iloc[1:]
t = (train.index-tref)/timedelta(days=1)
train.index = t
train = pd.to_numeric(train)
th0 = (t.values*train.values).sum()/train.values.sum()
out = root_scalar(th_err,args=(train,sigma,t[-1]),x0=th0,x1=th0+10)
th0 = out.root
tau0 = (t[-1]-th0)/sigma
logK0 = np.log(data.iloc[-1]/(norm.cdf(tau0)+ep))
params = [th0,logK0,sigma]
#Train the model
train = data.loc[data>p0]
t = (train.index-tref)/timedelta(days=1)
train.index = t
train = pd.to_numeric(train)
out = minimize(cost_p_sig,[th0,logK0],args=(train,sigma),jac=jac_p_sig,method='BFGS')
params = list(out.x)+[sigma,2*out.fun/len(train)]
return params
def fit_erf(data,p0=5e2,verbose=False,prior=None):
#Get initial conditions
train = data.loc[data>0].diff().iloc[1:]
t = (train.index-tref)/timedelta(days=1)
train.index = t
train = pd.to_numeric(train)
th0 = (t.values*train.values).sum()/train.values.sum()
sig0 = np.sqrt(((t-th0).values**2*train.values).sum()/train.values.sum())
tf = t[-1]
if prior is not None:
mean_sigma, var_sigma = prior
lb = mean_sigma-2*np.sqrt(var_sigma)
ub = mean_sigma+2*np.sqrt(var_sigma)
else:
lb = None
ub = None
out = minimize(cost_init,[th0,sig0],args=(train,tf),bounds=((None,None),(lb,ub)))
th0,sig0 = out.x
tau0 = (tf-th0)/sig0
logK0 = np.log(data.iloc[-1]/(norm.cdf(tau0)+ep))
#Fit the curve
train = data.loc[data>p0]
t = (train.index-tref)/timedelta(days=1)
train.index = t
train = pd.to_numeric(train)
out = minimize(cost_p,[th0,logK0,sig0],args=(train,prior),method='Nelder-Mead')
#Save the parameters and score, and print states
params = list(out.x)+[2*out.fun/len(train)]
if verbose:
print(out)
return params, [th0,logK0,sig0], out.success
def fit_all(data,p0=5e2,plot=False,ylabel=None,prior=None):
params_list = pd.DataFrame(index=data.keys(),columns=['th','logK','sigma','score'])
for item in data.keys():
params_list.loc[item] = [np.nan,np.nan,np.nan,np.nan]
# Only fit regions that have nonzero new cases/fatalities on at least seven days
if (data[item].diff()>1).sum() > 7:
# Only fit regions that have at least five data points after crossing p0
if (data[item]>p0).sum() > 5:
params,params_0,success = fit_erf(data[item],p0=p0,prior=prior)
params_list.loc[item] = params
if plot:
fig,ax,params_good = plot_predictions(data[item],params)
ax.set_title(item)
ax.set_ylabel(ylabel)
ax.set_ylim((10,None))
plt.show()
return params_list.dropna()
################## CONDFIDENCE BOUNDS AND PRIORS ###################
def make_prior(data,params,thresh,plot=False,buffer=0):
params_valid = params.loc[data.iloc[-1]>thresh].replace('NaN',np.nan).dropna().sort_values('sigma')
not_peaked = params_valid['th']>(data.index[-1]-tref+pd.to_timedelta(buffer,unit='days'))/pd.to_timedelta(1,unit='days')
peaked = params_valid['th']<=(data.index[-1]-tref+pd.to_timedelta(buffer,unit='days'))/pd.to_timedelta(1,unit='days')
params_valid = params_valid.loc[peaked]
if plot:
params_valid['sigma'].loc[peaked].plot.hist()
peaked = peaked.loc[peaked].index.tolist()
not_peaked = not_peaked.loc[not_peaked].index.tolist()
return params_valid['sigma'].loc[peaked].mean(), params_valid['sigma'].loc[peaked].var(), peaked, not_peaked
def conf_bounds(t,params,hess_inv):
th,logK,sigma,score = params
lb = []
ub = []
ml = []
for ti in t:
tau = (ti-th)/sigma
prediction = logK+np.log((norm.cdf(tau)+ep))
dlogNdt = np.exp(-tau**2/2)/(np.sqrt(2*np.pi)*sigma*(norm.cdf(tau)+ep))
dlogNdx = np.asarray([-dlogNdt,1,-tau*dlogNdt])
sigma_pred2 = dlogNdx[np.newaxis,:].dot(hess_inv.dot(dlogNdx)).squeeze()*score
ub.append(np.exp(prediction+2*np.sqrt(sigma_pred2)))
lb.append(np.exp(prediction-2*np.sqrt(sigma_pred2)))
ml.append(np.exp(prediction))
return np.asarray(lb), np.asarray(ml), np.asarray(ub)
def conf_bounds_eig(t,params,hess_inv):
th,logK,sigma,score = params
v,u = np.linalg.eig(hess_inv*score)
sloppy_v = v[0]
sloppy_u = u[:,0]
params_upper = params[:3]+2*sloppy_u*np.sqrt(sloppy_v)
params_lower = params[:3]-2*sloppy_u*np.sqrt(sloppy_v)
tau = (t-th)/sigma
ml = np.exp(logK)*(norm.cdf(tau)+ep)
th,logK,sigma = params_lower
tau = (t-th)/sigma
lb = np.exp(logK)*(norm.cdf(tau)+ep)
th,logK,sigma = params_upper
tau = (t-th)/sigma
ub = np.exp(logK)*(norm.cdf(tau)+ep)
return lb,ml,ub
def get_sigvar(params,data,p0):
th,logK,sigma0,score0 = params
train = pd.to_numeric(data.loc[data>p0])
train.index=(train.index-tref)/timedelta(days=1)
H = hess_p(params[:-1],train,None)
return np.linalg.inv(H)[2,2]*params[-1]
def sweep_sigma(params,data,p0,sig_bound=30):
th,logK,sigma0,score0 = params
sigvar = get_sigvar(params,data,p0)
if sigvar < 0:
sigvar = 100
params_sweep = []
for sigma in np.logspace(np.log10(np.max([sigma0-4*np.sqrt(sigvar),1])),np.log10(sigma0+sig_bound*np.sqrt(sigvar)),200):
params_sweep.append(fit_erf_sig(data,sigma=sigma,p0=p0))
return np.asarray(params_sweep)
def get_score_thresh(params_sweep,M,c):
sigma = params_sweep[:,2]
dsig = np.diff(sigma)
sigma = sigma[1:]
score = params_sweep[1:,3]
sig_xi2 = np.min(score)
prob = np.exp(-score*M/(2*sig_xi2))/(np.exp(-score*M/(2*sig_xi2))*dsig).sum()
score_set = list(set(score))
score_set.sort()
score_set = np.asarray(score_set)
pcum = np.asarray([(prob[score<=val]*dsig[score<=val]).sum() for val in score_set])
scoremax = score_set[pcum<=c][-1]
return sigma, prob, scoremax
def conf_bounds_sigma(t,params_sweep,M,c):
sigma,prob,scoremax = get_score_thresh(params_sweep,M,c)
params_good = params_sweep[params_sweep[:,3]<=scoremax]
th,logK,sigma = params_good[np.argmin(params_good[:,-1]),:3]
tau = (t-th)/sigma
ml = np.exp(logK)*(norm.cdf(tau)+ep)
th,logK,sigma = params_good[0,:3]
tau = (t-th)/sigma
lb = np.exp(logK)*(norm.cdf(tau)+ep)
th,logK,sigma = params_good[-1,:3]
tau = (t-th)/sigma
ub = np.exp(logK)*(norm.cdf(tau)+ep)
return lb,ml,ub,params_good
def predict_all(data,params_list,p0=50,c=0.95,verbose=False,th_string=False):
pred_idx = params_list.index.copy()
predictions = []
for item in pred_idx:
if verbose:
print(item[0]+', '+item[1])
#Load the data and best-fit params
train = data[item]
params = params_list.loc[item].copy()
try:
#Fit for a range of sigma values
params_sweep = sweep_sigma(params,train,p0)
sigma,prob,scoremax = get_score_thresh(params_sweep,len(train.loc[train>p0]),c)
params_good = params_sweep[params_sweep[:,3]<=scoremax]
total = np.exp(params_good[:,1])
th = tref+ | pd.to_timedelta(params_good[:,0],unit='days') | pandas.to_timedelta |
import argparse
import numpy as np
import pandas as pd
import sys
import datetime as dt
from dateutil.parser import parse
from agent.ExchangeAgent import ExchangeAgent
from agent.NoiseAgent import NoiseAgent
from agent.ValueAgent import ValueAgent
from agent.examples.MarketMakerAgent import MarketMakerAgent
from agent.examples.MomentumAgent import MomentumAgent
from agent.execution.TWAPExecutionAgent import TWAPExecutionAgent
from agent.execution.VWAPExecutionAgent import VWAPExecutionAgent
from Kernel import Kernel
from util import util
from util.order import LimitOrder
from util.oracle.ExternalFileOracle import ExternalFileOracle
########################################################################################################################
############################################### GENERAL CONFIG #########################################################
parser = argparse.ArgumentParser(description='Detailed options for market replay config.')
parser.add_argument('-c',
'--config',
required=True,
help='Name of config file to execute')
parser.add_argument('-t',
'--ticker',
required=True,
help='Ticker (symbol) to use for simulation')
parser.add_argument('-d', '--historical-date',
required=True,
type=parse,
help='historical date being simulated in format YYYYMMDD.')
parser.add_argument('-f',
'--fundamental-file-path',
required=True,
help="Path to external fundamental file.")
parser.add_argument('-e',
'--execution_agents',
action='store_true',
help='Flag to add the execution agents')
parser.add_argument('-s',
'--seed',
type=int,
default=None,
help='numpy.random.seed() for simulation')
parser.add_argument('-l',
'--log_dir',
default=None,
help='Log directory name (default: unix timestamp at program start)')
parser.add_argument('-v',
'--verbose',
action='store_true',
help='Maximum verbosity!')
parser.add_argument('--config_help',
action='store_true',
help='Print argument options for this config file')
args, remaining_args = parser.parse_known_args()
if args.config_help:
parser.print_help()
sys.exit()
seed = args.seed # Random seed specification on the command line.
if not seed: seed = int(pd.Timestamp.now().timestamp() * 1000000) % (2 ** 32 - 1)
np.random.seed(seed)
util.silent_mode = not args.verbose
LimitOrder.silent_mode = not args.verbose
simulation_start_time = dt.datetime.now()
print("Simulation Start Time: {}".format(simulation_start_time))
print("Configuration seed: {}".format(seed))
######################## Agents Config #########################################################################
# Historical date to simulate.
historical_date_pd = pd.to_datetime(args.historical_date)
mkt_open = historical_date_pd + pd.to_timedelta('09:30:00')
mkt_close = historical_date_pd + | pd.to_timedelta('16:00:00') | pandas.to_timedelta |
# vim: fdm=indent
# author: <NAME>
# date: 17/06/19
# content: Atlas averages
__all__ = ['Averages']
import warnings
import numpy as np
import pandas as pd
from anndata import AnnData
import leidenalg
from .fetch_atlas import AtlasFetcher
from .cluster_with_annotations import ClusterWithAnnotations
class Averages(object):
'''Annotate new cell types using averages of an atlas'''
def __init__(
self,
atlas,
n_cells_per_type=None,
n_features_per_cell_type=30,
n_features_overdispersed=500,
features_additional=None,
n_pcs=20,
n_neighbors=10,
n_neighbors_out_of_atlas=5,
distance_metric='correlation',
threshold_neighborhood=0.8,
clustering_metric='cpm',
resolution_parameter=0.001,
normalize_counts=True,
join='keep_first',
):
'''Prepare the model for cell annotation
Args:
atlas (str, list of str, list of dict, dict, or AnnData): cell
atlas to use. Generally there are two kind of choices:
The first possibility selects the corresponding cell atlas or
atlases from northstar's online list. The names of currently
available dataset is here:
https://github.com/iosonofabio/atlas_averages/blob/master/table.tsv
(check the first column for atlas names). If a list of
str, multiple atlases will be fetched and combined. Only features
that are in all atlases will be kept. If you use this feature, be
careful to not mix atlases from different species. If a list of
dict, it merges atlases as above but you can specify what cell
types to fetch from each atlas. Each element of the list must be a
dict with two key-value pairs: 'atlas_name' is the atlas name, and
'cell_types' must be a list of cell types to retain. Example:
atlas=[{'atlas_name': 'Enge_2017', 'cell_types': ['alpha']}] would
load the atlas Enge_2017 and only retain alpha cells. You can also
use a dict to specify a single atlas and to retain only certain cell
types. The format is as above, e.g. to select only alpha cells
from Enge_2017 you can use:
atlas={'atlas_name': 'Enge_2017', 'cell_types': ['alpha']}.
The second possibility is to use a custom atlas (e.g. some
unpublished data). 'atlas' must be an AnnData object with cell
type averages ("cells") as rows and genes as columns and one cell
metadata column 'NumberOfCells' describing the number of cells
for each cell type. In other words:
adata.obs['NumberOfCells']
must exist, and:
adata.obs_names
must contain the known cell types.
n_cells_per_type (None or int): if None, use the number of cells
per type from the atlas. Else, fix it to this number for all types.
n_features_per_cell_type (int): number of features marking each
fixed column (atlas cell type). The argument 'features' takes
priority over this one.
n_features_overdispersed (int): number of unbiased, overdispersed
features to be picked from the new dataset. The argument
'features' takes priority over this one.
features_additional (list of str or None): additional features to
keep on top of automatic selection. The argument 'features' takes
priority over this one.
n_pcs (int): number of principal components to keep in the weighted
PCA.
n_neighbors (int): number of neighbors in the similarity graph.
n_neighbors_out_of_atlas (int): number of neighbors coming out of
the atlas nodes into the new dataset.
distance_metric (str): metric to use as distance. It should be a
metric accepted by scipy.spatial.distance.cdist.
threshold_neighborhood (float): do not consider distances larger than this as
neighbors
clustering_metric (str): 'cpm' (default, Cell Potts Model) or
'modularity'. Sets the type of partition used in the clustering
step.
resolution_parameter (float): number between 0 and 1 that sets
how easy it is for the clustering algorithm to make new clusters
normalize_counts (bool): whether to renormalize the counts at the
merging stage to make sure atlas and new data follow the same
normalization. Be careful if you turn this off.
join (str): must be 'keep_first', 'union', or 'intersection'. This
argument is used when sourcing multiple atlases and decides what
to do with features that are not present in all atlases.
'keep_first' keeps the features in the first atlas and pads the
other atlases with zeros, 'union' pads every atlas that is missing
a feature and 'intersection' only keep features that are in all
atlases.
'''
self.atlas = atlas
self.n_cells_per_type = n_cells_per_type
self.n_features_per_cell_type = n_features_per_cell_type
self.n_features_overdispersed = n_features_overdispersed
self.features_additional = features_additional
self.n_pcs = n_pcs
self.n_neighbors = n_neighbors
self.n_neighbors_out_of_atlas = n_neighbors_out_of_atlas
self.distance_metric = distance_metric
self.threshold_neighborhood = threshold_neighborhood
self.clustering_metric = clustering_metric
self.resolution_parameter = resolution_parameter
self.normalize_counts = normalize_counts
self.join = join
def fit(self, new_data):
'''Run with averages of the atlas
Args:
new_data (pandas.DataFrame or anndata.AnnData): the new data to be
clustered. If a dataframe, t must have features as rows and
cell names as columns (as in loom files). anndata uses the opposite
convention, so it must have cell names as rows (obs_names) and
features as columns (var_names) and this class will transpose it.
Returns:
None, but this instance of Averages acquired the property
`membership` containing the cluster memberships (cell types) of the
columns except the first n_fixed. The first n_fixed columns are
assumes to have distinct memberships in the range [0, n_fixed - 1].
'''
self.new_data = new_data
self._check_init_arguments()
self.fetch_atlas_if_needed()
self.compute_feature_intersection()
self._check_feature_intersection()
self.prepare_feature_selection()
self.select_features()
self._check_feature_selection()
self.merge_atlas_newdata()
self.compute_pca()
self.compute_similarity_graph()
self.cluster_graph()
def fit_transform(self, new_data):
'''Run with averages of the atlas and return the cell types
Args:
new_data (pandas.DataFrame or anndata.AnnData): the new data to be
clustered. If a dataframe, t must have features as rows and
cell names as columns (as in loom files). anndata uses the opposite
convention, so it must have cell names as rows (obs_names) and
features as columns (var_names) and this class will transpose it.
Returns:
the cluster memberships (cell types) of the
columns except the first n_fixed. The first n_fixed columns are
assumes to have distinct memberships in the range [0, n_fixed - 1].
'''
self.fit(new_data)
return self.membership
def _check_init_arguments(self):
# Check atlas
at = self.atlas
if isinstance(at, str):
pass
elif isinstance(at, list) or isinstance(at, tuple):
for elem in at:
if isinstance(elem, str):
pass
elif isinstance(elem, dict):
if 'atlas_name' not in elem:
raise ValueError('List of atlases: format incorrect')
if 'cell_types' not in elem:
raise ValueError('List of atlases: format incorrect')
else:
raise ValueError('List of atlases: format incorrect')
elif isinstance(at, dict) and ('atlas_name' in at) and \
('cell_types' in at):
pass
elif isinstance(at, AnnData):
if 'NumberOfCells' not in at.obs:
raise AttributeError(
'atlas must have a "NumberOfCells" obs column')
else:
raise ValueError('Atlas not formatted correctly')
# Convert new data to anndata if needed
nd = self.new_data
if isinstance(nd, AnnData):
pass
elif isinstance(nd, pd.DataFrame):
# AnnData uses features as columns, so transpose and convert
# (the assumption is that in the DataFrame convention, rows are
# features)
nd = AnnData(
X=nd.values.T,
obs={'CellID': nd.columns.values},
var={'GeneName': nd.index.values},
)
nd.obs_names = nd.obs['CellID']
nd.var_names = nd.var['GeneName']
self.new_data = nd
else:
raise ValueError(
'New data must be an AnnData object or pd.DataFrame',
)
# New data could be too small to do PCA
n_newcells, n_newgenes = self.new_data.shape
if n_newgenes < self.n_pcs:
warnings.warn(
('The number of features in the new data is lenn than ' +
'the number of PCs, so northstar might give inaccurate ' +
'results'))
if n_newcells < self.n_pcs:
warnings.warn(
('The number of cells in the new data is lenn than ' +
'the number of PCs, so northstar might give inaccurate ' +
'results'))
if min(n_newgenes, n_newcells) < self.n_pcs:
warnings.warn('Reducing the number of PCs to {:}'.format(
min(n_newgenes, n_newcells)))
self.n_pcs = min(n_newgenes, n_newcells)
# New data could be too small for knn
if n_newcells < self.n_neighbors + 1:
warnings.warn(
('The number of cells in the new data is less than the ' +
'number of neighbors requested for the knn: reducing the ' +
'number of graph neighbors to {:}'.format(
max(1, n_newcells - 1)),
))
self.n_neighbors = max(1, n_newcells - 1)
nf1 = self.n_features_per_cell_type
nf2 = self.n_features_overdispersed
nf3 = self.features_additional
if not isinstance(nf1, int):
raise ValueError('n_features_per_cell_type must be an int >= 0')
if not isinstance(nf1, int):
raise ValueError('n_features_overdispersed must be an int >= 0')
if (nf1 < 1) and (nf2 < 1) and (nf3 < 1):
raise ValueError('No features selected')
def _check_feature_intersection(self):
L = len(self.features_ovl)
if L == 0:
raise ValueError(
('No overlapping features in atlas and new data, are gene ' +
'names correct for this species?'))
if L < 50:
warnings.warn(
('Only {:} overlapping features found in atlas and new ' +
'data'.format(L)))
def _check_feature_selection(self):
L = len(self.features)
if L == 0:
raise ValueError(
('No features survived selection, check nortstar parameters'))
if L < self.n_pcs:
warnings.warn(
('Only {0} features selected, reducing PCA to {0} components'.format(L)))
self.n_pcs = L
def fetch_atlas_if_needed(self):
'''Fetch atlas(es) if needed'''
at = self.atlas
if isinstance(at, str):
self.atlas = AtlasFetcher().fetch_atlas(
at,
kind='average',
)
elif isinstance(self.atlas, list) or isinstance(self.atlas, tuple):
self.atlas = AtlasFetcher().fetch_multiple_atlases(
at,
kind='average',
join=self.join,
)
elif isinstance(at, dict) and ('atlas_name' in at) and \
('cell_types' in at):
self.atlas = AtlasFetcher().fetch_atlas(
at['atlas_name'],
kind='average',
cell_types=at['cell_types'],
)
def compute_feature_intersection(self):
'''Calculate the intersection of features between atlas and new data'''
# Intersect features
self.features_atlas = self.atlas.var_names.values
self.features_newdata = self.new_data.var_names.values
self.features_ovl = np.intersect1d(
self.features_atlas,
self.features_newdata,
)
def prepare_feature_selection(self):
# Cell names and types
self.cell_types_atlas = self.atlas.obs_names
self.cell_names_atlas = self.atlas.obs_names
self.cell_names_newdata = self.new_data.obs_names
ctypes_ext = []
cnames_ext = []
if self.n_cells_per_type is None:
ncells_per_ct = self.atlas.obs['NumberOfCells'].astype(np.int64)
else:
ncells_per_ct = [self.n_cells_per_type] * self.atlas.shape[0]
for i, ni in enumerate(ncells_per_ct):
for ii in range(ni):
ctypes_ext.append(self.cell_types_atlas[i])
cnames_ext.append(self.cell_types_atlas[i]+'_{:}'.format(ii+1))
self.cell_types_atlas_extended = ctypes_ext
self.cell_names_atlas_extended = cnames_ext
# Numbers
self.n_atlas = self.atlas.shape[0]
self.n_newdata = self.new_data.shape[0]
self.n_total = self.n_atlas + self.n_newdata
self.n_atlas_extended = len(self.cell_names_atlas_extended)
self.n_total_extended = self.n_atlas_extended + self.n_newdata
# Cell numbers
self.sizes = np.ones(self.n_total, np.float32)
if self.n_cells_per_type is not None:
self.sizes[:self.n_atlas] *= self.n_cells_per_type
else:
self.sizes[:self.n_atlas] = self.atlas.obs['NumberOfCells'].astype(
np.float32)
def select_features(self):
'''Select features among the overlap of atlas and new data
Returns:
ndarray of feature names.
'''
features_atlas = self.features_atlas
features_newdata = self.features_newdata
features_ovl = list(self.features_ovl)
features_add = self.features_additional
n_atlas = self.n_atlas
nf1 = self.n_features_per_cell_type
nf2 = self.n_features_overdispersed
features = set()
# Atlas markers
if (nf1 > 0) and (n_atlas > 1):
matrix = self.atlas.X
for icol in range(n_atlas):
ge1 = matrix[icol]
ge2 = (matrix.sum(axis=0) - ge1) / (n_atlas - 1)
fold_change = np.log2(ge1 + 0.1) - np.log2(ge2 + 0.1)
tmp = np.argsort(fold_change)[::-1]
ind_markers_atlas = []
for i in tmp:
if features_atlas[i] in features_ovl:
ind_markers_atlas.append(i)
if len(ind_markers_atlas) == nf1:
break
# Add atlas markers
features |= set(features_atlas[ind_markers_atlas])
# Overdispersed features from new data
if nf2 > 0:
if nf2 >= len(features_ovl):
features |= set(features_ovl)
else:
matrix = self.new_data.X
nd_mean = matrix.mean(axis=0)
nd_var = matrix.var(axis=0)
fano = (nd_var + 1e-10) / (nd_mean + 1e-10)
tmp = np.argsort(fano)[::-1]
ind_ovd_newdata = []
for i in tmp:
if features_newdata[i] in features_ovl:
ind_ovd_newdata.append(i)
if len(ind_ovd_newdata) == nf2:
break
# Add overdispersed features
features |= set(features_newdata[ind_ovd_newdata])
# Additional features
if features_add is not None:
features |= (set(features_add) & set(features_ovl))
self.features = np.array(list(features))
def merge_atlas_newdata(self):
'''Merge atlas data and the new data after feature selection
NOTE: is self.normalize is True, the merged count matrix is normalized
by 1 million total counts.
'''
features = self.features
L = len(features)
N1 = self.n_atlas
N = self.n_total
# This is the largest memory footprint of northstar
matrix = np.empty((N, L), dtype=np.float32)
# Find the feature indices for atlas
ind_features_atlas = pd.Series(
np.arange(len(self.features_atlas)),
index=self.features_atlas,
).loc[features].values
matrix[:N1] = self.atlas.X[:, ind_features_atlas]
# Find the feature indices for new data
ind_features_newdata = pd.Series(
np.arange(len(self.features_newdata)),
index=self.features_newdata,
).loc[features].values
matrix[N1:] = self.new_data.X[:, ind_features_newdata]
# The normalization function also sets pseudocounts
if self.normalize_counts:
matrix = 1e6 * (matrix.T / (matrix.sum(axis=1) + 0.1)).T
self.matrix = matrix
def compute_pca(self):
'''Compute k nearest neighbors from a matrix with fixed nodes
Returns:
list of lists with the first k or less indices of the neighbors for
each free column. The length is N - n_fixed. For each now, there are
less than k entries if no other column within the distance threshold
were found, or if N < k.
The algorithm proceeds as follows:
0. take the log of the counts
1. subtract the mean along the observation axis (N) and divide by the
standard dev along the same axis
2. calculate the weighted covariance matrix
3. calculate normal PCA on that matrix
'''
matrix = self.matrix
sizes = self.sizes
n_atlas = self.n_atlas
n_pcs = self.n_pcs
# Test input arguments
N, L = matrix.shape
if len(sizes) != N:
raise ValueError('Matrix and sizes dimensions do not match')
if n_atlas >= N:
raise ValueError('n_fixed larger or equal matrix number of columns')
if n_pcs > min(L, N):
raise ValueError('n_pcs greater than smaller matrix dimension, those eigenvalues are zero')
# 0. take log
matrix = np.log10(matrix + 0.1)
# 1. standardize
weights = 1.0 * sizes / sizes.sum()
mean_w = weights @ matrix
var_w = weights @ ((matrix - mean_w)**2)
std_w = np.sqrt(var_w)
Xnorm = (matrix - mean_w) / std_w
# take care of non-varying components
Xnorm[np.isnan(Xnorm)] = 0
# 2. weighted covariance
# This matrix has size L x L. Typically L ~ 500 << N, so the covariance
# L x L is much smaller than N x N
cov_w = np.cov(Xnorm.T, fweights=sizes)
# 3. PCA
# rvects columns are the right singular vectors
evals, evects = np.linalg.eig(cov_w)
# sort by decreasing eigenvalue (explained variance) and truncate
ind = evals.argsort()[::-1][:n_pcs]
# NOTE: we do not actually need the eigenvalues anymore
lvects = np.real(evects.T[ind])
# calculate right singular vectors given the left singular vectors
# NOTE: this is true even if we truncated the PCA via n_pcs << L
# rvects columns are the right singular vectors
rvects = (lvects @ Xnorm.T).T
# 4. expand embedded vectors to account for sizes
# NOTE: this could be done by carefully tracking multiplicities
# in the neighborhood calculation, but it's not worth it: the
# amount of overhead memory used here is small because only a few
# principal components are used
Ne = int(np.sum(sizes))
rvectse = np.empty((Ne, n_pcs), np.float32)
cell_type_expanded = []
i = 0
n_fixed_expanded = 0
for isi, size in enumerate(sizes):
if isi < n_atlas:
cte = self.cell_types_atlas[isi]
n_fixed_expanded += int(size)
else:
cte = ''
cell_type_expanded.extend([cte] * int(size))
for j in range(int(size)):
rvectse[i] = rvects[isi]
i += 1
cell_type_expanded = np.array(cell_type_expanded)
self.pca_data = {
'pcs': rvects,
'pcs_expanded': rvectse,
'cell_type': cell_type_expanded,
'n_atlas': n_fixed_expanded,
}
def compute_similarity_graph(self):
'''Compute similarity graph from the extended PC space
1. calculate the distance matrix by expanding atlas columns
2. calculate neighborhoods
3. construct similarity graph from neighborhood lists
'''
from scipy.spatial.distance import cdist
import igraph as ig
sizes = self.sizes
n_atlas = self.n_atlas
k = self.n_neighbors
kout = self.n_neighbors_out_of_atlas
metric = self.distance_metric
threshold = self.threshold_neighborhood
rvects = self.pca_data['pcs']
rvectse = self.pca_data['pcs_expanded']
Ne = len(rvectse)
# 5. calculate distance matrix and neighbors
# we do it row by row, it costs a bit in terms of runtime but
# has huge savings in terms of memory since we don't need the square
# distance matrix
n_fixede = int(np.sum(sizes[:n_atlas]))
neighbors = []
# Treat things within and outside of the atlas differently
# Atlas neighbors
i = 0
for isi in range(n_atlas):
# Find the nearest neighbors in the new data
drow = cdist(rvects[[isi]], rvects[n_atlas:], metric=metric)[0]
ind = np.argpartition(-drow, -kout)[-kout:]
# Discard the ones beyond threshold
ind = ind[drow[ind] <= threshold]
# Indices are not sorted within ind, so we need to sort them
# in descending order by distance (more efficient in the next step)
ind = ind[np.argsort(drow[ind])]
for ii in range(int(sizes[isi])):
# Internal edges
neis = list(range(i, i+int(sizes[isi])))
# Remove self
neis.remove(i+ii)
# External edges
neis.extend(list(ind + n_fixede))
neighbors.append(neis)
i += int(sizes[isi])
# New data neighbors
for i in range(n_fixede, Ne):
drow = cdist(rvectse[[i]], rvectse, metric=metric)[0]
# set distance to self as a high number, to avoid self
drow[i] = drow.max() + 1
# Find largest k negative distances (k neighbors)
ind = np.argpartition(-drow, -k)[-k:]
# Discard the ones beyond threshold
ind = ind[drow[ind] <= threshold]
# Indices are not sorted within ind, so we need to sort them
# in descending order by distance (more efficient in the next step)
ind = ind[np.argsort(drow[ind])]
neighbors.append(list(ind))
self.neighbors = neighbors
# Construct graph from the lists of neighbors
edges_d = set()
for i, neis in enumerate(neighbors):
for n in neis:
edges_d.add(frozenset((i, n)))
edges = [tuple(e) for e in edges_d]
self.graph = ig.Graph(n=Ne, edges=edges, directed=False)
def cluster_graph(self):
'''Compute communities from a matrix with fixed nodes
Returns:
None, but Averages.membership is set as an array with
size N - n_fixed with the atlas cell types of all cells from the
new dataset.
'''
clu = ClusterWithAnnotations(
self.graph,
self.cell_types_atlas_extended,
resolution_parameter=self.resolution_parameter,
metric=self.clustering_metric,
)
self.membership = clu.fit_transform()
def estimate_closest_atlas_cell_type(self):
'''Estimate atlas cell type closest to each new cluster'''
from scipy.spatial.distance import cdist
# Use PC space
rvectse = self.pca_data['pcs']
n_atlas = self.pca_data['n_atlas']
cell_types = self.pca_data['cell_type'][:n_atlas]
L = rvectse.shape[1]
# Extract atlas averages in PC space
cell_types_atlas = np.unique(cell_types)
rvectse_atlas = rvectse[:n_atlas]
N = len(cell_types_atlas)
avg_atl = np.empty((L, N), np.float32)
for i, ct in enumerate(cell_types_atlas):
# They are already replicates, take the first copy
avg_atl[:, i] = rvectse_atlas[cell_types == ct][0]
# Calculate averages for the new clusters
cell_types_new = list(set(self.membership) - set(cell_types_atlas))
rvectse_new = rvectse[n_atlas:]
N = len(cell_types_new)
avg_new = np.empty((L, N), np.float32)
for i, ct in enumerate(cell_types_new):
avg_new[:, i] = rvectse_new[self.membership == ct].mean(axis=0)
# Calculate distance matrix between new and old in the high-dimensional
# feature-selected space
dmat = cdist(avg_new.T, avg_atl.T, metric='euclidean')
# Pick the closest
closest = np.argmin(dmat, axis=1)
# Give it actual names
closest = | pd.Series(cell_types[closest], index=cell_types_new) | pandas.Series |
import argparse
import os.path as osp
import os
import SimpleITK as sitk
import numpy as np
import json
import pandas as pd
CLASSES = ('background', 'femoral bone', 'femoral cartilage', 'tibial bone', 'tibial cartilage')
pd_classes=('femoral bone', 'tibial bone' ,'femoral cartilage', 'tibial cartilage')
pd_metrics=('dice','asd','rsd','msd','vd','voe')
def parse_args():
parser = argparse.ArgumentParser(
description='Convert OAI ZIB MRI annotations to mmsegmentation format')
parser.add_argument('--nifti-path', help='OAI ZIB MRI nifti path')
parser.add_argument('-o', '--out_dir', help='output from oai_zib_mri_back path')
args = parser.parse_args()
return args
def statisticize(result_dict):
result_mean = pd.DataFrame(columns=pd_metrics,index=pd_classes)
result_std = | pd.DataFrame(columns=pd_metrics,index=pd_classes) | pandas.DataFrame |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull)
from pandas.compat import lrange
from pandas import compat
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesApply(TestData, tm.TestCase):
def test_apply(self):
with np.errstate(all='ignore'):
assert_series_equal(self.ts.apply(np.sqrt), np.sqrt(self.ts))
# elementwise-apply
import math
assert_series_equal(self.ts.apply(math.exp), np.exp(self.ts))
# how to handle Series result, #2316
result = self.ts.apply(lambda x: Series(
[x, x ** 2], index=['x', 'x^2']))
expected = DataFrame({'x': self.ts, 'x^2': self.ts ** 2})
tm.assert_frame_equal(result, expected)
# empty series
s = Series(dtype=object, name='foo', index=pd.Index([], name='bar'))
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
# check all metadata (GH 9322)
self.assertIsNot(s, rs)
self.assertIs(s.index, rs.index)
self.assertEqual(s.dtype, rs.dtype)
self.assertEqual(s.name, rs.name)
# index but no data
s = Series(index=[1, 2, 3])
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
def test_apply_same_length_inference_bug(self):
s = Series([1, 2])
f = lambda x: (x, x + 1)
result = s.apply(f)
expected = s.map(f)
assert_series_equal(result, expected)
s = Series([1, 2, 3])
result = s.apply(f)
expected = s.map(f)
assert_series_equal(result, expected)
def test_apply_dont_convert_dtype(self):
s = Series(np.random.randn(10))
f = lambda x: x if x > 0 else np.nan
result = s.apply(f, convert_dtype=False)
self.assertEqual(result.dtype, object)
def test_apply_args(self):
s = Series(['foo,bar'])
result = s.apply(str.split, args=(',', ))
self.assertEqual(result[0], ['foo', 'bar'])
tm.assertIsInstance(result[0], list)
def test_apply_box(self):
# ufunc will not be boxed. Same test cases as the test_map_box
vals = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]
s = pd.Series(vals)
self.assertEqual(s.dtype, 'datetime64[ns]')
# boxed value must be Timestamp instance
res = s.apply(lambda x: '{0}_{1}_{2}'.format(x.__class__.__name__,
x.day, x.tz))
exp = pd.Series(['Timestamp_1_None', 'Timestamp_2_None'])
tm.assert_series_equal(res, exp)
vals = [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern')]
s = pd.Series(vals)
self.assertEqual(s.dtype, 'datetime64[ns, US/Eastern]')
res = s.apply(lambda x: '{0}_{1}_{2}'.format(x.__class__.__name__,
x.day, x.tz))
exp = pd.Series(['Timestamp_1_US/Eastern', 'Timestamp_2_US/Eastern'])
tm.assert_series_equal(res, exp)
# timedelta
vals = [pd.Timedelta('1 days'), pd.Timedelta('2 days')]
s = pd.Series(vals)
self.assertEqual(s.dtype, 'timedelta64[ns]')
res = s.apply(lambda x: '{0}_{1}'.format(x.__class__.__name__, x.days))
exp = pd.Series(['Timedelta_1', 'Timedelta_2'])
tm.assert_series_equal(res, exp)
# period (object dtype, not boxed)
vals = [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]
s = pd.Series(vals)
self.assertEqual(s.dtype, 'object')
res = s.apply(lambda x: '{0}_{1}'.format(x.__class__.__name__,
x.freqstr))
exp = pd.Series(['Period_M', 'Period_M'])
tm.assert_series_equal(res, exp)
def test_apply_datetimetz(self):
values = pd.date_range('2011-01-01', '2011-01-02',
freq='H').tz_localize('Asia/Tokyo')
s = pd.Series(values, name='XX')
result = s.apply(lambda x: x + pd.offsets.Day())
exp_values = pd.date_range('2011-01-02', '2011-01-03',
freq='H').tz_localize('Asia/Tokyo')
exp = pd.Series(exp_values, name='XX')
tm.assert_series_equal(result, exp)
# change dtype
# GH 14506 : Returned dtype changed from int32 to int64
result = s.apply(lambda x: x.hour)
exp = pd.Series(list(range(24)) + [0], name='XX', dtype=np.int64)
tm.assert_series_equal(result, exp)
# not vectorized
def f(x):
if not isinstance(x, pd.Timestamp):
raise ValueError
return str(x.tz)
result = s.map(f)
exp = pd.Series(['Asia/Tokyo'] * 25, name='XX')
tm.assert_series_equal(result, exp)
class TestSeriesMap(TestData, tm.TestCase):
def test_map(self):
index, data = tm.getMixedTypeDict()
source = Series(data['B'], index=data['C'])
target = Series(data['C'][:4], index=data['D'][:4])
merged = target.map(source)
for k, v in compat.iteritems(merged):
self.assertEqual(v, source[target[k]])
# input could be a dict
merged = target.map(source.to_dict())
for k, v in compat.iteritems(merged):
self.assertEqual(v, source[target[k]])
# function
result = self.ts.map(lambda x: x * 2)
self.assert_series_equal(result, self.ts * 2)
# GH 10324
a = Series([1, 2, 3, 4])
b = Series(["even", "odd", "even", "odd"], dtype="category")
c = Series(["even", "odd", "even", "odd"])
exp = Series(["odd", "even", "odd", np.nan], dtype="category")
self.assert_series_equal(a.map(b), exp)
exp = Series(["odd", "even", "odd", np.nan])
self.assert_series_equal(a.map(c), exp)
a = Series(['a', 'b', 'c', 'd'])
b = Series([1, 2, 3, 4],
index=pd.CategoricalIndex(['b', 'c', 'd', 'e']))
c = Series([1, 2, 3, 4], index=Index(['b', 'c', 'd', 'e']))
exp = Series([np.nan, 1, 2, 3])
self.assert_series_equal(a.map(b), exp)
exp = Series([np.nan, 1, 2, 3])
self.assert_series_equal(a.map(c), exp)
a = Series(['a', 'b', 'c', 'd'])
b = Series(['B', 'C', 'D', 'E'], dtype='category',
index=pd.CategoricalIndex(['b', 'c', 'd', 'e']))
c = Series(['B', 'C', 'D', 'E'], index=Index(['b', 'c', 'd', 'e']))
exp = Series(pd.Categorical([np.nan, 'B', 'C', 'D'],
categories=['B', 'C', 'D', 'E']))
self.assert_series_equal(a.map(b), exp)
exp = Series([np.nan, 'B', 'C', 'D'])
self.assert_series_equal(a.map(c), exp)
def test_map_compat(self):
# related GH 8024
s = Series([True, True, False], index=[1, 2, 3])
result = s.map({True: 'foo', False: 'bar'})
expected = Series(['foo', 'foo', 'bar'], index=[1, 2, 3])
assert_series_equal(result, expected)
def test_map_int(self):
left = Series({'a': 1., 'b': 2., 'c': 3., 'd': 4})
right = Series({1: 11, 2: 22, 3: 33})
self.assertEqual(left.dtype, np.float_)
self.assertTrue(issubclass(right.dtype.type, np.integer))
merged = left.map(right)
self.assertEqual(merged.dtype, np.float_)
self.assertTrue(isnull(merged['d']))
self.assertTrue(not | isnull(merged['c']) | pandas.isnull |
"""
Functions for writing a directory for iModulonDB webpages
"""
import logging
import os
import re
from itertools import chain
from zipfile import ZipFile
import numpy as np
import pandas as pd
from matplotlib.colors import to_hex
from tqdm.notebook import tqdm
from pymodulon.plotting import _broken_line, _get_fit, _solid_line
##################
# User Functions #
##################
def imodulondb_compatibility(model, inplace=False, tfcomplex_to_gene=None):
"""
Checks for all issues and missing information prior to exporting to iModulonDB.
If inplace = True, modifies the model (not recommended for main model variables).
Parameters
----------
model: :class:`~pymodulon.core.IcaData`
IcaData object to check
inplace: bool, optional
If true, modifies the model to prepare for export.
Not recommended for use with your main model variable.
tfcomplex_to_gene: dict, optional
dictionary pointing complex TRN entries to matching gene names in the gene
table (ex: {"FlhDC":"flhD"})
Returns
-------
table_issues: pd.DataFrame
Each row corresponds to an issue with one of the main class elements.
Columns:
* Table: which table or other variable the issue is in
* Missing Column: the column of the Table with the issue (not case
sensitive; capitalization is ignored).
* Solution: Unless "CRITICAL" is in this cell, the site behavior if the
issue remained is described here.
tf_issues: pd.DataFrame
Each row corresponds to a regulator that is used in the imodulon_table.
Columns:
* in_trn: whether the regulator is in the model.trn. Regulators not
in the TRN will be ignored in the site's histograms and gene tables.
* has_link: whether the regulator has a link in tf_links. If not, no
link to external regulator databases will be shown.
* has_gene: whether the regulator can be matched to a gene in the model.
If this is false, then there will be no regulator scatter plot on the
site. You can link TF complexes to one of their genes using the
tfcomplex_to_gene input.
missing_g_links: pd.Series
The genes on this list don't have links in the gene_links. Their gene pages
for these genes will not display links.
missing_DOIs: pd.Series
The samples listed here don't have DOIs in the sample_table. Clicking on their
associated bars in the activity plots will not link to relevant papers.
"""
if tfcomplex_to_gene is None:
tfcomplex_to_gene = {}
table_issues = pd.DataFrame(columns=["Table", "Missing Column", "Solution"])
# Check for X
if model.X is None:
table_issues = table_issues.append(
{
"Table": "X",
"Missing Column": "all",
"Solution": "CRITICAL. Add the expression matrix"
" so that gene pages can be generated.",
},
ignore_index=True,
)
logging.warning("Critical issue: No X matrix")
# Check for updated imodulondb table
default_imdb_table = {
"organism": "New Organism",
"dataset": "New Dataset",
"strain": "Unspecified",
"publication_name": "Unpublished Study",
"publication_link": "",
"gene_link_db": "External Database",
"organism_folder": "new_organism",
"dataset_folder": "new_dataset",
}
for k, v in default_imdb_table.items():
if model.imodulondb_table[k] == v:
if k == "publication_link":
solution = "The publication name will not be a hyperlink."
else:
solution = 'The default, "{}", will be used.'.format(v)
table_issues = table_issues.append(
{
"Table": "iModulonDB",
"Missing Column": k,
"Solution": solution,
},
ignore_index=True,
)
# Check the gene table
gene_table_cols = {
"gene_name": "Locus tags (gene_table.index) will be used.",
"gene_product": "Locus tags (gene_table.index) will be used.",
"cog": "COG info will not display & the gene scatter plot will"
" not have color.",
"start": "The x axis of the scatter plot will be a numerical"
" value instead of a genome location.",
"operon": "Operon info will not display.",
"regulator": "Regulator info will not display. If you have a"
" TRN, add it to the model to auto-generate this column.",
}
gene_table_lower = {i.lower(): i for i in model.gene_table.columns}
for col in gene_table_cols.keys():
if not (col in gene_table_lower.keys()):
table_issues = table_issues.append(
{
"Table": "Gene",
"Missing Column": col,
"Solution": gene_table_cols[col],
},
ignore_index=True,
)
if (col in ["gene_name", "gene_product"]) & inplace:
model.gene_table[col] = model.gene_table.index
elif inplace:
model.gene_table = model.gene_table.rename(
{gene_table_lower[col]: col}, axis=1
)
# check for missing gene links
missing_g_links = []
for g in model.M.index:
if (
not (isinstance(model.gene_links[g], str))
or model.gene_links[g].strip() == ""
):
missing_g_links.append(g)
missing_g_links = | pd.Series(missing_g_links, name="missing_gene_links") | pandas.Series |
"""
dataset = AbstractDataset()
"""
from collections import OrderedDict, defaultdict
import json
from pathlib import Path
import numpy as np
import pandas as pd
from tqdm import tqdm
import random
def make_perfect_forecast(prices, horizon):
prices = np.array(prices).reshape(-1, 1)
forecast = np.hstack([np.roll(prices, -i) for i in range(0, horizon)])
return forecast[:-(horizon-1), :]
def load_episodes(path):
# pass in list of filepaths
if isinstance(path, list):
if isinstance(path[0], pd.DataFrame):
# list of dataframes?
return path
else:
# list of paths
episodes = [Path(p) for p in path]
print(f'loading {len(episodes)} from list')
csvs = [pd.read_csv(p, index_col=0) for p in tqdm(episodes) if p.suffix == '.csv']
parquets = [pd.read_parquet(p) for p in tqdm(episodes) if p.suffix == '.parquet']
eps = csvs + parquets
print(f'loaded {len(episodes)} from list')
return eps
# pass in directory
elif Path(path).is_dir() or isinstance(path, str):
path = Path(path)
episodes = [p for p in path.iterdir() if p.suffix == '.csv']
else:
path = Path(path)
assert path.is_file() and path.suffix == '.csv'
episodes = [path, ]
print(f'loading {len(episodes)} from {path.name}')
eps = [ | pd.read_csv(p, index_col=0) | pandas.read_csv |
import sys
from pathlib import Path
from datetime import datetime
from time import sleep
import pandas as pd
from meteostat import Daily, Point
sys.path.append(str(Path.cwd()))
from pipeline_config import root_dir # noqa: E402
from pipeline_logger import logger # noqa: E402
from utils import get_module_purpose, read_args, write_ff_csv # noqa: E402
def collect_daily_weather_conditions(
game_date: str, latitude: float, longitude: float
) -> pd.DataFrame:
game_date = datetime.strptime(game_date, "%Y-%m-%d")
point = Point(lat=latitude, lon=longitude)
game_day_weather_df = (
Daily(point, start=game_date, end=game_date).fetch().reset_index()
)
return game_day_weather_df
def collect_weather(
calendar_df: pd.DataFrame, stadium_df: pd.DataFrame
) -> pd.DataFrame:
game_location_fields = ["date", "team", "opp", "stadium_name", "roof_type"]
game_weather_df = pd.DataFrame(columns=game_location_fields)
for row in calendar_df.itertuples(index=False):
logger.info(f"collecting data for {row}")
game_date, team, opp, is_away = (row.date, row.team, row.opp, row.is_away)
if is_away == 1:
home_team = opp
else:
home_team = team
game_location = stadium_df[stadium_df["team"] == home_team]
if game_location.empty:
raise Exception(f"No stadium found for {home_team}")
stadium_name, roof_type, lon, lat = (
game_location[["stadium_name", "roof_type", "longitude", "latitude"]]
.iloc[0]
.tolist()
)
game_location_data = [row.date, row.team, row.opp, stadium_name, roof_type]
if roof_type in ["Indoor", "Retractable"]:
game_day_weather_df = pd.DataFrame(
[game_location_data], columns=game_location_fields
)
else:
game_day_weather_df = collect_daily_weather_conditions(
game_date=game_date, latitude=lat, longitude=lon
)
game_day_weather_df = game_day_weather_df.drop(columns="time")
game_day_weather_df = pd.DataFrame(
[game_location_data + game_day_weather_df.iloc[0].tolist()],
columns=game_location_fields + game_day_weather_df.columns.tolist(),
)
game_weather_df = | pd.concat([game_weather_df, game_day_weather_df]) | pandas.concat |
import os
from fnmatch import fnmatch
import pandas as pd
import numpy as np
import pickle
import sys
import matplotlib.pyplot as plt
from VarDACAE import ML_utils
from collections import OrderedDict
#plt.style.use('seaborn-white')
def get_DA_info(exp_dir_base, key="percent_improvement"):
max_epoch = 0
last_df = None
DA_data = []
for path, subdirs, files in os.walk(exp_dir_base):
for file in files:
if file[-9:] == "_test.csv":
try:
epoch_csv = int(file.replace("_test.csv", ""))
except:
continue
fp = os.path.join(path, file)
dfDA = pd.read_csv(fp)
try:
DA_mean = dfDA[key].mean()
DA_std = dfDA[key].std()
except:
DA_mean, DA_std = None, None
res = (epoch_csv, dfDA, DA_mean, DA_std)
DA_data.append(res)
if epoch_csv >= max_epoch:
max_epoch = epoch_csv
last_df = dfDA
#get DF with best
mean_DA = [(epoch, mean, std) for (epoch, _, mean, std) in DA_data]
mean_DA.sort()
if mean_DA[0][1] is not None:
mean_DA = [{"epoch": x, "mean": y, "std1": z, "upper2std": (y + 2 * z), "lower2std": (y - 2 * z), "std2": 2 * z} for (x, y, z) in mean_DA]
mean_DF = pd.DataFrame(mean_DA)
else:
mean_DF = None
return DA_data, mean_DF, last_df
def extract_res_from_files2(exp_dir_base, epochs, keys):
"""Takes a directory (or directories) and searches recursively for
subdirs that have a test train and settings file
(meaning a complete experiment was conducted).
Returns:
A list of dictionaries where each element in the list
is an experiment and the dictionary has the following form:
data_dict = {"train_df": df1, "test_df":df2,
"settings":settings, "path": path}
"""
if isinstance(exp_dir_base, str):
exp_dirs = [exp_dir_base]
elif isinstance(exp_dir_base, list):
exp_dirs = exp_dir_base
else:
raise ValueError("exp_dir_base must be a string or a list")
SETTINGS = "settings.txt"
results = []
for exp_dir_base in exp_dirs:
for path, subdirs, files in os.walk(exp_dir_base):
test_fps, train_fps, settings = [], [], None
for name in files:
if fnmatch(name, SETTINGS):
settings = os.path.join(path, name)
if fnmatch(name, "*test.csv"):
splt = name.split("-")
if len(splt) > 1:
continue
num, _ = name.split("_")
epoch = int(num)
if epoch in epochs:
fp = os.path.join(path, name)
test_fps.append((epoch, fp))
if fnmatch(name, "*train.csv"):
splt = name.split("-")
if len(splt) > 1:
continue
num, _ = name.split("_")
epoch = int(num)
if epoch in epochs:
fp = os.path.join(path, name)
train_fps.append((epoch, fp))
if test_fps and train_fps and settings:
test_fps = sorted(test_fps)
train_fps = sorted(train_fps)
with open(settings, "rb") as f:
settings = pickle.load(f)
test_dfs, train_dfs = [], []
for epoch, fp in test_fps:
df = pd.read_csv(fp)
result = {"epoch": epoch}
for key in keys:
res = df[key].mean()
result[key] = res
test_dfs.append(result)
test_dfs = pd.DataFrame(test_dfs)
for epoch, fp in train_fps:
df = pd.read_csv(fp)
result = {"epoch": epoch}
for key in keys:
res = df[key].mean()
result[key] = res
train_dfs.append(result)
train_dfs = pd.DataFrame(train_dfs)
#collate train and test data
train_dfs["Subset"] = "train"
test_dfs["Subset"] = "test"
df = pd.concat([test_dfs, train_dfs], ignore_index=True)
model_data = get_model_specific_data(settings, path)
#DA_data, mean_DF, last_df = get_DA_info(path, "mse_DA")
data_dict = {"df": df,
"settings":settings,
"path": path,
"model_data": model_data,}
results.append(data_dict)
print("{} experiments conducted".format(len(results)))
sort_res = sorted(results, key = lambda x: x['path'])
return sort_res
#Extract results files from sub directories
def extract_res_from_files(exp_dir_base):
"""Takes a directory (or directories) and searches recursively for
subdirs that have a test train and settings file
(meaning a complete experiment was conducted).
Returns:
A list of dictionaries where each element in the list
is an experiment and the dictionary has the following form:
data_dict = {"train_df": df1, "test_df":df2,
"settings":settings, "path": path}
"""
if isinstance(exp_dir_base, str):
exp_dirs = [exp_dir_base]
elif isinstance(exp_dir_base, list):
exp_dirs = exp_dir_base
else:
raise ValueError("exp_dir_base must be a string or a list")
TEST = "test.csv"
TRAIN = "train.csv"
SETTINGS = "settings.txt"
results = []
for exp_dir_base in exp_dirs:
for path, subdirs, files in os.walk(exp_dir_base):
test, train, settings = None, None, None
for name in files:
if fnmatch(name, TEST):
test = os.path.join(path, name)
elif fnmatch(name, TRAIN):
train = os.path.join(path, name)
elif fnmatch(name, SETTINGS):
settings = os.path.join(path, name)
if settings and not test and not train:
test, train = [], []
for name in files:
if fnmatch(name, "*test.csv"):
test.append(os.path.join(path, name))
elif fnmatch(name, "*train.csv"):
train.append(os.path.join(path, name))
if test and train and settings:
if isinstance(test, list):
dftest = []
for fp in test:
dftest.append(pd.read_csv(fp))
dftrain = []
for fp in train:
dftrain.append(pd.read_csv(fp))
else:
dftest = pd.read_csv(test)
dftrain = pd.read_csv(train)
with open(settings, "rb") as f:
settings = pickle.load(f)
model_data = get_model_specific_data(settings, path)
DA_data, mean_DF, last_df = get_DA_info(path, "mse_DA")
data_dict = {"train_df": dftrain,
"test_df":dftest,
"test_DA_df_final": last_df,
"DA_mean_DF": mean_DF,
"settings":settings,
"path": path,
"model_data": model_data,}
results.append(data_dict)
print("{} experiments conducted".format(len(results)))
sort_res = sorted(results, key = lambda x: x['path'])
return sort_res
def plot_results_loss_epochs(results, ylim1 = None, ylim2=None):
"""Plots subplot with train/valid loss vs number epochs"""
nx = 3
ny = int(np.ceil(len(results) / nx))
fig, axs = plt.subplots(ny, nx, sharey=True)
fig.set_size_inches(nx * 5, ny * 4)
print(axs.shape)
color1 = 'tab:red'
color = 'tab:blue'
for idx, ax in enumerate(axs.flatten()):
if idx + 1 > len(results):
break
test_df = results[idx]["test_df"]
train_df = results[idx]["train_df"]
if isinstance(test_df, list):
test_df = pd.concat(test_df, axis=0, ignore_index=True)
train_df = pd.concat(train_df, axis=0, ignore_index=True)
sttn = results[idx]["settings"]
DA_mean_DF = results[idx].get("DA_mean_DF")
model_data = results[idx]["model_data"]
ax.plot(test_df.epoch, test_df.reconstruction_err, 'ro-')
ax.plot(train_df.epoch, train_df.reconstruction_err, 'g+-')
ax.grid(True, axis='y', color=color1 )
ax.grid(True, axis='x', )
#############################
# multiple line plot
ax.set_ylabel('MSE loss', color=color1)
ax.tick_params(axis='y', labelcolor=color1)
ax2 = ax.twinx() # instantiate a second axes that shares the same x-axis
ax2.grid(True, axis='y', color=color)
#set axes:
if ylim1:
ax.set_ylim(ylim1[0], ylim1[1])
if ylim2:
ax2.set_ylim(ylim2[0], ylim2[1])
ax2.set_ylabel('Test DA percentage Improvement %', color=color) # we already handled the x-label with ax1
ax2.errorbar("epoch", 'mean', yerr=DA_mean_DF.std1, data=DA_mean_DF, marker='+', color=color, )
ax2.tick_params(axis='y', labelcolor=color)
fig.tight_layout() # otherwise the right y-label is slightly clipped
########################
try:
latent = sttn.get_number_modes()
except:
latent = "??"
activation = sttn.ACTIVATION
if hasattr(sttn, "BATCH_NORM"):
BN = sttn.BATCH_NORM
if BN:
BN = "BN"
else:
BN = "NBN"
else:
BN = "NBN"
if hasattr(sttn, "learning_rate"):
lr = sttn.learning_rate
else:
lr = "??"
if hasattr(sttn, "AUGMENTATION"):
aug = sttn.AUGMENTATION
else:
aug = False
if hasattr(sttn, "DROPOUT"):
drop = sttn.DROPOUT
else:
drop = False
try:
num_layers = sttn.get_num_layers_decode()
except:
num_layers = "??"
title = "act={}, ".format(activation)
for idx, (key, value) in enumerate(model_data.items()):
if idx % 3 == 0 and idx > 0:
title += "\n"
if isinstance(value, float):
title += "{}={:.4f}, ".format(key, value)
else:
title += "{}={}, ".format(key, value)
title = title[:-1]
ax.set_title(title)
plt.show()
def extract(res):
"""Extracts relevant data to a dataframe from the 'results' dictionary"""
test_df = res["test_df"]
train_df = res["train_df"]
sttn = res["settings"]
valid_loss = min(test_df.reconstruction_err)
model_name = sttn.__class__.__name__
try:
latent = sttn.get_number_modes()
except:
latent = "??"
activation = sttn.ACTIVATION
channels = sttn.get_channels()
num_channels = None
if isinstance(channels, list):
num_channels = sum(channels)
first_channel = channels[1] #get the input channel (this may be a bottleneck)
if hasattr(sttn, "get_num_layers_decode"):
num_layers = sttn.get_num_layers_decode()
chan_layer = "??"
if num_channels:
chan_layer = num_channels/num_layers
else:
num_layers = "??"
chan_layer = "??"
if hasattr(sttn, "CHANGEOVER_DEFAULT"):
conv_changeover = sttn.CHANGEOVER_DEFAULT
else:
conv_changeover = 10
if hasattr(sttn, "BATCH_NORM"):
BN = bool(sttn.BATCH_NORM)
else:
BN = False
if hasattr(sttn, "AUGMENTATION"):
aug = sttn.AUGMENTATION
else:
aug = False
if hasattr(sttn, "DROPOUT"):
drop = sttn.DROPOUT
else:
drop = False
if hasattr(sttn, "learning_rate"):
lr = sttn.learning_rate
else:
lr = "??"
data = {"model":model_name, "valid_loss":valid_loss, "activation":activation,
"latent_dims": latent, "num_layers":num_layers, "total_channels":num_channels,
"channels/layer":chan_layer, "conv_changeover": conv_changeover,
"path": res["path"], "first_channel": first_channel, "batch_norm": BN,
"channels": channels, "learning_rate": lr, "augmentation": aug, "dropout": drop}
return data
def create_res_df(results, remove_duplicates=False):
df_res = | pd.DataFrame(columns=["model", "valid_loss", "activation", "latent_dims", "num_layers", "total_channels", "channels/layer"]) | pandas.DataFrame |
import pymssql # 引入pymssql模块
import pandas as pd
def conn(job, company):
conn = pymssql.connect('(local)', 'sa', 'cjm521', 'QA') # 服务器名,账户,密码,数据库名
if conn:
print("连接成功!")
cursor = conn.cursor()
f_job = False
f_company = False
if job is None and company is None:
return "请重新输入"
if job is not None:
# sql= "select A.ID,quar from ceb_recruit as A ,ceb_quarters as B where firmId=B.Id and quar like '%学徒%'"
sql = "select A.ID,quar,unitName,salary from ceb_recruit as A ,ceb_quarters as B where hiringId=B.Id and quar like '%{0}%'".format(
job)
cursor.execute(sql)
resList = cursor.fetchall()
# cols为字段信息 例如((''))
cols = cursor.description
col = []
for i in cols:
col.append(i[0])
data = list(map(list, resList))
data1 = | pd.DataFrame(data, columns=col) | pandas.DataFrame |
import tensorflow as tf
from tensorflow.python.ops import math_ops as tfmath_ops
import numpy as np
import matplotlib.pyplot as plt
import os
from datetime import datetime as dt
import sys
from matplotlib.patches import Ellipse
import shutil
import pandas as pd
import pickle
import time
import subprocess as sp
def Make_path_batch(
batch=40,
tmax=30,
lt=5,
seed=None
):
"""
Samples x(t), y(t) from a GP
args:
batch: number of samples
tmax: length of samples
lt: GP length scale
returns:
traj: nparray (batch, tmax, 2)
"""
ilt = -0.5/(lt*lt)
T = np.arange(tmax)
Sigma = np.exp( ilt * (T.reshape(-1,1) - T.reshape(1,-1))**2)
Mu = np.zeros(tmax)
np.random.seed(seed)
traj = np.random.multivariate_normal(Mu, Sigma, (batch, 2))
traj = np.transpose(traj, (0,2,1))
return traj
def Make_Video_batch(tmax=50,
px=32,
py=32,
lt=5,
batch=40,
seed=1,
r=3
):
"""
params:
tmax: number of frames to generate
px: horizontal resolution
py: vertical resolution
lt: length scale
batch: number of videos
seed: rng seed
r: radius of ball in pixels
returns:
traj0: (batch, tmax, 2) numpy array
vid_batch: (batch, tmax, px, py) numpy array
"""
traj0 = Make_path_batch(batch=batch, tmax=tmax, lt=lt)
traj = traj0.copy()
# convert trajectories to pixel dims
traj[:,:,0] = traj[:,:,0] * (px/5) + (0.5*px)
traj[:,:,1] = traj[:,:,1] * (py/5) + (0.5*py)
rr = r*r
def pixelate_frame(xy):
"""
takes a single x,y pixel point and converts to binary image
with ball centered at x,y.
"""
x = xy[0]
y = xy[1]
sq_x = (np.arange(px) - x)**2
sq_y = (np.arange(py) - y)**2
sq = sq_x.reshape(1,-1) + sq_y.reshape(-1,1)
image = 1*(sq < rr)
return image
def pixelate_series(XY):
vid = map(pixelate_frame, XY)
vid = [v for v in vid]
return np.asarray(vid)
vid_batch = [pixelate_series(traj_i) for traj_i in traj]
vid_batch = np.asarray(vid_batch)
return traj0, vid_batch
def play_video(vid_batch, j=0):
"""
vid_batch: batch*tmax*px*py batch of videos
j: int, which elem of batch to play
"""
_, ax = plt.subplots(figsize=(5,5))
plt.ion()
for i in range(vid_batch.shape[1]):
ax.clear()
ax.imshow(vid_batch[j,i,:,:])
plt.pause(0.1)
def build_video_batch_graph(tmax=50,
px=32,
py=32,
lt=5,
batch=1,
seed=1,
r=3,
dtype=tf.float32):
assert px==py, "video batch graph assumes square frames"
rr = r*r
ilt = tf.constant(-0.5/(lt**2), dtype=dtype)
K = tf.range(tmax, dtype=dtype)
K = (tf.reshape(K, (tmax, 1)) - tf.reshape(K, (1, tmax)))**2
# print((K*ilt).get_shape())
# sys.exit()
K = tf.exp(K*ilt) + 0.00001*tf.eye(tmax, dtype=dtype)
chol_K = tf.Variable(tf.linalg.cholesky(K), trainable=False)
ran_Z = tf.random.normal((tmax, 2*batch))
paths = tf.matmul(chol_K, ran_Z)
paths = tf.reshape(paths, (tmax, batch, 2))
paths = tf.transpose(paths, (1,0,2))
# assumes px = py
paths = paths*0.2*px + 0.5*px
# paths[:,:,0] = paths[:,:,0]*0.2*px + 0.5*px
# paths[:,:,1] = paths[:,:,1]*0.2*py + 0.5*py
vid_batch = []
tf_px = tf.range(px, dtype=dtype)
tf_py = tf.range(py, dtype=dtype)
for b in range(batch):
frames_tmax = []
for t in range(tmax):
lx = tf.reshape((tf_px - paths[b,t,0])**2, (px, 1))
ly = tf.reshape((tf_py - paths[b,t,1])**2, (1, py))
frame = lx + ly < rr
frames_tmax.append(tf.reshape(frame, (1,1,px,py)))
vid_batch.append(tf.concat(frames_tmax, 1))
vid_batch = [tfmath_ops.cast(vid, dtype=dtype) for vid in vid_batch]
vid_batch = tf.concat(vid_batch, 0)
return vid_batch
def MSE_rotation(X, Y, VX=None):
"""
Given X, rotate it onto Y
args:
X: np array (batch, tmax, 2)
Y: np array (batch, tmax, 2)
VX: variance of X values (batch, tmax, 2)
returns:
X_rot: rotated X (batch, tmax, 2)
W: nparray (2, 2)
B: nparray (2, 1)
MSE: ||X_rot - Y||^2
VX_rot: rotated cov matrices (default zeros)
"""
batch, tmax, _ = X.shape
X = X.reshape((batch*tmax, 2))
X = np.hstack([X, np.ones((batch*tmax, 1))])
Y = Y.reshape(batch*tmax, 2)
W, MSE, _, _ = np.linalg.lstsq(X, Y, rcond=None)
try:
MSE = MSE[0] + MSE[1]
except:
MSE = np.nan
X_rot = np.matmul(X, W)
X_rot = X_rot.reshape(batch, tmax, 2)
VX_rot = np.zeros((batch, tmax, 2, 2))
if VX is not None:
W_rot = W[:2,:]
W_rot_t = np.transpose(W[:2,:])
for b in range(batch):
for t in range(tmax):
VX_i = np.diag(VX[b,t,:])
VX_i = np.matmul(W_rot, VX_i)
VX_i = np.matmul(VX_i, W_rot_t)
VX_rot[b,t,:,:] = VX_i
return X_rot, W, MSE, VX_rot
def plot_latents(truevids,
truepath,
reconvids=None,
reconpath=None,
reconvar=None,
ax=None,
nplots=4,
paths=None):
"""
Plots an array of input videos and reconstructions.
args:
truevids: (batch, tmax, px, py) np array of videos
truepath: (batch, tmax, 2) np array of latent positions
reconvids: (batch, tmax, px, py) np array of videos
reconpath: (batch, tmax, 2) np array of latent positions
reconvar: (batch, tmax, 2, 2) np array, cov mat
ax: (optional) list of lists of axes objects for plotting
nplots: int, number of rows of plot, each row is one video
paths: (batch, tmax, 2) np array optional extra array to plot
returns:
fig: figure object with all plots
"""
if ax is None:
_, ax = plt.subplots(nplots,3, figsize=(6, 8))
for axi in ax:
for axj in axi:
axj.clear()
_, tmax, _, _ = truevids.shape
# get axis limits for the latent space
xmin = np.min([truepath[:nplots,:,0].min(),
reconpath[:nplots,:,0].min()]) -0.1
xmin = np.min([xmin, -2.5])
xmax = np.max([truepath[:nplots,:,0].max(),
reconpath[:nplots,:,0].max()]) +0.1
xmax = np.max([xmax, 2.5])
ymin = np.min([truepath[:nplots,:,1].min(),
reconpath[:nplots,:,1].min()]) -0.1
ymin = np.min([ymin, -2.5])
ymax = np.max([truepath[:nplots,:,1].max(),
reconpath[:nplots,:,1].max()]) +0.1
ymax = np.max([xmax, 2.5])
def make_heatmap(vid):
"""
args:
vid: tmax, px, py
returns:
flat_vid: px, py
"""
vid = np.array([(t+4)*v for t,v in enumerate(vid)])
flat_vid = np.max(vid, 0)*(1/(4+tmax))
return flat_vid
if reconvar is not None:
E = np.linalg.eig(reconvar[:nplots,:,:,:])
H = np.sqrt(E[0][:,:,0])
W = np.sqrt(E[0][:,:,1])
A = np.arctan2(E[1][:,:,0,1], E[1][:,:,0,0])*(360/(2*np.pi))
def plot_set(i):
# i is batch element = plot column
# top row of plots is true data heatmap
tv = make_heatmap(truevids[i,:,:,:])
ax[0][i].imshow(1-tv, origin='lower', cmap='Greys')
ax[0][i].axis('off')
# middle row is trajectories
ax[1][i].plot(truepath[i,:,0], truepath[i,:,1])
ax[1][i].set_xlim([xmin, xmax])
ax[1][i].set_ylim([ymin, ymax])
ax[1][i].scatter(truepath[i,-1,0], truepath[i,-1,1])
if reconpath is not None:
ax[1][i].plot(reconpath[i,:,0], reconpath[i,:,1])
ax[1][i].scatter(reconpath[i,-1,0], reconpath[i,-1,1])
if paths is not None:
ax[1][i].plot(paths[i,:,0], paths[i,:,1])
ax[1][i].scatter(paths[i,-1,0], paths[i,-1,1])
if reconvar is not None:
ells = [Ellipse(xy=reconpath[i,t,:],
width=W[i,t],
height=H[i,t],
angle=A[i,t]) for t in range(tmax)]
for e in ells:
ax[1][i].add_artist(e)
e.set_clip_box(ax[1][i].bbox)
e.set_alpha(0.25)
e.set_facecolor('C1')
# Third row is reconstructed video
if reconvids is not None:
rv = make_heatmap(reconvids[i,:,:,:])
ax[2][i].imshow(1-rv, origin='lower', cmap='Greys')
ax[2][i].axis('off')
for i in range(nplots):
plot_set(i)
return ax
def make_checkpoint_folder(base_dir, expid=None, extra=""):
"""
Makes a folder and sub folders for pics and results
Args:
base_dir: the root directory where new folder will be made
expid: optional extra sub dir inside base_dir
"""
# make a "root" dir to store all checkpoints
# homedir = os.getenv("HOME")
# base_dir = homedir+"/GPVAE_checkpoints/"
if expid is not None:
base_dir = base_dir + "/" + expid + "/"
if not os.path.exists(base_dir):
os.makedirs(base_dir)
# now make a unique folder inside the root for this experiments
filenum = str(len(os.listdir(base_dir))) + ":"+extra+"__on__"
T = dt.now()
filetime = str(T.day)+"_"+str(T.month)+"_"+str(T.year) + "__at__"
filetime += str(T.hour)+":"+str(T.minute)+":"+str(T.second)
# main folder
checkpoint_folder = base_dir + filenum + filetime
os.makedirs(checkpoint_folder)
# pictures folder
pic_folder = checkpoint_folder + "/pics/"
os.makedirs(pic_folder)
# pickled results files
res_folder = checkpoint_folder + "/res/"
os.makedirs(res_folder)
# source code
src_folder = checkpoint_folder + "/sourcecode/"
os.makedirs(src_folder)
old_src_dir = os.path.dirname(os.path.abspath(__file__)) + "/"
src_files = os.listdir(old_src_dir)
print("\n\nCopying source Code to "+src_folder)
for f in src_files:
if ".py" in f:
src_file = old_src_dir + f
shutil.copy2(src_file, src_folder)
print(src_file)
print("\n")
return checkpoint_folder + "/"
class pandas_res_saver:
"""
Takes a file and a list of col names to initialise a
pandas array. Then accepts extra rows to be added
and occasionally written to disc.
"""
def __init__(self, res_file, colnames):
# reload old results frame
if os.path.exists(res_file):
if list(pd.read_pickle(res_file).columns)==colnames:
print("res_file: recovered ")
self.data = pd.read_pickle(res_file)
self.res_file = res_file
else:
print("res_file: old exists but not same, making new ")
self.res_file = res_file + "_" + str(time.time())
self.data = pd.DataFrame(columns=colnames)
else:
print("res_file: new")
self.res_file = res_file
self.data = | pd.DataFrame(columns=colnames) | pandas.DataFrame |
import h5py
import pandas as pd
import numpy as np
from obspy import read
import datetime as dtt
import datetime
from scipy.stats import kurtosis
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from scipy import spatial
from scipy.signal import butter, lfilter
#import librosa
# # sys.path.insert(0, '../01_DataPrep')
from scipy.io import loadmat
# sys.path.append('.')
import scipy as sp
import scipy.signal
##########################################################################################
def butter_bandpass(fmin, fmax, fs, order=5):
nyq = 0.5 * fs
low = fmin / nyq
high = fmax / nyq
b, a = butter(order, [low, high], btype='band')
return b, a
def butter_bandpass_filter(data, fmin, fmax, fs, order=5):
b, a = butter_bandpass(fmin, fmax, fs, order=order)
y = lfilter(b, a, data)
return y
# .oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo..oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo
##################################################################################################
##################################################################################################
# _ _ _ _ _ _ _
# | | (_) | | | | | | | | (_)
# | |__ __ _ ___ _ ___ __| | __ _| |_ __ _ _____ ___ __ | | ___ _ __ __ _| |_ _ ___ _ __
# | '_ \ / _` / __| |/ __| / _` |/ _` | __/ _` | / _ \ \/ / '_ \| |/ _ \| '__/ _` | __| |/ _ \| '_ \
# | |_) | (_| \__ \ | (__ | (_| | (_| | || (_| | | __/> <| |_) | | (_) | | | (_| | |_| | (_) | | | |
# |_.__/ \__,_|___/_|\___| \__,_|\__,_|\__\__,_| \___/_/\_\ .__/|_|\___/|_| \__,_|\__|_|\___/|_| |_|
# | |
# |_|
##################################################################################################
def getWF(evID,dataH5_path,station,channel,fmin,fmax,fs):
with h5py.File(dataH5_path,'a') as fileLoad:
wf_data = fileLoad[f'waveforms/{station}/{channel}'].get(str(evID))[:]
wf_filter = butter_bandpass_filter(wf_data, fmin,fmax,fs,order=4)
wf_zeromean = wf_filter - np.mean(wf_filter)
return wf_zeromean
def getSpectra(evID,station,path_proj,normed=True):
if normed == False:
##right now saving normed
try:
mat = loadmat(f'{path_proj}01_input/{station}/specMats_nonNormed/{evID}.mat')
except:
mat = loadmat(f'{path_proj}01_input/{station}/specMats/{evID}.mat')
else:
mat = loadmat(f'{path_proj}01_input/{station}/specMats/{evID}.mat')
specMat = mat.get('STFT')
matSum = specMat.sum(1)
return matSum,specMat
def getSpectra_fromWF(evID,dataH5_path,station,channel,normed=True):
## get WF from H5 and calc full sgram for plotting
with h5py.File(dataH5_path,'r') as dataFile:
wf_data = dataFile[f'waveforms/{station}/{channel}'].get(str(evID))[:]
fs = dataFile['spec_parameters/'].get('fs')[()]
# fmin =
nperseg = dataFile['spec_parameters/'].get('nperseg')[()]
noverlap = dataFile['spec_parameters/'].get('noverlap')[()]
nfft = dataFile['spec_parameters/'].get('nfft')[()]
fmax = dataFile['spec_parameters/'].get('fmax')[()]
fmax = np.ceil(fmax)
fmin = dataFile['spec_parameters/'].get('fmin')[()]
fmin = np.floor(fmin)
fSTFT = dataFile['spec_parameters/'].get('fSTFT')[()]
tSTFT = dataFile['spec_parameters/'].get('tSTFT')[()]
sgram_mode = dataFile['spec_parameters/'].get('mode')[()].decode('utf-8')
scaling = dataFile['spec_parameters/'].get('scaling')[()].decode('utf-8')
fs = int(np.ceil(fs))
fSTFT, tSTFT, STFT_0 = sp.signal.spectrogram(x=wf_data,
fs=fs,
nperseg=nperseg,
noverlap=noverlap,
#nfft=Length of the FFT used, if a zero padded FFT is desired
nfft=nfft,
scaling=scaling,
axis=-1,
mode=sgram_mode)
if normed:
STFT_norm = STFT_0 / np.median(STFT_0) ##norm by median
else:
STFT_norm = STFT_0
STFT_dB = 20*np.log10(STFT_norm, where=STFT_norm != 0) ##convert to dB
specMat = np.maximum(0, STFT_dB) #make sure nonnegative
specMatsum = specMat.sum(1)
return specMatsum,specMat,fSTFT
def getSpectraMedian(path_proj,cat00,k,station,normed=True):
catk = cat00[cat00.Cluster == k]
for j,evID in enumerate(catk.event_ID.iloc):
if normed==False:
matSum,specMat = getSpectra(evID,station,path_proj,normed=False)
else:
matSum,specMat = getSpectra(evID,station,path_proj,normed=True)
if j == 0:
specMatsum_med = np.zeros(len(matSum))
specMatsum_med = np.vstack([specMatsum_med,matSum])
specMatsum_med = np.median(specMatsum_med,axis=0)
return specMatsum_med
def getSgram(path_proj,evID,station,tSTFT=[0]):
mat = loadmat(f'{path_proj}01_input/{station}/specMats/{evID}.mat')
specMat = mat.get('STFT')
date = pd.to_datetime('200' + str(evID))
x = [date + dtt.timedelta(seconds=i) for i in tSTFT]
return specMat,x
def makeHourlyDF(ev_perhour_clus):
"""
Returns dataframe of events binned by hour of day
ev_perhour_resamp : pandas dataframe indexed by datetime
"""
ev_perhour_resamp = ev_perhour_clus.resample('H').event_ID.count()
hour_labels = list(ev_perhour_resamp.index.hour.unique())
hour_labels.sort()
#
ev_perhour_resamp_list = list(np.zeros(len(hour_labels)))
ev_perhour_mean_list = list(np.zeros(len(hour_labels)))
hour_index = 0
for ho in range(len(hour_labels)):
hour_name = hour_labels[hour_index]
ev_count = 0
# print(hour_name)
for ev in range(len(ev_perhour_resamp)):
if ev_perhour_resamp.index[ev].hour == hour_name:
ev_perhour_resamp_list[ho] += ev_perhour_resamp[ev]
ev_count += 1
# print(str(ev_count) + ' events in hour #' + str(hour_name))
ev_perhour_mean_list[ho] = ev_perhour_resamp_list[ho] / ev_count
hour_index += 1
##TS 2021/06/17 -- TS adjust hours here to CET
hour_labels = [h + 2 for h in hour_labels]
hour_labels[hour_labels==24] = 0
hour_labels[hour_labels==25] = 1
ev_perhour_resamp_df = pd.DataFrame({ 'EvPerHour' : ev_perhour_resamp_list,
'MeanEvPerHour' : ev_perhour_mean_list},
index=hour_labels)
ev_perhour_resamp_df['Hour'] = hour_labels
return ev_perhour_resamp_df
def getDailyTempDiff(garciaDF_H,garciaDF_D,**plt_kwargs):
tstart = plt_kwargs['tstartreal']
tend = plt_kwargs['tendreal']
garciaDF_H1 = garciaDF_H[garciaDF_H.datetime>=tstart]
garciaDF_H1 = garciaDF_H1[garciaDF_H1.datetime<tend]
garciaDF_D1 = garciaDF_D[garciaDF_D.datetime>=tstart]
garciaDF_D1 = garciaDF_D1[garciaDF_D1.datetime<tend]
temp_H = garciaDF_H1.tempC
temp_H_a = np.array(temp_H)
temp_H_a_r = temp_H_a.reshape(len(garciaDF_D1),24)
mean_diff = []
for i in range(len(temp_H_a_r[:,0])):
# plt.plot(temp_H_a_r[i,:] - garciaDF_D1.temp_D.iloc[i])
mean_diff.append(temp_H_a_r[i,:] - garciaDF_D1.temp_D.iloc[i])
mean_mean_diff = np.mean(mean_diff,axis=0)
return mean_mean_diff
##################################################################################################
# .oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo..oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo.oOo
##################################################################################################
def getFeatures(catalog,filetype,fmin,fmax,fs,path_WF,nfft,dataH5_path,station,channel):
columns=['event_ID','datetime','datetime_index','Cluster','RSAM','SC','P2P','VAR']
df = | pd.DataFrame(columns=columns) | pandas.DataFrame |
"""
Copyright (c) 2021, Electric Power Research Institute
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of DER-VET nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
Finances.py
This Python class contains methods and attributes vital for completing financial analysis given optimal dispathc.
"""
from storagevet.Finances import Financial
import numpy as np
import copy
import pandas as pd
from storagevet.ErrorHandling import *
SATURDAY = 5
class CostBenefitAnalysis(Financial):
""" This Cost Benefit Analysis Module
"""
def __init__(self, financial_params, start_year, end_year):
""" Initialize CBA model and edit any attributes that the user denoted a separate value
to evaluate the CBA with
Args:
financial_params (dict): parameter dictionary as the Params class created
"""
super().__init__(financial_params, start_year, end_year)
self.horizon_mode = financial_params['analysis_horizon_mode']
self.location = financial_params['location']
self.ownership = financial_params['ownership']
self.state_tax_rate = financial_params['state_tax_rate']/100
self.federal_tax_rate = financial_params['federal_tax_rate']/100
self.property_tax_rate = financial_params['property_tax_rate']/100
self.ecc_mode = financial_params['ecc_mode']
self.ecc_df = pd.DataFrame()
self.equipment_lifetime_report = pd.DataFrame()
self.tax_calculations = None
self.Scenario = financial_params['CBA']['Scenario']
self.Finance = financial_params['CBA']['Finance']
self.valuestream_values = financial_params['CBA']['valuestream_values']
self.ders_values = financial_params['CBA']['ders_values']
if 'Battery' in self.ders_values.keys():
self.ders_values['Battery'] = self.ders_values.pop('Battery')
if 'CAES' in self.ders_values.keys():
self.ders_values['CAES'] = self.ders_values.pop('CAES')
self.value_streams = {}
self.ders = []
self.macrs_depreciation = {
3: [33.33, 44.45, 14.81, 7.41],
5: [20, 32, 19.2, 11.52, 11.52, 5.76],
7: [14.29, 24.49, 17.49, 12.49, 8.93, 8.92, 8.93, 4.46],
10: [10, 18, 14.4, 11.52, 9.22, 7.37, 6.55, 6.55, 6.56, 6.55,
3.28],
15: [5, 9.5, 8.55, 7.7, 6.83, 6.23, 5.9, 5.9, 5.91, 5.9,
5.91, 5.9, 5.91, 5.9, 5.91, 2.95],
20: [3.75, 7.219, 6.677, 6.177, 5.713, 5.285, 4.888, 4.522, 4.462, 4.461,
4.462, 4.461, 4.462, 4.461, 4.462, 4.461, 4.462, 4.461, 4.462, 4.461,
2.231]
}
def find_end_year(self, der_list):
""" This method looks at the analysis horizon mode and sets up the CBA class
for the indicated mode
Args:
der_list (list): list of DERs initialized with user values
Returns: pandas Period representation of the year that DERVET will end CBA analysis
"""
project_start_year = self.start_year
user_given_end_year = self.end_year
# (1) User-defined (this should continue to be default)
if self.horizon_mode == 1:
self.end_year = user_given_end_year
# (2) Auto-calculate based on shortest equipment lifetime. (No size optimization)
if self.horizon_mode == 2:
shortest_lifetime = 1000 # no technology should last 1000 years -- so this is safe to hardcode
for der_instance in der_list:
shortest_lifetime = min(der_instance.expected_lifetime, shortest_lifetime)
if der_instance.being_sized():
TellUser.error("Analysis horizon mode == 'Auto-calculate based on shortest equipment lifetime', DER-VET will not size any DERs " +
f"when this horizon mode is selected. {der_instance.name} is being sized. Please resolve and rerun.")
self.end_year = pd.Period(year=0, freq='y') # cannot preform size optimization with mode==2
self.end_year = project_start_year + shortest_lifetime-1
# (3) Auto-calculate based on longest equipment lifetime. (No size optimization)
if self.horizon_mode == 3:
longest_lifetime = 0
for der_instance in der_list:
if der_instance.technology_type != 'Load':
longest_lifetime = max(der_instance.expected_lifetime, longest_lifetime)
if der_instance.being_sized():
TellUser.error("Analysis horizon mode == 'Auto-calculate based on longest equipment lifetime', DER-VET will not size any DERs " +
f"when this horizon mode is selected. {der_instance.name} is being sized. Please resolve and rerun.")
self.end_year = pd.Period(year=0, freq='y') # cannot preform size optimization with mode==3
self.end_year = project_start_year + longest_lifetime-1
return self.end_year
def ecc_checks(self, der_list, service_dict):
"""
Args:
der_list: list of ders
service_dict: dictionary of services
Returns:
"""
# require that ownership model is Utility TODO
# check that a service in this set: {Reliability, Deferral} - the union of the 2 sets should not be 0
if not len(set(service_dict.keys()) & {'Reliability', 'Deferral'}):
TellUser.error(f"An ecc analysis does not make sense for the case you selected. A reliability or asset deferral case" +
"would be better suited for economic carrying cost analysis")
raise ModelParameterError("The combination of services does not work with the rest of your case settings. " +
"Please see log file for more information.")
# require that e < d
for der_inst in der_list:
conflict_occured = False
if der_inst.escalation_rate >= self.npv_discount_rate:
conflict_occured = True
TellUser.error(f"The technology escalation rate ({der_inst.escalation_rate}) cannot be greater " +
f"than the project discount rate ({self.npv_discount_rate}). Please edit the 'ter' value for {der_inst.name}.")
if conflict_occured:
raise ModelParameterError("TER and discount rates conflict. Please see log file for more information.")
@staticmethod
def get_years_before_and_after_failures(end_year, der_list):
""" The optimization should be re-run for every year an 'unreplacable' piece of equipment fails before the
lifetime of the longest-lived equipment. No need to re-run the optimization if equipment fails in some
year and is replaced.
Args:
end_year (pd.Period): the last year the project is operational
der_list (list): list of DERs initialized with user values
Returns: list of the year(s) after an 'unreplacable' DER fails/reaches its end of life
"""
rerun_opt_on = []
for der_instance in der_list:
last_operation_year = None
if der_instance.tag == 'Battery' and der_instance.incl_cycle_degrade:
# ignore battery's failure years as defined by user if user wants to include degradation in their analysis
# instead set it to be the project's last year+1
last_operation_year = end_year.year
yrs_failed = der_instance.set_failure_years(end_year, last_operation_year)
if not der_instance.replaceable:
# if the DER is not replaceable then add the following year to the set of analysis years
rerun_opt_on += yrs_failed
# filter out any years beyond end_year
rerun_opt_on = [year for year in rerun_opt_on if year < end_year.year]
# add years that the operational DER mix will change (year after last year of operation)
rerun_opt_on += [year+1 for year in rerun_opt_on if year < end_year.year]
return list(set(rerun_opt_on)) # get rid of any duplicates
def annuity_scalar(self, opt_years):
"""Calculates an annuity scalar, used for sizing, to convert yearly costs/benefits
this method is sometimes called before the class is initialized (hence it has to be
static)
Args:
opt_years (list): List of years that the user wants to optimize--should be length=1
Returns: the NPV multiplier
"""
n = self.end_year.year - self.start_year.year
dollar_per_year = np.ones(n)
base_year = min(opt_years)
yr_index = base_year - self.start_year.year
while yr_index < n - 1:
dollar_per_year[yr_index + 1] = dollar_per_year[yr_index] * (1 + self.inflation_rate)
yr_index += 1
yr_index = base_year - self.start_year.year
while yr_index > 0:
dollar_per_year[yr_index - 1] = dollar_per_year[yr_index] * (1 / (1 + self.inflation_rate))
yr_index -= 1
lifetime_npv_alpha = np.npv(self.npv_discount_rate, [0] + dollar_per_year)
return lifetime_npv_alpha
def calculate(self, technologies, value_streams, results, opt_years):
""" this function calculates the proforma, cost-benefit, npv, and payback using the optimization variable results
saved in results and the set of technology and service instances that have (if any) values that the user indicated
they wanted to use when evaluating the CBA.
Instead of using the technologies and services as they are passed in from the call in the Results class, we will pass
the technologies and services with the values the user denoted to be used for evaluating the CBA.
Args:
technologies (list): all active technologies (provided access to ESS, generators, renewables to get capital and om costs)
value_streams (Dict): Dict of all services to calculate cost avoided or profit
results (DataFrame): DataFrame of all the concatenated timseries_report() method results from each DER
and ValueStream
opt_years (list)
"""
self.initiate_cost_benefit_analysis(technologies, value_streams)
super().calculate(self.ders, self.value_streams, results, opt_years)
self.create_equipment_lifetime_report(self.ders)
def initiate_cost_benefit_analysis(self, technologies, valuestreams):
""" Prepares all the attributes in this instance of cbaDER with all the evaluation values.
This function should be called before any finacial methods so that the user defined evaluation
values are used
Args:
technologies (list): the management point of all active technology to access (needed to get capital and om costs)
valuestreams (Dict): Dict of all services to calculate cost avoided or profit
"""
# we deep copy because we do not want to change the original ValueStream objects
self.value_streams = copy.deepcopy(valuestreams)
self.ders = copy.deepcopy(technologies)
self.place_evaluation_data()
def place_evaluation_data(self):
""" Place the data specified in the evaluation column into the correct places. This means all the monthly data,
timeseries data, and single values are saved in their corresponding attributes within whatever ValueStream and DER
that is active and has different values specified to evaluate the CBA with.
"""
monthly_data = self.Scenario.get('monthly_data')
time_series = self.Scenario.get('time_series')
if time_series is not None or monthly_data is not None:
for value_stream in self.value_streams.values():
value_stream.update_price_signals(monthly_data, time_series)
if 'customer_tariff' in self.Finance:
self.tariff = self.Finance['customer_tariff']
if 'User' in self.value_streams.keys():
self.update_with_evaluation(self.value_streams['User'], self.valuestream_values['User'], self.verbose)
for der_inst in self.ders:
der_tag = der_inst.tag
der_id = der_inst.id
evaluation_inputs = self.ders_values.get(der_tag, {}).get(der_id)
if evaluation_inputs is not None:
der_inst.update_for_evaluation(evaluation_inputs)
@staticmethod
def update_with_evaluation(param_object, evaluation_dict, verbose):
"""Searches through the class variables (which are dictionaries of the parameters with values to be used in the CBA)
and saves that value
Args:
param_object (DER, ValueStream): the actual object that we want to edit
evaluation_dict (dict, None): keys are the string representation of the attribute where value is saved, and values
are what the attribute value should be
verbose (bool): true or fla
Returns: the param_object with attributes set to the evaluation values instead of the optimization values
"""
if evaluation_dict: # evaluates true if dict is not empty and the value is not None
for key, value in evaluation_dict.items():
try:
setattr(param_object, key, value)
TellUser.debug('attribute (' + param_object.name + ': ' + key + ') set: ' + str(value))
except KeyError:
TellUser.debug('No attribute ' + param_object.name + ': ' + key)
def proforma_report(self, technologies, valuestreams, results, opt_years):
""" Calculates and returns the proforma
Args:
technologies (list): list of technologies (needed to get capital and om costs)
valuestreams (Dict): Dict of all services to calculate cost avoided or profit
results (DataFrame): DataFrame of all the concatenated timseries_report() method results from each DER
and ValueStream
opt_years (list)
Returns: dataframe proforma
"""
proforma = super().proforma_report(technologies, valuestreams, results, opt_years)
proforma_wo_yr_net = proforma.drop('Yearly Net Value', axis=1)
proforma = self.replacement_costs(proforma_wo_yr_net, technologies)
proforma = self.zero_out_dead_der_costs(proforma, technologies)
proforma = self.update_capital_cost_construction_year(proforma, technologies)
# check if there are are costs on CAPEX YEAR - if there arent, then remove it from proforma
if not proforma.loc['CAPEX Year', :].any():
proforma.drop('CAPEX Year', inplace=True)
# add EOL costs to proforma
der_eol = self.calculate_end_of_life_value(proforma, technologies, self.inflation_rate,
opt_years)
proforma = proforma.join(der_eol)
if self.ecc_mode:
for der_inst in technologies:
if der_inst.tag == "Load":
continue
# replace capital cost columns with economic_carrying cost
der_ecc_df, total_ecc = der_inst.economic_carrying_cost_report(
self.inflation_rate, self.end_year, self.apply_rate)
# drop original Capital Cost
proforma.drop(columns=[der_inst.zero_column_name()], inplace=True)
# drop any replacement costs
if f"{der_inst.unique_tech_id()} Replacement Costs" in proforma.columns:
proforma.drop(columns=[f"{der_inst.unique_tech_id()} Replacement Costs"], inplace=True)
# add the ECC to the proforma
proforma = proforma.join(total_ecc)
# add ECC costs broken out by when initial cost occurs to complete DF
self.ecc_df = pd.concat([self.ecc_df, der_ecc_df], axis=1)
else:
proforma = self.calculate_taxes(proforma, technologies)
# sort alphabetically
proforma.sort_index(axis=1, inplace=True)
proforma.fillna(value=0, inplace=True)
# recalculate the net (sum of the row's columns)
proforma['Yearly Net Value'] = proforma.sum(axis=1)
return proforma
def replacement_costs(self, proforma, technologies):
""" takes the proforma and adds cash flow columns that represent any tax that was received or paid
as a result
Args:
proforma (DataFrame): Pro-forma DataFrame that was created from each ValueStream or DER active
technologies (list): Dict of technologies (needed to get capital and om costs)
"""
replacement_df = pd.DataFrame()
for der_inst in technologies:
temp = der_inst.replacement_report(self.end_year, self.apply_rate)
if temp is not None and not temp.empty:
replacement_df = pd.concat([replacement_df, temp], axis=1)
proforma = proforma.join(replacement_df)
proforma = proforma.fillna(value=0)
return proforma
def zero_out_dead_der_costs(self, proforma, technologies):
""" Determines years of the project that a DER is past its expected lifetime, then
zeros out the costs for those years (for each DER in the project)
Args:
proforma:
technologies:
Returns: updated proforma
"""
no_more_der_yr = 0
for der_isnt in technologies:
last_operating_year = der_isnt.last_operation_year
if der_isnt.tag != 'Load':
no_more_der_yr = max(no_more_der_yr, last_operating_year.year)
if not der_isnt.replaceable and self.end_year > last_operating_year:
column_mask = proforma.columns.str.contains(der_isnt.unique_tech_id(), regex=False)
proforma.loc[last_operating_year + 1:, column_mask] = 0
# zero out all costs and benefits after the last equipement piece fails
if self.end_year.year >= no_more_der_yr + 1 >= self.start_year.year:
proforma.loc[ | pd.Period(no_more_der_yr + 1, freq='y') | pandas.Period |
import logging
log = logging.getLogger(__name__)
from scale_client.core.sensed_event import SensedEvent
import pandas as pd
import json
DEFAULT_TIMEZONE='America/Los_Angeles'
class ParsedSensedEvents(pd.DataFrame):
"""
Parses the SensedEvent output file from a SCALE client app and stores it in a pandas.DataFrame
for later manipulation and aggregation with the other client outputs.
NOTE: any columns with labels matching 'time*' will be converted to pandas.Timestamp with the expectation that
they're in Unix epoch format!
"""
def __init__(self, data, timezone=DEFAULT_TIMEZONE, **kwargs):
"""
Parses the given data into a dict of events recorded by the client
and passes the resulting data into a pandas.DataFrame with additional columns specified by kwargs,
which can include e.g. experimental treatments, host IP address, etc.
:param data: raw string containing JSON object-like data e.g. nested dicts/lists
:type data: str
:param timezone: the timezone to use for converting time columns (default='America/Los_Angeles'); set to None to disable conversion
:param kwargs: additional static values for columns to distinguish this group of events from others
e.g. host_id, host_ip
"""
# NOTE: can't save any attributes until after we run super constructor!
data = self.parse_data(data, **kwargs)
# sub-classes may extract the column data in different ways depending on the output format
columns = self.extract_columns(data)
columns.update(kwargs)
self.convert_columns(columns, timezone=timezone)
super(ParsedSensedEvents, self).__init__(columns)
def parse_data(self, data, **params):
"""Override this to parse the raw data string using a format other than JSON. params is ignored by default,
but corresponds to the kwargs in the constructor."""
return json.loads(data)
def extract_columns(self, data, parse_metadata=True):
"""
Extracts the important columns from the given list of SensedEvents
:param data:
:type data: list[dict]
:param parse_metadata: if True (default), include columns for the metadata
:return:
"""
# QUESTION: how to handle empty results???
events = [SensedEvent.from_map(e) for e in data]
cols = {'topic': [ev.topic for ev in events],
'time_sent': [ev.timestamp for ev in events],
# TODO: might not even want this? what to do with it? the 'scale-local:/' part makes it less useful...
'source': [ev.source for ev in events],
'value': [ev.data for ev in events],
}
# Include the metadata in case it has something valuable for us.
# We have to gather up all unique keys first to ensure each row has all the needed columns so they line up.
metadata_keys = set()
for ev in events:
for k in ev.metadata:
metadata_keys.add(k)
cols.update({
k: [ev.metadata.get(k) for ev in events] for k in metadata_keys
})
return cols
def convert_columns(self, columns, timezone=None):
"""
Converts the columns to more specific pandas data types: this is where we convert time columns.
:param columns:
:param timezone: the timezone info to use; to disable this conversion set it to None
"""
for k, val in columns.items():
# XXX: any columns starting with 'time' should be converted to pandas.Timestamp!
if k.startswith('time') and timezone:
columns[k] = | pd.to_datetime(val, unit='s') | pandas.to_datetime |
import pandas as pd
import numpy as np
from pathlib import Path
from decimal import Decimal
import webbrowser
SUM_NAME = 'max_score'
def trueround_precision(number, places=0, rounding=None)->Decimal:
'''
trueround_precision(number, places, rounding=ROUND_HALF_UP)
Uses true precision for floating numbers using the 'decimal' module in
python and assumes the module has already been imported before calling
this function. The return object is of type Decimal.
All rounding options are available from the decimal module including
ROUND_CEILING, ROUND_DOWN, ROUND_FLOOR, ROUND_HALF_DOWN, ROUND_HALF_EVEN,
ROUND_HALF_UP, ROUND_UP, and ROUND_05UP.
examples:
>>> trueround(2.5, 0) == Decimal('3')
True
>>> trueround(2.5, 0, ROUND_DOWN) == Decimal('2')
True
number is a floating point number or a string type containing a number on
on which to be acted.
places is the number of decimal places to round to with '0' as the default.
Note: if type float is passed as the first argument to the function, it
will first be converted to a str type for correct rounding.
GPL 2.0
copywrite by <NAME> <<EMAIL>>
'''
from decimal import ROUND_HALF_UP
from decimal import ROUND_CEILING
from decimal import ROUND_DOWN
from decimal import ROUND_FLOOR
from decimal import ROUND_HALF_DOWN
from decimal import ROUND_HALF_EVEN
from decimal import ROUND_UP
from decimal import ROUND_05UP
if type(number) == type(float()):
number = str(number)
if rounding == None:
rounding = ROUND_HALF_UP
place = '1.'
for i in range(places):
place = ''.join([place, '0'])
return Decimal(number).quantize(Decimal(place), rounding=rounding)
def cronbach_alpha(items: pd.DataFrame):
'''Cronbach’s alpha信度系数
items: 题目数据, 每列是一个题目'''
items_count = items.shape[1]
variance_sum = items.var(axis=0, ddof=1).sum()
total_var = float(items.sum(axis=1).var(ddof=1))
return (items_count / float(items_count - 1) *
(1 - variance_sum / total_var))
def cronbach_alpha_std(items: pd.DataFrame):
'''Cronbach’s alpha信度系数
items: 题目数据, 每列是一个题目'''
items_count = items.shape[1]
corr = items.corr('pearson')
index = np.triu_indices(items_count)
corr.values[index] = 0
s = corr.sum().sum()
n = items_count * (items_count-1)/2
mean = s / n
return items_count * mean / (1+(items_count-1)*mean)
def factor_scores(items: pd.DataFrame, factors: pd.DataFrame):
'''计算平均数'''
fs = []
for factor, group in factors.groupby('factor'):
items[factor] = items[group['item'].values].mean(axis=1)
fs.append(factor)
items['sum_factors'] = items[fs].sum(axis=1)
return items[fs+['sum_factors']]
def factor_corr(fscores):
return fscores.corr()
def difficulty(items: pd.DataFrame, max_scores: pd.DataFrame):
diff = pd.Series([None]*items.shape[1], index=max_scores['item'])
max_scores = max_scores.set_index('item')[SUM_NAME]
max_scores = max_scores.to_dict()
for i in max_scores:
n = items[i].count()
s = items[i].sum()
diff.loc[i] = s / (n*max_scores[i])
return diff
def distinction(items: pd.DataFrame):
dist = pd.Series([None]*items.shape[1], index=items.columns)
total = items.sum(axis=1)
for i in items.columns:
item = items[i]
dist.loc[i]=np.corrcoef(total.values, item.values)[0, 1]
return dist
def draw_diff_dist(data: pd.DataFrame, filename):
print(data)
ax = data.plot()
ax.set_xticks(range(data.shape[0]))
ax.set_xticklabels(list(data.index), ha="center", rotation = 90)
ax.get_figure().savefig(filename)
def diff_table(diff: pd.Series):
diff = diff.map(lambda x: trueround_precision(x, 3))
head = '<tr><td>难度</td><td>难度描述</td><td>题目数量</td></tr>'
rtn = [head,]
groups = [
(0, 0.199),
(0.2, 0.399),
(0.4, 0.699),
(0.7, 0.799),
(0.8, 1)
]
labels = ['难','较难','中等','较易','容易',]
i = 0
for g in groups:
label = labels[i]
n = sum((diff >= g[0]) &( diff <=g[1]))
i += 1
row = f'<tr><td>{g[0]}~{g[1]}</td><td>{label}</td><td>{n}</td></tr>'
rtn.append(row)
discribe = {}
discribe['最大难度值'] = diff.max()
discribe['最小难度值'] = diff.min()
discribe['平均难度值'] = diff.mean()
for k,v in discribe.items():
row = f'<tr><td>{k}</td><td>{v}</td></tr>'
rtn.append(row)
rows = '\n'.join(rtn)
return f'<table class="table table-striped">{rows}</table>'
def dist_table(diff: pd.Series):
diff = diff.map(lambda x: trueround_precision(x, 3))
head = '<tr><td>区分度</td><td>区分度描述</td><td>题目数量</td></tr>'
rtn = [head,]
groups = [
(0, 0.199),
(0.2, 0.299),
(0.3, 0.399),
(0.4, 1)
]
labels = ['需要修改','修改之后会更好','合格','较好',]
i = 0
for g in groups:
label = labels[i]
n = sum((diff >= g[0]) &( diff <=g[1]))
i += 1
row = f'<tr><td>{g[0]}~{g[1]}</td><td>{label}</td><td>{n}</td></tr>'
rtn.append(row)
discribe = {}
discribe['最大区分度值'] = diff.max()
discribe['最小区分度值'] = diff.min()
discribe['平均区分度值'] = diff.mean()
for k,v in discribe.items():
row = f'<tr><td>{k}</td><td>{v}</td></tr>'
rtn.append(row)
rows = '\n'.join(rtn)
return f'<table class="table table-striped">{rows}</table>'
def item_quality(data: pd.DataFrame, diff_dist: pd.DataFrame):
diff_dist.columns = ['难度', '区分度']
diff_dist['删除该题后的试卷信度'] = None
for i in data.columns:
cols = [c for c in data.columns if c != i]
subdf = data[cols]
r = cronbach_alpha(subdf)
diff_dist.loc[i, '删除该题后的试卷信度']=r
for c in diff_dist.columns:
diff_dist[c] = diff_dist[c].map(lambda x: trueround_precision(x, 3))
return diff_dist
def generate_report(data: pd.DataFrame, factors: pd.DataFrame, outpath: Path):
template = Path(__file__).parent.absolute() / 'template.html'
template = template.read_text(encoding='utf8')
reliability=cronbach_alpha(data)
fscores = factor_scores(data, factors)
pearson = factor_corr(fscores).to_html()
pearson = pearson.replace('class="dataframe"', 'class="table table-striped"')
img_path = outpath / 'diff-dist.png'
diff = difficulty(data[factors['item']], factors[['item', 'max_score']])
dist = distinction(data[factors['item']])
diff_dist = pd.concat([diff, dist], axis=1)
diff_dist.columns = ['difficulty', 'distinction']
draw_diff_dist(diff_dist, str(img_path))
diff_dis_img = f'<img src="file://{img_path}" />'
diff_table_ = diff_table(diff)
dist_table_ = dist_table(dist)
item_quality_ = item_quality(data[factors['item']], diff_dist).to_html()
item_quality_ = item_quality_.replace('class="dataframe"', 'class="table table-striped"')
html = template.format(
reliability=reliability,
pearson=pearson,
diff_dis_img=diff_dis_img,
diff_table = diff_table_,
dist_table = dist_table_,
item_quality = item_quality_
)
report_path = outpath / 'report.html'
Path(report_path).write_text(html, encoding='utf8')
def read_excel(fpath):
data = | pd.read_excel(fpath, 'data') | pandas.read_excel |
import numpy as np
import pytest
from pandas import Categorical, Series
import pandas._testing as tm
@pytest.mark.parametrize(
"keep, expected",
[
("first", Series([False, False, False, False, True, True, False])),
("last", Series([False, True, True, False, False, False, False])),
(False, Series([False, True, True, False, True, True, False])),
],
)
def test_drop_duplicates(any_numpy_dtype, keep, expected):
tc = Series([1, 0, 3, 5, 3, 0, 4], dtype=np.dtype(any_numpy_dtype))
if tc.dtype == "bool":
pytest.skip("tested separately in test_drop_duplicates_bool")
tm.assert_series_equal(tc.duplicated(keep=keep), expected)
tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected])
sc = tc.copy()
sc.drop_duplicates(keep=keep, inplace=True)
tm.assert_series_equal(sc, tc[~expected])
@pytest.mark.parametrize(
"keep, expected",
[
("first", Series([False, False, True, True])),
("last", Series([True, True, False, False])),
(False, Series([True, True, True, True])),
],
)
def test_drop_duplicates_bool(keep, expected):
tc = Series([True, False, True, False])
tm.assert_series_equal(tc.duplicated(keep=keep), expected)
tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected])
sc = tc.copy()
sc.drop_duplicates(keep=keep, inplace=True)
tm.assert_series_equal(sc, tc[~expected])
@pytest.mark.parametrize("values", [[], list(range(5))])
def test_drop_duplicates_no_duplicates(any_numpy_dtype, keep, values):
tc = Series(values, dtype=np.dtype(any_numpy_dtype))
expected = Series([False] * len(tc), dtype="bool")
if tc.dtype == "bool":
# 0 -> False and 1-> True
# any other value would be duplicated
tc = tc[:2]
expected = expected[:2]
tm.assert_series_equal(tc.duplicated(keep=keep), expected)
result_dropped = tc.drop_duplicates(keep=keep)
tm.assert_series_equal(result_dropped, tc)
# validate shallow copy
assert result_dropped is not tc
class TestSeriesDropDuplicates:
@pytest.mark.parametrize(
"dtype",
["int_", "uint", "float_", "unicode_", "timedelta64[h]", "datetime64[D]"],
)
def test_drop_duplicates_categorical_non_bool(self, dtype, ordered_fixture):
cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype))
# Test case 1
input1 = np.array([1, 2, 3, 3], dtype=np.dtype(dtype))
tc1 = Series(Categorical(input1, categories=cat_array, ordered=ordered_fixture))
if dtype == "datetime64[D]":
# pre-empty flaky xfail, tc1 values are seemingly-random
if not (np.array(tc1) == input1).all():
pytest.xfail(reason="GH#7996")
expected = Series([False, False, False, True])
tm.assert_series_equal(tc1.duplicated(), expected)
tm.assert_series_equal(tc1.drop_duplicates(), tc1[~expected])
sc = tc1.copy()
sc.drop_duplicates(inplace=True)
tm.assert_series_equal(sc, tc1[~expected])
expected = Series([False, False, True, False])
tm.assert_series_equal(tc1.duplicated(keep="last"), expected)
tm.assert_series_equal(tc1.drop_duplicates(keep="last"), tc1[~expected])
sc = tc1.copy()
sc.drop_duplicates(keep="last", inplace=True)
tm.assert_series_equal(sc, tc1[~expected])
expected = Series([False, False, True, True])
tm.assert_series_equal(tc1.duplicated(keep=False), expected)
tm.assert_series_equal(tc1.drop_duplicates(keep=False), tc1[~expected])
sc = tc1.copy()
sc.drop_duplicates(keep=False, inplace=True)
tm.assert_series_equal(sc, tc1[~expected])
# Test case 2
input2 = np.array([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype(dtype))
tc2 = Series(Categorical(input2, categories=cat_array, ordered=ordered_fixture))
if dtype == "datetime64[D]":
# pre-empty flaky xfail, tc2 values are seemingly-random
if not (np.array(tc2) == input2).all():
pytest.xfail(reason="GH#7996")
expected = Series([False, False, False, False, True, True, False])
tm.assert_series_equal(tc2.duplicated(), expected)
tm.assert_series_equal(tc2.drop_duplicates(), tc2[~expected])
sc = tc2.copy()
sc.drop_duplicates(inplace=True)
| tm.assert_series_equal(sc, tc2[~expected]) | pandas._testing.assert_series_equal |
# -*- coding: utf-8 -*-
"""
Created on Sun May 2 14:45:13 2021
@author: saidsa
"""
from MacroData import get_Fama_French_ts
from Utils import OLS_regression
from Utils import align_date_index
from MacroData import get_Fama_French_Mkt_Return
import numpy as np
import pandas as pd
def _DEP_CAPM(stock_obj):
# Run OLS regression of stock returns against Fama French Rm-Rf => returns
# a value for beta of your stock, and a ts of epsilon
# Use epsilon ts to get var of epsilon (value)
# Get fama french Rf, Rm-Rf and Rm, compute avg and var of these
# CAPM => Stock_Exp_Ret and Stock_Exp_Var
stock_returns = stock_obj['PriceClose'].pct_change(1).dropna()
Mkt_Rf_Return_ts = get_Fama_French_ts('Mkt-RF')
Mkt_Rf_Return_ts.index = Mkt_Rf_Return_ts.index.map(lambda x: x.date())
X, Y = align_date_index(obj_1=Mkt_Rf_Return_ts.to_frame(), obj_2=stock_returns)
regression_dict = OLS_regression(X=X ,Y=Y , add_constant=True)
beta_stock = regression_dict['Beta_hat']['Mkt-RF']
var_epsilon_stock = regression_dict['Epsilon_hat'].var()
Mkt_Return_ts = get_Fama_French_Mkt_Return()
Rf_Return_ts = get_Fama_French_ts('RF')
Mkt_Avg_Return = Mkt_Return_ts.mean()
Mkt_Var_Return = Mkt_Return_ts.var()
Rf_Avg_Return = Rf_Return_ts.mean()
Stock_Expected_return = Rf_Avg_Return + (Mkt_Avg_Return - Rf_Avg_Return) * beta_stock
Stock_Expected_var = Mkt_Var_Return * (beta_stock ** 2) + var_epsilon_stock
return Stock_Expected_return, Stock_Expected_var
def CAPM(stock_obj_arr, window = 126):
# Given a stock_obj_arr, we get returns of these stocks
# We align index ts
# We use the Rf as a constant over the regression window
# We assume expected value of market returns as a simple avg over the regression
# window. We assume the same for market_var
# We run a rolling regression for every stock to get its market betas and
# idiosyncratic risk. Finally we construct the CAPM model to get the:
# Expected returns
# VAR_COVAR matrix
# Systematic and Idiosyncratic VARCOVAR matrix
Mkt_Rf_Return_ts = get_Fama_French_ts('Mkt-RF')
Mkt_Rf_Return_ts.index = Mkt_Rf_Return_ts.index.map(lambda x: x.date())
Mkt_Return_ts = get_Fama_French_Mkt_Return()
Mkt_Return_ts.index = Mkt_Return_ts.index.map(lambda x: x.date())
Rf_Return_ts = get_Fama_French_ts('RF')
Rf_Return_ts.index = Rf_Return_ts.index.map(lambda x: x.date())
arr = [Mkt_Rf_Return_ts, Mkt_Return_ts, Rf_Return_ts]
for obj in stock_obj_arr:
stock_returns = obj['PriceClose'].pct_change(1).dropna()
arr.append(stock_returns)
aligned_arr = align_date_index(arr)
# Window loop
total_observations = len(aligned_arr[0])
nbr_regressions = total_observations - window
stock_labels = [obj.ticker for obj in stock_obj_arr]
regression_output_dates = aligned_arr[0].index[window:]
Expected_Returns_df = pd.DataFrame(columns=stock_labels, index=regression_output_dates)
Covar_Returns_dict = {dt: pd.DataFrame(index=stock_labels, columns=stock_labels) for dt in regression_output_dates}
Sys_Covar_Returns_dict = {dt: | pd.DataFrame(index=stock_labels, columns=stock_labels) | pandas.DataFrame |
# coding: utf-8
# # Classification des Iris en utilisant tensorflow
# # I - Introduction
#
# ---
# #### Objectif
# <div style="text-align:justify;">L'objectif est de suivre un projet de Machine du concept à son intégration. Nous allons donc partir d'une base de données simple existant déjà sur internet. Nous allons ensuite concevoir un classificateur multiclasse à l'aide de tensorflow et mettre ce modèle en place sur une application mobile.</div>
#
# #### La base de données
# <div style="text-align:justify;">Nous allons utiliser la base de données de classification d'Iris du [site Kaggle](https://www.kaggle.com/uciml/iris). Dans cette base de données, il existe 3 labels: Iris-setosa, Iris-versicolor
# et Iris-virginica. Ces labels correspondent aux espèces d'Iris que nous souhaitons différencier. La base de données contient la largeur ainsi que la longueur des pétales et des sépales de 150 plantes.</div>
# # II - Génération du modèle
#
# ---
# ## 1. Exploration de la base de données
# In[1]:
import pandas as pd # Data Structure
import seaborn as sns # Data Vizualisation
# On commence par importer la base de données à l'aide de **pandas**.
# In[2]:
datas = pd.read_csv("datas/Iris.csv")
# In[3]:
display(datas.head())
print("Shape: {}".format(datas.shape))
# On utilise **seaborn** pour explorer graphiquement les données.
# In[4]:
g=sns.pairplot(datas, hue="Species", size=2.5)
# ## 2. Data Preprocessing
# ### 2.1 Drop Id
# L'id n'est d'aucune utilité, on s'en débarasse donc dès le début.
# In[5]:
datas.drop("Id", axis=1, inplace=True)
# ### 2.2 Séparation labels/features
# In[6]:
# On récupère les noms de toutes les colonnes
cols=datas.columns
# On les sépare
features = cols[0:4]
labels = cols[4]
print("Liste des features:")
for k in features:
print("- {}".format(k))
print("\nLabel: {}".format(labels))
# ### 2.3 Mélange des données
# In[7]:
import numpy as np # Manipulation de listes
# **numpy** est utilisé ici pour mélanger la base de données.
# In[8]:
indices = datas.index.tolist()
indices = np.array(indices)
np.random.shuffle(indices)
X = datas.reindex(indices)[features]
y = datas.reindex(indices)[labels]
# ### 2.4 Categorical to numerical
# On convertit les valeurs des labels qui sont des catégories en valeurs numériques pour être intérprétées par notre algorithme.
# In[9]:
y.head()
# In[10]:
from pandas import get_dummies
# In[11]:
y= | get_dummies(y) | pandas.get_dummies |
"""
Contains helper functions and class Etym, which are called internally.
Some of the functions may also be useful for the user in \
other linguistic contexts.
"""
from ast import literal_eval
from collections import Counter
from datetime import datetime
from functools import partial
from itertools import product
from logging import getLogger
from pathlib import Path
from gensim.downloader import load
from ipatok import clusterise, tokenise
from networkx import DiGraph, all_shortest_paths, shortest_path
from numpy import array_equiv, isnan, subtract, zeros
from pandas import DataFrame, read_csv
from panphon.distance import Distance
from tqdm import tqdm
logger = getLogger(__name__)
model = None
tokenise = partial(tokenise, replace=True)
clusterise = partial(clusterise, replace=True)
class InventoryMissingError(Exception):
"""
Called by lonapy.helpers.Etym.rank_closest and \
loanpy.helpers.Etym.rank_closest_phonotactics if neither forms.csv \
is defined nor the phonotactic/phoneme inventory is plugged in.
"""
pass
def plug_in_model(word2vec_model):
"""
Allows to plug in a pre-trained word2vec model into global variable \
loanpy.helpers.model. \
This is for using vectors that can't be loaded with gensim's \
API. Vectors could be plugged in without this function as well, but this way \
debugging is easier. For more information see gensim's documentation, e.g. \
call help(gensim.downloader.load)
:param word2vec_model: The word2vec model to use
:type word2vec_model: None | gensim.models.keyedvectors.KeyedVectors
:returns: global variable <model> gets defined
:rtype: None (global model = word2vec_model)
:Example:
>>> from loanpy import helpers as hp
>>> from gensim.test.utils import common_texts
>>> from gensim.models import word2vec
>>> # no internet needed: load dummy vectors from gensim's test kit folder.
>>> hp.plug_in_model(word2vec.Word2Vec(common_texts, min_count=1).wv)
>>> # contains only vectors for "human", "computer", "interface"
>>> hp.model
<gensim.models.keyedvectors.KeyedVectors object at 0x7f85fe36d9d0>
>>> from loanpy import helpers as hp
>>> from gensim.downloader import load
>>> # stable internet connection needed to load the following:
>>> hp.plug_in_model(load("glove-twitter-25"))
>>> hp.model # should take only few seconds to load
<gensim.models.keyedvectors.KeyedVectors object at 0x7ff728663880>
For more information see gensim's documentation:
>>> from gensim.download import load
>>> help(load)
"""
global model
model = word2vec_model
def read_cvfb():
"""
Called by loanpy.helpers.Etym.__init__; \
Reads file cvfb.txt that was generated based on ipa_all.csv \
by loanpy.helpers.make_cvfb. \
It's a tuple of two dictionaries. Keys are same as col "ipa" in ipa_all.csv. \
Values of first dictionary are "C" if consonant and "V" if vowel (6358 keys). \
Values of 2nd dictionary are "F" if front vowel \
and "B" if back vowel (1240 keys). \
Only called by Etym.__init__ to define loanpy.helpers.Etym.phon2cv and \
loanpy.helpers.Ety.vow2fb, \
which in turn is used by loanpy.helpers.Etym.word2phonotactics, \
loanpy.helpers.Etym.has_harmony and others. \
This file could be read directly when importing, but this way things \
feel more stable.
:returns: two dictionaries, the first defining consonants \
and vowels (cv), \
the second defining front and back vowels (fb).
:rtype: (dict, dict)
:Example:
>>> from loanpy.helpers import read_cvfb
>>> read_cvfb()
(two dictionaries of length 6358 and 1240)
"""
path = Path(__file__).parent / "cvfb.txt"
with open(path, "r", encoding="utf-8") as f:
cvfb = literal_eval(f.read())
return cvfb[0], cvfb[1]
def read_forms(dff):
"""
Called by loanpy.helpers.Etym.__init__; Reads forms.csv (cldf), \
keeps only columns "Segement", "Cognacy" and \
"Language ID", drops spaces in Segments to internally re-tokenise later. \
Only called by Etym.__init__ to create local variable dff (data frame forms). \
Returns None if dff is None. So that class can be initiated without args too.
:param dff: path to forms.csv
:type dff: pathlib.PosixPath | str | None
:returns: a workable version of forms.csv as a pandas data frame
:rtype: pandas.core.frame.DataFrame | None
:Example:
>>> from pathlib import Path
>>> from loanpy.helpers import __file__, read_forms
>>> path2file = Path(__file__).parent / "tests" / \
"input_files" / "forms.csv"
>>> read_forms(path2file)
Language_ID Segments Cognacy
0 1 abc 1
1 2 xyz 1
"""
if not dff:
return None
dff = | read_csv(dff, usecols=["Segments", "Cognacy", "Language_ID"]) | pandas.read_csv |
# Compare Algorithms
#!/usr/bin/env python3
__author__ = '<NAME>'
'''
This program builds a driver signature model per account from readings aggregated over 15 minute intervals.
It creates a random-forest model by reading the aggregated values directly from the database. We assume that the
aggregation of the device records is happening periodically. See extract_driver_features_from_car_readings.js
and extract_features_from_mldataset.js to know how the aggregation is being done. The aggregated data for each vehicle
(per 15 minutes) is stored in the collection 'vehicle_signature_records'.
The created learning model is stored in the current directory where this python program is running.
'''
# Typical way to call this is:
# python compare-learning-models.py localhost driver
import json
import numpy as np
import pandas as pd
import sys
from bson import json_util
from datetime import datetime, timedelta
from itertools import cycle
from pandas.io.json import json_normalize
from pymongo import MongoClient
from sklearn.externals import joblib
from sklearn.metrics import average_precision_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import precision_recall_fscore_support as prf
import operator
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
import matplotlib.pyplot as plt
# np.random.seed(1671) # for reproducibility
__author__ = '<NAME>'
def _connect_mongo(host, port, username, password, db):
""" A utility for making a connection to MongoDB """
if username and password:
mongo_uri = 'mongodb://%s:%s@%s:%s/%s' % (username, password, host, port, db)
conn = MongoClient(mongo_uri)
else:
conn = MongoClient(host, port)
return conn[db]
def read_mongo(db, collection, query={}, projection='', limit=1000, host='localhost', port=27017, username=None, password=None, no_id=False):
""" Read from Mongo and Store into DataFrame """
# Connect to MongoDB
db = _connect_mongo(host=host, port=port, username=username, password=password, db=db)
# Make a query to the specific DB and Collection
cursor = db[collection].find(query, projection).limit(limit)
# Expand the cursor and construct the DataFrame
datalist = list(cursor)
#print(datalist)
sanitized = json.loads(json_util.dumps(datalist))
normalized = | json_normalize(sanitized) | pandas.io.json.json_normalize |
import os
import numpy as np
import pandas as pd
import streamlit as st
import time
from datetime import datetime
from glob import glob
from omegaconf import OmegaConf
from pandas.api.types import is_numeric_dtype
from streamlit_autorefresh import st_autorefresh
from dataloader import read_csv, clear_data
from preprocessing.filter import apply_filter
from preprocessing.target import apply_target, target_encode_numeric, target_encode_category
from preprocessing import delete_nan, replace_nan, delete_outlier, encode_category
from model import split_data, get_best_model
from analysis import get_shap_value, get_importance, simulation_1d, simulation_2d
from graph.evaluation import plot_reg_evaluation, plot_confusion_matrix
from graph.importance import plot_importance
from graph.explanation import plot_shap, plot_simulation_1d, plot_simulation_2d
from graph.matplot import plot_simulation_1d as matplotlib_simulation_1d
from graph.matplot import plot_shap as matplotlib_shap
from helper import get_session_id, encode, convert_figs2zip
# Warning
import warnings
warnings.filterwarnings('ignore')
# # Korean
# import matplotlib
# from matplotlib import font_manager, rc
# font_name = font_manager.FontProperties(fname="c:/Windows/Fonts/malgun.ttf").get_name()
# rc('font', family=font_name)
# matplotlib.rcParams['axes.unicode_minus'] = False
# Create Session
if 'config' not in st.session_state:
st.session_state['config'] = OmegaConf.load('config.yaml')
if 'files' not in st.session_state:
st.session_state['files'] = np.sort(glob(
os.path.join(
st.session_state['config']['file']['root'],
'*.csv'
)
))
if 'train_file_path' not in st.session_state:
st.session_state['train_file_path'] = None
if 'filter' not in st.session_state:
st.session_state['filter'] = None
if 'encoder' not in st.session_state:
st.session_state['encoder'] = None
if 'target' not in st.session_state:
st.session_state['target'] = None
if 'feature_all' not in st.session_state:
st.session_state['feature_all'] = None
if 'feature_selected' not in st.session_state:
st.session_state['feature_selected'] = None
if 'data_quality' not in st.session_state:
st.session_state['data_quality'] = None
if 'mode' not in st.session_state:
st.session_state['mode'] = None
if 'model' not in st.session_state:
st.session_state['model'] = None
if 'state_0' not in st.session_state:
st.session_state['state_0'] = None
if '_df_0' not in st.session_state:
st.session_state['_df_0'] = None
if 'state_1' not in st.session_state:
st.session_state['state_1'] = None
if '_df_1' not in st.session_state:
st.session_state['_df_1'] = None
if 'state_2' not in st.session_state:
st.session_state['state_2'] = None
if '_df_2' not in st.session_state:
st.session_state['_df_2'] = None
if 'state_3' not in st.session_state:
st.session_state['state_3'] = None
if '_df_3' not in st.session_state:
st.session_state['_df_3'] = None
# Title
st.markdown('# XAI for tree models')
st.write(f'SESSION ID: {get_session_id()}')
# STEP 1.
st.markdown('### STEP 1. Data preparation')
# Start Time
start_time = time.time()
# State 0: _df_0
state_0 = {}
# Select Train
train_file_path = st.selectbox(
label = 'Train Data',
options = st.session_state['files'],
index = 0
)
state_0['train_file_path'] = train_file_path
# update _df_0
if (
state_0 != st.session_state['state_0']
):
df = read_csv(
path = state_0['train_file_path'],
max_len = st.session_state['config']['data']['max_len'],
add_random_noise = st.session_state['config']['data']['add_random_noise'],
random_state = st.session_state['config']['setup']['random_state'],
)
df = clear_data(df)
# Update session state
st.session_state['train_file_path'] = state_0['train_file_path']
st.session_state['_df_0'] = df
st.session_state['model'] = None
# Print Options
st.sidebar.write('Options')
# State 1: _df_1
state_1 = {}
# Get Filter Number
num_filter = st.sidebar.number_input(
label = 'Filter',
value = 0,
min_value = 0,
max_value = len(st.session_state['_df_0'].columns),
step=1
)
# Get Filter Value
filter = {}
if num_filter > 0:
for i in range(num_filter):
column = st.selectbox(
label = f'Filtered column #{i+1}',
options = [None]+list(st.session_state['_df_0'].columns),
)
if column is not None:
values = list(
np.sort(st.session_state['_df_0'][column].dropna().unique())
)
selected_values = st.multiselect(
label = f'Select values #{i+1}',
options = values,
default = values
)
filter[column] = selected_values
state_1['filter'] = filter
# Get Mode
mode = st.selectbox(
label = 'Type',
options = ['Regression', 'Binary Classification']
)
state_1['mode'] = mode
# Get Target
target = st.selectbox(
label = 'Target',
options = list(st.session_state['_df_0'].columns)
)
state_1['target'] = target
# Target Encoding
if mode == 'Binary Classification':
values = st.session_state['_df_0'][target].dropna()
if is_numeric_dtype(values):
column_c0, column_i0, column_c1, column_i1 = st.columns(4)
with column_c0:
l_q = st.number_input(
label = 'Label 0 Upper Limit (%)',
value = 20,
min_value = 0,
max_value = 100,
step = 1
)
state_1['l_q'] = l_q
with column_c1:
h_q = st.number_input(
label = 'Label 0 Lower Limit (%)',
value = 80,
min_value = 0,
max_value = 100,
step = 1
)
state_1['h_q'] = h_q
with column_i0:
st.metric(
label = 'Label 0 Maximum',
value = f"{np.percentile(values, q=l_q):.4f}"
)
with column_i1:
st.metric(
label = 'Label 1 Minimum',
value = f"{np.percentile(values, q=h_q):.4f}"
)
else:
uniques = list(np.sort(np.unique(values)))
col_0, col_1 = st.columns(2)
with col_0:
label_0 = st.selectbox(
label = 'Label 0',
options = uniques,
index = 0
)
state_1['label_0'] = label_0
with col_1:
label_1 = st.selectbox(
label = 'Label 1',
options = [column for column in uniques if column != label_0],
index = 0
)
state_1['label_1'] = label_1
# update _df_1
if (
state_0 != st.session_state['state_0'] or
state_1 != st.session_state['state_1']
):
# Get DF
df = st.session_state['_df_0'].copy()
# Apply Filter
df = apply_filter(
df = df,
filter = filter
)
# Apply Target
df = apply_target(
df = df,
target = target
)
# Encode target if the mode is binary classification
if state_1['mode'] == 'Binary Classification':
if ('l_q' in state_1) and ('h_q' in state_1):
df = target_encode_numeric(
df = df,
target = state_1['target'],
l_q = state_1['l_q'],
h_q = state_1['h_q']
)
elif ('label_0' in state_1) and ('label_1' in state_1):
df = target_encode_category(
df = df,
target = state_1['target'],
label_0 = state_1['label_0'],
label_1 = state_1['label_1']
)
# Update session state
st.session_state['filter'] = state_1['filter']
st.session_state['target'] = state_1['target']
st.session_state['feature_all'] = [column for column in df.columns if column != state_1['target']]
st.session_state['data_quality'] = df.notnull().sum() / len(df)
st.session_state['mode'] = state_1['mode']
if ('l_q' in state_1) and ('h_q' in state_1):
st.session_state['l_q'] = state_1['l_q']
st.session_state['h_q'] = state_1['h_q']
st.session_state['label_0'] = None
st.session_state['label_1'] = None
elif ('label_0' in state_1) and ('label_1' in state_1):
st.session_state['l_q'] = None
st.session_state['h_q'] = None
st.session_state['label_0'] = state_1['label_0']
st.session_state['label_1'] = state_1['label_1']
else:
st.session_state['l_q'] = None
st.session_state['h_q'] = None
st.session_state['label_0'] = None
st.session_state['label_1'] = None
st.session_state['_df_1'] = df
st.session_state['model'] = None
# State 2: _df_2
state_2 = {}
# NaN Data
nan_data = st.sidebar.selectbox(
label = 'NaN Data',
options = ['Delete', 'Replace']
)
state_2['nan_data'] = nan_data
# Auto Feature Selection
auto_feature_selection = st.sidebar.selectbox(
label = 'Auto Feature Selection',
options = [False, True]
)
state_2['auto_feature_selection'] = auto_feature_selection
# update _df_2
if (
state_0 != st.session_state['state_0'] or
state_1 != st.session_state['state_1'] or
state_2 != st.session_state['state_2']
):
# Get DF
df = st.session_state['_df_1'].copy()
# Encode Data
df, encoder = encode_category(df)
# Update session state
st.session_state['nan_data'] = state_2['nan_data']
st.session_state['auto_feature_selection'] = auto_feature_selection
st.session_state['encoder'] = encoder
st.session_state['_df_2'] = df.reset_index(drop=True)
st.session_state['model'] = None
# State 3: _df_3
state_3 = {}
# Select Features
st.sidebar.markdown("""---""")
st.sidebar.write('Features')
st.sidebar.text(f'Data quality | name')
index = [
st.sidebar.checkbox(
label = f"{st.session_state['data_quality'][column]:.2f} | {column}",
key = f"_{column}",
value = True,
) for column in st.session_state['feature_all']
]
feature_selected = list(np.array(st.session_state['feature_all'])[index])
state_3['feature_selected'] = feature_selected
# Magage Features
def uncheck():
for column in st.session_state['feature_all']:
st.session_state[f'_{column}'] = False
def check():
for column in st.session_state['feature_all']:
st.session_state[f'_{column}'] = True
_, col_1, col_2 = st.sidebar.columns([1, 4, 5])
with col_1:
st.button(
label = 'Check All',
on_click = check
)
with col_2:
st.button(
label = 'Uncheck All',
on_click = uncheck
)
# update _df_3
if (
state_0 != st.session_state['state_0'] or
state_1 != st.session_state['state_1'] or
state_2 != st.session_state['state_2'] or
state_3 != st.session_state['state_3']
):
# Get DF
df = st.session_state['_df_2'].copy()
# Select columns
columns = state_3['feature_selected'] + [st.session_state['target']]
df = df[columns]
# Update session state
st.session_state['feature_selected'] = state_3['feature_selected']
st.session_state['_df_3'] = df
st.session_state['model'] = None
# Update states
st.session_state['state_0'] = state_0
st.session_state['state_1'] = state_1
st.session_state['state_2'] = state_2
st.session_state['state_3'] = state_3
# Data wall time
wall_time = time.time() - start_time
# Print Information
st.sidebar.markdown("""---""")
st.sidebar.write(f"Wall time: {wall_time:.4f} sec")
st.sidebar.write(f"Data Num: {len(st.session_state['_df_3'])}")
st.sidebar.write(f"Target: {st.session_state['target']}")
st.sidebar.write(f"Feature Num: {len(feature_selected)}")
# Print Encoder
columns = st.session_state['feature_selected'] + [st.session_state['target']]
encoder = {}
if len(st.session_state['encoder']) > 0:
for column in columns:
if column in st.session_state['encoder']:
encoder[column] = st.session_state['encoder'][column]
if len(encoder) > 0:
st.sidebar.write('Encoded Features')
st.sidebar.write(encoder)
# Print DF
st.write('Sample Data (5)')
st.write(st.session_state['_df_3'].iloc[:5])
# Train Model
if st.session_state['model'] is None:
st.markdown("""---""")
if st.button('Start Model Training'):
# Log
time_now = str(datetime.now())[:19]
print(f'START | {time_now} | {get_session_id()} | {st.session_state["train_file_path"]}')
# Load Data
df = st.session_state['_df_3'].copy()
features = st.session_state['feature_selected']
target = st.session_state['target']
if st.session_state['mode'] == 'Regression':
mode = 'reg'
if st.session_state['mode'] == 'Binary Classification':
mode = 'clf'
# NaN Data
df = df[features+[target]].copy()
if df.isna().sum().sum() == 0:
st.session_state['nan_processed'] = False
else:
if st.session_state['nan_data'] == 'Delete':
df = delete_nan(df)
elif st.session_state['nan_data'] == 'Replace':
df = replace_nan(
df = df,
random_state = st.session_state['config']['setup']['random_state']
)
st.session_state['nan_processed'] = True
st.session_state['data_num'] = len(df)
# Dataset
datasets = split_data(
df = df,
features = features,
target = target,
mode = mode,
n_splits = st.session_state['config']['split']['n_splits'],
shuffle = True,
random_state = st.session_state['config']['setup']['random_state']
)
# Best Model
best_model, history = get_best_model(
datasets = datasets,
mode = mode,
random_state = st.session_state['config']['setup']['random_state'],
n_jobs = st.session_state['config']['setup']['n_jobs']
)
best_model['features'] = features
best_model['target'] = target
best_model['datasets'] = datasets
# SHAP
source, shap_value = get_shap_value(
config = best_model,
max_num = st.session_state['config']['shap']['max_num']
)
output = get_importance(
shap_value,
sort = st.session_state['config']['importance']['sort'],
normalize = st.session_state['config']['importance']['normalize']
)
shap = {}
shap['features'] = output['features']
shap['importance'] = output['importance']
shap['source'] = source
shap['shap_value'] = shap_value
if (
st.session_state['auto_feature_selection'] and
'random_noise' in shap['features']
):
features = shap['features']
index = np.where(np.array(features)=='random_noise')[0][0]
if index != 0:
# Print Info
st.write('Auto Feature Selection is ON.')
# Set new features
features = features[:index]
# Dataset
datasets = split_data(
df = df,
features = features,
target = target,
mode = mode,
n_splits = st.session_state['config']['split']['n_splits'],
shuffle = True,
random_state = st.session_state['config']['setup']['random_state']
)
# Best Model
best_model, history = get_best_model(
datasets = datasets,
mode = mode,
random_state = st.session_state['config']['setup']['random_state'],
n_jobs = st.session_state['config']['setup']['n_jobs']
)
best_model['features'] = features
best_model['target'] = target
best_model['datasets'] = datasets
# SHAP
source, shap_value = get_shap_value(
config = best_model,
max_num = st.session_state['config']['shap']['max_num']
)
output = get_importance(
shap_value,
sort = st.session_state['config']['importance']['sort'],
normalize = st.session_state['config']['importance']['normalize']
)
shap = {}
shap['features'] = output['features']
shap['importance'] = output['importance']
shap['source'] = source
shap['shap_value'] = shap_value
# Update session state
st.session_state['history'] = history
st.session_state['model'] = best_model
st.session_state['shap'] = shap
# Refresh page
st_autorefresh(interval=100, limit=2)
# Result
else:
# STEP 2. Evaluation
st.markdown('### STEP 2. Evaluation')
# NaN Data
if st.session_state['nan_processed']:
st.write(f"NaN Data process mode is {st.session_state['nan_data']}.")
# Data number
st.write(f"Data Number: {st.session_state['data_num']}")
# Print Best Model
best = {}
best['name'] = st.session_state['model']['name']
best.update(st.session_state['model']['score'])
st.write('Best Model')
st.write(best)
# Print Score
st.write(st.session_state['history'])
# Graph
if st.session_state['mode'] == 'Regression':
st.altair_chart(
plot_reg_evaluation(
true = st.session_state['model']['oob_true'],
pred = st.session_state['model']['oob_pred'],
target = st.session_state['model']['target']
),
use_container_width = True
)
elif st.session_state['mode'] == 'Binary Classification':
st.pyplot(
plot_confusion_matrix(
true = st.session_state['model']['oob_true'],
pred = st.session_state['model']['oob_pred'],
target = st.session_state['model']['target']
)
)
# STEP 3. Feature Importance
features = st.session_state['shap']['features']
importance = st.session_state['shap']['importance']
col_1, col_2 = st.columns([3, 1])
with col_1:
st.markdown('### STEP 3. Feature Importance')
with col_2:
show_number = st.number_input(
label = 'Number',
value = np.minimum(10, len(features)),
min_value = 1,
max_value = len(features),
step = 1
)
st.altair_chart(
plot_importance(
features = features,
importance = importance,
target = st.session_state['model']['target'],
num = show_number
),
use_container_width=True
)
# Download CSV
df_importance = pd.DataFrame()
df_importance['feature'] = features
df_importance['importance'] = importance
st.download_button(
label = 'Download (.csv)',
data = df_importance.to_csv(index=False).encode('utf-8-sig'),
file_name = f'importance.csv',
mime = 'text/csv'
)
# STEP 4. Local Explanation
df = df = st.session_state['_df_3']
source = st.session_state['shap']['source']
shap_value = st.session_state['shap']['shap_value']
col_1, col_2 = st.columns([3, 1])
with col_1:
st.markdown('### STEP 4. Local Explanation')
with col_2:
type_name = st.selectbox(
label = 'Type',
options = ['SHAP', '1D Simulation', '2D Simulation']
)
if type_name == 'SHAP':
feature = st.selectbox(
label = 'Feature',
options = features
)
st.altair_chart(
plot_shap(
x = source[feature].values,
y = shap_value[feature].values,
x_all = df[feature].dropna().values,
feature = feature,
target = st.session_state['model']['target'],
mean = np.mean(st.session_state['model']['oob_true'])
),
use_container_width = True
)
# Print Encode
if feature in st.session_state['encoder']:
st.write(feature)
st.write(st.session_state['encoder'][feature])
# Download CSV
df_shap = | pd.DataFrame() | pandas.DataFrame |
import scipy.io as sio
import pandas as pd
import os
import json
import shutil as sh
# load delimited text file into a pandas dataframe
def loadDelimToPandas(file_name, delim):
if os.path.isfile(file_name):
df = pd.read_csv(file_name, sep=delim)
else:
print('loadDelimToPandas::file not found')
df = None
return df
# make directory defined in the path
# creates subjdirectories along the way
def make_directory(dir_path):
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
# check if the file_name sits in a valid directory, if not create it
def validate_directory(file_name):
d = os.path.dirname(file_name)
if not os.path.isdir(d):
os.makedirs(d)
# print header names in a mat file
def printMatHeader(filename):
d = sio.loadmat(filename)
# list of fields to remove from dictionary
igField = ['__header__','__version__','__globals__']
for key, value in d.items():
if key not in igField:
print(key)
# load and return data in a mat file
def readMatData(filename):
d = sio.loadmat(filename)
# list of fields to remove from dictionary
igField = ['__header__','__version__','__globals__']
for i in range(len(igField)):
d.pop(igField[i])
return d
# load and return data in a dat file
def readDatData(filename,inclCols):
df = | pd.read_csv(filename,sep='\\t',usecols=inclCols,engine='python') | pandas.read_csv |
def report_classification(df_features,df_target,algorithms='default',test_size=0.3,scaling=None,
large_data=False,encode='dummy',average='binary',change_data_type = False,
threshold=8,random_state=None):
'''
df_features : Pandas DataFrame
df_target : Pandas Series
algorithms : List ,'default'=
[LogisticRegression(),
GaussianNB(),
DecisionTreeClassifier(),
RandomForestClassifier(),
GradientBoostingClassifier(),
AdaBoostClassifier(),
XGBClassifier()]
The above are the default algorithms, if one needs any specific algorithms, they have to import
libraries then pass the instances of alogorith as list
For example, if one needs random forest and adaboost only, then pass
algorithms=[RandomForestClassifier(max_depth=8),AdaBoostClassifier()]
But, these libraries must be imported before passing into above list like
test_size: If float, should be between 0.0 and 1.0 and represent the proportion of the
dataset to include in the test split.
scaling : {'standard-scalar', 'min-max'} or None , default=None
encode : {'dummy','onehot','label'} ,default='dummy'
change_data_type : bool, default=False
Some columns will be of numerical datatype though there are only 2-3 unique values in that column,
so these columns must be converted to object as it is more relevant.
By setting change_data_type= True , these columns will be converted into object datatype
threshold : int ,default=8
Maximum unique value a column can have
large_data : bool, default=False
If the dataset is large then the parameter large_data should be set to True,
make sure if your system has enough memory before setting Large_data=True
average : {'micro', 'macro', 'samples','weighted', 'binary'} or None, default='binary'
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
random_state : int, RandomState instance or None, default=None
'''
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder,StandardScaler,MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report,confusion_matrix,roc_auc_score,roc_curve,accuracy_score,recall_score,precision_score
from sklearn.ensemble import RandomForestClassifier,GradientBoostingClassifier,AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from xgboost import XGBClassifier
from warnings import filterwarnings
filterwarnings('ignore')
print("Shape of the data :",df_features.shape)
print("---------------------------------------")
#Check if there is any missing values
if df_features.isna().sum().sum()==0:
df_num=df_features.select_dtypes(exclude="object")
#Some columns will be of numerical datatype though there are only 2-3 unique values in that column
#Here the if-condition will check if the unique values are less than the specified threshold in each column
if change_data_type == True:
for i in df_num.columns:
if len(df_num[i].value_counts())<threshold:
#The datatype will be changed to object if the condition is not satisfied
df_features[i] = df_features[i].astype('object')
print("Datatype of {} changed to 'object as there were less than {} unique values".format(i,threshold))
print("-----------------------------------------------------------------------------------------")
else:
pass
#In some features like movie-tiltle,id,etc where there will be many unique values must be must be dropped
#These features can also be label encoded and then can be passed
df_cat=df_features.select_dtypes(include="object")
for i in df_cat:
if df_features[i].nunique()>threshold:
raise Exception("Recheck the datatype of {}, as there are more than {} unique values or change the datatype of {}".format(i,threshold))
df_num=df_features.select_dtypes(exclude="object")
#Encoding of categorical features
if df_cat.shape[1]!=0:
#Dummy-encoding
if encode == 'dummy':
print("Encoding : Dummy Encoding" )
print("---------------------------------------")
encoding=pd.get_dummies(df_cat,drop_first=True)
X=pd.concat([encoding,df_num],axis=1)
#Onehot encoding
elif encode == 'onehot':
print("Encoding : One-hot Encoding" )
print("---------------------------------------")
encoding=pd.get_dummies(df_cat)
X=pd.concat([encoding,df_num],axis=1)
#Label encoding
elif encode == 'label':
print("Encoding : Label Encoding" )
print("---------------------------------------")
encoding=df_cat.apply(LabelEncoder().fit_transform)
X=pd.concat([encoding,df_num],axis=1)
#If there are no categorical features
else:
X=df_features
#Encoding of target column
labelencoder = LabelEncoder()
y = labelencoder.fit_transform(df_target)
#Value count of target column
count=pd.Series(y).value_counts()
print("Value count of target variable :")
for i in range(len(count)):
print("Count of {}s is {} ".format(count.index[i],count.values[i]))
print("---------------------------------------")
#Scaling
#Standard scaling
if scaling=='standard-scalar':
print("Scaling : StandardScalar")
print("---------------------------------------")
ss=StandardScaler()
X=ss.fit_transform(X)
#MinmaxScalar
elif scaling=='min-max':
print("Scaling : MinmaxScalar")
print("---------------------------------------")
mm=MinMaxScaler()
X=mm.fit_transform(X)
else:
print("Scaling : None")
print("---------------------------------------")
#Condition to check how large the data after encoding
if (X.shape[0]*X.shape[1] < 1000000) | large_data==True:
print("Number of Datapoints :",X.shape[0]*X.shape[1])
print("---------------------------------------")
else:
raise Exception("Data too large to process, if you want to still execute, set parameter large_data=False")
#Train-test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state)
print("Test size for train test split :",test_size)
print("---------------------------------------")
#Algorithms
if algorithms == 'default':
algorithms=[LogisticRegression(),
GaussianNB(),
DecisionTreeClassifier(random_state=random_state),
RandomForestClassifier(random_state=random_state),
GradientBoostingClassifier(random_state=random_state),
AdaBoostClassifier(random_state=random_state),
XGBClassifier(random_state=random_state,verbosity=0)]
else:
algorithms=algorithms
#Binary Classification
if df_target.nunique()<3:
results=pd.DataFrame(columns=["Algorithm_name",'Train_accuracy','Test_accuracy',
"Test_Roc_Auc_score",'Test_recall','Test_precision'])
for i in algorithms:
print("Executing :",i)
i.fit(X_train, y_train)
train_pred_i=i.predict(X_train)
train_acc=accuracy_score(y_train,train_pred_i)
test_pred_i=i.predict(X_test)
test_acc=accuracy_score(y_test,test_pred_i)
recall=recall_score(y_test,test_pred_i,average=average)
precision=precision_score(y_test,test_pred_i,average=average)
roc_auc=roc_auc_score(y_test,test_pred_i)
row={"Algorithm_name":str(i)[:-2],'Train_accuracy':train_acc,"Test_accuracy":test_acc,
"Test_Roc_Auc_score":roc_auc,'Test_recall':recall,"Test_precision":precision}
results=results.append(row,ignore_index=True)
return results
#Multiclass Classification
else:
results=pd.DataFrame(columns=["Algorithm_name",'Train_accuracy','Test_accuracy',"f1_score"])
for i in algorithms:
print("Executing :",i)
i.fit(X_train, y_train)
train_pred_i=i.predict(X_train)
train_acc=accuracy_score(y_train,train_pred_i)
test_pred_i=i.predict(X_test)
test_acc=accuracy_score(y_test,test_pred_i)
f1=recall_score(y_test,test_pred_i,average=average)
row={"Algorithm_name":str(i)[:-2],'Train_accuracy':train_acc,"Test_accuracy":test_acc,"f1_score":f1}
results=results.append(row,ignore_index=True)
return results
else:
raise Exception("The data contains missing values, first handle missing values and then pass the data")
def report_regression(df_features,df_target,algorithms='default',test_size=0.3,
scaling=None,large_data=False,change_data_type=True,encode='dummy',
threshold=8,random_state=None):
'''
df_features : Pandas DataFrame
df_target : Pandas Series
algorithms : List ,'default'=
[LinearRegression(),
Lasso(),
Ridge(),
RandomForestRegressor(),
GradientBoostingRegressor(),
AdaBoostRegressor(),
XGBRegressor]
The above are the default algorithms, if one needs any specific algorithms, they have to import
libraries then pass the instances of alogorith as list
For example, if one needs random forest and adaboost only, then pass
algorithms=[RandomForestRegressor(max_depth=8),AdaBoostRegressor()]
But, these libraries must be imported before passing into above list like
test_size: If float, should be between 0.0 and 1.0 and represent the proportion of the
dataset to include in the test split.
scaling : {'standard-scalar', 'min-max'} or None , default=None
encode : {'dummy','onehot','label'} ,default='dummy'
change_data_type : bool, default=False
Some columns will be of numerical datatype though there are only 2-3 unique values in that column,
so these columns must be converted to object as it is more relevant.
By setting change_data_type= True , these columns will be converted into object datatype
threshold : int ,default=8
Maximum unique value a column can have
large_data : bool, default=False
If the dataset is large then the parameter large_data should be set to True,
make sure if your system has enough memory before setting Large_data=True
random_state : int, RandomState instance or None, default=None
'''
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder,StandardScaler,MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestRegressor,GradientBoostingRegressor,AdaBoostRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import LinearRegression,Lasso,Ridge
from xgboost import XGBRegressor
from warnings import filterwarnings
filterwarnings('ignore')
print("Shape of data :",df_features.shape)
print("---------------------------------------")
#Check if there is any missing values
if df_features.isna().sum().sum()==0:
df_num=df_features.select_dtypes(exclude="object")
#Some columns will be of numerical datatype though there are only 2-3 unique values in that column
#Here the if-condition will check if the unique values are less than the specified threshold in each column
if change_data_type == True:
for i in df_num.columns:
#The datatype will be changed to object if the condition is not satisfied
if len(df_num[i].value_counts())<threshold:
df_features[i] = df_features[i].astype('object')
print("Datatype of {} changed to 'object as there were less than {} unique values".format(i,threshold))
print("-----------------------------------------------------------------------------------------")
else:
pass
#In some features like movie-tiltle,id,etc where there will be many unique values must be must be dropped
#These features can also be label encoded and then can be passed
df_cat=df_features.select_dtypes(include="object")
for i in df_cat:
if df_features[i].nunique()>threshold:
raise Exception("Recheck the datatype of {}, as there are more than {} unique values or change the datatype of {}".format(i,threshold))
df_num=df_features.select_dtypes(exclude="object")
#Encoding of categorical features
if df_cat.shape[1]!=0:
#Dummy Encoding
if encode == 'dummy':
print("Encoding : Dummy Encoding" )
print("---------------------------------------")
encoding=pd.get_dummies(df_cat,drop_first=True)
X=pd.concat([encoding,df_num],axis=1)
#Onehot encoding
elif encode == 'onehot':
print("Encoding : One-hot Encoding" )
print("---------------------------------------")
encoding=pd.get_dummies(df_cat)
X= | pd.concat([encoding,df_num],axis=1) | pandas.concat |
#!/usr/bin/env python
import copy
import sys
import pandas as pd
import numpy as np
from ..src_webapp.constants import (
CONST_COL_NAME_DATE,
CONST_COL_NAME_COST,
CONST_COL_NAME_YM,
CONST_COL_NAME_IDEALCOST,
CONST_COL_NAME_AVAILCOST,
CONST_COL_NAME_SUB,
CONST_COL_NAME_SGUID,
)
from ..src_webapp.data_loader import create_dataframe
from ..src_webapp.utilities import diff_month
from ..src_webapp.totals import group_year_month, add_missing_year_months
from ..src_webapp.subs import get_data_for_subid
# from src_webapp.constants import (
# CONST_COL_NAME_DATE,
# CONST_COL_NAME_COST,
# CONST_COL_NAME_YM,
# CONST_COL_NAME_IDEALCOST,
# CONST_COL_NAME_AVAILCOST,
# CONST_COL_NAME_SUB,
# CONST_COL_NAME_SGUID
# )
# from src_webapp.data_loader import create_dataframe
# from src_webapp.utilities import diff_month
# from src_webapp.totals import group_year_month, add_missing_year_months
# from src_webapp.subs import get_data_for_subid
from dateutil.relativedelta import relativedelta
class EA_budget:
def __init__(self, name, dt_from, dt_to, amount, currency):
self.name = name
self.dt_from = dt_from
self.dt_to = dt_to
self.amount = amount
self.currency = currency
def create_cost_avail_ideal_df(
ea_budget, ea_usage, date_from, date_to, max_reg_cost_day, use_days=False
):
"""
Creates a dataframe with actual, ideal and available costs for an EA budget
Args:
ea_budget - EA budget
ea_usage - EA usage dataframe
date_from - analysis period start date (e.g. financial year start)
date_to - analysis period end date (e.g. financial year end)
mmax_reg_cost_day - the present (max date with sponsorship usage)
use_days - a flag to use days for estimation, otherwise months
Returns:
ea_df - a dataframe actual, ideal and available costs for an EA budget
"""
if ea_usage is not None:
# TODO: budget_spent needs to be estimated depending on the date
sys.exit(1)
else:
budget_spent = 0.0
# EA budget has expired or has not started yet
if ea_budget.dt_to < date_from or ea_budget.dt_from > date_to:
return None
budget_days = (ea_budget.dt_to - ea_budget.dt_from).days
budget_months = diff_month(ea_budget.dt_to, ea_budget.dt_from)
budget_avg_day = float(ea_budget.amount) / float(budget_days)
budget_avg_month = float(ea_budget.amount) / float(budget_months)
budget_left = ea_budget.amount - budget_spent
budget_days_left = max((ea_budget.dt_to - max_reg_cost_day).days, 0)
budget_months_left = max(
diff_month(
ea_budget.dt_to,
copy.copy(max_reg_cost_day).replace(day=1) - relativedelta(days=1),
),
0,
)
budget_left_avg_day = float(budget_left) / float(budget_days_left)
budget_left_avg_month = float(budget_left) / float(budget_months_left)
cur_date = copy.copy(date_from).replace(day=1)
year_month_list = []
ideal_list = []
avail_list = []
while cur_date <= date_to:
month_st = max(copy.copy(cur_date).replace(day=1), date_from)
month_end = min(
copy.copy(cur_date).replace(day=1)
+ relativedelta(months=1)
- relativedelta(days=1),
date_to,
)
days_cnt = (month_end - month_st).days
year_month = "{year}-{month:02d}".format(
year=month_st.year, month=month_st.month
)
# Check if the month has passed
full_month = max_reg_cost_day >= month_end
if full_month:
if ea_budget.dt_from >= month_end:
monthly = 0.0
else:
if use_days:
monthly = budget_avg_day * days_cnt
else:
monthly = budget_avg_month
monthly_avail = None
else:
monthly = None
if month_end <= ea_budget.dt_to:
if use_days:
monthly_avail = budget_left_avg_day * days_cnt
else:
monthly_avail = budget_left_avg_month
else:
monthly_avail = 0.0
year_month_list.append(year_month)
ideal_list.append(monthly)
avail_list.append(monthly_avail)
cur_date += relativedelta(months=1)
d = {
CONST_COL_NAME_YM: year_month_list,
CONST_COL_NAME_IDEALCOST: ideal_list,
CONST_COL_NAME_AVAILCOST: avail_list,
}
df = pd.DataFrame(data=d)
return df
def analyse_ea_usage(
budgets, ea_data_df, date_from, date_to, max_reg_cost_day
):
"""
Args:
budgets - a list of EA_budget budegts containing all the available EA
budgets
ea_data_df - a dataframe with all the EA usage data
date_from - analysis period start date (e.g. financial year start)
date_to - analysis period end date (e.g. financial year end)
max_reg_cost_day - the present (max date with sponsorship usage)
Returns:
ea_budget
ea_usage
ea_remain
main_budget_df
"""
if ea_data_df is not None:
sys.exit(1)
else:
print("WARNING : ATM analysis does not include EA usage data")
print("WARNING : ATM analysis does not include EA usage data")
print("WARNING : ATM analysis does not include EA usage data")
main_budget_df = None
for ea_budget in budgets:
budget_df = create_cost_avail_ideal_df(
ea_budget, ea_data_df, date_from, date_to, max_reg_cost_day
)
if main_budget_df is None:
main_budget_df = copy.deepcopy(budget_df)
else:
main_budget_df[CONST_COL_NAME_IDEALCOST] = (
main_budget_df[CONST_COL_NAME_IDEALCOST]
+ budget_df[CONST_COL_NAME_IDEALCOST]
)
main_budget_df[CONST_COL_NAME_AVAILCOST] = (
main_budget_df[CONST_COL_NAME_AVAILCOST]
+ budget_df[CONST_COL_NAME_AVAILCOST]
)
if ea_data_df is None:
main_budget_df[CONST_COL_NAME_COST] = 0.0
ea_budget = main_budget_df[CONST_COL_NAME_AVAILCOST].sum()
else:
sys.exit(1)
ea_budget = None
ea_usage = main_budget_df[CONST_COL_NAME_COST].sum()
ea_remain = ea_budget - ea_usage
return ea_budget, ea_usage, ea_remain, main_budget_df
def analyse_spnsr_usage(data_path, date_from, date_to, spnsr_budget):
"""
Performs analysis for usage of sponsorship budegt
Args:
spnsr_data_path - path to the directory which contains all available
usage data from both the sponsorship portal and EduHub
date_from - analysis period start date (e.g. financial year start)
date_to - analysis period end date (e.g. financial year end)
spnsr_budget - sponsorship budget for the analysis period
(e.g. financial year)
Returns:
spnsr_remain - total remnain budget for the specified period
spnsr_usage - total usage over the specified period
spnsr_usage_df - data frame with usage analysis
max_reg_cost_day - the present (max date with sponsorship usage)
"""
# reading in the data
data_df = create_dataframe(data_path)
# applying data filters
sub_data_df = data_df[
(data_df.Date >= date_from) & (data_df.Date <= date_to)
]
spnsr_usage = sub_data_df[CONST_COL_NAME_COST].sum()
spnsr_remain = spnsr_budget - spnsr_usage
spnsr_usage_ym_df = group_year_month(sub_data_df)
# adding zeros for the missing year-months
spnsr_usage_ym_df = add_missing_year_months(
spnsr_usage_ym_df, date_from, date_to, CONST_COL_NAME_COST
)
# estimates ideal and available costs
spnsr_ideal_avail_ym_df, max_reg_cost_day = get_ideal_avail_spnsr_costs(
sub_data_df, date_from, date_to, spnsr_budget
)
# reset index
spnsr_usage_ym_df = spnsr_usage_ym_df.reset_index(drop=True)
spnsr_ideal_avail_ym_df = spnsr_ideal_avail_ym_df.reset_index(drop=True)
# combining actual, ideal and available costs into one dataframe
spnsr_usage_df = pd.concat(
[
spnsr_ideal_avail_ym_df,
spnsr_usage_ym_df.drop([CONST_COL_NAME_YM], axis=1),
],
axis=1,
)
return spnsr_remain, spnsr_usage, spnsr_usage_df, max_reg_cost_day
def get_ideal_avail_spnsr_costs(
sub_raw_data_df,
spnsr_date_from,
spnsr_date_to,
spnsr_budget,
use_days=False,
):
"""Estimates the ideal spending scenario and available costs based on the
usage during the analysis period.
Args:
sub_raw_data_df - raw usage dataframe
date_from - analysis period start date (eg. financial year start)
date_to - analysis period end date (eg. financial year end)
spnsr_budget - sponsorship budget for the analysis period
(e.g. financial year)
use_days - a flag to use days for estimation, otherwise months
Returns:
df - a dataframe with an ideal spending scenario and available budget
max_reg_cost_day - the present (max date with sponsorship usage)
"""
date_from = spnsr_date_from
date_to = spnsr_date_to + relativedelta(days=1)
# registering the latest date with registered usage
max_reg_cost_day = pd.to_datetime(
max(sub_raw_data_df[CONST_COL_NAME_DATE])
)
budget_spent = sub_raw_data_df[CONST_COL_NAME_COST].sum()
budget_left = spnsr_budget - budget_spent
# number of days/months within the analysis period
days_total = (date_to - date_from).days
months_total = diff_month(date_to, date_from)
if days_total <= 0:
return | pd.DataFrame() | pandas.DataFrame |
from experiments.gamut_games import *
from prettytable import PrettyTable
from algorithms.algorithms import global_sampling, psp
from structures.gamestructure import path_to_nfg_files
from experiments.noise import UniformNoise
from structures.bounds import HoeffdingBound
import pandas as pd
import ast
import time
def get_rg_ground_truth_game(params: Dict, game_params: Dict):
return RandomGames(title=game_params['title'],
num_players=params['num_players'],
num_strategies=params['num_strategies'],
max_payoff=params['max_payoff'],
min_payoff=params['min_payoff'],
noise=game_params['noise'])
def get_pd_ground_truth_game(params: Dict, game_params: Dict):
return PrisonersDilemma(title=game_params['title'],
max_payoff=params['max_payoff'],
min_payoff=params['min_payoff'],
noise=game_params['noise'])
def get_bs_ground_truth_game(params: Dict, game_params: Dict):
return BattleOfTheSexes(title=game_params['title'],
max_payoff=params['max_payoff'],
min_payoff=params['min_payoff'],
noise=game_params['noise'])
def get_cn_ground_truth_game(params: Dict, game_params: Dict):
return CongestionGame(title=game_params['title'],
num_players=params['num_players'],
num_facilities=params['num_facilities'],
max_payoff=params['max_payoff'],
min_payoff=params['min_payoff'],
noise=game_params['noise'])
def get_td_ground_truth_game(params: Dict, game_params: Dict):
return TravelersDilemma(title=game_params['title'],
num_players=params['num_players'],
num_strategies=params['num_strategies'],
max_payoff=params['max_payoff'],
min_payoff=params['min_payoff'],
noise=game_params['noise'])
def get_ch_ground_truth_game(params: Dict, game_params: Dict):
return Chicken(title=game_params['title'],
max_payoff=params['max_payoff'],
min_payoff=params['min_payoff'],
noise=game_params['noise'])
def get_me_ground_truth_game(params: Dict, game_params: Dict):
return MinimumEffort(title=game_params['title'],
num_players=params['num_players'],
num_strategies=params['num_strategies'],
max_payoff=params['max_payoff'],
min_payoff=params['min_payoff'],
noise=game_params['noise'])
def get_gd_ground_truth_game(params: Dict, game_params: Dict):
return GrabTheDollar(title=game_params['title'],
num_strategies=params['num_strategies'],
max_payoff=params['max_payoff'],
min_payoff=params['min_payoff'],
noise=game_params['noise'])
def get_zs_ground_truth_game(params: Dict, game_params: Dict):
return ZeroSum(title=game_params['title'],
num_strategies=params['num_strategies'],
max_payoff=params['max_payoff'],
min_payoff=params['min_payoff'],
noise=game_params['noise'])
def get_cg_ground_truth_game(params: Dict, game_params: Dict):
return CompoundGame(title=game_params['title'],
num_players=params['num_players'],
max_payoff=params['max_payoff'],
min_payoff=params['min_payoff'],
noise=game_params['noise'])
class Experiment(ABC):
# Declare the type of games we are allowed to experiment with.
game_generators_dict = {'rg': get_rg_ground_truth_game,
'pd': get_pd_ground_truth_game,
'bs': get_bs_ground_truth_game,
'cn': get_cn_ground_truth_game,
'td': get_td_ground_truth_game,
'ch': get_ch_ground_truth_game,
'me': get_me_ground_truth_game,
'gd': get_gd_ground_truth_game,
'zs': get_zs_ground_truth_game,
'cg': get_cg_ground_truth_game}
def __init__(self, params: Dict):
self.params = params
self.gt_generator = Experiment.game_generators_dict[self.params['ground_truth_game_generator']]
Experiment.generate_params_prettytable(params=self.params, meta_file_location=self.params['result_file_location'] + '.meta')
@abstractmethod
def run_experiment(self):
pass
@staticmethod
def generate_params_prettytable(params: Dict, meta_file_location: str) -> None:
"""
Generate a pretty table with the parameters of an experiment, print it and save it to a file.
:param params: a list of tuples (param, value)
:param meta_file_location: the location of the file where the pretty table of parameters will be stored
:return:
"""
#
t = PrettyTable()
t.field_names = ["Param", "Value"]
for param, value in params.items():
t.add_row([param, str(value)])
print(t)
# Save meta info file so we know what parameters were used to run the experiment.
with open(meta_file_location, 'w+') as meta_file:
meta_file.write(str(t))
class GSExperiments(Experiment):
def run_experiment(self):
# List for results
results = []
# Draw some number of ground-truth games.
for i in range(0, self.params['num_games']):
print(f'Game #{i}')
# Test different noise models.
for j, noise in enumerate(self.params['noise_models']):
print(f'Noise #{j}', end='\t ')
game = self.gt_generator(self.params,
{'title': 'expt_gs_game_' + self.params['ground_truth_game_generator'] + '_' + self.params['experiment_name'], 'noise': noise})
c = noise.get_c(self.params['max_payoff'], self.params['min_payoff'])
# For fix noise model and ground-truth game, perform multiple trials defined as runs of GS.
for t in range(0, self.params['num_trials']):
if t % 10 == 0:
print(t, end='\t')
df = pd.DataFrame(results, columns=['game', 'variance', 'bound', 'm', 'eps'])
df.to_csv(self.params['result_file_location'], index=False)
for m in self.params['m_test']:
# Run GS for each type of bound.
for bound in self.params['bounds']:
g = game.clone()
epsilon_gs, total_num_samples_gs = global_sampling(estimated_game=g, bound=bound, m=m, delta=self.params['delta'], c=c)
# Collect results in the form (game index, variance of the noise model, name of bound, number of samples, epsilon).
results += [[i, noise.get_variance(), str(bound)[0], m, epsilon_gs]]
print('')
# Convert results to DataFrame and save to a csv file
df = | pd.DataFrame(results, columns=['game', 'variance', 'bound', 'm', 'eps']) | pandas.DataFrame |
# Importing required libraries
import pandas as pd
import matplotlib.pyplot as plt
# Path of File
path = r'/home/abhishek/Data Analysis/Datasets/ipl.csv'
# Reading data in Data Frame
df = pd.read_csv(path)
# create column `year` which stores the year in which match was played
df['date'] = pd.to_datetime(df['date'])
df['year'] = df['date'].dt.year
df['season'] = df['year'] - 2007
# Plot the wins gained by teams across all seasons
df2 = df.drop_duplicates(['match_code'])
df_wins = df.drop_duplicates(['match_code'])
df_wins = df_wins.groupby(['winner'])['match_code'].nunique()
df_wins.sort_values(ascending=True).plot(kind='barh')
plt.xlabel('Wins')
# Plot Number of matches played by each team through all seasons
temp_data = pd.melt(df2, id_vars=['match_code', 'year'], value_vars= ['team1', 'team2'])
matches_played = temp_data.value.value_counts()
plt.figure(figsize=(12,6))
matches_played.plot(x= matches_played.index, y = matches_played, kind = 'bar', title= 'No. of matches played across 9 seasons')
plt.xticks(rotation = 'vertical')
plt.show()
# Top bowlers through all seasons
df_bowler_runs = df.groupby('bowler')[['total']].sum()
df_bowler_runs = df_bowler_runs.reset_index()
df_bowler_overs = df.groupby('bowler')[['delivery']].sum()/6
df_bowler_overs = df_bowler_overs.reset_index()
df_merged = pd.merge(left=df_bowler_overs, right=df_bowler_runs, on='bowler')
df_merged.rename(columns={'total': 'Total Runs', 'delivery': 'Overs Bowled'}, inplace=True)
df_merged['Economy'] = df_merged['Total Runs']/df_merged['Overs Bowled']
df_merged.set_index(keys='bowler',inplace=True)
df_merged[['Economy']].head(15).sort_values(by='Economy', ascending=False).plot(kind='barh')
plt.ylabel('Bowlers')
plt.xlabel('Economy')
plt.title('Top 15 Bowlers')
# How did the different pitches behave? What was the average score for each stadium?
total_runs_scored_venue_wise = df.groupby('venue')[['total']].sum()
total_matches_played_venue_wise = df2.groupby('venue')[['match_code']].count()
df_venue_run_avg = total_runs_scored_venue_wise['total']/total_matches_played_venue_wise['match_code']
df_venue_run_avg.sort_values(ascending=True).plot(kind='barh', legend=None)
plt.title('Avg Score Venue Wise')
plt.ylabel('Venue')
plt.xlabel('Avg Runs Scored')
# Types of Dismissal and how often they occur
types_of_dismissal = df['wicket_kind'].value_counts()
no_of_bowls = df['delivery'].count()
avg_dismissal_frequency = types_of_dismissal/no_of_bowls
avg_dismissal_frequency.plot(kind='bar')
plt.ylim(0, 0.03)
# Plot no. of boundaries across IPL seasons
no_of_boundaries = df[(df['runs']==4) | (df['runs']==6)]
no_of_boundaries['runs'].value_counts().plot(kind='bar')
plt.title('No of boundaries across all seasons')
plt.xticks(rotation='horizontal')
plt.xlabel('Boundary Type')
plt.ylabel('Count')
plt.show()
# Average statistics across all seasons
per_match_data = df.drop_duplicates(subset='match_code', keep='first').reset_index(drop=True)
total_runs_per_season = df.groupby('year')['total'].sum()
balls_delivered_per_season = df.groupby('year')['delivery'].count()
no_of_match_played_per_season = per_match_data.groupby('year')['match_code'].count()
avg_balls_per_match = balls_delivered_per_season/no_of_match_played_per_season
avg_runs_per_match = total_runs_per_season/no_of_match_played_per_season
avg_runs_per_ball = total_runs_per_season/balls_delivered_per_season
avg_data = | pd.DataFrame([no_of_match_played_per_season, avg_runs_per_match, avg_balls_per_match, avg_runs_per_ball]) | pandas.DataFrame |
"""
pyLDAvis Prepare
===============
Main transformation functions for preparing LDAdata to the visualization's data structures
"""
from __future__ import absolute_import
from past.builtins import basestring
from collections import namedtuple
import json
import logging
from joblib import Parallel, delayed, cpu_count
import numpy as np
import pandas as pd
from scipy.stats import entropy
from scipy.spatial.distance import pdist, squareform
from .utils import NumPyEncoder
try:
from sklearn.manifold import MDS, TSNE
sklearn_present = True
except ImportError:
sklearn_present = False
def __num_dist_rows__(array, ndigits=2):
return array.shape[0] - int(( | pd.DataFrame(array) | pandas.DataFrame |
import calendar
from datetime import datetime
import locale
import unicodedata
import numpy as np
import pytest
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
Timedelta,
Timestamp,
date_range,
offsets,
)
import pandas._testing as tm
from pandas.core.arrays import DatetimeArray
class TestDatetime64:
def test_no_millisecond_field(self):
msg = "type object 'DatetimeIndex' has no attribute 'millisecond'"
with pytest.raises(AttributeError, match=msg):
DatetimeIndex.millisecond
msg = "'DatetimeIndex' object has no attribute 'millisecond'"
with pytest.raises(AttributeError, match=msg):
DatetimeIndex([]).millisecond
def test_datetimeindex_accessors(self):
dti_naive = date_range(freq="D", start=datetime(1998, 1, 1), periods=365)
# GH#13303
dti_tz = date_range(
freq="D", start=datetime(1998, 1, 1), periods=365, tz="US/Eastern"
)
for dti in [dti_naive, dti_tz]:
assert dti.year[0] == 1998
assert dti.month[0] == 1
assert dti.day[0] == 1
assert dti.hour[0] == 0
assert dti.minute[0] == 0
assert dti.second[0] == 0
assert dti.microsecond[0] == 0
assert dti.dayofweek[0] == 3
assert dti.dayofyear[0] == 1
assert dti.dayofyear[120] == 121
assert dti.isocalendar().week[0] == 1
assert dti.isocalendar().week[120] == 18
assert dti.quarter[0] == 1
assert dti.quarter[120] == 2
assert dti.days_in_month[0] == 31
assert dti.days_in_month[90] == 30
assert dti.is_month_start[0]
assert not dti.is_month_start[1]
assert dti.is_month_start[31]
assert dti.is_quarter_start[0]
assert dti.is_quarter_start[90]
assert dti.is_year_start[0]
assert not dti.is_year_start[364]
assert not dti.is_month_end[0]
assert dti.is_month_end[30]
assert not dti.is_month_end[31]
assert dti.is_month_end[364]
assert not dti.is_quarter_end[0]
assert not dti.is_quarter_end[30]
assert dti.is_quarter_end[89]
assert dti.is_quarter_end[364]
assert not dti.is_year_end[0]
assert dti.is_year_end[364]
assert len(dti.year) == 365
assert len(dti.month) == 365
assert len(dti.day) == 365
assert len(dti.hour) == 365
assert len(dti.minute) == 365
assert len(dti.second) == 365
assert len(dti.microsecond) == 365
assert len(dti.dayofweek) == 365
assert len(dti.dayofyear) == 365
assert len(dti.isocalendar()) == 365
assert len(dti.quarter) == 365
assert len(dti.is_month_start) == 365
assert len(dti.is_month_end) == 365
assert len(dti.is_quarter_start) == 365
assert len(dti.is_quarter_end) == 365
assert len(dti.is_year_start) == 365
assert len(dti.is_year_end) == 365
dti.name = "name"
# non boolean accessors -> return Index
for accessor in DatetimeArray._field_ops:
if accessor in ["week", "weekofyear"]:
# GH#33595 Deprecate week and weekofyear
continue
res = getattr(dti, accessor)
assert len(res) == 365
assert isinstance(res, Index)
assert res.name == "name"
# boolean accessors -> return array
for accessor in DatetimeArray._bool_ops:
res = getattr(dti, accessor)
assert len(res) == 365
assert isinstance(res, np.ndarray)
# test boolean indexing
res = dti[dti.is_quarter_start]
exp = dti[[0, 90, 181, 273]]
tm.assert_index_equal(res, exp)
res = dti[dti.is_leap_year]
exp = DatetimeIndex([], freq="D", tz=dti.tz, name="name")
tm.assert_index_equal(res, exp)
def test_datetimeindex_accessors2(self):
dti = date_range(freq="BQ-FEB", start=datetime(1998, 1, 1), periods=4)
assert sum(dti.is_quarter_start) == 0
assert sum(dti.is_quarter_end) == 4
assert sum(dti.is_year_start) == 0
assert sum(dti.is_year_end) == 1
def test_datetimeindex_accessors3(self):
# Ensure is_start/end accessors throw ValueError for CustomBusinessDay,
bday_egypt = offsets.CustomBusinessDay(weekmask="Sun Mon Tue Wed Thu")
dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt)
msg = "Custom business days is not supported by is_month_start"
with pytest.raises(ValueError, match=msg):
dti.is_month_start
def test_datetimeindex_accessors4(self):
dti = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-03"])
assert dti.is_month_start[0] == 1
def test_datetimeindex_accessors5(self):
with tm.assert_produces_warning(FutureWarning, match="The 'freq' argument"):
tests = [
(Timestamp("2013-06-01", freq="M").is_month_start, 1),
(Timestamp("2013-06-01", freq="BM").is_month_start, 0),
(Timestamp("2013-06-03", freq="M").is_month_start, 0),
(Timestamp("2013-06-03", freq="BM").is_month_start, 1),
(Timestamp("2013-02-28", freq="Q-FEB").is_month_end, 1),
(Timestamp("2013-02-28", freq="Q-FEB").is_quarter_end, 1),
(Timestamp("2013-02-28", freq="Q-FEB").is_year_end, 1),
(Timestamp("2013-03-01", freq="Q-FEB").is_month_start, 1),
(Timestamp("2013-03-01", freq="Q-FEB").is_quarter_start, 1),
(Timestamp("2013-03-01", freq="Q-FEB").is_year_start, 1),
(Timestamp("2013-03-31", freq="QS-FEB").is_month_end, 1),
(Timestamp("2013-03-31", freq="QS-FEB").is_quarter_end, 0),
(Timestamp("2013-03-31", freq="QS-FEB").is_year_end, 0),
(Timestamp("2013-02-01", freq="QS-FEB").is_month_start, 1),
(Timestamp("2013-02-01", freq="QS-FEB").is_quarter_start, 1),
(Timestamp("2013-02-01", freq="QS-FEB").is_year_start, 1),
(Timestamp("2013-06-30", freq="BQ").is_month_end, 0),
(Timestamp("2013-06-30", freq="BQ").is_quarter_end, 0),
(Timestamp("2013-06-30", freq="BQ").is_year_end, 0),
(Timestamp("2013-06-28", freq="BQ").is_month_end, 1),
(Timestamp("2013-06-28", freq="BQ").is_quarter_end, 1),
(Timestamp("2013-06-28", freq="BQ").is_year_end, 0),
(Timestamp("2013-06-30", freq="BQS-APR").is_month_end, 0),
( | Timestamp("2013-06-30", freq="BQS-APR") | pandas.Timestamp |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 8 08:53:30 2019
@author: rhou
"""
import warnings
warnings.filterwarnings("ignore")
import os, sys
import argparse
import matplotlib
matplotlib.use('agg')
import pandas as pd
import numpy as np
try:
import seaborn as sns
except ImportError:
sys.exit('\n\nError: seaborn module is missing, please install it before proceeding.')
try:
import igraph as ig
except ImportError:
sys.exit('\n\nError: igraph module is missing, please install it before proceeding.')
try:
import networkx as nx
except ImportError:
sys.exit('\n\nError: NetworkX module is missing, please install it before proceeding.')
try:
import pygraphviz as pgv
except ImportError:
sys.exit('\n\nError: PyGraphviz module is missing, please install it before proceeding.')
#filter adjacency matrix
def ChooseTopEdges(adjM, keepTopEdge):
if keepTopEdge == 0:
return adjM
edgeDict = {'s':[],'t':[],'v':[]}
for idx in adjM.index:
for col in adjM.columns:
edgeDict['s'].append(idx)
edgeDict['t'].append(col)
if adjM.loc[idx,col] <=0:
edgeDict['v'].append((-1.0) * adjM.loc[idx,col])
else:
edgeDict['v'].append(adjM.loc[idx,col])
edgeD = pd.DataFrame(edgeDict).sort_values(by=['v'], ascending=False)
edgeD = edgeD.head(keepTopEdge)
nadjM = pd.DataFrame(0.0, index=adjM.index,columns=adjM.index)
for idx in edgeD.index:
nadjM.loc[edgeD.loc[idx,['s']],edgeD.loc[idx,['t']]] = adjM.loc[edgeD.loc[idx,['s']],edgeD.loc[idx,['t']]]
return nadjM
# build delta adjacency matrix
def BuildDeltaAdjM(edgeDF, origlabels, labels, specificityThreshold, weightThreshold, frequencyThreshold, keepTopEdge):
edgeDF['Sending cluster'] = edgeDF['Sending cluster'].astype(str)
edgeDF['Target cluster'] = edgeDF['Target cluster'].astype(str)
adjM1 = pd.DataFrame(0.0, index=origlabels, columns=origlabels)
adjSpecM1 = pd.DataFrame(0.0, index=origlabels, columns=origlabels)
adjCountM1 = | pd.DataFrame(0, index=origlabels, columns=origlabels) | pandas.DataFrame |
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from keras.preprocessing.text import Tokenizer
from keras.models import Sequential
from keras import layers
from base import *
import matplotlib.pyplot as plt
import pandas as pd
plt.style.use("ggplot")
plt.interactive(False)
param_grid = dict(
num_filters=[32, 64, 128],
kernel_size=[3, 5, 7],
vocab_size=[5000],
embedding_dim=[50],
maxlen=[100],
)
def extract(df, dataset):
ds_df = df[df["source"] == dataset]
ds_sentences = ds_df["sentence"].values
labs = ds_df["label"].values
return (ds_df, ds_sentences, labs)
def get_train_and_test(sentences, y, test_sz=0.25, rs=1000):
sentences_train, sentences_test, y_train, y_test = train_test_split(
sentences, y, test_size=test_sz, random_state=rs
)
vectorizer = CountVectorizer()
vectorizer.fit(sentences_train)
x_train = vectorizer.transform(sentences_train)
x_test = vectorizer.transform(sentences_test)
return (x_train, x_test, y_train, y_test)
def plot_history(history, source):
acc = history.history["accuracy"]
val_acc = history.history["val_accuracy"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
x = range(1, len(acc) + 1)
fig = plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(x, acc, "b", label="Training acc")
plt.plot(x, val_acc, "r", label="Validation acc")
plt.title(f"Training and validation accuracy for {source}")
plt.xlabel("Batch number")
plt.ylabel("Accuracy")
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(x, loss, "b", label="Training loss")
plt.plot(x, val_loss, "r", label="Validation loss")
plt.title(f"Training and validation loss for {source}")
plt.xlabel("Batch number")
plt.ylabel("Loss")
plt.legend()
return plt
# initialize the entire dataframe from the data folder
df_list = []
for source, filepath in data_path.items():
df = pd.read_csv(filepath, names=["sentence", "label"], sep="\t")
df["source"] = source
df_list.append(df)
df = | pd.concat(df_list) | pandas.concat |
from ioUtils import getFile, saveFile
from fsUtils import isDir, setDir, setFile, isFile, mkDir
from timeUtils import timestat
from pandas import Series
from sys import prefix
class masterArtistNameDB:
def __init__(self, source, install=False, debug=False):
self.debug = debug
print("{0} masterArtistNameDB(\"{1}\") {2}".format("="*25,source,"="*25))
self.debug = debug
self.source = source
self.musicNamesDir = setDir(prefix, 'musicnames')
self.initializeData() if install is False else self.installData()
def initializeData(self):
self.manualRenames = self.getData(fast=True, local=False)
retval,manualRenames = self.duplicateIndexTest()
if retval is False:
raise ValueError("There are duplicate key,values in the [{0}] data".format(self.source))
self.manualRenames = manualRenames
retval = self.recursiveTest()
if retval is False:
raise ValueError("There are recursive key,values in the [{0}] data".format(self.source))
self.summary()
def installData(self):
if not isDir(self.musicNamesDir):
print("Install: Making Prefix Dir [{0}]".format(self.musicNamesDir))
mkDir(self.musicNamesDir)
if not isFile(self.getFilename(fast=True, local=False)):
print("Install: Creating Prefix Data From Local Data")
self.writeToMainPickleFromLocalYAML()
def summary(self, manualRenames=None):
manualRenames = self.manualRenames if manualRenames is None else manualRenames
print("masterArtistNameDB(\"{0}\") Summary:".format(self.source))
print(" Entries: {0}".format(len(manualRenames)))
print(" Artists: {0}".format(manualRenames.nunique()))
#########################################################################################################
#
# I/O
#
#########################################################################################################
def getFilename(self, fast, local):
basename="ManualRenames"
self.localpfname = "{0}{1}.p".format(self.source, basename)
self.localyfname = "{0}{1}.yaml".format(self.source, basename)
self.pfname = setFile(self.musicNamesDir, self.localpfname)
self.yfname = setFile(self.musicNamesDir, self.localyfname)
if fast is True:
if local is True:
return self.localpfname
else:
return self.pfname
else:
if local is True:
return self.localyfname
else:
return self.yfname
raise ValueError("Somehow didn't get a filename!")
def getData(self, fast=True, local=False):
ftype = {True: "Pickle", False: "YAML"}
ltype = {True: "Local", False: "Main"}
ts = timestat("Getting Manual Renames Data From {0} {1} File".format(ltype[local], ftype[fast]))
fname = self.getFilename(fast, local)
manualRenames = getFile(fname)
ts.stop()
return manualRenames
def writeToLocalYamlFromMainPickle(self):
ts = timestat("Writing To Local YAML From Main Pickle")
manualRenames = self.getData(fast=True, local=False)
self.saveData(manualRenames, fast=False, local=True)
ts.stop()
def writeToMainPickleFromLocalYAML(self):
ts = timestat("Writing To Main Pickle From Local YAML")
manualRenames = self.getData(fast=False, local=True)
self.saveData(manualRenames, fast=True, local=False)
ts.stop()
def saveData(self, manualRenames=None, fast=True, local=False):
ftype = {True: "Pickle", False: "YAML"}
ltype = {True: "Local", False: "Main"}
ts = timestat("Saving Manual Renames Data To {0} {1} File".format(ltype[local], ftype[fast]))
manualRenames = self.manualRenames if manualRenames is None else manualRenames
#self.summary(manualRenames)
fname = self.getFilename(fast, local)
if fast:
toSave = | Series(manualRenames) | pandas.Series |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.