prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90)
---|---|---|
import base64
import datetime
import json
from json import JSONEncoder
from google.protobuf.json_format import MessageToJson, ParseDict
from google.protobuf.descriptor import FieldDescriptor
from mlflow.exceptions import MlflowException
from collections import defaultdict
from functools import partial
_PROTOBUF_INT64_FIELDS = [
FieldDescriptor.TYPE_INT64,
FieldDescriptor.TYPE_UINT64,
FieldDescriptor.TYPE_FIXED64,
FieldDescriptor.TYPE_SFIXED64,
FieldDescriptor.TYPE_SINT64,
]
def _mark_int64_fields_for_proto_maps(proto_map, value_field_type):
"""Converts a proto map to JSON, preserving only int64-related fields."""
json_dict = {}
for key, value in proto_map.items():
# The value of a protobuf map can only be a scalar or a message (not a map or repeated
# field).
if value_field_type == FieldDescriptor.TYPE_MESSAGE:
json_dict[key] = _mark_int64_fields(value)
elif value_field_type in _PROTOBUF_INT64_FIELDS:
json_dict[key] = int(value)
elif isinstance(key, int):
json_dict[key] = value
return json_dict
def _mark_int64_fields(proto_message):
"""Converts a proto message to JSON, preserving only int64-related fields."""
json_dict = {}
for field, value in proto_message.ListFields():
if (
# These three conditions check if this field is a protobuf map.
# See the official implementation: https://bit.ly/3EMx1rl
field.type == FieldDescriptor.TYPE_MESSAGE
and field.message_type.has_options
and field.message_type.GetOptions().map_entry
):
# Deal with proto map fields separately in another function.
json_dict[field.name] = _mark_int64_fields_for_proto_maps(
value, field.message_type.fields_by_name["value"].type
)
continue
if field.type == FieldDescriptor.TYPE_MESSAGE:
ftype = partial(_mark_int64_fields)
elif field.type in _PROTOBUF_INT64_FIELDS:
ftype = int
else:
# Skip all non-int64 fields.
continue
json_dict[field.name] = (
[ftype(v) for v in value]
if field.label == FieldDescriptor.LABEL_REPEATED
else ftype(value)
)
return json_dict
def _merge_json_dicts(from_dict, to_dict):
"""Merges the json elements of from_dict into to_dict. Only works for json dicts
converted from proto messages
"""
for key, value in from_dict.items():
if isinstance(key, int) and str(key) in to_dict:
# When the key (i.e. the proto field name) is an integer, it must be a proto map field
# with integer as the key. For example:
# from_dict is {'field_map': {1: '2', 3: '4'}}
# to_dict is {'field_map': {'1': '2', '3': '4'}}
# So we need to replace the str keys with int keys in to_dict.
to_dict[key] = to_dict[str(key)]
del to_dict[str(key)]
if key not in to_dict:
continue
if isinstance(value, dict):
_merge_json_dicts(from_dict[key], to_dict[key])
elif isinstance(value, list):
for i, v in enumerate(value):
if isinstance(v, dict):
_merge_json_dicts(v, to_dict[key][i])
else:
to_dict[key][i] = v
else:
to_dict[key] = from_dict[key]
return to_dict
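# Illustrative aside (not part of the original MLflow module): a minimal, self-contained sketch of
# how _merge_json_dicts overlays int64-preserving values onto the string-valued MessageToJson output.
_merge_example = _merge_json_dicts({"count": 5}, {"count": "5", "name": "x"})
assert _merge_example == {"count": 5, "name": "x"}  # the int64 value stays a JSON number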
def message_to_json(message):
"""Converts a message to JSON, using snake_case for field names."""
# Google's MessageToJson API converts int64 proto fields to JSON strings.
# For more info, see https://github.com/protocolbuffers/protobuf/issues/2954
json_dict_with_int64_as_str = json.loads(
MessageToJson(message, preserving_proto_field_name=True)
)
# We convert this proto message into a JSON dict where only int64 proto fields
# are preserved, and they are treated as JSON numbers, not strings.
json_dict_with_int64_fields_only = _mark_int64_fields(message)
# By merging these two JSON dicts, we end up with a JSON dict where int64 proto fields are not
# converted to JSON strings. Int64 keys in proto maps will always be converted to JSON strings
# because JSON doesn't support non-string keys.
json_dict_with_int64_as_numbers = _merge_json_dicts(
json_dict_with_int64_fields_only, json_dict_with_int64_as_str
)
return json.dumps(json_dict_with_int64_as_numbers, indent=2)
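# Illustrative aside (not part of the original module): protobuf's canonical JSON mapping renders
# 64-bit integers as strings, which is the behavior message_to_json works around for ordinary
# (non-map) message fields. A hedged sketch of the underlying issue, assuming protobuf's
# well-known wrapper types are available:
#
#     from google.protobuf.wrappers_pb2 import Int64Value
#     MessageToJson(Int64Value(value=2**40))  # expected: '"1099511627776"' (a JSON string, not a number)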
def _stringify_all_experiment_ids(x):
"""Converts experiment_id fields which are defined as ints into strings in the given json.
This is necessary for backwards- and forwards-compatibility with MLflow clients/servers
running MLflow 0.9.0 and below, as experiment_id was changed from an int to a string.
Note that the Python JSON serializer is happy to auto-convert strings into ints (so a
server or client that sees the new format is fine), but is unwilling to convert ints
to strings. Therefore, we need to perform this conversion manually.
This code can be removed after MLflow 1.0, once users have had reasonable time to
upgrade clients and servers to MLflow 0.9.1+.
"""
if isinstance(x, dict):
items = x.items()
for k, v in items:
if k == "experiment_id":
x[k] = str(v)
elif k == "experiment_ids":
x[k] = [str(w) for w in v]
elif k == "info" and isinstance(v, dict) and "experiment_id" in v and "run_uuid" in v:
# shortcut for run info
v["experiment_id"] = str(v["experiment_id"])
elif k not in ("params", "tags", "metrics"): # skip run data
_stringify_all_experiment_ids(v)
elif isinstance(x, list):
for y in x:
_stringify_all_experiment_ids(y)
def parse_dict(js_dict, message):
"""Parses a JSON dictionary into a message proto, ignoring unknown fields in the JSON."""
_stringify_all_experiment_ids(js_dict)
ParseDict(js_dict=js_dict, message=message, ignore_unknown_fields=True)
class NumpyEncoder(JSONEncoder):
"""Special json encoder for numpy types.
Note that some numpy types do not have a native Python equivalent,
so json.dumps raises a TypeError for them.
In that case, they need to be converted to their closest Python equivalent.
"""
def try_convert(self, o):
import numpy as np
import pandas as pd
def encode_binary(x):
return base64.encodebytes(x).decode("ascii")
if isinstance(o, np.ndarray):
if o.dtype == object:
return [self.try_convert(x)[0] for x in o.tolist()], True
elif o.dtype == np.bytes_:
return np.vectorize(encode_binary)(o), True
else:
return o.tolist(), True
if isinstance(o, np.generic):
return o.item(), True
if isinstance(o, bytes) or isinstance(o, bytearray):
return encode_binary(o), True
if isinstance(o, np.datetime64):
return np.datetime_as_string(o), True
if isinstance(o, (pd.Timestamp, datetime.date)):
return o.isoformat(), True
return o, False
def default(self, o): # pylint: disable=E0202
res, converted = self.try_convert(o)
if converted:
return res
else:
return super().default(o)
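# Illustrative aside (not part of the original module): NumpyEncoder plugs into json.dumps via the
# `cls` argument (numpy is kept as a lazy import above, so this demo stays in comment form):
#
#     import numpy as np
#     json.dumps({"x": np.arange(3), "when": np.datetime64("2020-01-01")}, cls=NumpyEncoder)
#     # expected: '{"x": [0, 1, 2], "when": "2020-01-01"}'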
def _dataframe_from_json(
path_or_str, schema=None, pandas_orient: str = "split", precise_float=False
):
"""
Parse JSON into a pandas.DataFrame. A schema can be passed to ensure correct type parsing and to
make any necessary conversions (e.g. string -> binary for binary columns).
:param path_or_str: Path to a json file or a json string.
:param schema: Mlflow schema used when parsing the data.
:param pandas_orient: pandas data frame convention used to store the data.
:return: pandas.DataFrame.
"""
import pandas as pd
from mlflow.types import DataType
if schema is not None:
if schema.is_tensor_spec():
# The schema can be either:
# - a single tensor: attempt to parse all columns with the same dtype
# - a dictionary of tensors: each column gets the type from an equally named tensor
if len(schema.inputs) == 1:
dtypes = schema.numpy_types()[0]
else:
dtypes = dict(zip(schema.input_names(), schema.numpy_types()))
else:
dtypes = dict(zip(schema.input_names(), schema.pandas_types()))
df = pd.read_json(
path_or_str,
orient=pandas_orient,
dtype=dtypes,
precise_float=precise_float,
convert_dates=False,
)
if not schema.is_tensor_spec():
actual_cols = set(df.columns)
for type_, name in zip(schema.input_types(), schema.input_names()):
if type_ == DataType.binary and name in actual_cols:
df[name] = df[name].map(lambda x: base64.decodebytes(bytes(x, "utf8")))
return df
else:
return pd.read_json(
path_or_str, orient=pandas_orient, dtype=False, precise_float=precise_float
)
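# Illustrative aside (not part of the original module): with schema=None the helper defers to
# pandas.read_json. A hedged sketch with the default "split" orient:
#
#     _dataframe_from_json('{"columns": ["a"], "index": [0], "data": [[1]]}')
#     # expected: a one-row DataFrame with the single column "a"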
def _get_jsonable_obj(data, pandas_orient="records"):
"""Attempt to make the data json-able via standard library.
Look for some commonly used types that are not jsonable and convert them into json-able ones.
Unknown data types are returned as is.
:param data: data to be converted, works with pandas and numpy, rest will be returned as is.
:param pandas_orient: If `data` is a Pandas DataFrame, it will be converted to a JSON
dictionary using this Pandas serialization orientation.
"""
import numpy as np
import pandas as pd
if isinstance(data, np.ndarray):
return data.tolist()
if isinstance(data, pd.DataFrame):
return data.to_dict(orient=pandas_orient)
if isinstance(data, pd.Series):
return
|
pd.DataFrame(data)
|
pandas.DataFrame
|
import argparse
import json
import pandas as pd
import numpy as np
import itertools
import operator
import warnings
import pprint
from datetime import datetime
from support.io_support import load_data
from support.evaluation import smape, evaluate_smape
from .itsmpy import ITSM
def fit_arima_model(series, M):
"""
Fits the best ARIMA model by selecting the lowest AICc value.
Args:
series: time series data
type: np.arrays
M: data model ([] for None)
type: list
Returns:
a: ARMA model coefficients (phi, theta, sigma2)
type: dictionary
"""
itsm = ITSM()
e = itsm.Resid(series, M)
warnings.filterwarnings("ignore")
a = itsm.autofit(e)
print('ARMA model: ')
pprint.pprint(a, width=1)
return a
def main(data_path, keys_path, pred_days=60):
print('***** Starting time: ', datetime.now(), '**************************')
print('***** Loading data ************************************************')
pages, dates, visits = load_data(data_path)
# pages, dates, visits = pages[:1], dates, visits[:1,:]
keys =
|
pd.read_csv(keys_path)
|
pandas.read_csv
|
import numpy as np
import pandas as pd
import datetime as dt
import time
import matplotlib.pyplot as plt
import seaborn as sns
import vnpy.analyze.data.data_prepare as dp
from jqdatasdk import *
from vnpy.trader.database import database_manager
from mpl_toolkits.axisartist.parasite_axes import HostAxes, ParasiteAxes
import matplotlib.dates as dates
from matplotlib import ticker
import math
from vnpy.analyze.util.cal_returns import CalReturns
def step1_draw_close(data, show=False):
"""画出收盘价的价格曲线"""
df = data.copy()
if show:
fig, ax = plt.subplots(1, figsize=(16, 9))
df.close.plot(ax=ax, figsize=(16, 9), colormap='coolwarm')
plt.show()
def step2_pe_pb(data, show=False):
"""画出PE、PB之间的关系"""
df = data.copy()
print('Correlation matrix of PE and PB:\n %s' % (df[['pe', 'pb']].corr()))
if show:
sns.jointplot(df['pe'], df['pb'], kind='reg', height=9)
fig, ax = plt.subplots(1, figsize=(16, 9))
df[['pe', 'pb']].plot(ax=ax, secondary_y=['pb'], figsize=(16, 9), colormap='coolwarm')
plt.show()
def step3_close_pe(data, pe_percentile_blow=0.4, pe_percentile_upper=0.6, show=False):
"""close与PE之间关系"""
df = data.copy()
print('Correlation matrix of close and PE:\n %s' % (df[['close', 'pe']].corr()))
percentile_blow = df['pe'].quantile(pe_percentile_blow)  # lower quantile (e.g. 0.4)
percentile_upper = df['pe'].quantile(pe_percentile_upper)  # upper quantile (e.g. 0.6)
print('Lower quantile %s gives PE %s; upper quantile %s gives PE %s' % (
pe_percentile_blow, percentile_blow, pe_percentile_upper, percentile_upper))
if show:
sns.jointplot(df['close'], df['pe'], kind='reg', height=9)
fig, ax = plt.subplots(1, figsize=(16, 9))
df[['close', 'pe']].plot(ax=ax, secondary_y=['pe'], figsize=(16, 9), colormap='coolwarm')
plt.axhline(y=percentile_blow, color='g', linestyle='-')
plt.axhline(y=percentile_upper, color='r', linestyle='-')
plt.show()
def step4_close_percentile_pe(data, n=7, show=False, show_p_hist=False):
"""
Relationship between the close price and its PE percentile.
PE levels differ greatly across periods, so comparing percentiles within a rolling window is more meaningful.
"""
df = data.copy()
# assume 244 trading days per year
windows = int(n * 244)  # window length in trading days, as an integer
if len(data) < windows:
print('Data is shorter than the rolling window; cannot compute rolling percentiles')
return
column = 'percentile_' + str(n) + 'Y'
df[column] = df['pe'].rolling(windows).apply(lambda x: pd.Series(x).rank().iloc[-1] /
pd.Series(x).shape[0], raw=True)
if show:
fig, ax = plt.subplots(1, figsize=(16, 9))
df[['close', column]].plot(ax=ax, secondary_y=[column], figsize=(16, 9), colormap='coolwarm')
plt.show()
if show_p_hist:
"""动态百分位分布,直方图"""
fig, ax = plt.subplots(1, figsize=(16, 9))
df[column].hist(ax=ax, figsize=(16, 9))
plt.show()
return df
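# Illustrative aside (not part of the original script): the rolling lambda above ranks the latest
# value within each window. A hedged sketch with a 3-period window:
#
#     pd.Series([1, 2, 3, 2]).rolling(3).apply(
#         lambda x: pd.Series(x).rank().iloc[-1] / pd.Series(x).shape[0], raw=True)
#     # expected: NaN, NaN, 1.0, 0.5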
def first_trade_date_in_month(df):
"""找到每个月第一个交易日"""
month_first_date = set()
pre_year, pre_month = 0, 0
for index, row in df.iterrows():
if pre_year != index.year or pre_month != index.month:
month_first_date.add(index)
pre_year = index.year
pre_month = index.month
return month_first_date
def trade_model(data, column='percentile_7Y', show=False, show_annual_invest=True):
"""
Trading model:
1. Undervalued: buy; fairly valued: hold; overvalued: sell
"""
df = data.copy()
# drop rows without a rolling percentile value
df.dropna(inplace=True)
# find the first trading day of each month
month_first_date = first_trade_date_in_month(df)
# assume 5000 yuan becomes investable on the first trading day of each month
month_invest_const = 5000
available_cash = 0  # investable cash
stock_q = 0  # number of shares held (fractions allowed to simplify the calculation)
# data for plotting: cumulative investment, current stock assets, realized returns
trade_date = []
invest_cash = []
stock_assets = []
return_cash = []
# buy records
trades = {}
df_return = pd.DataFrame(columns=('date', 'invest', 'stock', 'return'))
for index, row in df.iterrows():
# Follow the standard dollar-cost-averaging idea: decide only whether to invest, not how much. Proceeds from sales are pocketed and not reinvested.
trade_date.append(index)
if month_first_date.__contains__(index):
# available_cash = available_cash + month_invest_const
# cash not invested this month is cleared next month
available_cash = month_invest_const
if row[column] < 0.4 and available_cash > 0:
# undervalued range: buy
afford_q = available_cash / row['close']
stock_q += afford_q
invest_cash.append(available_cash)
trades[index] = available_cash  # record the buy
available_cash = 0
return_cash.append(0)
elif row[column] > 0.6 and stock_q > 0:
# overvalued range: sell
selled_p = month_invest_const / row['close']  # number of shares sold
stock_q = stock_q - selled_p
invest_cash.append(0)
return_cash.append(month_invest_const)
else:
# do nothing
invest_cash.append(0)
return_cash.append(0)
stock_assets.append(stock_q * row['close'])
df_return['date'] = trade_date
df_return['invest'] = invest_cash
df_return['stock'] = stock_assets
df_return['return'] = return_cash
df_return['invest_cumsum'] = df_return['invest'].cumsum()
df_return['return_cumsum'] = df_return['return'].cumsum()
df_return['hold'] = df_return['return_cumsum'] + df_return['stock']
# use the date as the index
df_return['date'] = pd.to_datetime(df_return['date'])  # convert to datetime
df_return.set_index(['date'], inplace=True)
df_return.index.name = None  # drop the index name
df_return['close'] = df['close']
print(df_return.head())
# compute the annualized return
earings = CalReturns.annual_returns(trades, df_return.index[-1], df_return['hold'][-1])
print('Annualized return: %s' % earings)
if show:
fig, ax = plt.subplots(1, figsize=(16, 9))
df_return[['invest_cumsum', 'hold', 'close']].plot(ax=ax, secondary_y=['close'], figsize=(16, 9),
colormap='coolwarm')
plt.show()
if show_annual_invest:
"""展示年度投入与收益, 📊柱状图 (年度投入、年度剩余))"""
trade_year = [date.year for date in trade_date]
df_g = pd.DataFrame(columns=('date', 'invest'))
df_g['date'] = trade_year
df_g['invest'] = invest_cash
df_view = df_g.groupby('date').sum() # group by
fig, ax = plt.subplots(1, figsize=(16, 9))
df_view[['invest']].plot(ax=ax, figsize=(16, 9), kind='bar')
plt.show()
if __name__ == "__main__":
# start the analysis with the CSI 300 index
start_date = dt.datetime(2005, 5, 1)
end_date = dt.datetime(2020, 5, 1)
df = dp.load_bar_data('000300', 'XSHG', start_date=start_date, end_data=end_date)
df_finance = dp.load_finance_data('000300.XSHG', start_date=start_date, end_date=end_date)
if len(df) == len(df_finance):
print('load data success, len:%s' % len(df))
df['pe'] = df_finance['pe']
df['pb'] = df_finance['pb']
# use the date as the index
df['date'] =
|
pd.to_datetime(df['date'])
|
pandas.to_datetime
|
# pylint: disable=E1101
from datetime import datetime
import datetime as dt
import os
import warnings
import nose
import struct
import sys
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas.compat import iterkeys
from pandas.core.frame import DataFrame, Series
from pandas.core.common import is_categorical_dtype
from pandas.io.parsers import read_csv
from pandas.io.stata import (read_stata, StataReader, InvalidColumnName,
PossiblePrecisionLoss, StataMissingValue)
import pandas.util.testing as tm
from pandas.tslib import NaT
from pandas import compat
class TestStata(tm.TestCase):
def setUp(self):
self.dirpath = tm.get_data_path()
self.dta1_114 = os.path.join(self.dirpath, 'stata1_114.dta')
self.dta1_117 = os.path.join(self.dirpath, 'stata1_117.dta')
self.dta2_113 = os.path.join(self.dirpath, 'stata2_113.dta')
self.dta2_114 = os.path.join(self.dirpath, 'stata2_114.dta')
self.dta2_115 = os.path.join(self.dirpath, 'stata2_115.dta')
self.dta2_117 = os.path.join(self.dirpath, 'stata2_117.dta')
self.dta3_113 = os.path.join(self.dirpath, 'stata3_113.dta')
self.dta3_114 = os.path.join(self.dirpath, 'stata3_114.dta')
self.dta3_115 = os.path.join(self.dirpath, 'stata3_115.dta')
self.dta3_117 = os.path.join(self.dirpath, 'stata3_117.dta')
self.csv3 = os.path.join(self.dirpath, 'stata3.csv')
self.dta4_113 = os.path.join(self.dirpath, 'stata4_113.dta')
self.dta4_114 = os.path.join(self.dirpath, 'stata4_114.dta')
self.dta4_115 = os.path.join(self.dirpath, 'stata4_115.dta')
self.dta4_117 = os.path.join(self.dirpath, 'stata4_117.dta')
self.dta_encoding = os.path.join(self.dirpath, 'stata1_encoding.dta')
self.csv14 = os.path.join(self.dirpath, 'stata5.csv')
self.dta14_113 = os.path.join(self.dirpath, 'stata5_113.dta')
self.dta14_114 = os.path.join(self.dirpath, 'stata5_114.dta')
self.dta14_115 = os.path.join(self.dirpath, 'stata5_115.dta')
self.dta14_117 = os.path.join(self.dirpath, 'stata5_117.dta')
self.csv15 = os.path.join(self.dirpath, 'stata6.csv')
self.dta15_113 = os.path.join(self.dirpath, 'stata6_113.dta')
self.dta15_114 = os.path.join(self.dirpath, 'stata6_114.dta')
self.dta15_115 = os.path.join(self.dirpath, 'stata6_115.dta')
self.dta15_117 = os.path.join(self.dirpath, 'stata6_117.dta')
self.dta16_115 = os.path.join(self.dirpath, 'stata7_115.dta')
self.dta16_117 = os.path.join(self.dirpath, 'stata7_117.dta')
self.dta17_113 = os.path.join(self.dirpath, 'stata8_113.dta')
self.dta17_115 = os.path.join(self.dirpath, 'stata8_115.dta')
self.dta17_117 = os.path.join(self.dirpath, 'stata8_117.dta')
self.dta18_115 = os.path.join(self.dirpath, 'stata9_115.dta')
self.dta18_117 = os.path.join(self.dirpath, 'stata9_117.dta')
self.dta19_115 = os.path.join(self.dirpath, 'stata10_115.dta')
self.dta19_117 = os.path.join(self.dirpath, 'stata10_117.dta')
self.dta20_115 = os.path.join(self.dirpath, 'stata11_115.dta')
self.dta20_117 = os.path.join(self.dirpath, 'stata11_117.dta')
self.dta21_117 = os.path.join(self.dirpath, 'stata12_117.dta')
def read_dta(self, file):
# Legacy default reader configuration
return read_stata(file, convert_dates=True)
def read_csv(self, file):
return read_csv(file, parse_dates=True)
def test_read_empty_dta(self):
empty_ds = DataFrame(columns=['unit'])
# GH 7369, make sure can read a 0-obs dta file
with tm.ensure_clean() as path:
empty_ds.to_stata(path,write_index=False)
empty_ds2 = read_stata(path)
tm.assert_frame_equal(empty_ds, empty_ds2)
def test_data_method(self):
# Minimal testing of legacy data method
reader_114 = StataReader(self.dta1_114)
with warnings.catch_warnings(record=True) as w:
parsed_114_data = reader_114.data()
reader_114 = StataReader(self.dta1_114)
parsed_114_read = reader_114.read()
tm.assert_frame_equal(parsed_114_data, parsed_114_read)
def test_read_dta1(self):
reader_114 = StataReader(self.dta1_114)
parsed_114 = reader_114.read()
reader_117 = StataReader(self.dta1_117)
parsed_117 = reader_117.read()
# Pandas uses np.nan as missing value.
# Thus, all columns will be of type float, regardless of their name.
expected = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
# this is an oddity as really the nan should be float64, but
# the casting doesn't fail so need to match stata here
expected['float_miss'] = expected['float_miss'].astype(np.float32)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta2(self):
if LooseVersion(sys.version) < '2.7':
raise nose.SkipTest('datetime interp under 2.6 is faulty')
expected = DataFrame.from_records(
[
(
datetime(2006, 11, 19, 23, 13, 20),
1479596223000,
datetime(2010, 1, 20),
datetime(2010, 1, 8),
datetime(2010, 1, 1),
datetime(1974, 7, 1),
datetime(2010, 1, 1),
datetime(2010, 1, 1)
),
(
datetime(1959, 12, 31, 20, 3, 20),
-1479590,
datetime(1953, 10, 2),
datetime(1948, 6, 10),
datetime(1955, 1, 1),
datetime(1955, 7, 1),
datetime(1955, 1, 1),
datetime(2, 1, 1)
),
(
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
)
],
columns=['datetime_c', 'datetime_big_c', 'date', 'weekly_date',
'monthly_date', 'quarterly_date', 'half_yearly_date',
'yearly_date']
)
expected['yearly_date'] = expected['yearly_date'].astype('O')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed_114 = self.read_dta(self.dta2_114)
parsed_115 = self.read_dta(self.dta2_115)
parsed_117 = self.read_dta(self.dta2_117)
# 113 is buggy due to limits of date format support in Stata
# parsed_113 = self.read_dta(self.dta2_113)
# Remove resource warnings
w = [x for x in w if x.category is UserWarning]
# should get warning for each call to read_dta
tm.assert_equal(len(w), 3)
# buggy test because of the NaT comparison on certain platforms
# Format 113 test fails since it does not support tc and tC formats
# tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta3(self):
parsed_113 = self.read_dta(self.dta3_113)
parsed_114 = self.read_dta(self.dta3_114)
parsed_115 = self.read_dta(self.dta3_115)
parsed_117 = self.read_dta(self.dta3_117)
# match stata here
expected = self.read_csv(self.csv3)
expected = expected.astype(np.float32)
expected['year'] = expected['year'].astype(np.int16)
expected['quarter'] = expected['quarter'].astype(np.int8)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta4(self):
parsed_113 = self.read_dta(self.dta4_113)
parsed_114 = self.read_dta(self.dta4_114)
parsed_115 = self.read_dta(self.dta4_115)
parsed_117 = self.read_dta(self.dta4_117)
expected = DataFrame.from_records(
[
["one", "ten", "one", "one", "one"],
["two", "nine", "two", "two", "two"],
["three", "eight", "three", "three", "three"],
["four", "seven", 4, "four", "four"],
["five", "six", 5, np.nan, "five"],
["six", "five", 6, np.nan, "six"],
["seven", "four", 7, np.nan, "seven"],
["eight", "three", 8, np.nan, "eight"],
["nine", "two", 9, np.nan, "nine"],
["ten", "one", "ten", np.nan, "ten"]
],
columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled',
'labeled_with_missings', 'float_labelled'])
# these are all categoricals
expected = pd.concat([expected[col].astype('category') for col in expected], axis=1)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
# File containing strls
def test_read_dta12(self):
parsed_117 = self.read_dta(self.dta21_117)
expected = DataFrame.from_records(
[
[1, "abc", "abcdefghi"],
[3, "cba", "qwertywertyqwerty"],
[93, "", "strl"],
],
columns=['x', 'y', 'z'])
tm.assert_frame_equal(parsed_117, expected, check_dtype=False)
def test_read_write_dta5(self):
original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_write_dta6(self):
original = self.read_csv(self.csv3)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['year'] = original['year'].astype(np.int32)
original['quarter'] = original['quarter'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_read_write_dta10(self):
original = DataFrame(data=[["string", "object", 1, 1.1,
np.datetime64('2003-12-25')]],
columns=['string', 'object', 'integer', 'floating',
'datetime'])
original["object"] = Series(original["object"], dtype=object)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['integer'] = original['integer'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, {'datetime': 'tc'})
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_stata_doc_examples(self):
with tm.ensure_clean() as path:
df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
df.to_stata(path)
def test_write_preserves_original(self):
# 9795
np.random.seed(423)
df = pd.DataFrame(np.random.randn(5,4), columns=list('abcd'))
df.ix[2, 'a':'c'] = np.nan
df_copy = df.copy()
df.to_stata('test.dta', write_index=False)
tm.assert_frame_equal(df, df_copy)
def test_encoding(self):
# GH 4626, proper encoding handling
raw = read_stata(self.dta_encoding)
encoded = read_stata(self.dta_encoding, encoding="latin-1")
result = encoded.kreis1849[0]
if compat.PY3:
expected = raw.kreis1849[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, compat.string_types)
else:
expected = raw.kreis1849.str.decode("latin-1")[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, unicode)
with tm.ensure_clean() as path:
encoded.to_stata(path,encoding='latin-1', write_index=False)
reread_encoded = read_stata(path, encoding='latin-1')
tm.assert_frame_equal(encoded, reread_encoded)
def test_read_write_dta11(self):
original = DataFrame([(1, 2, 3, 4)],
columns=['good', compat.u('b\u00E4d'), '8number', 'astringwithmorethan32characters______'])
formatted = DataFrame([(1, 2, 3, 4)],
columns=['good', 'b_d', '_8number', 'astringwithmorethan32characters_'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
# should get a warning for that format.
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
def test_read_write_dta12(self):
original = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_1',
'astringwithmorethan32characters_2',
'+',
'-',
'short',
'delete'])
formatted = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_',
'_0astringwithmorethan32character',
'_',
'_1_',
'_short',
'_delete'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
tm.assert_equal(len(w), 1) # should get a warning for that format.
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
def test_read_write_dta13(self):
s1 = Series(2**9, dtype=np.int16)
s2 = Series(2**17, dtype=np.int32)
s3 = Series(2**33, dtype=np.int64)
original = DataFrame({'int16': s1, 'int32': s2, 'int64': s3})
original.index.name = 'index'
formatted = original
formatted['int64'] = formatted['int64'].astype(np.float64)
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
formatted)
def test_read_write_reread_dta14(self):
expected = self.read_csv(self.csv14)
cols = ['byte_', 'int_', 'long_', 'float_', 'double_']
for col in cols:
expected[col] = expected[col].convert_objects(convert_numeric=True)
expected['float_'] = expected['float_'].astype(np.float32)
expected['date_td'] = pd.to_datetime(expected['date_td'], coerce=True)
parsed_113 = self.read_dta(self.dta14_113)
parsed_113.index.name = 'index'
parsed_114 = self.read_dta(self.dta14_114)
parsed_114.index.name = 'index'
parsed_115 = self.read_dta(self.dta14_115)
parsed_115.index.name = 'index'
parsed_117 = self.read_dta(self.dta14_117)
parsed_117.index.name = 'index'
tm.assert_frame_equal(parsed_114, parsed_113)
tm.assert_frame_equal(parsed_114, parsed_115)
tm.assert_frame_equal(parsed_114, parsed_117)
with tm.ensure_clean() as path:
parsed_114.to_stata(path, {'date_td': 'td'})
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), parsed_114)
def test_read_write_reread_dta15(self):
expected = self.read_csv(self.csv15)
expected['byte_'] = expected['byte_'].astype(np.int8)
expected['int_'] = expected['int_'].astype(np.int16)
expected['long_'] = expected['long_'].astype(np.int32)
expected['float_'] = expected['float_'].astype(np.float32)
expected['double_'] = expected['double_'].astype(np.float64)
expected['date_td'] = expected['date_td'].apply(datetime.strptime, args=('%Y-%m-%d',))
parsed_113 = self.read_dta(self.dta15_113)
parsed_114 = self.read_dta(self.dta15_114)
parsed_115 = self.read_dta(self.dta15_115)
parsed_117 = self.read_dta(self.dta15_117)
tm.assert_frame_equal(expected, parsed_114)
tm.assert_frame_equal(parsed_113, parsed_114)
tm.assert_frame_equal(parsed_114, parsed_115)
tm.assert_frame_equal(parsed_114, parsed_117)
def test_timestamp_and_label(self):
original = DataFrame([(1,)], columns=['var'])
time_stamp = datetime(2000, 2, 29, 14, 21)
data_label = 'This is a data file.'
with tm.ensure_clean() as path:
original.to_stata(path, time_stamp=time_stamp, data_label=data_label)
reader = StataReader(path)
parsed_time_stamp = dt.datetime.strptime(reader.time_stamp, ('%d %b %Y %H:%M'))
assert parsed_time_stamp == time_stamp
assert reader.data_label == data_label
def test_numeric_column_names(self):
original = DataFrame(np.reshape(np.arange(25.0), (5, 5)))
original.index.name = 'index'
with tm.ensure_clean() as path:
# should get a warning for that format.
with warnings.catch_warnings(record=True) as w:
tm.assert_produces_warning(original.to_stata(path), InvalidColumnName)
# should produce a single warning
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
columns = list(written_and_read_again.columns)
convert_col_name = lambda x: int(x[1])
written_and_read_again.columns = map(convert_col_name, columns)
tm.assert_frame_equal(original, written_and_read_again)
def test_nan_to_missing_value(self):
s1 = Series(np.arange(4.0), dtype=np.float32)
s2 = Series(np.arange(4.0), dtype=np.float64)
s1[::2] = np.nan
s2[1::2] = np.nan
original = DataFrame({'s1': s1, 's2': s2})
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
tm.assert_frame_equal(written_and_read_again, original)
def test_no_index(self):
columns = ['x', 'y']
original = DataFrame(np.reshape(np.arange(10.0), (5, 2)),
columns=columns)
original.index.name = 'index_not_written'
with tm.ensure_clean() as path:
original.to_stata(path, write_index=False)
written_and_read_again = self.read_dta(path)
tm.assertRaises(KeyError,
lambda: written_and_read_again['index_not_written'])
def test_string_no_dates(self):
s1 = Series(['a', 'A longer string'])
s2 = Series([1.0, 2.0], dtype=np.float64)
original = DataFrame({'s1': s1, 's2': s2})
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_large_value_conversion(self):
s0 = Series([1, 99], dtype=np.int8)
s1 = Series([1, 127], dtype=np.int8)
s2 = Series([1, 2 ** 15 - 1], dtype=np.int16)
s3 = Series([1, 2 ** 63 - 1], dtype=np.int64)
original = DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3})
original.index.name = 'index'
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
tm.assert_produces_warning(original.to_stata(path),
PossiblePrecisionLoss)
# should produce a single warning
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
modified = original.copy()
modified['s1'] = Series(modified['s1'], dtype=np.int16)
modified['s2'] = Series(modified['s2'], dtype=np.int32)
modified['s3'] = Series(modified['s3'], dtype=np.float64)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
modified)
def test_dates_invalid_column(self):
original = DataFrame([datetime(2006, 11, 19, 23, 13, 20)])
original.index.name = 'index'
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
tm.assert_produces_warning(original.to_stata(path, {0: 'tc'}),
InvalidColumnName)
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
modified = original.copy()
modified.columns = ['_0']
tm.assert_frame_equal(written_and_read_again.set_index('index'),
modified)
def test_date_export_formats(self):
columns = ['tc', 'td', 'tw', 'tm', 'tq', 'th', 'ty']
conversions = dict(((c, c) for c in columns))
data = [datetime(2006, 11, 20, 23, 13, 20)] * len(columns)
original = DataFrame([data], columns=columns)
original.index.name = 'index'
expected_values = [datetime(2006, 11, 20, 23, 13, 20), # Time
datetime(2006, 11, 20), # Day
datetime(2006, 11, 19), # Week
datetime(2006, 11, 1), # Month
datetime(2006, 10, 1), # Quarter year
datetime(2006, 7, 1), # Half year
datetime(2006, 1, 1)] # Year
expected = DataFrame([expected_values], columns=columns)
expected.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, conversions)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
expected)
def test_write_missing_strings(self):
original = DataFrame([["1"], [None]], columns=["foo"])
expected = DataFrame([["1"], [""]], columns=["foo"])
expected.index.name = 'index'
with
|
tm.ensure_clean()
|
pandas.util.testing.ensure_clean
|
#!/usr/bin/env python
# coding: utf-8
# ## Importing dependencies
# some parts of this code were adapted from https://www.kaggle.com/rftexas/better-image-tiles-removing-white-spaces
# In[1]:
import os
import cv2
import PIL
import random
import openslide
import skimage.io
import matplotlib
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from IPython.display import Image, display
train_df =
|
pd.read_csv('../input/prostate-cancer-grade-assessment/train.csv')
|
pandas.read_csv
|
import argparse
import pandas as pd
from pandas.io.json import json_normalize
import json
import os
def extract_events_from_stream(stream_df, event_type):
""" Extracts specific event from stream.
"""
events = stream_df.loc[stream_df.EventType == event_type][['EventTime', 'Event']]
events_json = events['Event'].to_json(orient="records")
json_struct = json.loads(events_json)
# TODO : get rid of structs containing all `int` types
event_extracted = json_normalize(json_struct)
event_extracted = pd.merge(events['EventTime'].reset_index(), event_extracted, left_index=True, right_index=True)
if not event_extracted.empty:
event_extracted = event_extracted[['EventTime', 'order_id', 'limit_price', 'quantity', 'is_buy_order']]
event_extracted.rename(columns={'EventTime': 'TIMESTAMP',
'order_id': 'ORDER_ID',
'limit_price': 'PRICE',
'quantity': 'SIZE',
'is_buy_order': 'BUY_SELL_FLAG'}, inplace=True)
else:
event_extracted = pd.DataFrame({
'TIMESTAMP': [],
'ORDER_ID': [],
'PRICE': [],
'SIZE': [],
'BUY_SELL_FLAG': []
})
return event_extracted
def seconds_since_midnight(s):
""" Converts a pandas Series object of datetime64[ns] timestamps to Series of seconds from midnight on that day.
Inspired by https://stackoverflow.com/a/38050344
"""
d = pd.to_datetime(s.dt.date)
delta_t = s - d
return delta_t.dt.total_seconds()
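# Illustrative aside (not part of the original script): a minimal check of the conversion.
#
#     seconds_since_midnight(pd.Series(pd.to_datetime(["2020-06-01 00:01:30"])))
#     # expected: a Series containing 90.0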
def convert_stream_to_format(stream_df, fmt="LOBSTER"):
""" Converts imported ABIDES DataFrame into LOBSTER FORMAT.
"""
event_dfs = []
market_events = {
"LIMIT_ORDER": 1,
# "MODIFY_ORDER": 2, # causing errors in market replay
"ORDER_CANCELLED": 3,
"ORDER_EXECUTED": 4
}
reversed_market_events = {val: key for key, val in market_events.items()}
for event_name, lobster_code in market_events.items():
event_df = extract_events_from_stream(stream_df, event_name)
if event_df.empty:
continue
else:
event_df["Time"] = seconds_since_midnight(event_df["TIMESTAMP"])
event_df["Type"] = lobster_code
event_dfs.append(event_df)
lobster_df =
|
pd.concat(event_dfs)
|
pandas.concat
|
from datenguidepy.query_builder import Query
from datenguidepy.query_execution import QueryExecutioner, ExecutionResults
from typing import Dict, Any, cast, Optional, List
import pandas as pd
from functools import partial
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
ALL_REGIONS: pd.DataFrame = pd.read_csv(
os.path.join(dir_path, "regions.csv"), index_col="id"
)
class ConfigMapping:
def __init__(self, mapping: Dict[str, Any]):
self._mapping = mapping
def __getattr__(self, k: str) -> Any:
return self._mapping[k]
def __dir__(self):
return list(self._mapping.keys())
def __repr__(self):
return "\n".join(
name.ljust(30, ".") + " " + id for name, id in self._mapping.items()
)
def __iter__(self):
return self._mapping.values().__iter__()
def hirachy_up(
lowestids: str, hirachy_frame: pd.DataFrame = ALL_REGIONS
) -> pd.DataFrame:
anscestors = []
current_ids = lowestids
while len(current_ids) > 0:
current_regions = hirachy_frame.query("index.isin(@current_ids)")
anscestors.append(current_regions)
current_ids = current_regions.dropna().parent.unique()
return
|
pd.concat(anscestors)
|
pandas.concat
|
import pandas as pd
import numpy as np
import requests
import json
import pickle
import os
from sklearn.preprocessing import OneHotEncoder
from sklearn.neighbors import NearestNeighbors
from sklearn.base import BaseEstimator, TransformerMixin
from flask import Blueprint, request, jsonify, render_template, Flask, redirect, url_for
#from flask_cors import CORS, cross_origin
model = Blueprint("model", __name__)
class My_encoder(BaseEstimator, TransformerMixin):
def __init__(self,drop = 'first',sparse=False):
self.encoder = OneHotEncoder(drop = drop,sparse = sparse)
self.features_to_encode = []
self.columns = []
def fit(self,X_train,features_to_encode):
data = X_train.copy()
self.features_to_encode = features_to_encode
data_to_encode = data[self.features_to_encode]
self.columns = pd.get_dummies(data_to_encode,drop_first = True).columns
self.encoder.fit(data_to_encode)
return self.encoder
def transform(self,X_test):
data = X_test.copy()
data.reset_index(drop = True,inplace =True)
data_to_encode = data[self.features_to_encode]
data_left = data.drop(self.features_to_encode,axis = 1)
data_encoded = pd.DataFrame(self.encoder.transform(data_to_encode),columns = self.columns)
return pd.concat([data_left,data_encoded],axis = 1)
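# Illustrative aside (not part of the original app): a hedged usage sketch with a toy frame; the
# column names below are made up for illustration only.
#
#     toy = pd.DataFrame({"genre": ["rock", "pop", "rock"], "tempo": [120, 98, 130]})
#     enc = My_encoder()
#     enc.fit(toy, ["genre"])   # note: fit returns the fitted OneHotEncoder, not self
#     enc.transform(toy)        # expected: "tempo" plus the dummy column "genre_rock"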
df=
|
pd.read_csv('https://raw.githubusercontent.com/Build-Week-Spotify-Song-Suggester-5/Data-Science/master/app/most_popular_spotify_songs.csv')
|
pandas.read_csv
|
import pandas as pd
import os
from scipy import stats
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import savgol_filter
def smooth(x, window_len=11, window='hanning'):
"""smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) at both ends so that transient parts are minimized
at the beginning and end of the output signal.
input:
x: the input signal
window_len: the dimension of the smoothing window; should be an odd integer
window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
flat window will produce a moving average smoothing.
output:
the smoothed signal
example:
t = np.arange(-2, 2, 0.1)
x = np.sin(t) + np.random.randn(len(t)) * 0.1
y = smooth(x)
see also:
numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
scipy.signal.lfilter
TODO: the window parameter could be the window itself if an array instead of a string
NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y.
"""
if x.ndim != 1:
raise ("smooth only accepts 1 dimension arrays.")
if x.size < window_len:
raise ("Input vector needs to be bigger than window size.")
if window_len < 3:
return x
if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError("Window must be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
s = np.r_[x[window_len - 1:0:-1], x, x[-2:-window_len - 1:-1]]
# print(len(s))
if window == 'flat': # moving average
w = np.ones(window_len, 'd')
else:
w = getattr(np, window)(window_len)  # e.g. np.hanning(window_len)
y = np.convolve(w / w.sum(), s, mode='valid')
return y[int(window_len/2-1):-int(window_len/2)]
###################################
offset_frame = 20
result_path = r'result/plot'
processed_result_path = r'result/plot_processed'
os.makedirs(processed_result_path, exist_ok=True)
###################################
angles_info_path = os.path.join(result_path, 'Angles.xlsx')
distances_info_path = os.path.join(result_path, 'Distances.xlsx')
df_angles = pd.read_excel(angles_info_path, sheet_name='Angles')
df_distances = pd.read_excel(distances_info_path, sheet_name='Distances')
df_angles = df_angles.query(f'Frame > {offset_frame}')
df_distances = df_distances.query(f'Frame > {offset_frame}')
## Remove and modify outliers
z_threshlod = 3
for column in df_angles.columns:
z = np.abs(stats.zscore(df_angles[column]))
df_angles[column] = np.where(z < z_threshlod, df_angles[column], np.NAN)
# df_angles[column] = df_angles[column].rolling(window=10, center=True).mean().fillna(method='bfill').fillna(method='ffill')
df_angles.fillna(method='ffill', inplace=True)
for column in df_distances.columns:
z = np.abs(stats.zscore(df_distances[column]))
df_distances[column] = np.where(z < z_threshlod, df_distances[column], np.NAN)
# df_distances[column] = df_distances[column].rolling(window=10, center=True).mean().fillna(method='bfill').fillna(method='ffill')
df_distances.fillna(method='ffill', inplace=True)
## Smoothing
frames = list(df_angles['Frame'])
all_angles = {'Frame': frames}
min_angles = {'Frame': frames}
max_angles = {'Frame': frames}
for column in df_angles.columns:
if column != 'Frame':
angles = list(df_angles[column])
smoothed_angles = savgol_filter(angles, window_length=51, polyorder=3)
# smoothed_angles = smooth(np.array(angles), window_len=20, window='hanning')
smoothed_angles = [int(angle) for angle in smoothed_angles]
all_angles[column] = smoothed_angles
for i, angle in enumerate(smoothed_angles):
if column in min_angles.keys():
previous_min_angle = min_angles[column][i-1]
if angle < previous_min_angle:
min_angles[column].append(angle)
else:
min_angles[column].append(previous_min_angle)
else:
min_angles[column] = [angle]
##
if column in max_angles.keys():
previous_max_angle = max_angles[column][i-1]
if angle > previous_max_angle:
max_angles[column].append(angle)
else:
max_angles[column].append(previous_max_angle)
else:
max_angles[column] = [angle]
frames = list(df_distances['Frame'])
all_distances = {'Frame': frames}
min_distances = {'Frame': frames}
max_distances = {'Frame': frames}
for column in df_distances.columns:
if column != 'Frame':
distances = list(df_distances[column])
smoothed_distances = savgol_filter(distances, window_length=51, polyorder=3)
# smoothed_distances = smooth(smoothed_distances, window_len=10, window='hanning')
# smoothed_distances = smooth(np.array(distances), window_len=20, window='hanning')
smoothed_distances = [round(distance, 2) for distance in smoothed_distances]
all_distances[column] = smoothed_distances
for i, distance in enumerate(smoothed_distances):
if column in min_distances.keys():
previous_min_distance = min_distances[column][i-1]
if distance < previous_min_distance:
min_distances[column].append(distance)
else:
min_distances[column].append(previous_min_distance)
else:
min_distances[column] = [distance]
##
if column in max_distances.keys():
previous_max_distance = max_distances[column][i-1]
if distance > previous_max_distance:
max_distances[column].append(distance)
else:
max_distances[column].append(previous_max_distance)
else:
max_distances[column] = [distance]
processed_angles_info_path = os.path.join(processed_result_path, 'Angles.xlsx')
df_angles =
|
pd.DataFrame.from_dict(all_angles)
|
pandas.DataFrame.from_dict
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 12 15:21:09 2020
@author: sdy
"""
import os
import numpy as np
import pandas as pd
from sklearn.metrics import pairwise_distances
import tensorflow as tf
from tensorflow.keras.models import Model
from tqdm import tqdm
from model.ghnet_util import load_input_fmt, restore_power_input
from env.power_env import PowerEnv
from ana.space import run_dist_proc, get_poisson_rmax
from core.power import Power
from model.mlp import custom_mlp, MultiRegWithPFLossLayer
from data.ghnet_data import mm_normalize, mm_denormalize
EPS = 1.e-6
def adjust_sample(f_model, x0, targets, lr=0.01, step=10, lim=0.1):
x = tf.Variable(x0[np.newaxis, :])
opt = tf.keras.optimizers.SGD(learning_rate=lr)
# opt = tf.keras.optimizers.Adam(learning_rate=lr)
loss0 = 100.
rate = 1.
for i in range(step):
with tf.GradientTape() as tape:
tape.watch(x)
features = f_model(x)
loss = tf.reduce_sum(tf.pow(features - targets, 2))
rate *= 0.5 if loss < loss0 else 2.0
loss0 = loss.numpy()
grad = tape.gradient(loss, x)
# print(i, 'loss =', loss)
# print(i, 'grad =', grad)
# print(i, 'x0 =', x)
opt.apply_gradients(grads_and_vars=[(tf.sign(grad) * rate, x)])
# print(i, 'x =', x)
# features = f_model(x).numpy().reshape(-1)
# features0 = f_model(x0.reshape(1, -1)).numpy().reshape(-1)
# print(np.linalg.norm(targets - features))
# print(np.linalg.norm(targets - features0))
return x0 + np.clip(x.numpy() - x0, -lim, lim)
def assess_sample(path):
pass
"""
def adjust_sample(f_model, x0, targets, lim=np.inf):
x = tf.expand_dims(tf.Variable(x0), axis=0)
for i in range(50):
with tf.GradientTape() as tape:
tape.watch(x)
features = f_model(x)
loss = tf.reduce_sum(tf.pow(features - targets, 2))
# print(loss)
grad = tape.gradient(loss, x)
# print(grad)
x += -grad * 1.0
# features = f_model(x).numpy().reshape(-1)
# features0 = f_model(x0.reshape(1, -1)).numpy().reshape(-1)
# print(np.linalg.norm(targets - features))
# print(np.linalg.norm(targets - features0))
return x0+np.clip(x.numpy()-x0, -lim, lim)
"""
def assess_dist(features, normalize=True):
if normalize:
min_data = features.min(axis=0).reshape(1, -1)
max_data = features.max(axis=0).reshape(1, -1)
data = (features - min_data) / (max_data - min_data + EPS)
else:
data = features.copy()
half_dists = pairwise_distances(data) / 2.
max_idx = half_dists.argmax()
half_dists += np.diag(half_dists.max(axis=0))
min_idx = half_dists.argmin()
r_min = half_dists.min(axis=0)
return r_min, max_idx, min_idx
if __name__ == '__main__':
base_path = os.path.join(os.path.expanduser('~'), 'data', 'wepri36', 'wepri36')
work_path = os.path.join(os.path.expanduser('~'), 'data', 'wepri36', 'gen100')
n_agent = 100
agents = []
fmt = 'off'
res_want = {'CCTOUT': ['name', 'cct']}
input_fmt = load_input_fmt(work_path + '/input.txt', input_mode=True)
for i in range(n_agent):
sub_path = os.path.join(work_path, str(i))
if not os.path.exists(sub_path):
os.mkdir(sub_path)
pe = PowerEnv(base_path=base_path, work_path=sub_path,
fmt=fmt, input_fmt=input_fmt, res_want=res_want)
pe.random_generate_power(genp_method='uniform', genv_method='uniform',
loadp_method='normal', open_prob=None)
agents.append(pe)
states = np.zeros((n_agent, input_fmt.shape[0]), dtype=np.float32)
results = pd.DataFrame(columns=['pf', 'AC44_ac', 'AC10_ac', 'AC11_ac'],
dtype=np.float32)
models = custom_mlp(input_fmt.shape[0], len(results.columns))
pre_model = models['predict']
train_model = models['train']
feature_model = models['feature']
n_feature = feature_model.output.shape[1]
targets = np.zeros((n_agent, n_feature), dtype=np.float32)
traces = {'state': [],
'feature0': [], 'feature_model': [], 'target': [], 'feature_new': [],
'min_pair': [], 'max_pair': []}
r_max = get_poisson_rmax(n_feature, n_agent)
for j in range(10):
results.drop(results.index, inplace=True)
for i in tqdm(range(n_agent), desc='simulation', ascii=True):
path = os.path.join(agents[i].work_path, str(agents[i].step))
pf = agents[i].check_pf(path, need_data=True, need_st=True)
states[i] = agents[i].get_state(normalize=False)
res =
|
pd.Series(index=results.columns, dtype=np.float32, name=i)
|
pandas.Series
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 7 15:52:10 2017
@author: konodera
if t-1 == first buy, what's the ratio of reordered?
"""
import pandas as pd
import numpy as np
from collections import defaultdict
from tqdm import tqdm
import utils
utils.start(__file__)
#==============================================================================
# load
#==============================================================================
usecols = ['user_id', 'product_id', 'order_number', 'reordered', 'order_number_rev']
log = utils.read_pickles('../input/mk/log', usecols).sort_values(usecols[:3])
#==============================================================================
# def
#==============================================================================
def make(T):
"""
T = 0
folder = 'trainT-0'
"""
if T==-1:
folder = 'test'
else:
folder = 'trainT-'+str(T)
log_ = log[log.order_number_rev>T]
log_['user_max_onb'] = log_.groupby('user_id').order_number.transform(np.max)
log_ = log_.groupby(['user_id', 'product_id']).head(2)
item_cnt = defaultdict(int)
item_chance = defaultdict(int)
pid_bk = uid_bk = onb_bk = None
for uid, pid, onb, max_onb in log_[['user_id', 'product_id', 'order_number', 'user_max_onb']].values:
if uid==uid_bk and pid==pid_bk and (onb-onb_bk==1):
item_cnt[pid] +=1
if onb!=max_onb:
item_chance[pid] +=1
pid_bk = pid
uid_bk = uid
onb_bk = onb
item_cnt =
|
pd.DataFrame.from_dict(item_cnt, orient='index')
|
pandas.DataFrame.from_dict
|
import logging
import re
import sys
from pathlib import Path
from typing import Dict, Iterable, Iterator, List, Tuple
from urllib.parse import urlparse
from warnings import warn
import numpy as np
import pandas as pd
from ixmp.backend import ItemType
log = logging.getLogger(__name__)
# Globally accessible logger.
# TODO remove when :func:`logger` is removed.
_LOGGER = None
def logger():
"""Access global logger.
.. deprecated:: 3.3
To control logging from ixmp, instead use :mod:`logging` to retrieve it:
.. code-block:: python
import logging
ixmp_logger = logging.getLogger("ixmp")
# Example: set the level to INFO
ixmp_logger.setLevel(logging.INFO)
"""
warn(
"ixmp.utils.logger() is deprecated as of 3.3.0, and will be removed in ixmp "
'5.0. Use logging.getLogger("ixmp").',
DeprecationWarning,
)
return logging.getLogger("ixmp")
def as_str_list(arg, idx_names=None):
"""Convert various *arg* to list of str.
Several types of arguments are handled:
- :obj:`None`: returned as None.
- class:`str`: returned as a length-1 list of str.
- iterable of values: :class:`str` is called on each value.
- :class:`dict`, with `idx_names`: the `idx_names` are used to look up values in the
dict. The return value has the corresponding values in the same order.
"""
if arg is None:
return None
elif idx_names is None:
# arg must be iterable
# NB narrower ABC Sequence does not work here; e.g. test_excel_io()
# fails via Scenario.add_set().
if isinstance(arg, Iterable) and not isinstance(arg, str):
return list(map(str, arg))
else:
return [str(arg)]
else:
return [str(arg[idx]) for idx in idx_names]
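# Illustrative aside (not part of ixmp): a minimal self-check of the three documented input forms.
assert as_str_list(None) is None
assert as_str_list(("a", 1)) == ["a", "1"]
assert as_str_list({"model": "m", "scenario": "s"}, idx_names=["model", "scenario"]) == ["m", "s"]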
def isscalar(x):
"""Returns True if `x` is a scalar."""
warn(
"ixmp.utils.isscalar() will be removed in ixmp >= 5.0. Use numpy.isscalar()",
DeprecationWarning,
)
return np.isscalar(x)
def check_year(y, s):
"""Returns True if y is an int, raises an error if y is not None"""
if y is not None:
if not isinstance(y, int):
raise ValueError("arg `{}` must be an integer!".format(s))
return True
def diff(a, b, filters=None) -> Iterator[Tuple[str, pd.DataFrame]]:
"""Compute the difference between Scenarios `a` and `b`.
:func:`diff` combines :func:`pandas.merge` and :meth:`Scenario.items`. Only
parameters are compared. :func:`~pandas.merge` is called with the arguments
``how="outer", sort=True, suffixes=("_a", "_b"), indicator=True``; the merge is
performed on all columns except 'value' or 'unit'.
Yields
------
tuple of str, pandas.DataFrame
Tuples of item name and data.
"""
# Iterators; index 0 corresponds to `a`, 1 to `b`
items = [
a.items(filters=filters, type=ItemType.PAR),
b.items(filters=filters, type=ItemType.PAR),
]
# State variables for loop
name = ["", ""]
data: List[pd.DataFrame] = [None, None]
# Elements for first iteration
name[0], data[0] = next(items[0])
name[1], data[1] = next(items[1])
while True:
# Convert scalars to pd.DataFrame
data = list(map(maybe_convert_scalar, data))
# Compare the names from `a` and `b` to ensure matching items
if name[0] == name[1]:
# Matching items in `a` and `b`
current_name = name[0]
left, right = data
else:
# Mismatched; either `a` or `b` has no data for these filters
current_name = min(*name)
if name[0] > current_name:
# No data in `a` for `current_name`; create an empty DataFrame
left, right = pd.DataFrame(columns=data[1].columns), data[1]
else:
left, right = data[0], pd.DataFrame(columns=data[0].columns)
# Either merge on remaining columns; or, for scalars, on the indices
on = sorted(set(left.columns) - {"value", "unit"})
on_arg: Dict[str, object] = (
dict(on=on) if on else dict(left_index=True, right_index=True)
)
# Merge the data from each side
yield current_name, pd.merge(
left,
right,
how="outer",
**on_arg,
sort=True,
suffixes=("_a", "_b"),
indicator=True,
)
# Maybe advance each iterators
for i in (0, 1):
try:
if name[i] == current_name:
# data was compared in this iteration; advance
name[i], data[i] = next(items[i])
except StopIteration:
# No more data for this iterator.
# Use "~" because it sorts after all ASCII characters
name[i], data[i] = "~ end", None
if name[0] == name[1] == "~ end":
break
def maybe_check_out(timeseries, state=None):
"""Check out `timeseries` depending on `state`.
If `state` is :obj:`None`, then :meth:`check_out` is called.
Returns
-------
:obj:`True`
if `state` was :obj:`None` and a check out was performed, i.e. `timeseries` was
previously in a checked-in state.
:obj:`False`
if `state` was :obj:`None` and no check out was performed, i.e. `timeseries`
was already in a checked-out state.
`state`
if `state` was not :obj:`None` and no check out was attempted.
Raises
------
ValueError
If `timeseries` is a :class:`.Scenario` object and
:meth:`~.Scenario.has_solution` is :obj:`True`.
See Also
--------
:meth:`.TimeSeries.check_out`
:meth:`.Scenario.check_out`
"""
if state is not None:
return state
try:
timeseries.check_out()
except RuntimeError:
# If `timeseries` is new (has not been committed), the checkout attempt raises
# an exception
return False
else:
return True
def maybe_commit(timeseries, condition, message):
"""Commit `timeseries` with `message` if `condition` is :obj:`True`.
Returns
-------
:obj:`True`
if a commit is performed.
:obj:`False`
if any exception is raised during the attempted commit. The exception
is logged with level ``INFO``.
See Also
--------
:meth:`.TimeSeries.commit`
"""
if not condition:
return False
try:
timeseries.commit(message)
except RuntimeError as exc:
log.info(f"maybe_commit() didn't commit: {exc}")
return False
else:
return True
def maybe_convert_scalar(obj) -> pd.DataFrame:
"""Convert `obj` to :class:`pandas.DataFrame`.
Parameters
----------
obj
Any value returned by :meth:`Scenario.par`. For a scalar (0-dimensional)
parameter, this will be :class:`dict`.
Returns
-------
pandas.DataFrame
:meth:`maybe_convert_scalar` always returns a data frame.
"""
if isinstance(obj, dict):
return pd.DataFrame.from_dict({0: obj}, orient="index")
else:
return obj
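# Illustrative aside (not part of ixmp): scalar parameters arrive as dicts and become one-row frames.
#
#     maybe_convert_scalar({"value": 42.0, "unit": "GWa"})
#     # expected: a DataFrame with index [0] and columns ["value", "unit"]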
def parse_url(url):
"""Parse *url* and return Platform and Scenario information.
A URL (Uniform Resource Locator), as the name implies, uniquely identifies
a specific scenario and (optionally) version of a model, as well as
(optionally) the database in which it is stored. ixmp URLs take forms
like::
ixmp://PLATFORM/MODEL/SCENARIO[#VERSION]
MODEL/SCENARIO[#VERSION]
where:
- The ``PLATFORM`` is a configured platform name; see :obj:`ixmp.config`.
- ``MODEL`` may not contain the forward slash character ('/'); ``SCENARIO``
may contain any number of forward slashes. Both must be supplied.
- ``VERSION`` is optional but, if supplied, must be an integer.
Returns
-------
platform_info : dict
Keyword argument 'name' for the :class:`Platform` constructor.
scenario_info : dict
Keyword arguments for a :class:`Scenario` on the above platform:
'model', 'scenario' and, optionally, 'version'.
Raises
------
ValueError
For malformed URLs.
"""
components = urlparse(url)
if components.scheme not in ("ixmp", ""):
raise ValueError("URL must begin with ixmp:// or //")
platform_info = dict()
if components.netloc:
platform_info["name"] = components.netloc
scenario_info = dict()
path = components.path.split("/")
if len(path):
# If netloc was given, path begins with '/'; discard
path = path if len(path[0]) else path[1:]
if len(path) < 2:
raise ValueError("URL path must be 'MODEL/SCENARIO'")
scenario_info["model"] = path[0]
scenario_info["scenario"] = "/".join(path[1:])
if len(components.query):
raise ValueError(f"queries ({components.query}) not supported in URLs")
if len(components.fragment):
try:
version = int(components.fragment)
except ValueError:
if components.fragment != "new":
raise ValueError(
f"URL version must be int or 'new'; got '{components.fragment}'"
)
else:
version = "new"
scenario_info["version"] = version
return platform_info, scenario_info
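# Illustrative example (hypothetical platform/model names):
#
#     >>> parse_url("ixmp://local/my_model/my_scenario#3")
#     ({'name': 'local'}, {'model': 'my_model', 'scenario': 'my_scenario', 'version': 3})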
def to_iamc_layout(df: pd.DataFrame) -> pd.DataFrame:
"""Transform *df* to a standard IAMC layout.
The returned object has:
- Any (Multi)Index levels reset as columns.
- Lower-case column names 'region', 'variable', 'subannual', and 'unit'.
- If not present in *df*, the value 'Year' in the 'subannual' column.
Parameters
----------
df : pandas.DataFrame
May have a 'node' column, which will be renamed to 'region'.
Returns
-------
pandas.DataFrame
Raises
------
ValueError
If 'region', 'variable', or 'unit' is not among the column names.
"""
# Reset the index if meaningful entries are included there
if not list(df.index.names) == [None]:
df.reset_index(inplace=True)
# Rename columns in lower case, and transform 'node' to 'region'
cols = {c: str(c).lower() for c in df.columns}
cols.update(node="region")
df = df.rename(columns=cols)
required_cols = ["region", "variable", "unit"]
missing = list(set(required_cols) - set(df.columns))
if len(missing):
raise ValueError(f"missing required columns {repr(missing)}")
# Add a column 'subannual' with the default value
if "subannual" not in df.columns:
df["subannual"] = "Year"
return df
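# Illustrative example (hypothetical data; note that only a column literally
# named 'node' is renamed to 'region'):
#
#     >>> df = pd.DataFrame({"node": ["World"], "variable": ["GDP"], "unit": ["USD"]})
#     >>> to_iamc_layout(df).columns.tolist()
#     ['region', 'variable', 'unit', 'subannual']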
def year_list(x):
"""Return the elements of x that can be cast to year (int)."""
lst = []
for i in x:
try:
int(i) # this is a year
lst.append(i)
except ValueError:
pass
return lst
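# e.g. year_list([2010, "2020", "baseline"]) -> [2010, "2020"]  (illustrative;
# elements are returned unchanged, not cast to int)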
def filtered(df, filters):
"""Returns a filtered dataframe based on a filters dictionary"""
if filters is None:
return df
mask = pd.Series(True, index=df.index)
for k, v in filters.items():
isin = df[k].isin(as_str_list(v))
mask = mask & isin
return df[mask]
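# Illustrative example (hypothetical data; assumes `as_str_list`, defined
# elsewhere in this module, wraps a single string into a one-element list):
#
#     >>> df = pd.DataFrame({"region": ["World", "Europe"], "value": [1.0, 2.0]})
#     >>> filtered(df, {"region": "World"})["value"].tolist()
#     [1.0]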
def format_scenario_list(
platform, model=None, scenario=None, match=None, default_only=False, as_url=False
):
"""Return a formatted list of TimeSeries on *platform*.
Parameters
----------
platform : :class:`.Platform`
model : str, optional
Model name to restrict results. Passed to :meth:`.scenario_list`.
scenario : str, optional
Scenario name to restrict results. Passed to :meth:`.scenario_list`.
match : str, optional
Regular expression to restrict results. Only results where the model or
scenario name matches are returned.
default_only : bool, optional
Only return TimeSeries where a default version has been set with
:meth:`TimeSeries.set_as_default`.
as_url : bool, optional
Format results as ixmp URLs.
Returns
-------
list of str
If *as_url* is :obj:`False`, also include summary information.
"""
try:
match = re.compile(".*" + match + ".*")
except TypeError:
pass
def describe(df):
N = len(df)
min = df.version.min()
max = df.version.max()
result = dict(N=N, range="")
if N > 1:
result["range"] = "{}–{}".format(min, max)
if N != max:
result["range"] += " ({} versions)".format(N)
try:
mask = df.is_default.astype(bool)
result["default"] = df.loc[mask, "version"].iat[0]
except IndexError:
result["default"] = max
return
|
pd.Series(result)
|
pandas.Series
|
import re
from datetime import datetime
import nose
import pytz
import platform
from time import sleep
import os
import logging
import numpy as np
from distutils.version import StrictVersion
from pandas import compat
from pandas import NaT
from pandas.compat import u, range
from pandas.core.frame import DataFrame
import pandas.io.gbq as gbq
import pandas.util.testing as tm
from pandas.compat.numpy import np_datetime64_compat
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
_IMPORTS = False
_GOOGLE_API_CLIENT_INSTALLED = False
_GOOGLE_API_CLIENT_VALID_VERSION = False
_HTTPLIB2_INSTALLED = False
_SETUPTOOLS_INSTALLED = False
def _skip_if_no_project_id():
if not _get_project_id():
raise nose.SkipTest(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
raise nose.SkipTest("Cannot run integration tests without a "
"private key json file path")
def _skip_if_no_private_key_contents():
if not _get_private_key_contents():
raise nose.SkipTest("Cannot run integration tests without a "
"private key json contents")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
else:
return PROJECT_ID
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
else:
return PRIVATE_KEY_JSON_PATH
def _get_private_key_contents():
if _in_travis_environment():
with open(os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])) as f:
return f.read()
else:
return PRIVATE_KEY_JSON_CONTENTS
def _test_imports():
global _GOOGLE_API_CLIENT_INSTALLED, _GOOGLE_API_CLIENT_VALID_VERSION, \
_HTTPLIB2_INSTALLED, _SETUPTOOLS_INSTALLED
try:
import pkg_resources
_SETUPTOOLS_INSTALLED = True
except ImportError:
_SETUPTOOLS_INSTALLED = False
if compat.PY3:
google_api_minimum_version = '1.4.1'
else:
google_api_minimum_version = '1.2.0'
if _SETUPTOOLS_INSTALLED:
try:
try:
from googleapiclient.discovery import build # noqa
from googleapiclient.errors import HttpError # noqa
            except ImportError:
from apiclient.discovery import build # noqa
from apiclient.errors import HttpError # noqa
from oauth2client.client import OAuth2WebServerFlow # noqa
from oauth2client.client import AccessTokenRefreshError # noqa
from oauth2client.file import Storage # noqa
from oauth2client.tools import run_flow # noqa
_GOOGLE_API_CLIENT_INSTALLED = True
_GOOGLE_API_CLIENT_VERSION = pkg_resources.get_distribution(
'google-api-python-client').version
if (StrictVersion(_GOOGLE_API_CLIENT_VERSION) >=
StrictVersion(google_api_minimum_version)):
_GOOGLE_API_CLIENT_VALID_VERSION = True
except ImportError:
_GOOGLE_API_CLIENT_INSTALLED = False
try:
import httplib2 # noqa
_HTTPLIB2_INSTALLED = True
except ImportError:
_HTTPLIB2_INSTALLED = False
if not _SETUPTOOLS_INSTALLED:
raise ImportError('Could not import pkg_resources (setuptools).')
if not _GOOGLE_API_CLIENT_INSTALLED:
raise ImportError('Could not import Google API Client.')
if not _GOOGLE_API_CLIENT_VALID_VERSION:
raise ImportError("pandas requires google-api-python-client >= {0} "
"for Google BigQuery support, "
"current version {1}"
.format(google_api_minimum_version,
_GOOGLE_API_CLIENT_VERSION))
if not _HTTPLIB2_INSTALLED:
raise ImportError(
"pandas requires httplib2 for Google BigQuery support")
# Bug fix for https://github.com/pandas-dev/pandas/issues/12572
# We need to know that a supported version of oauth2client is installed
# Test that either of the following is installed:
# - SignedJwtAssertionCredentials from oauth2client.client
# - ServiceAccountCredentials from oauth2client.service_account
    # SignedJwtAssertionCredentials is available in oauth2client < 2.0.0
    # ServiceAccountCredentials is available in oauth2client >= 2.0.0
oauth2client_v1 = True
oauth2client_v2 = True
try:
from oauth2client.client import SignedJwtAssertionCredentials # noqa
except ImportError:
oauth2client_v1 = False
try:
from oauth2client.service_account import ServiceAccountCredentials # noqa
except ImportError:
oauth2client_v2 = False
if not oauth2client_v1 and not oauth2client_v2:
raise ImportError("Missing oauth2client required for BigQuery "
"service account support")
def _setup_common():
try:
_test_imports()
except (ImportError, NotImplementedError) as import_exception:
raise nose.SkipTest(import_exception)
if _in_travis_environment():
logging.getLogger('oauth2client').setLevel(logging.ERROR)
logging.getLogger('apiclient').setLevel(logging.ERROR)
def _check_if_can_get_correct_default_credentials():
# Checks if "Application Default Credentials" can be fetched
# from the environment the tests are running in.
# See Issue #13577
import httplib2
try:
from googleapiclient.discovery import build
except ImportError:
from apiclient.discovery import build
try:
from oauth2client.client import GoogleCredentials
credentials = GoogleCredentials.get_application_default()
http = httplib2.Http()
http = credentials.authorize(http)
bigquery_service = build('bigquery', 'v2', http=http)
jobs = bigquery_service.jobs()
job_data = {'configuration': {'query': {'query': 'SELECT 1'}}}
jobs.insert(projectId=_get_project_id(), body=job_data).execute()
return True
    except Exception:
return False
def clean_gbq_environment(private_key=None):
dataset = gbq._Dataset(_get_project_id(), private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
def test_generate_bq_schema_deprecated():
# 11121 Deprecation of generate_bq_schema
with tm.assert_produces_warning(FutureWarning):
df = make_mixed_dataframe_v2(10)
gbq.generate_bq_schema(df)
class TestGBQConnectorIntegration(tm.TestCase):
def setUp(self):
_setup_common()
_skip_if_no_project_id()
self.sut = gbq.GbqConnector(_get_project_id(),
private_key=_get_private_key_path())
def test_should_be_able_to_make_a_connector(self):
self.assertTrue(self.sut is not None,
'Could not create a GbqConnector')
def test_should_be_able_to_get_valid_credentials(self):
credentials = self.sut.get_credentials()
self.assertFalse(credentials.invalid, 'Returned credentials invalid')
def test_should_be_able_to_get_a_bigquery_service(self):
bigquery_service = self.sut.get_service()
self.assertTrue(bigquery_service is not None, 'No service returned')
def test_should_be_able_to_get_schema_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(schema is not None)
def test_should_be_able_to_get_results_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(pages is not None)
def test_get_application_default_credentials_does_not_throw_error(self):
if _check_if_can_get_correct_default_credentials():
raise nose.SkipTest("Can get default_credentials "
"from the environment!")
credentials = self.sut.get_application_default_credentials()
self.assertIsNone(credentials)
def test_get_application_default_credentials_returns_credentials(self):
if not _check_if_can_get_correct_default_credentials():
raise nose.SkipTest("Cannot get default_credentials "
"from the environment!")
from oauth2client.client import GoogleCredentials
credentials = self.sut.get_application_default_credentials()
self.assertTrue(isinstance(credentials, GoogleCredentials))
class TestGBQConnectorServiceAccountKeyPathIntegration(tm.TestCase):
def setUp(self):
_setup_common()
_skip_if_no_project_id()
_skip_if_no_private_key_path()
self.sut = gbq.GbqConnector(_get_project_id(),
private_key=_get_private_key_path())
def test_should_be_able_to_make_a_connector(self):
self.assertTrue(self.sut is not None,
'Could not create a GbqConnector')
def test_should_be_able_to_get_valid_credentials(self):
credentials = self.sut.get_credentials()
self.assertFalse(credentials.invalid, 'Returned credentials invalid')
def test_should_be_able_to_get_a_bigquery_service(self):
bigquery_service = self.sut.get_service()
self.assertTrue(bigquery_service is not None, 'No service returned')
def test_should_be_able_to_get_schema_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(schema is not None)
def test_should_be_able_to_get_results_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(pages is not None)
class TestGBQConnectorServiceAccountKeyContentsIntegration(tm.TestCase):
def setUp(self):
_setup_common()
_skip_if_no_project_id()
        _skip_if_no_private_key_contents()
        self.sut = gbq.GbqConnector(_get_project_id(),
                                    private_key=_get_private_key_contents())
def test_should_be_able_to_make_a_connector(self):
self.assertTrue(self.sut is not None,
'Could not create a GbqConnector')
def test_should_be_able_to_get_valid_credentials(self):
credentials = self.sut.get_credentials()
self.assertFalse(credentials.invalid, 'Returned credentials invalid')
def test_should_be_able_to_get_a_bigquery_service(self):
bigquery_service = self.sut.get_service()
self.assertTrue(bigquery_service is not None, 'No service returned')
def test_should_be_able_to_get_schema_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(schema is not None)
def test_should_be_able_to_get_results_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(pages is not None)
class GBQUnitTests(tm.TestCase):
def setUp(self):
_setup_common()
def test_import_google_api_python_client(self):
if compat.PY2:
with tm.assertRaises(ImportError):
from googleapiclient.discovery import build # noqa
from googleapiclient.errors import HttpError # noqa
from apiclient.discovery import build # noqa
from apiclient.errors import HttpError # noqa
else:
from googleapiclient.discovery import build # noqa
from googleapiclient.errors import HttpError # noqa
def test_should_return_bigquery_integers_as_python_floats(self):
result = gbq._parse_entry(1, 'INTEGER')
tm.assert_equal(result, float(1))
def test_should_return_bigquery_floats_as_python_floats(self):
result = gbq._parse_entry(1, 'FLOAT')
tm.assert_equal(result, float(1))
def test_should_return_bigquery_timestamps_as_numpy_datetime(self):
result = gbq._parse_entry('0e9', 'TIMESTAMP')
tm.assert_equal(result, np_datetime64_compat('1970-01-01T00:00:00Z'))
def test_should_return_bigquery_booleans_as_python_booleans(self):
result = gbq._parse_entry('false', 'BOOLEAN')
tm.assert_equal(result, False)
def test_should_return_bigquery_strings_as_python_strings(self):
result = gbq._parse_entry('STRING', 'STRING')
tm.assert_equal(result, 'STRING')
def test_to_gbq_should_fail_if_invalid_table_name_passed(self):
with tm.assertRaises(gbq.NotFoundException):
gbq.to_gbq(DataFrame(), 'invalid_table_name', project_id="1234")
def test_to_gbq_with_no_project_id_given_should_fail(self):
with tm.assertRaises(TypeError):
gbq.to_gbq(
|
DataFrame()
|
pandas.core.frame.DataFrame
|
import argparse
import os
import csv
import sys
import matplotlib.pyplot as plt
import itertools
# import seaborn as sns
import collections
import pandas as pd
def statistics(csv_path):
data = pd.read_csv(csv_path)
# data = data.sort_values(by=['target'])
print(data)
var = data.var()
print(var[0])
print(var)
var_list = []
for n, val in var.iteritems():
var_list.append([n, val])
var_list.sort(key=lambda i: i[0])
data['variance'] = var
data = data.sort_values(by=['target'])
fig, ax = plt.subplots(figsize=(40, 5))
# ax.bar(data['target'], data['variance'])
print([i[0] for i in var_list], [i[1] for i in var_list])
ax.bar([i[0] for i in var_list], [i[1] for i in var_list])
plt.xticks(rotation=30, ha='right')
plt.show()
averaged_var = {}
for name, val in var_list:
key = name.split('_')[0]
if key not in averaged_var.keys():
averaged_var[key] = [val, 1]
else:
averaged_var[key] = [averaged_var[key][0] + val, averaged_var[key][1]+1]
print(averaged_var)
for key, val in averaged_var.items():
averaged_var[key] = val[0]/val[1]
print(averaged_var)
fig1, ax1 = plt.subplots(figsize=(10, 5))
ax1.bar(averaged_var.keys(), averaged_var.values())
plt.xticks(rotation=30, ha='right')
plt.show()
# data.to_csv("/Volumes/data/libs/from_ftp/strudel-libs_ver-2.0_voxel-0.5/var_0.0-2.3_2.csv")
def filter_data1(dataframe):
dataframe = dataframe.sort_values(by=['target'])
new_frame = pd.DataFrame()
length = len(dataframe['target'])
for index, series in dataframe.iterrows():
correlations = []
ref_type = series["target"].split("_")[0]
for a, b in series.iteritems():
# print(a, b)
if a == 'target':
pass
else:
res_type = a.split('_')[0]
if res_type != ref_type:
correlations.append(b)
if len(correlations) < length:
for i in range(length-len(correlations)):
correlations.append(None)
new_frame[series["target"]] = correlations
variance = new_frame.mean()
print(variance)
averaged_var = {}
for name, val in variance.iteritems():
key = name.split('_')[0]
if key not in averaged_var.keys():
averaged_var[key] = [val, 1]
else:
averaged_var[key] = [averaged_var[key][0] + val, averaged_var[key][1] + 1]
print(averaged_var)
for key, val in averaged_var.items():
averaged_var[key] = val[0] / val[1]
print(averaged_var)
fig1, ax1 = plt.subplots(figsize=(10, 5))
ax1.bar(averaged_var.keys(), averaged_var.values())
plt.xticks(rotation=30, ha='right')
ax1.set_ylim(0.6, 1)
plt.show()
new_frame.to_csv("/Volumes/data/libs/from_ftp/strudel-libs_ver-2.0_voxel-0.5/filtered_3.5-4.0.csv")
tmp_dict = {}
def filter_data(dataframe):
dataframe = dataframe.sort_values(by=['target'])
new_frame = pd.DataFrame()
length = len(dataframe['target'])
for index, series in dataframe.iterrows():
correlations = []
ref_type = series["target"].split("_")[0]
for a, b in series.iteritems():
# print(a, b)
if a == 'target':
pass
else:
res_type = a.split('_')[0]
if res_type != ref_type:
correlations.append(b)
if len(correlations) < length:
for i in range(length - len(correlations)):
correlations.append(None)
new_frame[series["target"]] = correlations
return new_frame
def extract_max_corr(dataframe):
dataframe = dataframe.sort_values(by=['target'], ascending=False)
new_frame = pd.DataFrame()
# length = len(dataframe['target'])
for index, series in dataframe.iterrows():
tmp_dict = {}
ref_type = series["target"].split("_")[0]
tmp_dict[ref_type] = 1
for a, b in series.iteritems():
# print(a, b)
if a == 'target':
pass
else:
res_type = a.split('_')[0]
if res_type != ref_type:
if res_type not in tmp_dict.keys():
tmp_dict[res_type] = round(b, 5)
elif tmp_dict[res_type] < b:
tmp_dict[res_type] = round(b, 5)
ordered_dict = collections.OrderedDict(sorted(tmp_dict.items()))
new_frame[series["target"]] = ordered_dict.values()
new_frame.insert(loc=0, column='res', value=ordered_dict.keys())
new_frame = new_frame.set_index('res')
return new_frame
def comute_mean(frame):
mean_series = frame.mean()
return mean_series
def average_over_rotamers(series):
averaged_val = {}
for name, val in series.iteritems():
key = name.split('_')[0]
if key not in averaged_val.keys():
averaged_val[key] = [val, 1]
else:
averaged_val[key] = [averaged_val[key][0] + val, averaged_val[key][1] + 1]
# print(averaged_val)
for key, val in averaged_val.items():
averaged_val[key] = val[0] / val[1]
return averaged_val
def series_to_dict(series):
dic_data = {}
for name, val in series.iteritems():
dic_data[name] = val
return dic_data
def plot_single(data_dict, resolution_range, plot_path=None):
fig, ax = plt.subplots(figsize=(80, 10))
ax.bar(data_dict.keys(), data_dict.values())
plt.xticks(rotation=45, ha='right')
ax.set_ylim(0.6, 1)
plt.title(f'Resolution band: {resolution_range}')
ax.set_xlabel('Motif')
ax.set_ylabel('Average correlation')
plt.subplots_adjust(bottom=0.55)
if plot_path is None:
plt.show()
else:
fig.savefig(plot_path)
def plot_multiple(data_list, plot_path=None, y_lim=(0, 1)):
hatches = itertools.cycle(['///', '+++', 'oo', 'XX', 'OO', '.', '--'])
fig, ax = plt.subplots(figsize=(12, 5))
width = 0.4
dd = width*len(data_list)/2 + 1
x_ticks = [i for i in range(20)]
x_ticks = [ x * dd for x in x_ticks]
print(len(x_ticks))
ax.set_xticks([ x + width * len(data_list)/2 for x in x_ticks])
for data, res_range in data_list:
print(res_range)
print(len(data))
x_ticks = [x+width for x in x_ticks]
# ax.bar(data.keys(), data.values(), label=f'{res_range}')
hatch = next(hatches)
bar = ax.bar(x_ticks, data.values(), width, label=f'{res_range}')
print(bar)
for b in bar:
b.set_hatch(hatch)
ax.legend()
ax.set_xticklabels(data_list[0][0].keys(), rotation=0)
ax.set_xlim(-2*width, x_ticks[-1] + 2*width)
ax.set_ylim(y_lim)
ax.set_xlabel('Motif type')
ax.set_ylabel('1-correlation')
if plot_path is None:
plt.show()
else:
plt.subplots_adjust(bottom=0.15)
fig.savefig(plot_path)
def plot_multiple_non_av(data_list, plot_path=None, y_lim=(0, 1)):
fig, ax = plt.subplots(figsize=(12, 5))
width = 0.25
dd = width*len(data_list)/2 + 1
x_ticks = [i for i in range(len(data_list[0][0]))]
x_ticks = [ x * dd for x in x_ticks]
print(len(x_ticks))
ax.set_xticks([ x + width * len(data_list)/2 for x in x_ticks])
for data, res_range in data_list:
print(res_range)
print(len(data))
x_ticks = [x+width for x in x_ticks]
# ax.bar(data.keys(), data.values(), label=f'{res_range}')
ax.bar(x_ticks, data.values(), width, label=f'{res_range}')
ax.legend()
ax.set_xticklabels(data_list[0][0].keys(), rotation=65)
ax.set_xlim(0, x_ticks[-1] + 5.5)
ax.set_ylim(y_lim)
ax.set_xlabel('Motif type')
ax.set_ylabel('Average correlation')
if plot_path is None:
plt.show()
else:
plt.subplots_adjust(bottom=0.15)
fig.savefig(plot_path)
# def filter_max_
def one_minus_val(val_dict):
for key, val in val_dict.items():
val_dict[key] = 1 - val
return val_dict
def plot_all_average_corr(data_path, out_folder=None, action='mean'):
files = os.listdir(data_path)
csv_files = [f for f in files if f.endswith('.csv') and not f.startswith('.')]
csv_files.sort()
data_list = []
tmp = csv_files[:3] + [csv_files[-1]]
# tmp = csv_files
for file in tmp:
print(file)
res_range = file.split('.csv')[0].split('_')[-1]
path = os.path.join(data_path, file)
data = pd.read_csv(path)
filtered = filter_data(data)
if action == 'mean':
series = comute_mean(filtered)
else:
return
rot_aver = average_over_rotamers(series)
print(rot_aver)
rot_aver = one_minus_val(rot_aver)
data_list.append([rot_aver, res_range])
plot_multiple(data_list, y_lim=(0, 0.3), plot_path=os.path.join(out_folder, 'aver_corr_3+1_one_minus_hatch_final.png'))
def plot_all_corr(data_path, out_folder=None, action='mean'):
files = os.listdir(data_path)
csv_files = [f for f in files if f.endswith('.csv')]
csv_files.sort()
data_list = []
for file in csv_files:
res_range = file.split('.csv')[0]
path = os.path.join(data_path, file)
data = pd.read_csv(path)
filtered = filter_data(data)
if action == 'mean':
series = comute_mean(filtered)
else:
return
rot_aver = series_to_dict(series)
data_list.append([rot_aver, res_range])
plot_single(rot_aver, res_range, plot_path=os.path.join(out_folder,f'{res_range}_all_rot.png'))
break
def plot_grid_similarity(df, plot_path):
import numpy as np
import matplotlib as mpl
mpl.rcParams['xtick.top'] = True
mpl.rcParams['xtick.labeltop'] = True
fig, ax = plt.subplots(figsize=(16, 36))
from matplotlib import colors
cmap = colors.ListedColormap(['white', 'blue', 'white'])
bounds = [0, 0.95, 0.999, 1]
norm = colors.BoundaryNorm(bounds, cmap.N)
ax.pcolor(df, cmap=cmap, norm=norm)
y_labels = [l.split('_') for l in df.index]
form_labels = []
for l in y_labels:
if l[0] in ['ala', 'gly']:
txt = f'{l[0]}'
else:
txt = f'{l[0]} {l[-1]}'
form_labels.append(txt)
ax.grid(which='major', axis='both', linestyle='-', color='k', linewidth=0.5)
plt.yticks(np.arange(0.99, len(df.index), 1), form_labels, va='top', ha='right')
print(np.arange(0.5, len(df.columns), 1))
print(df.columns)
labels = [l + ' ' for l in df.columns]
plt.xticks(np.arange(0.99, len(df.columns), 1), labels, ha='right')
plt.tick_params(axis='x', which='major', labelsize=16)
plt.tick_params(axis='y', which='major', labelsize=14)
if plot_path is None:
plt.show()
else:
plt.subplots_adjust(bottom=0.15)
fig.savefig(plot_path)
def generate_all_drid_similarity(data_path, out_folder=None):
files = os.listdir(data_path)
csv_files = [f for f in files if f.endswith('.csv') and not f.startswith('.')]
csv_files.sort()
data_list = []
tmp = csv_files
# tmp = csv_files
for file in tmp:
print(file)
res_range = file.split('.csv')[0].split('_')[-1]
path = os.path.join(data_path, file)
data =
|
pd.read_csv(path)
|
pandas.read_csv
|
import os
from datetime import datetime
import joblib
import numpy as np
import pandas as pd
from rfpimp import permutation_importances
from sklearn.metrics import mean_absolute_error
from sklearn.base import clone
from _08_stats import imp_df, drop_col_feat_imp, mae
"""
Module that generates statistics of the trained model.
"""
def generate_stats(simulation_folder):
"""
Function for generating statistics about model
:param simulation_folder: str, name of subfolder for given data sets
:return: none, statistics saved down as side effect
"""
Start = datetime.now()
project_directory = os.path.dirname(os.getcwd())
path_to_data = os.path.join(project_directory, "Data", simulation_folder)
path_to_characteristics_data = os.path.join(path_to_data, "Characteristics")
path_to_scenario = os.path.join(project_directory, "Models", simulation_folder,
"XGB", "Model")
path_to_stats = os.path.join(path_to_scenario, "Stats")
if not os.path.exists(path_to_stats):
os.makedirs(path_to_stats)
path_to_model = os.path.join(path_to_scenario, "model.sav")
X_train = np.load(os.path.join(path_to_characteristics_data, "X_train.npy"), allow_pickle=True)
X_test = np.load(os.path.join(path_to_characteristics_data, "X_test.npy"), allow_pickle=True)
y_train = np.load(os.path.join(path_to_characteristics_data, "y_train.npy"), allow_pickle=True)
y_test = np.load(os.path.join(path_to_characteristics_data, "y_test.npy"), allow_pickle=True)
# TODO: fix the save of the data to get variable names from there
characteristics_data = pd.read_csv(os.path.join(path_to_characteristics_data, "characteristics.csv"))
model = joblib.load(path_to_model)
data_type = ["Train", "Test"]
for dt in data_type:
X = X_train if dt == "Train" else X_test
y = y_train if dt == "Train" else y_test
        # Generate predictions and evaluate the error
y_pred = model.predict(X)
## TODO: mae per class
print("mae")
test_train_mae = mean_absolute_error(y, y_pred)
df =
|
pd.DataFrame({'mae': [test_train_mae]})
|
pandas.DataFrame
|
import os
import itertools
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
from datetime import datetime
def basic_prediction():
return
def forecasting(df_blocks_):
df_blocks_['date'] = pd.to_datetime(df_blocks_['date'])
df_blocks_ = df_blocks_.set_index('date')
df_blocks_ = df_blocks_['tx'].resample('W').mean()
print(len(df_blocks_))
decomposition = sm.tsa.seasonal_decompose(df_blocks_, model='additive')
fig = decomposition.plot()
arima(df_blocks_, False)
def parameter_selection(df_blocks_):
p = d = q = range(0, 2)
pdq = list(itertools.product(p, d, q))
seasonal_pdq = [(x[0], x[1], x[2], 12) for x in list(itertools.product(p, d, q))]
ress = []
for param in pdq:
for param_seasonal in seasonal_pdq:
try:
mod = sm.tsa.statespace.SARIMAX(df_blocks_,
order=param,
seasonal_order=param_seasonal,
enforce_stationarity=False,
enforce_invertibility=False)
results = mod.fit()
ress.append([param, param_seasonal, results.aic])
# print('ARIMA{}x{}12 - AIC:{}'.format(param, param_seasonal, results.aic))
            except Exception:
continue
return np.asarray(sorted(ress, key=lambda x: x[2], reverse=True))[-1]
def arima(df_blocks_, param_select):
# Select parameters
a = parameter_selection(df_blocks_)
pred_ci = pd.DataFrame()
pred_uc_m = pd.DataFrame()
mse =[]
for i in range(1, 5, 1):
mod = sm.tsa.statespace.SARIMAX(df_blocks_[:'201' + str(i)], order=a[0],
seasonal_order=a[1],
enforce_stationarity=False, enforce_invertibility=False)
        # Fit the selected model
results = mod.fit()
# Plot diagnostics
results.plot_diagnostics(figsize=(16, 8))
        # Get and plot predictions starting at a given date
pred = results.get_prediction(start=pd.to_datetime(df_blocks_['201' + str(i) + '-02':].index.values[0]), dynamic=False)
# Compute MSE and RMSE error
y_forecasted = pred.predicted_mean
y_truth = df_blocks_['201' + str(i) + '-06':]
#
mse.append(((y_forecasted - y_truth) ** 2).mean())
# print('The Root Mean Squared Error of our forecasts is {}'.format(round(np.sqrt(mse), 2)))
#
pred_uc = results.get_forecast(steps=1)
pred_ci = pd.concat([pred_ci, pred_uc.conf_int()])
pred_uc_m =
|
pd.concat([pred_uc_m, pred_uc.predicted_mean])
|
pandas.concat
|
import pandas as pd
import gender_guesser.detector as gender
import geojson
gender_guesser = gender.Detector(case_sensitive=False)
def predict_gender(dataframe,column_name,rolling_frame='180d'):
'''
take full dataframe w/ tweets and extract
gender for a name-column where applicable
returns two-column df w/ timestamp & gender
'''
splitter = lambda x: ''.join(x.split()[:1])
gender_column = dataframe.loc[dataframe[column_name].notnull()][column_name].apply(
splitter).apply(
gender_guesser.get_gender)
gender_dataframe = pd.DataFrame(data = {
'time' : list(gender_column.index),
'gender' : list(gender_column)
})
gender_dataframe = gender_dataframe.set_index('time')
gender_dataframe_tab = gender_dataframe.groupby([gender_dataframe.index.date,gender_dataframe['gender']]).size().reset_index()
gender_dataframe_tab['date'] = gender_dataframe_tab['level_0']
gender_dataframe_tab['count'] = gender_dataframe_tab[0]
gender_dataframe_tab = gender_dataframe_tab.drop([0,'level_0'],axis=1)
gender_dataframe_tab = gender_dataframe_tab.set_index('date')
gender_dataframe_tab.index = pd.to_datetime(gender_dataframe_tab.index)
gdf_pivot = gender_dataframe_tab.pivot(columns='gender', values='count')
gdf_pivot = gdf_pivot.rolling(rolling_frame).mean()
gdf_pivot = gdf_pivot.reset_index()
gdf_pivot['date'] = gdf_pivot['date'].astype(str)
gdf_pivot = gdf_pivot.drop(['mostly_male','mostly_female','andy','unknown'],axis=1)
return gdf_pivot
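# Usage sketch (illustrative; assumes `tweets` is a datetime-indexed frame with
# a 'name' column of display names, which is not shown in this excerpt):
#
#     gender_over_time = predict_gender(tweets, 'name', rolling_frame='90d')
#     # -> typically a 'date' column plus rolling counts per remaining gender label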
def create_hourly_stats(dataframe):
get_hour = lambda x: x.hour
get_weekday = lambda x: x.weekday()
local_times = dataframe.copy()
local_times = local_times.loc[dataframe['local_time'].notnull()]
local_times['weekday'] = local_times['local_time'].apply(get_weekday)
local_times['hour'] = local_times['local_time'].apply(get_hour)
local_times = local_times.replace(to_replace={'weekday':
{0:'Weekday',
1:'Weekday',
2:'Weekday',
3:'Weekday',
4:'Weekday',
5:'Weekend',
6:'Weekend',
}
})
local_times = local_times.groupby([local_times['hour'],local_times['weekday']]).size().reset_index()
local_times['values'] = local_times[0]
local_times = local_times.set_index(local_times['hour'])
local_times = local_times.pivot(columns='weekday', values='values').reset_index()
local_times['Weekday'] = local_times['Weekday'] / 5
local_times['Weekend'] = local_times['Weekend'] / 2
return local_times.reset_index()
def create_tweet_types(dataframe):
dataframe_grouped = dataframe.groupby(dataframe.index.date).count()
dataframe_grouped.index = pd.to_datetime(dataframe_grouped.index)
dataframe_mean_week = dataframe_grouped.rolling('180d').mean()
dataframe_mean_week['p_url'] = (dataframe_mean_week['url'] / dataframe_mean_week['text']) * 100
dataframe_mean_week['p_media'] = (dataframe_mean_week['media'] / dataframe_mean_week['text']) * 100
dataframe_mean_week['p_reply'] = (dataframe_mean_week['reply_name'] / dataframe_mean_week['text']) * 100
dataframe_mean_week['p_rt'] = (dataframe_mean_week['retweet_name'] / dataframe_mean_week['text']) * 100
dataframe_mean_week['p_hash'] = (dataframe_mean_week['hashtag'] / dataframe_mean_week['text']) * 100
dataframe_mean_week['p_other'] = 100 - (dataframe_mean_week['p_reply'] + dataframe_mean_week['p_rt'])
dataframe_mean_week = dataframe_mean_week.reset_index()
dataframe_mean_week['date'] = dataframe_mean_week['index'].astype(str)
dataframe_mean_week = dataframe_mean_week.drop(['reply_user_name',
'retweet_user_name',
'twitter_user_name',
'latitude',
'longitude',
'local_time',
'url',
'media',
'reply_name',
'retweet_name',
'hashtag',
'index',
],
axis=1)
return dataframe_mean_week.reset_index()
def create_top_replies(dataframe):
exclude_name_list = [dataframe.iloc[0]['twitter_user_name']]
dataframe = dataframe[~dataframe['reply_user_name'].isin(exclude_name_list)]
top_replies = dataframe[dataframe['reply_user_name'].isin(list(dataframe['reply_user_name'].value_counts()[:5].reset_index()['index']))]
top_replies = top_replies.reset_index()[['reply_user_name','utc_time']]
top_replies['utc_time'] = top_replies['utc_time'].dt.date
top_replies = top_replies.groupby(["utc_time", "reply_user_name"]).size()
top_replies = top_replies.reset_index()
top_replies['date'] = top_replies['utc_time'].astype(str)
top_replies['value'] = top_replies[0]
top_replies = top_replies.drop([0,'utc_time'],axis=1)
top_replies['date'] =
|
pd.to_datetime(top_replies['date'])
|
pandas.to_datetime
|
import napari.layers
import napari.utils.events
import napari_plugin_engine
import pandas
from ._dock_widget import DockWidget
from .generator import Generator
from ._feature_selection_widget import FeatureSelectionWidget
import numpy as np
def features(
image: napari.layers.Image,
masks: napari.layers.Labels,
viewer: napari.Viewer
):
dock_widget = DockWidget()
viewer.window.add_dock_widget(dock_widget, area="bottom")
viewer.window.add_dock_widget(dock_widget)
feature_selection_widget = FeatureSelectionWidget()
viewer.window.add_dock_widget(feature_selection_widget)
generator = Generator(np.asarray(masks.data), np.asarray(image.data), feature_selection_widget.selected)
data =
|
pandas.DataFrame([feature for feature in generator], columns=generator.columns)
|
pandas.DataFrame
|
import sys
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
plt.rcParams['font.size'] = 6
root_path = os.path.dirname(os.path.abspath('__file__'))
# root_path = os.path.abspath(os.path.join(root_path,os.path.pardir))
graphs_path = root_path+'/graphs/'
results_path = root_path+'/results_analysis/results/'
print("root path:{}".format(root_path))
sys.path.append(root_path)
from tools.results_reader import read_two_stage, read_pure_esvr,read_pure_arma
h_arma = read_pure_arma("Huaxian")
x_arma = read_pure_arma("Xianyang")
z_arma = read_pure_arma("Zhangjiashan")
h_svr_1 = pd.read_csv(root_path+'/Huaxian/projects/esvr/1_ahead_pacf_lag12/optimal_model_results.csv')
h_svr_3 = pd.read_csv(root_path+'/Huaxian/projects/esvr/3_ahead_pacf_lag12/optimal_model_results.csv')
h_svr_5 = pd.read_csv(root_path+'/Huaxian/projects/esvr/5_ahead_pacf_lag12/optimal_model_results.csv')
h_svr_7 = pd.read_csv(root_path+'/Huaxian/projects/esvr/7_ahead_pacf_lag12/optimal_model_results.csv')
x_svr_1 = pd.read_csv(root_path+'/Xianyang/projects/esvr/1_ahead_pacf_lag12/optimal_model_results.csv')
x_svr_3 = pd.read_csv(root_path+'/Xianyang/projects/esvr/3_ahead_pacf_lag12/optimal_model_results.csv')
x_svr_5 = pd.read_csv(root_path+'/Xianyang/projects/esvr/5_ahead_pacf_lag12/optimal_model_results.csv')
x_svr_7 = pd.read_csv(root_path+'/Xianyang/projects/esvr/7_ahead_pacf_lag12/optimal_model_results.csv')
z_svr_1 = pd.read_csv(root_path+'/Zhangjiashan/projects/esvr/1_ahead_pacf_lag12/optimal_model_results.csv')
z_svr_3 = pd.read_csv(root_path+'/Zhangjiashan/projects/esvr/3_ahead_pacf_lag12/optimal_model_results.csv')
z_svr_5 = pd.read_csv(root_path+'/Zhangjiashan/projects/esvr/5_ahead_pacf_lag12/optimal_model_results.csv')
z_svr_7 = pd.read_csv(root_path+'/Zhangjiashan/projects/esvr/7_ahead_pacf_lag12/optimal_model_results.csv')
h_lstm_1 = pd.read_csv(root_path+'/Huaxian/projects/lstm/1_ahead/optimal/opt_pred.csv')
h_lstm_3 = pd.read_csv(root_path+'/Huaxian/projects/lstm/3_ahead/optimal/opt_pred.csv')
h_lstm_5 = pd.read_csv(root_path+'/Huaxian/projects/lstm/5_ahead/optimal/opt_pred.csv')
h_lstm_7 = pd.read_csv(root_path+'/Huaxian/projects/lstm/7_ahead/optimal/opt_pred.csv')
x_lstm_1 = pd.read_csv(root_path+'/Xianyang/projects/lstm/1_ahead/optimal/opt_pred.csv')
x_lstm_3 = pd.read_csv(root_path+'/Xianyang/projects/lstm/3_ahead/optimal/opt_pred.csv')
x_lstm_5 =
|
pd.read_csv(root_path+'/Xianyang/projects/lstm/5_ahead/optimal/opt_pred.csv')
|
pandas.read_csv
|
import os
import warnings
from pathlib import Path
from urllib.request import urlretrieve
import numpy as np
import pandas as pd
from scvelo.core import clean_obs_names as _clean_obs_names
from scvelo.core import get_df as _get_df
from scvelo.core import merge as _merge
from scvelo.core._anndata import obs_df as _obs_df
from scvelo.core._anndata import var_df as _var_df
def load(filename, backup_url=None, header="infer", index_col="infer", **kwargs):
"""Load a csv, txt, tsv or npy file."""
numpy_ext = {"npy", "npz"}
pandas_ext = {"csv", "txt", "tsv"}
if not os.path.exists(filename) and backup_url is None:
raise FileNotFoundError(f"Did not find file {filename}.")
elif not os.path.exists(filename):
d = os.path.dirname(filename)
if not os.path.exists(d):
os.makedirs(d)
urlretrieve(backup_url, filename)
ext = Path(filename).suffixes[-1][1:]
if ext in numpy_ext:
return np.load(filename, **kwargs)
elif ext in pandas_ext:
df = pd.read_csv(
filename,
header=header,
index_col=None if index_col == "infer" else index_col,
**kwargs,
)
if index_col == "infer" and len(df.columns) > 1:
is_int_index = all(np.arange(0, len(df)) == df.iloc[:, 0])
is_str_index = isinstance(df.iloc[0, 0], str) and all(
[not isinstance(d, str) for d in df.iloc[0, 1:]]
)
if is_int_index or is_str_index:
df.set_index(df.columns[0], inplace=True)
return df
else:
raise ValueError(
f"'{filename}' does not end on a valid extension.\n"
"Please, provide one of the available extensions.\n"
f"{numpy_ext | pandas_ext}\n"
)
read_csv = load
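# Usage sketch (illustrative; file names are hypothetical):
#
#     obs = load("data/obs_table.csv")   # pandas path: extension in {csv, txt, tsv}
#     arr = load("data/graph.npy")       # numpy path: dispatches to np.load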
def clean_obs_names(data, base="[AGTCBDHKMNRSVWY]", ID_length=12, copy=False):
warnings.warn(
"`scvelo.read_load.clean_obs_names` is deprecated since scVelo v0.2.4 and will "
"be removed in a future version. Please use `scvelo.core.clean_obs_names` "
"instead.",
DeprecationWarning,
stacklevel=2,
)
return _clean_obs_names(data=data, base=base, ID_length=ID_length, copy=copy)
def merge(adata, ldata, copy=True):
warnings.warn(
"`scvelo.read_load.merge` is deprecated since scVelo v0.2.4 and will be "
"removed in a future version. Please use `scvelo.core.merge` instead.",
DeprecationWarning,
stacklevel=2,
)
    return _merge(adata=adata, ldata=ldata, copy=copy)
def obs_df(adata, keys, layer=None):
warnings.warn(
"`scvelo.read_load.obs_df` is deprecated since scVelo v0.2.4 and will be "
"removed in a future version. Please use `scvelo.core._anndata.obs_df` "
"instead.",
DeprecationWarning,
stacklevel=2,
)
return _obs_df(adata=adata, keys=keys, layer=layer)
def var_df(adata, keys, layer=None):
warnings.warn(
"`scvelo.read_load.var_df` is deprecated since scVelo v0.2.4 and will be "
"removed in a future version. Please use `scvelo.core._anndata.var_df` "
"instead.",
DeprecationWarning,
stacklevel=2,
)
return _var_df(adata=adata, keys=keys, layer=layer)
def get_df(
data,
keys=None,
layer=None,
index=None,
columns=None,
sort_values=None,
dropna="all",
precision=None,
):
warnings.warn(
"`scvelo.read_load.get_df` is deprecated since scVelo v0.2.4 and will be "
"removed in a future version. Please use `scvelo.core.get_df` instead.",
DeprecationWarning,
stacklevel=2,
)
return _get_df(
data=data,
keys=keys,
layer=layer,
index=index,
columns=columns,
sort_values=sort_values,
dropna=dropna,
precision=precision,
)
DataFrame = get_df
def load_biomart():
# human genes from https://biomart.genenames.org/martform
# mouse genes from http://www.ensembl.org/biomart/martview
# antibodies from https://www.biolegend.com/en-us/totalseq
nb_url = "https://github.com/theislab/scvelo_notebooks/raw/master/"
filename = "data/biomart/mart_export_human.txt"
df = load(filename, sep="\t", backup_url=f"{nb_url}{filename}")
df.columns = ["ensembl", "gene name"]
df.index = df.pop("ensembl")
filename = "data/biomart/mart_export_mouse.txt"
df2 = load(filename, sep="\t", backup_url=f"{nb_url}{filename}")
df2.columns = ["ensembl", "gene name"]
df2.index = df2.pop("ensembl")
df =
|
pd.concat([df, df2])
|
pandas.concat
|
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
import nltk
from nltk import wordpunct_tokenize
from nltk.stem.snowball import EnglishStemmer
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.pipeline import make_pipeline, make_union
from tpot.builtins import StackingEstimator
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.model_selection import train_test_split
vectorizer = TfidfVectorizer(input='content', analyzer='word')
svd = TruncatedSVD(n_components=500, n_iter=5, random_state=27)
nltk.download('punkt')
nltk.download('stopwords')
stop_words = set(stopwords.words('english'))
# After extracting text with get_text, clean and tokenize it with the NLTK pipeline below.
def nltkPipe(soup_text):
#Convert to tokens
tokens = [x.lower() for x in wordpunct_tokenize(soup_text)]
text = nltk.Text(tokens)
#Get lowercase words. No single letters, and no stop words
words = [w.lower() for w in text if w.isalpha() and len(w) > 1 and w.lower() not in stop_words]
#Remove prefix/suffixes to cut down on vocab
stemmer = EnglishStemmer()
words_nostems = [stemmer.stem(w) for w in words]
return words_nostems
def getTitleTokens(html):
soup = BeautifulSoup(html,'html.parser')
soup_title = soup.title
if soup_title != None:
soup_title_text = soup.title.get_text()
text_arr = nltkPipe(soup_title_text)
return text_arr
else:
return []
def getBodyTokens(html):
soup = BeautifulSoup(html,'html.parser')
#Get the text body
soup_para = soup.find_all('p')
soup_para_clean = ' '.join([x.get_text() for x in soup_para if x.span==None and x.a==None])
text_arr = nltkPipe(soup_para_clean)
return text_arr
#Build the model
def get_html(in_df):
keep_cols = ["Webpage_id","Tag"]
use_df = in_df[keep_cols]
html_reader_obj = pd.read_csv(data_dir+'html_data.csv',iterator=True, chunksize=10000)
frames = []
match_indices = use_df['Webpage_id'].values.tolist()
print(len(match_indices),' indices left...')
while len(match_indices) > 0:
for chunk in html_reader_obj:
merge_df = pd.merge(use_df,chunk,how='inner',on='Webpage_id')
merge_indices = merge_df['Webpage_id'].values.tolist()
match_indices = [x for x in match_indices if x not in merge_indices]
print(len(match_indices),' indices left...')
frames.append(merge_df)
    # Process HTML for bags of words of the body and title.
process_df =
|
pd.concat(frames)
|
pandas.concat
|
"""
Fetch meteorological data from the SMEAR website and bind them as a CSV table.
Hyytiälä COS campaign, April-November 2016
(c) 2016-2017 <NAME> <<EMAIL>>
"""
import io
import argparse
import copy
import datetime
import requests
import numpy as np
import pandas as pd
import preproc_config
def timestamp_parser(*args):
"""
A timestamp parser for `pandas.read_csv()`.
Argument list: year, month, day, hour, minute, second
"""
return np.datetime64('%s-%s-%s %s:%s:%s' %
args)
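# Illustrative note (not in the original script): for a CSV row whose first six
# columns are e.g. 2016, 04, 01, 00, 30, 00, the parser above builds the string
# '2016-04-01 00:30:00' and hands it to numpy.datetime64.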
# define the command-line argument parser
parser = argparse.ArgumentParser(description='Get SMEAR meteorological data.')
parser.add_argument('-v', '--variable', dest='flag_get_variable',
action='store_true',
help='get one variable at a time, slow mode')
parser.add_argument('-n', '--now', dest='flag_now', action='store_true',
help='get the data from the starting date till now')
args = parser.parse_args()
# echo program starting
print('Retrieving meteorological data from ' +
'SMEAR <http://avaa.tdata.fi/web/smart/smear> ... ')
dt_start = datetime.datetime.now()
print(datetime.datetime.strftime(dt_start, '%Y-%m-%d %X'))
print('numpy version = ' + np.__version__)
print('pandas version = ' + pd.__version__)
output_dir = preproc_config.data_dir['met_data']
# local winter time is UTC+2
start_dt = '2016-04-01 00:00:00'
if not args.flag_now:
end_dt = '2016-11-11 00:00:00'
else:
end_dt = (datetime.datetime.utcnow() +
datetime.timedelta(2. / 24.)).strftime('%Y-%m-%d %H:%M:%S')
# variable names for retrieval from the SMEAR data website API
varnames = ['Pamb0', 'T1250', 'T672', 'T504', 'T336', 'T168', 'T84', 'T42',
'RHIRGA1250', 'RHIRGA672', 'RHIRGA504', 'RHIRGA336',
'RHIRGA168', 'RHIRGA84', 'RHIRGA42',
'RPAR', 'PAR', 'diffPAR', 'maaPAR',
'tsoil_humus', 'tsoil_A', 'tsoil_B1', 'tsoil_B2', 'tsoil_C1',
'wsoil_humus', 'wsoil_A', 'wsoil_B1', 'wsoil_B2', 'wsoil_C1',
'Precipacc']
# renaming will be done after filling all the variables in the met dataframe
renaming_dict = {
'Pamb0': 'pres',
'T1250': 'T_atm_125m',
'T672': 'T_atm_67m',
'T504': 'T_atm_50m',
'T336': 'T_atm_34m',
'T168': 'T_atm_17m',
'T84': 'T_atm_8m',
'T42': 'T_atm_4m',
'RHIRGA1250': 'RH_125m',
'RHIRGA672': 'RH_67m',
'RHIRGA504': 'RH_50m',
'RHIRGA336': 'RH_34m',
'RHIRGA168': 'RH_17m',
'RHIRGA84': 'RH_8m',
'RHIRGA42': 'RH_4m',
'RPAR': 'PAR_reflected',
'PAR': 'PAR',
'diffPAR': 'PAR_diffuse',
'maaPAR': 'PAR_below',
'tsoil_humus': 'T_soil_surf',
'tsoil_A': 'T_soil_A',
'tsoil_B1': 'T_soil_B1',
'tsoil_B2': 'T_soil_B2',
'tsoil_C1': 'T_soil_C1',
'wsoil_humus': 'w_soil_surf',
'wsoil_A': 'w_soil_A',
'wsoil_B1': 'w_soil_B1',
'wsoil_B2': 'w_soil_B2',
'wsoil_C1': 'w_soil_C1',
'Precipacc': 'precip', }
# a URL example
# url = 'http://avaa.tdata.fi/palvelut/smeardata.jsp?' +
# 'variables=Pamb0,&table=HYY_META&' +
# 'from=2016-04-01 00:00:00&to=2016-04-02 00:00:00&'
# 'quality=ANY&averaging=30MIN&type=ARITHMETIC'
if not args.flag_get_variable:
# first, request all variables except precipitation
print("Fetching variables '%s' ..." % ', '.join(varnames[0:-1]), end=' ')
avg_type = 'ARITHMETIC'
url = 'http://avaa.tdata.fi/palvelut/smeardata.jsp?variables=' + \
','.join(varnames[0:-1]) + ',&table=HYY_META&from=' + \
start_dt + '&to=' + end_dt + \
'&quality=ANY&averaging=30MIN&type=' + avg_type
response = requests.get(url, verify=True)
# set `verify=True` to check SSL certificate
if response.status_code != 200:
print('Status %d: No response from the request.' %
response.status_code)
else:
print('Successful!')
df_met = pd.read_csv(
io.BytesIO(response.text.encode('utf-8')), sep=',', header=0,
names=['year', 'month', 'day', 'hour', 'minute', 'second',
*varnames[0:-1]],
parse_dates={'timestamp': [0, 1, 2, 3, 4, 5]},
date_parser=timestamp_parser,
engine='c', encoding='utf-8')
start_year = df_met['timestamp'][0].year
df_met.insert(
1, 'doy',
(df_met['timestamp'] - pd.Timestamp('%d-01-01' % start_year)) /
pd.Timedelta(days=1))
print('Timestamps parsed.')
# mask zero pressure as NaN
df_met.loc[df_met['Pamb0'] == 0., 'Pamb0'] = np.nan
# append precipitation; it's treated separately due to different averaging
del url, response
print("Fetching variable '%s' ..." % varnames[-1], end=' ')
avg_type = 'SUM'
url = 'http://avaa.tdata.fi/palvelut/smeardata.jsp?variables=' + \
varnames[-1] + ',&table=HYY_META&from=' + \
start_dt + '&to=' + end_dt + \
'&quality=ANY&averaging=30MIN&type=' + avg_type
response = requests.get(url, verify=True)
# set `verify=True` to check SSL certificate
if response.status_code != 200:
print('Status %d: No response from the request.' %
response.status_code)
else:
print('Successful!')
df_precip = pd.read_csv(
io.BytesIO(response.text.encode('utf-8')), sep=',', header=0,
names=[varnames[-1]], usecols=[6],
parse_dates=False,
engine='c', encoding='utf-8')
df_met = pd.concat([df_met, df_precip], axis=1)
else:
    # one variable at a time
# make a copy and insert custom timestamps
colnames = copy.copy(varnames)
colnames.insert(0, 'timestamp')
colnames.insert(1, 'doy')
df_met = pd.DataFrame(columns=colnames)
flag_timestamp_parsed = False
# fetch and dump data: dump each variable into TXT and combine 'em as CSV
for var in varnames:
print("Fetching variable '%s' ..." % var, end=' ')
# precipitation must be summed not averaged over the 30 min interval
if var != 'Precipacc':
avg_type = 'ARITHMETIC'
else:
avg_type = 'SUM'
url = 'http://avaa.tdata.fi/palvelut/smeardata.jsp?variables=' + \
var + ',&table=HYY_META&from=' + start_dt + '&to=' + end_dt + \
'&quality=ANY&averaging=30MIN&type=' + avg_type
response = requests.get(url, verify=True)
# set `verify=True` to check SSL certificate
if response.status_code != 200:
print(
"Status %d: No response from the request for variable '%s'." %
(response.status_code, var))
continue
else:
print('Successful!')
if not flag_timestamp_parsed:
fetched_data = pd.read_csv(
io.BytesIO(response.text.encode('utf-8')), sep=',', header=0,
names=['year', 'month', 'day',
'hour', 'minute', 'second', var],
parse_dates={'timestamp': [0, 1, 2, 3, 4, 5]},
date_parser=timestamp_parser,
engine='c', encoding='utf-8')
else:
fetched_data = pd.read_csv(
io.BytesIO(response.text.encode('utf-8')), sep=',', header=0,
names=[var], usecols=[6],
parse_dates=False,
engine='c', encoding='utf-8')
# if var == 'Pamb0':
# fetched_data[var][fetched_data[var] == 0.] = np.nan
if var == 'Pamb0':
fetched_data.loc[fetched_data[var] == 0., var] = np.nan
if not flag_timestamp_parsed:
# fill timestamps and convert to day of year
df_met['timestamp'] = fetched_data['timestamp']
flag_timestamp_parsed = True
df_met['doy'] = (
df_met['timestamp'] -
pd.Timestamp('%d-01-01' %
fetched_data['timestamp'][0].year)) / \
|
pd.Timedelta(days=1)
|
pandas.Timedelta
|
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
# from http://imachordata.com/2016/02/05/you-complete-me/
@pytest.fixture
def df1():
return pd.DataFrame(
{
"Year": [1999, 2000, 2004, 1999, 2004],
"Taxon": [
"Saccharina",
"Saccharina",
"Saccharina",
"Agarum",
"Agarum",
],
"Abundance": [4, 5, 2, 1, 8],
}
)
def test_empty_column(df1):
"""Return dataframe if `columns` is empty."""
assert_frame_equal(df1.complete(), df1)
def test_MultiIndex_column(df1):
"""Raise ValueError if column is a MultiIndex."""
df = df1
df.columns = [["A", "B", "C"], list(df.columns)]
with pytest.raises(ValueError):
df1.complete(["Year", "Taxon"])
def test_column_duplicated(df1):
"""Raise ValueError if column is duplicated in `columns`"""
with pytest.raises(ValueError):
df1.complete(
columns=[
"Year",
"Taxon",
{"Year": lambda x: range(x.Year.min().x.Year.max() + 1)},
]
)
def test_type_columns(df1):
"""Raise error if columns is not a list object."""
with pytest.raises(TypeError):
df1.complete(columns="Year")
def test_fill_value_is_a_dict(df1):
"""Raise error if fill_value is not a dictionary"""
with pytest.raises(TypeError):
df1.complete(columns=["Year", "Taxon"], fill_value=0)
def test_wrong_column_fill_value(df1):
"""Raise ValueError if column in `fill_value` does not exist."""
with pytest.raises(ValueError):
df1.complete(columns=["Taxon", "Year"], fill_value={"year": 0})
def test_wrong_data_type_dict(df1):
"""
Raise ValueError if value in dictionary
is not a 1-dimensional object.
"""
with pytest.raises(ValueError):
df1.complete(columns=[{"Year": pd.DataFrame([2005, 2006, 2007])}])
frame = pd.DataFrame(
{
"Year": [1999, 2000, 2004, 1999, 2004],
"Taxon": [
"Saccharina",
"Saccharina",
"Saccharina",
"Agarum",
"Agarum",
],
"Abundance": [4, 5, 2, 1, 8],
}
)
wrong_columns = (
(frame, ["b", "Year"]),
(frame, [{"Yayay": range(7)}]),
(frame, ["Year", ["Abundant", "Taxon"]]),
(frame, ["Year", ("Abundant", "Taxon")]),
)
empty_sub_columns = [
(frame, ["Year", []]),
(frame, ["Year", {}]),
(frame, ["Year", ()]),
]
@pytest.mark.parametrize("frame,wrong_columns", wrong_columns)
def test_wrong_columns(frame, wrong_columns):
"""Test that ValueError is raised if wrong column is supplied."""
with pytest.raises(ValueError):
frame.complete(columns=wrong_columns)
@pytest.mark.parametrize("frame,empty_sub_cols", empty_sub_columns)
def test_empty_subcols(frame, empty_sub_cols):
"""Raise ValueError for an empty group in columns"""
with pytest.raises(ValueError):
frame.complete(columns=empty_sub_cols)
def test_fill_value(df1):
"""Test fill_value argument."""
output1 = pd.DataFrame(
{
"Year": [1999, 1999, 2000, 2000, 2004, 2004],
"Taxon": [
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
],
"Abundance": [1, 4.0, 0, 5, 8, 2],
}
)
result = df1.complete(
columns=["Year", "Taxon"], fill_value={"Abundance": 0}
)
assert_frame_equal(result, output1)
@pytest.fixture
def df1_output():
return pd.DataFrame(
{
"Year": [
1999,
1999,
2000,
2000,
2001,
2001,
2002,
2002,
2003,
2003,
2004,
2004,
],
"Taxon": [
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
],
"Abundance": [1.0, 4, 0, 5, 0, 0, 0, 0, 0, 0, 8, 2],
}
)
def test_fill_value_all_years(df1, df1_output):
"""
    Test that the complete function accurately replicates for
    all the years from 1999 to 2004.
"""
result = df1.complete(
columns=[
{"Year": lambda x: range(x.Year.min(), x.Year.max() + 1)},
"Taxon",
],
fill_value={"Abundance": 0},
)
assert_frame_equal(result, df1_output)
def test_dict_series(df1, df1_output):
"""
Test the complete function if a dictionary containing a Series
is present in `columns`.
"""
result = df1.complete(
columns=[
{
"Year": lambda x: pd.Series(
range(x.Year.min(), x.Year.max() + 1)
)
},
"Taxon",
],
fill_value={"Abundance": 0},
)
assert_frame_equal(result, df1_output)
def test_dict_series_duplicates(df1, df1_output):
"""
Test the complete function if a dictionary containing a
Series (with duplicates) is present in `columns`.
"""
result = df1.complete(
columns=[
{
"Year": pd.Series(
[1999, 2000, 2000, 2001, 2002, 2002, 2002, 2003, 2004]
)
},
"Taxon",
],
fill_value={"Abundance": 0},
)
assert_frame_equal(result, df1_output)
def test_dict_values_outside_range(df1):
"""
    Test the output if a dictionary is present
    and none of the values in the dataframe,
    for the corresponding label, is present
    in the dictionary's values.
"""
result = df1.complete(
columns=[("Taxon", "Abundance"), {"Year": np.arange(2005, 2007)}]
)
expected = pd.DataFrame(
[
{"Taxon": "Agarum", "Abundance": 1, "Year": 1999},
{"Taxon": "Agarum", "Abundance": 1, "Year": 2005},
{"Taxon": "Agarum", "Abundance": 1, "Year": 2006},
{"Taxon": "Agarum", "Abundance": 8, "Year": 2004},
{"Taxon": "Agarum", "Abundance": 8, "Year": 2005},
{"Taxon": "Agarum", "Abundance": 8, "Year": 2006},
{"Taxon": "Saccharina", "Abundance": 2, "Year": 2004},
{"Taxon": "Saccharina", "Abundance": 2, "Year": 2005},
{"Taxon": "Saccharina", "Abundance": 2, "Year": 2006},
{"Taxon": "Saccharina", "Abundance": 4, "Year": 1999},
{"Taxon": "Saccharina", "Abundance": 4, "Year": 2005},
{"Taxon": "Saccharina", "Abundance": 4, "Year": 2006},
{"Taxon": "Saccharina", "Abundance": 5, "Year": 2000},
{"Taxon": "Saccharina", "Abundance": 5, "Year": 2005},
{"Taxon": "Saccharina", "Abundance": 5, "Year": 2006},
]
)
assert_frame_equal(result, expected)
# adapted from https://tidyr.tidyverse.org/reference/complete.html
complete_parameters = [
(
pd.DataFrame(
{
"group": [1, 2, 1],
"item_id": [1, 2, 2],
"item_name": ["a", "b", "b"],
"value1": [1, 2, 3],
"value2": [4, 5, 6],
}
),
["group", "item_id", "item_name"],
pd.DataFrame(
{
"group": [1, 1, 1, 1, 2, 2, 2, 2],
"item_id": [1, 1, 2, 2, 1, 1, 2, 2],
"item_name": ["a", "b", "a", "b", "a", "b", "a", "b"],
"value1": [
1.0,
np.nan,
np.nan,
3.0,
np.nan,
np.nan,
np.nan,
2.0,
],
"value2": [
4.0,
np.nan,
np.nan,
6.0,
np.nan,
np.nan,
np.nan,
5.0,
],
}
),
),
(
pd.DataFrame(
{
"group": [1, 2, 1],
"item_id": [1, 2, 2],
"item_name": ["a", "b", "b"],
"value1": [1, 2, 3],
"value2": [4, 5, 6],
}
),
["group", ("item_id", "item_name")],
pd.DataFrame(
{
"group": [1, 1, 2, 2],
"item_id": [1, 2, 1, 2],
"item_name": ["a", "b", "a", "b"],
"value1": [1.0, 3.0, np.nan, 2.0],
"value2": [4.0, 6.0, np.nan, 5.0],
}
),
),
]
@pytest.mark.parametrize("df,columns,output", complete_parameters)
def test_complete(df, columns, output):
"Test the complete function, with and without groupings."
assert_frame_equal(df.complete(columns), output)
@pytest.fixture
def duplicates():
return pd.DataFrame(
{
"row": [
"21.08.2020",
"21.08.2020",
"21.08.2020",
"21.08.2020",
"22.08.2020",
"22.08.2020",
"22.08.2020",
"22.08.2020",
],
"column": ["A", "A", "B", "C", "A", "B", "B", "C"],
"value": [43.0, 36, 36, 28, 16, 40, 34, 0],
}
)
# https://stackoverflow.com/questions/63541729/
# pandas-how-to-include-all-columns-for-all-rows-although-value-is-missing-in-a-d
# /63543164#63543164
def test_duplicates(duplicates):
"""Test that the complete function works for duplicate values."""
df = pd.DataFrame(
{
"row": {
0: "21.08.2020",
1: "21.08.2020",
2: "21.08.2020",
3: "21.08.2020",
4: "22.08.2020",
5: "22.08.2020",
6: "22.08.2020",
},
"column": {0: "A", 1: "A", 2: "B", 3: "C", 4: "A", 5: "B", 6: "B"},
"value": {0: 43, 1: 36, 2: 36, 3: 28, 4: 16, 5: 40, 6: 34},
}
)
result = df.complete(columns=["row", "column"], fill_value={"value": 0})
assert_frame_equal(result, duplicates)
def test_unsorted_duplicates(duplicates):
"""Test output for unsorted duplicates."""
df = pd.DataFrame(
{
"row": {
0: "22.08.2020",
1: "22.08.2020",
2: "21.08.2020",
3: "21.08.2020",
4: "21.08.2020",
5: "21.08.2020",
6: "22.08.2020",
},
"column": {
0: "B",
1: "B",
2: "A",
3: "A",
4: "B",
5: "C",
6: "A",
},
"value": {0: 40, 1: 34, 2: 43, 3: 36, 4: 36, 5: 28, 6: 16},
}
)
result = df.complete(columns=["row", "column"], fill_value={"value": 0})
assert_frame_equal(result, duplicates)
#!/usr/bin/env python2
from __future__ import division
import sys, os
sys.path.append(os.path.join(os.getcwd(), '../src'))
import time
import pickle
from collections import OrderedDict
import numpy as np
from scipy import optimize
import matplotlib.pyplot as plt
from matplotlib import cm
import pandas as pd
from binary_response import *
from figure_presets import *
from plotting_functions import *
from adaptive_response.adaptive_threshold import AdaptiveThresholdTheoryReceptorFactors
Nr, alpha = 32, 1.5
Ns = 256
s = 0.1 * Ns
#r_list = [6, 4, 2]
an_list = [0.5, 0.2, 0.1]
width = 1
with open('data/mutual_information_an.pkl', 'rb') as fp:
res = pickle.load(fp)
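# `res` is assumed to be a dict containing at least the 'factors' and 'data'
# entries used below.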
factors = res['factors']
df = pd.DataFrame(res['data'])
'''xgb-ens for education/age/gender'''
import numpy as np
import pandas as pd
import xgboost as xgb
import cfg
label_revserv_dict = {0: '人类作者',  # human author
1: '机器作者',  # machine author
2: '机器翻译',  # machine translation
3: '自动摘要'}  # automatic summary
def xgb_acc_score(preds, dtrain):
y_true = dtrain.get_label()
y_pred = np.argmax(preds, axis=1)
return [('acc', np.mean(y_true == y_pred))]
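# xgb_acc_score follows xgboost's custom-metric signature (preds, dtrain) and
# returns (name, value) pairs, so it is presumably meant to be passed as the
# `feval` argument of xgb.train / xgb.cv (the training call is not shown here).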
df_lr = pd.read_csv(cfg.data_path + 'tfidf_lr_stack_20W.csv')
df_svm = pd.read_csv(cfg.data_path + 'tfidf_svm_stack_20W.csv')
import numpy as np
np.random.seed(1337) # for reproducibility
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import accuracy_score
from dbn import SupervisedDBNRegression
from dbn import SupervisedDBNClassification
import time
import matplotlib.pyplot as plt
def testing(csv_filepath , save_output_image , model_path):
df = pd.read_csv(csv_filepath)
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 5 11:27:42 2020
@author: Mansi
"""
# Importing the modules
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
""" PART 1 """
# Importing covid19 dataset
corona_dataset_csv = pd.read_csv('covid19_Confirmed_dataset.csv')
# Deleting the useless columns
corona_dataset_csv.drop(['Lat','Long'],axis=1,inplace=True)
# Aggregating the rows by the country
corona_dataset_aggregated = corona_dataset_csv.groupby("Country/Region").sum()
# Visualizing data for a few countries, for example Australia, Italy and Spain
corona_dataset_aggregated.loc['Australia'].plot()
corona_dataset_aggregated.loc['Italy'].plot()
corona_dataset_aggregated.loc['Spain'].plot()
plt.legend()
# Calculating a good summary measure for a country, for example Australia
corona_dataset_aggregated.loc['Australia'].plot()
plt.legend()
# Calculating the first derivative of the curve
corona_dataset_aggregated.loc['Australia'].diff().plot()
plt.legend()
# Find the maximum infection rate for Australia
corona_dataset_aggregated.loc['Australia'].diff().max()
# Find maximum infection rate for all of the countries
countries = list(corona_dataset_aggregated.index)
max_infection_rates = []
for country in countries:
max_infection_rates.append(corona_dataset_aggregated.loc[country].diff().max())
corona_dataset_aggregated['max infection rate'] = max_infection_rates
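# Note: the per-country loop above could be vectorized (before the new column
# is added) as corona_dataset_aggregated.diff(axis=1).max(axis=1), assuming
# the date columns are in chronological order.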
# Create a new dataframe with only needed column
corona_data = pd.DataFrame(corona_dataset_aggregated['max infection rate'])
""" PART 2 """
# Importing the WorldHappinessReport.csv dataset
world_happiness_report = pd.read_csv("worldwide_happiness_report.csv")
import pytest
import numpy as np
import pandas
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
import matplotlib
import modin.pandas as pd
from modin.pandas.utils import to_pandas
from numpy.testing import assert_array_equal
from .utils import (
random_state,
RAND_LOW,
RAND_HIGH,
df_equals,
df_is_empty,
arg_keys,
name_contains,
test_data_values,
test_data_keys,
test_data_with_duplicates_values,
test_data_with_duplicates_keys,
numeric_dfs,
no_numeric_dfs,
test_func_keys,
test_func_values,
query_func_keys,
query_func_values,
agg_func_keys,
agg_func_values,
numeric_agg_funcs,
quantiles_keys,
quantiles_values,
indices_keys,
indices_values,
axis_keys,
axis_values,
bool_arg_keys,
bool_arg_values,
int_arg_keys,
int_arg_values,
)
# TODO remove once modin-project/modin#469 is resolved
agg_func_keys.remove("str")
agg_func_values.remove(str)
pd.DEFAULT_NPARTITIONS = 4
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
class TestDFPartOne:
# Test inter df math functions
def inter_df_math_helper(self, modin_df, pandas_df, op):
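# Pattern used throughout these helpers: run the operation on the pandas
# frame first; if pandas raises, assert that Modin raises the same exception
# type, otherwise compare the Modin result against the pandas result.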
# Test dataframe to dataframe
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
# Test dataframe to int
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
# Test dataframe to float
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
# Test transposed dataframes to float
try:
pandas_result = getattr(pandas_df.T, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df.T, op)(4.0)
else:
modin_result = getattr(modin_df.T, op)(4.0)
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
# Test dataframe to different dataframe shape
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
# Test dataframe to list
list_test = random_state.randint(RAND_LOW, RAND_HIGH, size=(modin_df.shape[1]))
try:
pandas_result = getattr(pandas_df, op)(list_test, axis=1)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(list_test, axis=1)
else:
modin_result = getattr(modin_df, op)(list_test, axis=1)
df_equals(modin_result, pandas_result)
# Test dataframe to series
series_test_modin = modin_df[modin_df.columns[0]]
series_test_pandas = pandas_df[pandas_df.columns[0]]
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Test dataframe to series with different index
series_test_modin = modin_df[modin_df.columns[0]].reset_index(drop=True)
series_test_pandas = pandas_df[pandas_df.columns[0]].reset_index(drop=True)
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Level test
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "add")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_div(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "div")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_divide(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "divide")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_floordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "floordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_multiply(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "multiply")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pow(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive
# values
try:
pandas_df = pandas_df.abs()
except Exception:
pass
else:
modin_df = modin_df.abs()
self.inter_df_math_helper(modin_df, pandas_df, "pow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_sub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "sub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_subtract(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "subtract")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_truediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "truediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___div__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__div__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___add__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__add__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___radd__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__radd__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___pow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__pow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rpow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rpow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___sub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__sub__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___floordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__floordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rfloordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rfloordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___truediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__truediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rtruediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rtruediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rdiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rdiv__")
# END test inter df math functions
# Test comparison of inter operation functions
def comparison_inter_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)("a")
except TypeError:
with pytest.raises(TypeError):
repr(getattr(modin_df, op)("a"))
else:
modin_result = getattr(modin_df, op)("a")
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_eq(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "eq")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ge(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ge")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_gt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "gt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_le(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "le")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_lt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "lt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ne(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ne")
# END test comparison of inter operation functions
# Test dataframe right operations
def inter_df_math_right_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_radd(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "radd")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rdiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rdiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rfloordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rfloordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rpow(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive values
# We need to check that negative integers are not used efficiently
if "100x100" not in request.node.name:
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rpow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rsub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rsub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rtruediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rtruediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rsub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "__rsub__")
# END test dataframe right operations
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_abs(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.abs()
except Exception as e:
with pytest.raises(type(e)):
modin_df.abs()
else:
modin_result = modin_df.abs()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_prefix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_prefix = "TEST"
new_modin_df = modin_df.add_prefix(test_prefix)
new_pandas_df = pandas_df.add_prefix(test_prefix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
x = 2
modin_df.applymap(x)
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap_numeric(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_suffix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_suffix = "TEST"
new_modin_df = modin_df.add_suffix(test_suffix)
new_pandas_df = pandas_df.add_suffix(test_suffix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_at(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# We skip nan datasets because nan != nan
if "nan" not in request.node.name:
key1 = modin_df.columns[0]
# Scalar
assert modin_df.at[0, key1] == pandas_df.at[0, key1]
# Series
df_equals(modin_df.loc[0].at[key1], pandas_df.loc[0].at[key1])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.at[1, key1] = modin_df.at[0, key1]
pandas_df_copy.at[1, key1] = pandas_df.at[0, key1]
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
for modin_axis, pd_axis in zip(modin_df.axes, pandas_df.axes):
assert np.array_equal(modin_axis, pd_axis)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_copy(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
# pandas_df is unused but kept so there won't be confusing list comprehension
# stuff in the pytest.mark.parametrize
new_modin_df = modin_df.copy()
assert new_modin_df is not modin_df
assert np.array_equal(
new_modin_df._query_compiler._modin_frame._partitions,
modin_df._query_compiler._modin_frame._partitions,
)
assert new_modin_df is not modin_df
df_equals(new_modin_df, modin_df)
# Shallow copy tests
modin_df = pd.DataFrame(data)
modin_df_cp = modin_df.copy(False)
modin_df[modin_df.columns[0]] = 0
df_equals(modin_df, modin_df_cp)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dtypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.dtypes, pandas_df.dtypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ftypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.ftypes, pandas_df.ftypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("key", indices_values, ids=indices_keys)
def test_get(self, data, key):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get(key), pandas_df.get(key))
df_equals(
modin_df.get(key, default="default"), pandas_df.get(key, default="default")
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_dtype_counts(self, data):
modin_result = pd.DataFrame(data).get_dtype_counts().sort_index()
pandas_result = pandas.DataFrame(data).get_dtype_counts().sort_index()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"dummy_na", bool_arg_values, ids=arg_keys("dummy_na", bool_arg_keys)
)
@pytest.mark.parametrize(
"drop_first", bool_arg_values, ids=arg_keys("drop_first", bool_arg_keys)
)
def test_get_dummies(self, request, data, dummy_na, drop_first):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas.get_dummies(
pandas_df, dummy_na=dummy_na, drop_first=drop_first
)
except Exception as e:
with pytest.raises(type(e)):
pd.get_dummies(modin_df, dummy_na=dummy_na, drop_first=drop_first)
else:
modin_result = pd.get_dummies(
modin_df, dummy_na=dummy_na, drop_first=drop_first
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_ftype_counts(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get_ftype_counts(), pandas_df.get_ftype_counts())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg(self, data, axis, func):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.aggregate(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.aggregate(func, axis)
else:
modin_result = modin_df.aggregate(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_aggregate_error_checking(self, data):
modin_df = pd.DataFrame(data)
assert modin_df.aggregate("ndim") == 2
with pytest.warns(UserWarning):
modin_df.aggregate(
{modin_df.columns[0]: "sum", modin_df.columns[1]: "mean"}
)
with pytest.warns(UserWarning):
modin_df.aggregate("cumproduct")
with pytest.raises(ValueError):
modin_df.aggregate("NOT_EXISTS")
def test_align(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).align(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_all(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# Test when axis is None. This repeats work, but it is easier than using a list in the parametrize decorator
try:
pandas_result = pandas_df.all(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.all(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# Test when axis is None. This repeats work, but it is easier than using a list in the parametrize decorator
try:
pandas_result = pandas_df.T.all(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_any(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.any(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_append(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
data_to_append = {"append_a": 2, "append_b": 1000}
ignore_idx_values = [True, False]
for ignore in ignore_idx_values:
try:
pandas_result = pandas_df.append(data_to_append, ignore_index=ignore)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(data_to_append, ignore_index=ignore)
else:
modin_result = modin_df.append(data_to_append, ignore_index=ignore)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(pandas_df.iloc[-1])
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df.iloc[-1])
else:
modin_result = modin_df.append(modin_df.iloc[-1])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(list(pandas_df.iloc[-1]))
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(list(modin_df.iloc[-1]))
else:
modin_result = modin_df.append(list(modin_df.iloc[-1]))
df_equals(modin_result, pandas_result)
verify_integrity_values = [True, False]
for verify_integrity in verify_integrity_values:
try:
pandas_result = pandas_df.append(
[pandas_df, pandas_df], verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
else:
modin_result = modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(
pandas_df, verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df, verify_integrity=verify_integrity)
else:
modin_result = modin_df.append(
modin_df, verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_apply(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(TypeError):
modin_df.apply({"row": func}, axis=1)
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
def test_apply_metadata(self):
def add(a, b, c):
return a + b + c
data = {"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}
modin_df = pd.DataFrame(data)
modin_df["add"] = modin_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
pandas_df = pandas.DataFrame(data)
pandas_df["add"] = pandas_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_apply_numeric(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
modin_result = modin_df.apply(lambda df: df.drop(key), axis=1)
pandas_result = pandas_df.apply(lambda df: df.drop(key), axis=1)
df_equals(modin_result, pandas_result)
def test_as_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).as_blocks()
def test_as_matrix(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
mat = frame.as_matrix()
frame_columns = frame.columns
for i, row in enumerate(mat):
for j, value in enumerate(row):
col = frame_columns[j]
if np.isnan(value):
assert np.isnan(frame[col][i])
else:
assert value == frame[col][i]
# mixed type
mat = pd.DataFrame(test_data.mixed_frame).as_matrix(["foo", "A"])
assert mat[0, 0] == "bar"
df = pd.DataFrame({"real": [1, 2, 3], "complex": [1j, 2j, 3j]})
mat = df.as_matrix()
assert mat[0, 1] == 1j
# single block corner case
mat = pd.DataFrame(test_data.frame).as_matrix(["A", "B"])
expected = test_data.frame.reindex(columns=["A", "B"]).values
tm.assert_almost_equal(mat, expected)
def test_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
assert_array_equal(frame.values, test_data.frame.values)
def test_partition_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
for (
partition
) in frame._query_compiler._modin_frame._partitions.flatten().tolist():
assert_array_equal(partition.to_pandas().values, partition.to_numpy())
def test_asfreq(self):
index = pd.date_range("1/1/2000", periods=4, freq="T")
series = pd.Series([0.0, None, 2.0, 3.0], index=index)
df = pd.DataFrame({"s": series})
with pytest.warns(UserWarning):
# We are only testing that this defaults to pandas, so we will just check for
# the warning
df.asfreq(freq="30S")
def test_asof(self):
df = pd.DataFrame(
{"a": [10, 20, 30, 40, 50], "b": [None, None, None, None, 500]},
index=pd.DatetimeIndex(
[
"2018-02-27 09:01:00",
"2018-02-27 09:02:00",
"2018-02-27 09:03:00",
"2018-02-27 09:04:00",
"2018-02-27 09:05:00",
]
),
)
with pytest.warns(UserWarning):
df.asof(pd.DatetimeIndex(["2018-02-27 09:03:30", "2018-02-27 09:04:30"]))
def test_assign(self):
data = test_data_values[0]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.warns(UserWarning):
modin_result = modin_df.assign(new_column=pd.Series(modin_df.iloc[:, 0]))
pandas_result = pandas_df.assign(new_column=pd.Series(pandas_df.iloc[:, 0]))
df_equals(modin_result, pandas_result)
def test_astype(self):
td = TestData()
modin_df = pd.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
expected_df = pandas.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
modin_df_casted = modin_df.astype(np.int32)
expected_df_casted = expected_df.astype(np.int32)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(np.float64)
expected_df_casted = expected_df.astype(np.float64)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(str)
expected_df_casted = expected_df.astype(str)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype("category")
expected_df_casted = expected_df.astype("category")
df_equals(modin_df_casted, expected_df_casted)
dtype_dict = {"A": np.int32, "B": np.int64, "C": str}
modin_df_casted = modin_df.astype(dtype_dict)
expected_df_casted = expected_df.astype(dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
# Ignore lint because this is testing bad input
bad_dtype_dict = {"B": np.int32, "B": np.int64, "B": str} # noqa F601
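# Python dict literals keep only the last duplicate key, so this effectively
# casts just column "B" to str.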
modin_df_casted = modin_df.astype(bad_dtype_dict)
expected_df_casted = expected_df.astype(bad_dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
with pytest.raises(KeyError):
modin_df.astype({"not_exists": np.uint8})
def test_astype_category(self):
modin_df = pd.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
pandas_df = pandas.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
modin_result = modin_df.astype({"col1": "category"})
pandas_result = pandas_df.astype({"col1": "category"})
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
modin_result = modin_df.astype("category")
pandas_result = pandas_df.astype("category")
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
def test_at_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.at_time("12:00")
def test_between_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.between_time("0:15", "0:45")
def test_bfill(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(modin_df.bfill(), test_data.tsframe.bfill())
def test_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).blocks
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_bool(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(ValueError):
modin_df.bool()
modin_df.__bool__()
single_bool_pandas_df = pandas.DataFrame([True])
single_bool_modin_df = pd.DataFrame([True])
assert single_bool_pandas_df.bool() == single_bool_modin_df.bool()
with pytest.raises(ValueError):
# __bool__ always raises this error for DataFrames
single_bool_modin_df.__bool__()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_boxplot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
assert modin_df.boxplot() == to_pandas(modin_df).boxplot()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower, upper = np.sort(random_state.random_integers(RAND_LOW, RAND_HIGH, 2))
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test only upper scalar bound
modin_result = modin_df.clip(None, upper, axis=axis)
pandas_result = pandas_df.clip(None, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper scalar bound
modin_result = modin_df.clip(lower, upper, axis=axis)
pandas_result = pandas_df.clip(lower, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper list bound on each column
modin_result = modin_df.clip(lower_list, upper_list, axis=axis)
pandas_result = pandas_df.clip(lower_list, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
# test only upper list bound on each column
modin_result = modin_df.clip(np.nan, upper_list, axis=axis)
pandas_result = pandas_df.clip(np.nan, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
with pytest.raises(ValueError):
modin_df.clip(lower=[1, 2, 3], axis=None)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_lower(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test lower scalar bound
pandas_result = pandas_df.clip_lower(lower, axis=axis)
modin_result = modin_df.clip_lower(lower, axis=axis)
df_equals(modin_result, pandas_result)
# test lower list bound on each column
pandas_result = pandas_df.clip_lower(lower_list, axis=axis)
modin_result = modin_df.clip_lower(lower_list, axis=axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_upper(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
upper = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test upper scalar bound
modin_result = modin_df.clip_upper(upper, axis=axis)
pandas_result = pandas_df.clip_upper(upper, axis=axis)
df_equals(modin_result, pandas_result)
# test upper list bound on each column
modin_result = modin_df.clip_upper(upper_list, axis=axis)
pandas_result = pandas_df.clip_upper(upper_list, axis=axis)
df_equals(modin_result, pandas_result)
def test_combine(self):
df1 = pd.DataFrame({"A": [0, 0], "B": [4, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine(df2, lambda s1, s2: s1 if s1.sum() < s2.sum() else s2)
def test_combine_first(self):
df1 = pd.DataFrame({"A": [None, 0], "B": [None, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine_first(df2)
def test_compound(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).compound()
def test_corr(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corr()
def test_corrwith(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corrwith(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_count(self, request, data, axis, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.T.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
try: # test error
pandas_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
try: # test error
pandas_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
for level in list(range(levels)) + (axis_names if axis_names else []):
modin_multi_level_result = modin_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
pandas_multi_level_result = pandas_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
def test_cov(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).cov()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummax(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummin(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumprod(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumsum(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# pandas exhibits weird behavior for this case
# Remove this case when we can pull the error messages from backend
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.T.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_describe(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.describe(), pandas_df.describe())
percentiles = [0.10, 0.11, 0.44, 0.78, 0.99]
df_equals(
modin_df.describe(percentiles=percentiles),
pandas_df.describe(percentiles=percentiles),
)
try:
pandas_result = pandas_df.describe(exclude=[np.float64])
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=[np.float64])
else:
modin_result = modin_df.describe(exclude=[np.float64])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(exclude=np.float64)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=np.float64)
else:
modin_result = modin_df.describe(exclude=np.float64)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
else:
modin_result = modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=str(modin_df.dtypes.values[0]))
pandas_result = pandas_df.describe(include=str(pandas_df.dtypes.values[0]))
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=[np.number])
pandas_result = pandas_df.describe(include=[np.number])
df_equals(modin_result, pandas_result)
df_equals(modin_df.describe(include="all"), pandas_df.describe(include="all"))
modin_df = pd.DataFrame(data).applymap(str)
pandas_df = pandas.DataFrame(data).applymap(str)
try:
df_equals(modin_df.describe(), pandas_df.describe())
except AssertionError:
            # We have to do this because we choose the highest count slightly differently
            # than pandas. Because there is no true guarantee of which one will come first,
            # if they don't match, at least make sure that the `freq` is the same.
df_equals(
modin_df.describe().loc[["count", "unique", "freq"]],
pandas_df.describe().loc[["count", "unique", "freq"]],
)
def test_describe_dtypes(self):
modin_df = pd.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
pandas_df = pandas.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
modin_result = modin_df.describe()
pandas_result = pandas_df.describe()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"periods", int_arg_values, ids=arg_keys("periods", int_arg_keys)
)
def test_diff(self, request, data, axis, periods):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.T.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
def test_drop(self):
frame_data = {"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]}
simple = pandas.DataFrame(frame_data)
modin_simple = pd.DataFrame(frame_data)
df_equals(modin_simple.drop("A", axis=1), simple[["B"]])
df_equals(modin_simple.drop(["A", "B"], axis="columns"), simple[[]])
df_equals(modin_simple.drop([0, 1, 3], axis=0), simple.loc[[2], :])
df_equals(modin_simple.drop([0, 3], axis="index"), simple.loc[[1, 2], :])
pytest.raises(ValueError, modin_simple.drop, 5)
pytest.raises(ValueError, modin_simple.drop, "C", 1)
pytest.raises(ValueError, modin_simple.drop, [1, 5])
pytest.raises(ValueError, modin_simple.drop, ["A", "C"], 1)
# errors = 'ignore'
df_equals(modin_simple.drop(5, errors="ignore"), simple)
df_equals(modin_simple.drop([0, 5], errors="ignore"), simple.loc[[1, 2, 3], :])
df_equals(modin_simple.drop("C", axis=1, errors="ignore"), simple)
df_equals(modin_simple.drop(["A", "C"], axis=1, errors="ignore"), simple[["B"]])
# non-unique
nu_df = pandas.DataFrame(
zip(range(3), range(-3, 1), list("abc")), columns=["a", "a", "b"]
)
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("a", axis=1), nu_df[["b"]])
df_equals(modin_nu_df.drop("b", axis="columns"), nu_df["a"])
df_equals(modin_nu_df.drop([]), nu_df)
nu_df = nu_df.set_index(pandas.Index(["X", "Y", "X"]))
nu_df.columns = list("abc")
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("X", axis="rows"), nu_df.loc[["Y"], :])
df_equals(modin_nu_df.drop(["X", "Y"], axis=0), nu_df.loc[[], :])
# inplace cache issue
frame_data = random_state.randn(10, 3)
df = pandas.DataFrame(frame_data, columns=list("abc"))
modin_df = pd.DataFrame(frame_data, columns=list("abc"))
expected = df[~(df.b > 0)]
modin_df.drop(labels=df[df.b > 0].index, inplace=True)
df_equals(modin_df, expected)
midx = pd.MultiIndex(
levels=[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
df = pd.DataFrame(
index=midx,
columns=["big", "small"],
data=[
[45, 30],
[200, 100],
[1.5, 1],
[30, 20],
[250, 150],
[1.5, 0.8],
[320, 250],
[1, 0.8],
[0.3, 0.2],
],
)
with pytest.warns(UserWarning):
df.drop(index="length", level=1)
def test_drop_api_equivalence(self):
        # equivalence of the labels/axis and index/columns APIs
frame_data = [[1, 2, 3], [3, 4, 5], [5, 6, 7]]
modin_df = pd.DataFrame(
frame_data, index=["a", "b", "c"], columns=["d", "e", "f"]
)
modin_df1 = modin_df.drop("a")
modin_df2 = modin_df.drop(index="a")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop("d", 1)
modin_df2 = modin_df.drop(columns="d")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(labels="e", axis=1)
modin_df2 = modin_df.drop(columns="e")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0)
modin_df2 = modin_df.drop(index=["a"])
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0).drop(["d"], axis=1)
modin_df2 = modin_df.drop(index=["a"], columns=["d"])
df_equals(modin_df1, modin_df2)
with pytest.raises(ValueError):
modin_df.drop(labels="a", index="b")
with pytest.raises(ValueError):
modin_df.drop(labels="a", columns="b")
with pytest.raises(ValueError):
modin_df.drop(axis=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_drop_transpose(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.T.drop(columns=[0, 1, 2])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(index=["col3", "col1"])
pandas_result = pandas_df.T.drop(index=["col3", "col1"])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
df_equals(modin_result, pandas_result)
def test_droplevel(self):
df = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis(["a", "b"])
)
df.columns = pd.MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
with pytest.warns(UserWarning):
df.droplevel("a")
with pytest.warns(UserWarning):
df.droplevel("level_2", axis=1)
@pytest.mark.parametrize(
"data", test_data_with_duplicates_values, ids=test_data_with_duplicates_keys
)
@pytest.mark.parametrize(
"keep", ["last", "first", False], ids=["last", "first", "False"]
)
@pytest.mark.parametrize(
"subset", [None, ["col1", "col3", "col7"]], ids=["None", "subset"]
)
def test_drop_duplicates(self, data, keep, subset):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
pandas_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
)
modin_results = modin_df.drop_duplicates(keep=keep, inplace=True, subset=subset)
pandas_results = pandas_df.drop_duplicates(
keep=keep, inplace=True, subset=subset
)
df_equals(modin_results, pandas_results)
def test_drop_duplicates_with_missing_index_values(self):
data = {
"columns": ["value", "time", "id"],
"index": [
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
20,
21,
22,
23,
24,
25,
26,
27,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
],
"data": [
["3", 1279213398000.0, 88.0],
["3", 1279204682000.0, 88.0],
["0", 1245772835000.0, 448.0],
["0", 1270564258000.0, 32.0],
["0", 1267106669000.0, 118.0],
["7", 1300621123000.0, 5.0],
["0", 1251130752000.0, 957.0],
["0", 1311683506000.0, 62.0],
["9", 1283692698000.0, 89.0],
["9", 1270234253000.0, 64.0],
["0", 1285088818000.0, 50.0],
["0", 1218212725000.0, 695.0],
["2", 1383933968000.0, 348.0],
["0", 1368227625000.0, 257.0],
["1", 1454514093000.0, 446.0],
["1", 1428497427000.0, 134.0],
["1", 1459184936000.0, 568.0],
["1", 1502293302000.0, 599.0],
["1", 1491833358000.0, 829.0],
["1", 1485431534000.0, 806.0],
["8", 1351800505000.0, 101.0],
["0", 1357247721000.0, 916.0],
["0", 1335804423000.0, 370.0],
["24", 1327547726000.0, 720.0],
["0", 1332334140000.0, 415.0],
["0", 1309543100000.0, 30.0],
["18", 1309541141000.0, 30.0],
["0", 1298979435000.0, 48.0],
["14", 1276098160000.0, 59.0],
["0", 1233936302000.0, 109.0],
],
}
pandas_df = pandas.DataFrame(
data["data"], index=data["index"], columns=data["columns"]
)
modin_df = pd.DataFrame(
data["data"], index=data["index"], columns=data["columns"]
)
modin_result = modin_df.sort_values(["id", "time"]).drop_duplicates(["id"])
pandas_result = pandas_df.sort_values(["id", "time"]).drop_duplicates(["id"])
df_equals(modin_result, pandas_result)
def test_drop_duplicates_after_sort(self):
data = [
{"value": 1, "time": 2},
{"value": 1, "time": 1},
{"value": 2, "time": 1},
{"value": 2, "time": 2},
]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.sort_values(["value", "time"]).drop_duplicates(
["value"]
)
pandas_result = pandas_df.sort_values(["value", "time"]).drop_duplicates(
["value"]
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("how", ["any", "all"], ids=["any", "all"])
def test_dropna(self, data, axis, how):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
modin_df.dropna(axis=axis, how="invalid")
with pytest.raises(TypeError):
modin_df.dropna(axis=axis, how=None, thresh=None)
with pytest.raises(KeyError):
modin_df.dropna(axis=axis, subset=["NotExists"], how=how)
modin_result = modin_df.dropna(axis=axis, how=how)
pandas_result = pandas_df.dropna(axis=axis, how=how)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_inplace(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.dropna()
modin_df.dropna(inplace=True)
df_equals(modin_df, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_df.dropna(thresh=2, inplace=True)
modin_df.dropna(thresh=2, inplace=True)
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_df.dropna(axis=1, how="any", inplace=True)
modin_df.dropna(axis=1, how="any", inplace=True)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.dropna(how="all", axis=[0, 1]),
pandas_df.dropna(how="all", axis=[0, 1]),
)
df_equals(
modin_df.dropna(how="all", axis=(0, 1)),
pandas_df.dropna(how="all", axis=(0, 1)),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes_inplace(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.dropna(how="all", axis=[0, 1], inplace=True)
pandas_df_copy.dropna(how="all", axis=[0, 1], inplace=True)
df_equals(modin_df_copy, pandas_df_copy)
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.dropna(how="all", axis=(0, 1), inplace=True)
pandas_df_copy.dropna(how="all", axis=(0, 1), inplace=True)
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
column_subset = modin_df.columns[0:2]
df_equals(
modin_df.dropna(how="all", subset=column_subset),
pandas_df.dropna(how="all", subset=column_subset),
)
df_equals(
modin_df.dropna(how="any", subset=column_subset),
pandas_df.dropna(how="any", subset=column_subset),
)
row_subset = modin_df.index[0:2]
df_equals(
modin_df.dropna(how="all", axis=1, subset=row_subset),
pandas_df.dropna(how="all", axis=1, subset=row_subset),
)
df_equals(
modin_df.dropna(how="any", axis=1, subset=row_subset),
pandas_df.dropna(how="any", axis=1, subset=row_subset),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset_error(self, data):
modin_df = pd.DataFrame(data)
        pandas_df = pandas.DataFrame(data)  # noqa: F841
        # pandas_df is intentionally unused; creating it here avoids confusing
        # list-comprehension filtering inside the pytest.mark.parametrize above.
with pytest.raises(KeyError):
modin_df.dropna(subset=list("EF"))
if len(modin_df.columns) < 5:
with pytest.raises(KeyError):
modin_df.dropna(axis=1, subset=[4, 5])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
col_len = len(modin_df.columns)
# Test list input
arr = np.arange(col_len)
modin_result = modin_df.dot(arr)
pandas_result = pandas_df.dot(arr)
df_equals(modin_result, pandas_result)
# Test bad dimensions
with pytest.raises(ValueError):
modin_result = modin_df.dot(np.arange(col_len + 10))
# Test series input
modin_series = pd.Series(np.arange(col_len), index=modin_df.columns)
pandas_series = pandas.Series(np.arange(col_len), index=modin_df.columns)
modin_result = modin_df.dot(modin_series)
pandas_result = pandas_df.dot(pandas_series)
df_equals(modin_result, pandas_result)
# Test when input series index doesn't line up with columns
with pytest.raises(ValueError):
modin_result = modin_df.dot(pd.Series(np.arange(col_len)))
with pytest.warns(UserWarning):
modin_df.dot(modin_df.T)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"keep", ["last", "first", False], ids=["last", "first", "False"]
)
def test_duplicated(self, data, keep):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.duplicated(keep=keep)
modin_result = modin_df.duplicated(keep=keep)
df_equals(modin_result, pandas_result)
import random
subset = random.sample(
list(pandas_df.columns), random.randint(1, len(pandas_df.columns))
)
pandas_result = pandas_df.duplicated(keep=keep, subset=subset)
modin_result = modin_df.duplicated(keep=keep, subset=subset)
df_equals(modin_result, pandas_result)
def test_empty_df(self):
df = pd.DataFrame(index=["a", "b"])
df_is_empty(df)
tm.assert_index_equal(df.index, pd.Index(["a", "b"]))
assert len(df.columns) == 0
df = pd.DataFrame(columns=["a", "b"])
df_is_empty(df)
assert len(df.index) == 0
tm.assert_index_equal(df.columns, pd.Index(["a", "b"]))
df = pd.DataFrame()
df_is_empty(df)
assert len(df.index) == 0
assert len(df.columns) == 0
df = pd.DataFrame(index=["a", "b"])
df_is_empty(df)
tm.assert_index_equal(df.index, pd.Index(["a", "b"]))
assert len(df.columns) == 0
df = pd.DataFrame(columns=["a", "b"])
df_is_empty(df)
assert len(df.index) == 0
tm.assert_index_equal(df.columns, pd.Index(["a", "b"]))
df = pd.DataFrame()
df_is_empty(df)
assert len(df.index) == 0
assert len(df.columns) == 0
def test_equals(self):
frame_data = {"col1": [2.9, 3, 3, 3], "col2": [2, 3, 4, 1]}
modin_df1 = pd.DataFrame(frame_data)
modin_df2 = pd.DataFrame(frame_data)
assert modin_df1.equals(modin_df2)
df_equals(modin_df1, modin_df2)
df_equals(modin_df1, pd.DataFrame(modin_df1))
frame_data = {"col1": [2.9, 3, 3, 3], "col2": [2, 3, 5, 1]}
modin_df3 = pd.DataFrame(frame_data, index=list("abcd"))
assert not modin_df1.equals(modin_df3)
with pytest.raises(AssertionError):
df_equals(modin_df3, modin_df1)
with pytest.raises(AssertionError):
df_equals(modin_df3, modin_df2)
assert modin_df1.equals(modin_df2._query_compiler.to_pandas())
def test_eval_df_use_case(self):
frame_data = {"a": random_state.randn(10), "b": random_state.randn(10)}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
# test eval for series results
tmp_pandas = df.eval("arctan2(sin(a), b)", engine="python", parser="pandas")
tmp_modin = modin_df.eval(
"arctan2(sin(a), b)", engine="python", parser="pandas"
)
assert isinstance(tmp_modin, pd.Series)
df_equals(tmp_modin, tmp_pandas)
# Test not inplace assignments
tmp_pandas = df.eval("e = arctan2(sin(a), b)", engine="python", parser="pandas")
tmp_modin = modin_df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas"
)
df_equals(tmp_modin, tmp_pandas)
# Test inplace assignments
df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas", inplace=True
)
modin_df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas", inplace=True
)
# TODO: Use a series equality validator.
df_equals(modin_df, df)
def test_eval_df_arithmetic_subexpression(self):
frame_data = {"a": random_state.randn(10), "b": random_state.randn(10)}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df.eval("not_e = sin(a + b)", engine="python", parser="pandas", inplace=True)
modin_df.eval(
"not_e = sin(a + b)", engine="python", parser="pandas", inplace=True
)
# TODO: Use a series equality validator.
df_equals(modin_df, df)
def test_ewm(self):
df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
with pytest.warns(UserWarning):
df.ewm(com=0.5).mean()
def test_expanding(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).expanding()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_explode(self, data):
modin_df = pd.DataFrame(data)
with pytest.warns(UserWarning):
modin_df.explode(modin_df.columns[0])
def test_ffill(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(modin_df.ffill(), test_data.tsframe.ffill())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"method",
["backfill", "bfill", "pad", "ffill", None],
ids=["backfill", "bfill", "pad", "ffill", "None"],
)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("limit", int_arg_values, ids=int_arg_keys)
def test_fillna(self, data, method, axis, limit):
        # We are not testing non-positive `limit` values until pandas-27042 gets fixed.
        # We are not testing axis over rows until pandas-17399 gets fixed.
if limit > 0 and axis != 1 and axis != "columns":
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.fillna(
0, method=method, axis=axis, limit=limit
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.fillna(0, method=method, axis=axis, limit=limit)
else:
modin_result = modin_df.fillna(0, method=method, axis=axis, limit=limit)
df_equals(modin_result, pandas_result)
def test_fillna_sanity(self):
test_data = TestData()
tf = test_data.tsframe
tf.loc[tf.index[:5], "A"] = np.nan
tf.loc[tf.index[-5:], "A"] = np.nan
zero_filled = test_data.tsframe.fillna(0)
modin_df = pd.DataFrame(test_data.tsframe).fillna(0)
df_equals(modin_df, zero_filled)
padded = test_data.tsframe.fillna(method="pad")
modin_df = pd.DataFrame(test_data.tsframe).fillna(method="pad")
df_equals(modin_df, padded)
# mixed type
mf = test_data.mixed_frame
mf.loc[mf.index[5:20], "foo"] = np.nan
mf.loc[mf.index[-10:], "A"] = np.nan
result = test_data.mixed_frame.fillna(value=0)
modin_df = pd.DataFrame(test_data.mixed_frame).fillna(value=0)
df_equals(modin_df, result)
result = test_data.mixed_frame.fillna(method="pad")
modin_df = pd.DataFrame(test_data.mixed_frame).fillna(method="pad")
df_equals(modin_df, result)
pytest.raises(ValueError, test_data.tsframe.fillna)
pytest.raises(ValueError, pd.DataFrame(test_data.tsframe).fillna)
with pytest.raises(ValueError):
pd.DataFrame(test_data.tsframe).fillna(5, method="ffill")
# mixed numeric (but no float16)
mf = test_data.mixed_float.reindex(columns=["A", "B", "D"])
mf.loc[mf.index[-10:], "A"] = np.nan
result = mf.fillna(value=0)
modin_df = pd.DataFrame(mf).fillna(value=0)
df_equals(modin_df, result)
result = mf.fillna(method="pad")
modin_df = pd.DataFrame(mf).fillna(method="pad")
df_equals(modin_df, result)
# TODO: Use this when Arrow issue resolves:
# (https://issues.apache.org/jira/browse/ARROW-2122)
# empty frame
# df = DataFrame(columns=['x'])
# for m in ['pad', 'backfill']:
# df.x.fillna(method=m, inplace=True)
# df.x.fillna(method=m)
# with different dtype
frame_data = [
["a", "a", np.nan, "a"],
["b", "b", np.nan, "b"],
["c", "c", np.nan, "c"],
]
df = pandas.DataFrame(frame_data)
result = df.fillna({2: "foo"})
modin_df = pd.DataFrame(frame_data).fillna({2: "foo"})
df_equals(modin_df, result)
modin_df = pd.DataFrame(df)
df.fillna({2: "foo"}, inplace=True)
modin_df.fillna({2: "foo"}, inplace=True)
df_equals(modin_df, result)
frame_data = {
"Date": [pandas.NaT, pandas.Timestamp("2014-1-1")],
"Date2": [pandas.Timestamp("2013-1-1"), pandas.NaT],
}
df = pandas.DataFrame(frame_data)
result = df.fillna(value={"Date": df["Date2"]})
modin_df = pd.DataFrame(frame_data).fillna(value={"Date": df["Date2"]})
df_equals(modin_df, result)
# TODO: Use this when Arrow issue resolves:
# (https://issues.apache.org/jira/browse/ARROW-2122)
# with timezone
"""
frame_data = {'A': [pandas.Timestamp('2012-11-11 00:00:00+01:00'),
pandas.NaT]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.fillna(method='pad'), df.fillna(method='pad'))
frame_data = {'A': [pandas.NaT,
pandas.Timestamp('2012-11-11 00:00:00+01:00')]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data).fillna(method='bfill')
df_equals(modin_df, df.fillna(method='bfill'))
"""
def test_fillna_downcast(self):
# infer int64 from float64
frame_data = {"a": [1.0, np.nan]}
df =
|
pandas.DataFrame(frame_data)
|
pandas.DataFrame
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import print_function
from abc import abstractmethod
import re
import pandas as pd
import numpy as np
import abc
from .data import Cal, DatasetD
class BaseDFilter(abc.ABC):
"""Dynamic Instruments Filter Abstract class
Users can override this class to construct their own filter
Override __init__ to input filter regulations
Override filter_main to use the regulations to filter instruments
"""
def __init__(self):
pass
@staticmethod
def from_config(config):
"""Construct an instance from config dict.
Parameters
----------
config : dict
dict of config parameters.
"""
raise NotImplementedError("Subclass of BaseDFilter must reimplement `from_config` method")
@abstractmethod
def to_config(self):
"""Construct an instance from config dict.
Returns
----------
dict
return the dict of config parameters.
"""
raise NotImplementedError("Subclass of BaseDFilter must reimplement `to_config` method")
class SeriesDFilter(BaseDFilter):
"""Dynamic Instruments Filter Abstract class to filter a series of certain features
Filters should provide parameters:
- filter start time
- filter end time
- filter rule
Override __init__ to assign a certain rule to filter the series.
    Override _getFilterSeries to use the rule to filter the series and get a dict of {inst => series}, or override filter_main for a more advanced filtering rule.
"""
def __init__(self, fstart_time=None, fend_time=None):
"""Init function for filter base class.
Filter a set of instruments based on a certain rule within a certain period assigned by fstart_time and fend_time.
Parameters
----------
        fstart_time: str
            the time when the filter rule starts filtering the instruments.
        fend_time: str
            the time when the filter rule stops filtering the instruments.
"""
super(SeriesDFilter, self).__init__()
self.filter_start_time = pd.Timestamp(fstart_time) if fstart_time else None
self.filter_end_time = pd.Timestamp(fend_time) if fend_time else None
def _getTimeBound(self, instruments):
"""Get time bound for all instruments.
Parameters
----------
instruments: dict
the dict of instruments in the form {instrument_name => list of timestamp tuple}.
Returns
----------
pd.Timestamp, pd.Timestamp
the lower time bound and upper time bound of all the instruments.
"""
trange = Cal.calendar(freq=self.filter_freq)
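        # Note: ubound is initialized to the earliest calendar date and lbound to the latest,
        # i.e. at opposite extremes, so that the min/max comparisons in the loop below converge
        # to the earliest instrument start (lbound) and the latest instrument end (ubound).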
ubound, lbound = trange[0], trange[-1]
for _, timestamp in instruments.items():
if timestamp:
lbound = timestamp[0][0] if timestamp[0][0] < lbound else lbound
ubound = timestamp[-1][-1] if timestamp[-1][-1] > ubound else ubound
return lbound, ubound
def _toSeries(self, time_range, target_timestamp):
"""Convert the target timestamp to a pandas series of bool value within a time range.
Make the time inside the target_timestamp range TRUE, others FALSE.
Parameters
----------
time_range : D.calendar
the time range of the instruments.
target_timestamp : list
the list of tuple (timestamp, timestamp).
Returns
----------
pd.Series
the series of bool value for an instrument.
"""
# Construct a whole dict of {date => bool}
timestamp_series = {timestamp: False for timestamp in time_range}
# Convert to pd.Series
timestamp_series = pd.Series(timestamp_series)
# Fill the date within target_timestamp with TRUE
for start, end in target_timestamp:
timestamp_series[Cal.calendar(start_time=start, end_time=end, freq=self.filter_freq)] = True
return timestamp_series
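    # Illustrative sketch with hypothetical dates (assuming Cal.calendar yields daily timestamps):
    # for a time_range covering 2020-01-01..2020-01-05 and target_timestamp = [(2020-01-02, 2020-01-03)],
    # _toSeries returns a bool Series that is True on 01-02 and 01-03 and False on the other days.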
def _filterSeries(self, timestamp_series, filter_series):
"""Filter the timestamp series with filter series by using element-wise AND operation of the two series.
Parameters
----------
timestamp_series : pd.Series
the series of bool value indicating existing time.
filter_series : pd.Series
the series of bool value indicating filter feature.
Returns
----------
pd.Series
the series of bool value indicating whether the date satisfies the filter condition and exists in target timestamp.
"""
fstart, fend = list(filter_series.keys())[0], list(filter_series.keys())[-1]
filter_series = filter_series.astype("bool") # Make sure the filter_series is boolean
timestamp_series[fstart:fend] = timestamp_series[fstart:fend] & filter_series
return timestamp_series
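    # Sketch of the intended behavior: only the [fstart, fend] window of timestamp_series is
    # AND-ed with filter_series; timestamps outside the filter window keep their original value.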
def _toTimestamp(self, timestamp_series):
"""Convert the timestamp series to a list of tuple (timestamp, timestamp) indicating a continuous range of TRUE.
Parameters
----------
timestamp_series: pd.Series
the series of bool value after being filtered.
Returns
----------
list
the list of tuple (timestamp, timestamp).
"""
        # sort the timestamp_series according to the timestamps
        # (Series.sort_index is not in-place, so the sorted result must be reassigned)
        timestamp_series = timestamp_series.sort_index()
timestamp = []
_lbool = None
_ltime = None
for _ts, _bool in timestamp_series.items():
            # there are likely to be NaNs where the filter series has no bool value,
            # so treat NaN as False (note that `x == np.nan` is always False, hence pd.isna)
            if pd.isna(_bool):
                _bool = False
if _lbool is None:
_cur_start = _ts
_lbool = _bool
_ltime = _ts
continue
if (_lbool, _bool) == (True, False):
if _cur_start:
timestamp.append((_cur_start, _ltime))
elif (_lbool, _bool) == (False, True):
_cur_start = _ts
_lbool = _bool
_ltime = _ts
if _lbool:
timestamp.append((_cur_start, _ltime))
return timestamp
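    # Illustrative sketch with hypothetical dates: a series that is True on 01-02..01-03,
    # False on 01-04, and True again on 01-05 yields [(01-02, 01-03), (01-05, 01-05)] --
    # one (start, end) tuple per contiguous run of True values.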
def __call__(self, instruments, start_time=None, end_time=None, freq="day"):
"""Call this filter to get filtered instruments list"""
self.filter_freq = freq
return self.filter_main(instruments, start_time, end_time)
@abstractmethod
def _getFilterSeries(self, instruments, fstart, fend):
"""Get filter series based on the rules assigned during the initialization and the input time range.
Parameters
----------
instruments : dict
the dict of instruments to be filtered.
fstart : pd.Timestamp
start time of filter.
fend : pd.Timestamp
end time of filter.
.. note:: fstart/fend indicates the intersection of instruments start/end time and filter start/end time.
Returns
----------
        pd.DataFrame
a series of {pd.Timestamp => bool}.
"""
raise NotImplementedError("Subclass of SeriesDFilter must reimplement `getFilterSeries` method")
def filter_main(self, instruments, start_time=None, end_time=None):
"""Implement this method to filter the instruments.
Parameters
----------
instruments: dict
input instruments to be filtered.
start_time: str
start of the time range.
end_time: str
end of the time range.
Returns
----------
dict
filtered instruments, same structure as input instruments.
"""
lbound, ubound = self._getTimeBound(instruments)
start_time = pd.Timestamp(start_time or lbound)
end_time = pd.Timestamp(end_time or ubound)
_instruments_filtered = {}
_all_calendar = Cal.calendar(start_time=start_time, end_time=end_time, freq=self.filter_freq)
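        # Clamp the filter window to the requested calendar: the `x and max(...) or fallback`
        # idiom below falls back to the calendar bounds when filter_start_time / filter_end_time
        # is None (safe here because the stored timestamps are truthy).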
_filter_calendar = Cal.calendar(
start_time=self.filter_start_time and max(self.filter_start_time, _all_calendar[0]) or _all_calendar[0],
end_time=self.filter_end_time and min(self.filter_end_time, _all_calendar[-1]) or _all_calendar[-1],
freq=self.filter_freq,
)
_all_filter_series = self._getFilterSeries(instruments, _filter_calendar[0], _filter_calendar[-1])
for inst, timestamp in instruments.items():
# Construct a whole map of date
_timestamp_series = self._toSeries(_all_calendar, timestamp)
# Get filter series
if inst in _all_filter_series:
_filter_series = _all_filter_series[inst]
else:
if self.keep:
_filter_series =
|
pd.Series({timestamp: True for timestamp in _filter_calendar})
|
pandas.Series
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
290: pd.Timestamp("2013-02-16 00:00:00"),
291: pd.Timestamp("2013-02-17 00:00:00"),
292: pd.Timestamp("2013-02-18 00:00:00"),
293: pd.Timestamp("2013-02-19 00:00:00"),
294: pd.Timestamp("2013-02-20 00:00:00"),
295: pd.Timestamp("2013-02-21 00:00:00"),
296: pd.Timestamp("2013-02-22 00:00:00"),
297: pd.Timestamp("2013-02-23 00:00:00"),
298: pd.Timestamp("2013-02-24 00:00:00"),
299: pd.Timestamp("2013-02-25 00:00:00"),
300: pd.Timestamp("2013-02-26 00:00:00"),
301: pd.Timestamp("2013-02-27 00:00:00"),
302: pd.Timestamp("2013-02-28 00:00:00"),
303: pd.Timestamp("2013-03-01 00:00:00"),
304: pd.Timestamp("2013-03-02 00:00:00"),
305: pd.Timestamp("2013-03-03 00:00:00"),
306: pd.Timestamp("2013-03-04 00:00:00"),
307: pd.Timestamp("2013-03-05 00:00:00"),
308: pd.Timestamp("2013-03-06 00:00:00"),
309: pd.Timestamp("2013-03-07 00:00:00"),
310: pd.Timestamp("2013-03-08 00:00:00"),
311: pd.Timestamp("2013-03-09 00:00:00"),
312: pd.Timestamp("2013-03-10 00:00:00"),
313: pd.Timestamp("2013-03-11 00:00:00"),
314: pd.Timestamp("2013-03-12 00:00:00"),
315: pd.Timestamp("2013-03-13 00:00:00"),
316: pd.Timestamp("2013-03-14 00:00:00"),
317: pd.Timestamp("2013-03-15 00:00:00"),
318: pd.Timestamp("2013-03-16 00:00:00"),
319: pd.Timestamp("2013-03-17 00:00:00"),
320: pd.Timestamp("2013-03-18 00:00:00"),
321: pd.Timestamp("2013-03-19 00:00:00"),
322: pd.Timestamp("2013-03-20 00:00:00"),
323: pd.Timestamp("2013-03-21 00:00:00"),
324: pd.Timestamp("2013-03-22 00:00:00"),
325: pd.Timestamp("2013-03-23 00:00:00"),
326: pd.Timestamp("2013-03-24 00:00:00"),
327: pd.Timestamp("2013-03-25 00:00:00"),
328: pd.Timestamp("2013-03-26 00:00:00"),
329: pd.Timestamp("2013-03-27 00:00:00"),
330: pd.Timestamp("2013-03-28 00:00:00"),
331: pd.Timestamp("2013-03-29 00:00:00"),
332: pd.Timestamp("2013-03-30 00:00:00"),
333: pd.Timestamp("2013-03-31 00:00:00"),
334: pd.Timestamp("2013-04-01 00:00:00"),
335: pd.Timestamp("2013-04-02 00:00:00"),
336: pd.Timestamp("2013-04-03 00:00:00"),
337: pd.Timestamp("2013-04-04 00:00:00"),
338: pd.Timestamp("2013-04-05 00:00:00"),
339: pd.Timestamp("2013-04-06 00:00:00"),
340: pd.Timestamp("2013-04-07 00:00:00"),
341: pd.Timestamp("2013-04-08 00:00:00"),
342: pd.Timestamp("2013-04-09 00:00:00"),
343: pd.Timestamp("2013-04-10 00:00:00"),
344: pd.Timestamp("2013-04-11 00:00:00"),
345: pd.Timestamp("2013-04-12 00:00:00"),
346: pd.Timestamp("2013-04-13 00:00:00"),
347: pd.Timestamp("2013-04-14 00:00:00"),
348: pd.Timestamp("2013-04-15 00:00:00"),
349: pd.Timestamp("2013-04-16 00:00:00"),
350: pd.Timestamp("2013-04-17 00:00:00"),
351: pd.Timestamp("2013-04-18 00:00:00"),
352: pd.Timestamp("2013-04-19 00:00:00"),
353: pd.Timestamp("2013-04-20 00:00:00"),
354: pd.Timestamp("2013-04-21 00:00:00"),
355: pd.Timestamp("2013-04-22 00:00:00"),
356: pd.Timestamp("2013-04-23 00:00:00"),
357: pd.Timestamp("2013-04-24 00:00:00"),
358: pd.Timestamp("2013-04-25 00:00:00"),
359: pd.Timestamp("2013-04-26 00:00:00"),
360: pd.Timestamp("2013-04-27 00:00:00"),
361: pd.Timestamp("2013-04-28 00:00:00"),
362: pd.Timestamp("2013-04-29 00:00:00"),
363: pd.Timestamp("2013-04-30 00:00:00"),
364: pd.Timestamp("2013-05-01 00:00:00"),
365: pd.Timestamp("2013-05-02 00:00:00"),
366: pd.Timestamp("2013-05-03 00:00:00"),
367: pd.Timestamp("2013-05-04 00:00:00"),
368: pd.Timestamp("2013-05-05 00:00:00"),
369: pd.Timestamp("2013-05-06 00:00:00"),
370: pd.Timestamp("2013-05-07 00:00:00"),
371: pd.Timestamp("2013-05-08 00:00:00"),
372: pd.Timestamp("2013-05-09 00:00:00"),
373: pd.Timestamp("2013-05-10 00:00:00"),
374: pd.Timestamp("2013-05-11 00:00:00"),
375: pd.Timestamp("2013-05-12 00:00:00"),
376: pd.Timestamp("2013-05-13 00:00:00"),
377: pd.Timestamp("2013-05-14 00:00:00"),
378: pd.Timestamp("2013-05-15 00:00:00"),
379: pd.Timestamp("2013-05-16 00:00:00"),
380: pd.Timestamp("2013-05-17 00:00:00"),
381: pd.Timestamp("2013-05-18 00:00:00"),
382: pd.Timestamp("2013-05-19 00:00:00"),
383: pd.Timestamp("2013-05-20 00:00:00"),
384: pd.Timestamp("2013-05-21 00:00:00"),
385: pd.Timestamp("2013-05-22 00:00:00"),
386: pd.Timestamp("2013-05-23 00:00:00"),
387: pd.Timestamp("2013-05-24 00:00:00"),
388: pd.Timestamp("2013-05-25 00:00:00"),
389: pd.Timestamp("2013-05-26 00:00:00"),
390: pd.Timestamp("2013-05-27 00:00:00"),
391: pd.Timestamp("2013-05-28 00:00:00"),
392: pd.Timestamp("2013-05-29 00:00:00"),
393: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.348604308646497,
1: 8.348964254851197,
2: 8.349324201055898,
3: 8.349684147260598,
4: 8.350044093465298,
5: 8.350404039669998,
6: 8.3507639858747,
7: 8.3511239320794,
8: 8.3514838782841,
9: 8.351843824488801,
10: 8.352203770693501,
11: 8.352563716898201,
12: 8.352923663102903,
13: 8.353283609307603,
14: 8.353643555512303,
15: 8.354003501717003,
16: 8.354363447921704,
17: 8.354723394126404,
18: 8.355083340331104,
19: 8.355443286535806,
20: 8.355803232740506,
21: 8.356163178945206,
22: 8.356523125149906,
23: 8.356883071354607,
24: 8.357243017559307,
25: 8.357602963764007,
26: 8.357962909968709,
27: 8.358322856173409,
28: 8.358682802378109,
29: 8.35904274858281,
30: 8.35940269478751,
31: 8.35976264099221,
32: 8.36012258719691,
33: 8.360482533401612,
34: 8.360842479606312,
35: 8.361202425811012,
36: 8.361562372015714,
37: 8.361922318220413,
38: 8.362282264425113,
39: 8.362642210629813,
40: 8.363002156834515,
41: 8.363362103039215,
42: 8.363722049243915,
43: 8.364081995448617,
44: 8.364441941653316,
45: 8.364801887858016,
46: 8.365161834062716,
47: 8.365521780267418,
48: 8.365881726472118,
49: 8.366241672676818,
50: 8.36660161888152,
51: 8.36696156508622,
52: 8.36732151129092,
53: 8.367681457495621,
54: 8.368041403700321,
55: 8.368401349905021,
56: 8.36876129610972,
57: 8.369121242314423,
58: 8.369481188519122,
59: 8.369841134723822,
60: 8.370201080928524,
61: 8.370561027133224,
62: 8.370920973337924,
63: 8.371280919542624,
64: 8.371640865747326,
65: 8.372000811952026,
66: 8.372360758156725,
67: 8.372720704361427,
68: 8.373080650566127,
69: 8.373440596770827,
70: 8.373800542975529,
71: 8.374160489180229,
72: 8.374520435384929,
73: 8.374880381589628,
74: 8.37524032779433,
75: 8.37560027399903,
76: 8.37596022020373,
77: 8.376320166408432,
78: 8.376680112613132,
79: 8.377040058817832,
80: 8.377400005022531,
81: 8.377759951227233,
82: 8.378119897431933,
83: 8.378479843636633,
84: 8.378839789841335,
85: 8.379199736046035,
86: 8.379559682250735,
87: 8.379919628455436,
88: 8.380279574660136,
89: 8.380639520864836,
90: 8.380999467069536,
91: 8.381359413274238,
92: 8.381719359478938,
93: 8.382079305683638,
94: 8.38243925188834,
95: 8.38279919809304,
96: 8.38315914429774,
97: 8.383519090502439,
98: 8.38387903670714,
99: 8.38423898291184,
100: 8.38459892911654,
101: 8.384958875321242,
102: 8.385318821525942,
103: 8.385678767730642,
104: 8.386038713935344,
105: 8.386398660140044,
106: 8.386758606344744,
107: 8.387118552549444,
108: 8.387478498754145,
109: 8.387838444958845,
110: 8.388198391163545,
111: 8.388558337368247,
112: 8.388918283572947,
113: 8.389278229777647,
114: 8.389638175982347,
115: 8.389998122187048,
116: 8.390358068391748,
117: 8.390718014596448,
118: 8.39107796080115,
119: 8.39143790700585,
120: 8.39179785321055,
121: 8.392157799415251,
122: 8.392517745619951,
123: 8.392877691824651,
124: 8.393237638029351,
125: 8.393597584234053,
126: 8.393957530438753,
127: 8.394317476643453,
128: 8.394677422848154,
129: 8.395037369052854,
130: 8.395397315257554,
131: 8.395757261462254,
132: 8.396117207666956,
133: 8.396477153871656,
134: 8.396837100076356,
135: 8.397197046281057,
136: 8.397556992485757,
137: 8.397916938690457,
138: 8.398276884895157,
139: 8.398636831099859,
140: 8.398996777304559,
141: 8.399356723509259,
142: 8.39971666971396,
143: 8.40007661591866,
144: 8.40043656212336,
145: 8.400796508328062,
146: 8.401156454532762,
147: 8.401516400737462,
148: 8.401876346942162,
149: 8.402236293146863,
150: 8.402596239351563,
151: 8.402956185556263,
152: 8.403316131760965,
153: 8.403676077965665,
154: 8.404036024170365,
155: 8.404395970375065,
156: 8.404755916579767,
157: 8.405115862784466,
158: 8.405475808989166,
159: 8.405835755193868,
160: 8.406195701398568,
161: 8.406555647603268,
162: 8.40691559380797,
163: 8.40727554001267,
164: 8.40763548621737,
165: 8.40799543242207,
166: 8.408355378626771,
167: 8.408715324831471,
168: 8.409075271036171,
169: 8.409435217240873,
170: 8.409795163445573,
171: 8.410155109650272,
172: 8.410515055854972,
173: 8.410875002059674,
174: 8.411234948264374,
175: 8.411594894469074,
176: 8.411954840673776,
177: 8.412314786878476,
178: 8.412674733083175,
179: 8.413034679287877,
180: 8.413394625492577,
181: 8.413754571697277,
182: 8.414114517901977,
183: 8.414474464106679,
184: 8.414834410311379,
185: 8.415194356516078,
186: 8.41555430272078,
187: 8.41591424892548,
188: 8.41627419513018,
189: 8.41663414133488,
190: 8.416994087539582,
191: 8.417354033744282,
192: 8.417713979948982,
193: 8.418073926153683,
194: 8.418433872358383,
195: 8.418793818563083,
196: 8.419153764767785,
197: 8.419513710972485,
198: 8.419873657177185,
199: 8.420233603381885,
200: 8.420593549586586,
201: 8.420953495791286,
202: 8.421313441995986,
203: 8.421673388200688,
204: 8.422033334405388,
205: 8.422393280610088,
206: 8.422753226814788,
207: 8.42311317301949,
208: 8.42347311922419,
209: 8.423833065428889,
210: 8.42419301163359,
211: 8.42455295783829,
212: 8.42491290404299,
213: 8.42527285024769,
214: 8.425632796452392,
215: 8.425992742657092,
216: 8.426352688861792,
217: 8.426712635066494,
218: 8.427072581271194,
219: 8.427432527475894,
220: 8.427792473680595,
221: 8.428152419885295,
222: 8.428512366089995,
223: 8.428872312294695,
224: 8.429232258499397,
225: 8.429592204704097,
226: 8.429952150908797,
227: 8.430312097113498,
228: 8.430672043318198,
229: 8.431031989522898,
230: 8.431391935727598,
231: 8.4317518819323,
232: 8.432111828137,
233: 8.4324717743417,
234: 8.432831720546401,
235: 8.433191666751101,
236: 8.433551612955801,
237: 8.433911559160503,
238: 8.434271505365203,
239: 8.434631451569903,
240: 8.434991397774603,
241: 8.435351343979304,
242: 8.435711290184004,
243: 8.436071236388704,
244: 8.436431182593406,
245: 8.436791128798106,
246: 8.437151075002806,
247: 8.437511021207506,
248: 8.437870967412207,
249: 8.438230913616907,
250: 8.438590859821607,
251: 8.438950806026309,
252: 8.439310752231009,
253: 8.439670698435709,
254: 8.44003064464041,
255: 8.44039059084511,
256: 8.44075053704981,
257: 8.44111048325451,
258: 8.441470429459212,
259: 8.441830375663912,
260: 8.442190321868612,
261: 8.442550268073314,
262: 8.442910214278013,
263: 8.443270160482713,
264: 8.443630106687413,
265: 8.443990052892115,
266: 8.444349999096815,
267: 8.444709945301515,
268: 8.445069891506217,
269: 8.445429837710916,
270: 8.445789783915616,
271: 8.446149730120318,
272: 8.446509676325018,
273: 8.446869622529718,
274: 8.447229568734418,
275: 8.44758951493912,
276: 8.44794946114382,
277: 8.44830940734852,
278: 8.448669353553221,
279: 8.449029299757921,
280: 8.449389245962621,
281: 8.449749192167321,
282: 8.450109138372023,
283: 8.450469084576723,
284: 8.450829030781422,
285: 8.451188976986124,
286: 8.451548923190824,
287: 8.451908869395524,
288: 8.452268815600226,
289: 8.452628761804926,
290: 8.452988708009626,
291: 8.453348654214325,
292: 8.453708600419027,
293: 8.454068546623727,
294: 8.454428492828427,
295: 8.454788439033129,
296: 8.455148385237829,
297: 8.455508331442529,
298: 8.455868277647228,
299: 8.45622822385193,
300: 8.45658817005663,
301: 8.45694811626133,
302: 8.457308062466032,
303: 8.457668008670732,
304: 8.458027954875432,
305: 8.458387901080131,
306: 8.458747847284833,
307: 8.459107793489533,
308: 8.459467739694233,
309: 8.459827685898935,
310: 8.460187632103635,
311: 8.460547578308335,
312: 8.460907524513036,
313: 8.461267470717736,
314: 8.461627416922436,
315: 8.461987363127136,
316: 8.462347309331838,
317: 8.462707255536538,
318: 8.463067201741238,
319: 8.46342714794594,
320: 8.46378709415064,
321: 8.46414704035534,
322: 8.464506986560039,
323: 8.46486693276474,
324: 8.46522687896944,
325: 8.46558682517414,
326: 8.465946771378842,
327: 8.466306717583542,
328: 8.466666663788242,
329: 8.467026609992944,
330: 8.467386556197644,
331: 8.467746502402344,
332: 8.468106448607044,
333: 8.468466394811745,
334: 8.468826341016445,
335: 8.469186287221145,
336: 8.469546233425847,
337: 8.469906179630547,
338: 8.470266125835247,
339: 8.470626072039947,
340: 8.470986018244648,
341: 8.471345964449348,
342: 8.471705910654048,
343: 8.47206585685875,
344: 8.47242580306345,
345: 8.47278574926815,
346: 8.473145695472851,
347: 8.473505641677551,
348: 8.473865587882251,
349: 8.474225534086951,
350: 8.474585480291653,
351: 8.474945426496353,
352: 8.475305372701053,
353: 8.475665318905754,
354: 8.476025265110454,
355: 8.476385211315154,
356: 8.476745157519854,
357: 8.477105103724556,
358: 8.477465049929256,
359: 8.477824996133956,
360: 8.478184942338657,
361: 8.478544888543357,
362: 8.478904834748057,
363: 8.479264780952759,
364: 8.479624727157459,
365: 8.479984673362159,
366: 8.480344619566859,
367: 8.48070456577156,
368: 8.48106451197626,
369: 8.48142445818096,
370: 8.481784404385662,
371: 8.482144350590362,
372: 8.482504296795062,
373: 8.482864242999762,
374: 8.483224189204464,
375: 8.483584135409163,
376: 8.483944081613863,
377: 8.484304027818565,
378: 8.484663974023265,
379: 8.485023920227965,
380: 8.485383866432667,
381: 8.485743812637367,
382: 8.486103758842066,
383: 8.486463705046766,
384: 8.486823651251468,
385: 8.487183597456168,
386: 8.487543543660868,
387: 8.48790348986557,
388: 8.48826343607027,
389: 8.48862338227497,
390: 8.48898332847967,
391: 8.489343274684371,
392: 8.489703220889071,
393: 8.490063167093771,
},
"fcst_lower": {
0: -np.inf,
1: -np.inf,
2: -np.inf,
3: -np.inf,
4: -np.inf,
5: -np.inf,
6: -np.inf,
7: -np.inf,
8: -np.inf,
9: -np.inf,
10: -np.inf,
11: -np.inf,
12: -np.inf,
13: -np.inf,
14: -np.inf,
15: -np.inf,
16: -np.inf,
17: -np.inf,
18: -np.inf,
19: -np.inf,
20: -np.inf,
21: -np.inf,
22: -np.inf,
23: -np.inf,
24: -np.inf,
25: -np.inf,
26: -np.inf,
27: -np.inf,
28: -np.inf,
29: -np.inf,
30: -np.inf,
31: -np.inf,
32: -np.inf,
33: -np.inf,
34: -np.inf,
35: -np.inf,
36: -np.inf,
37: -np.inf,
38: -np.inf,
39: -np.inf,
40: -np.inf,
41: -np.inf,
42: -np.inf,
43: -np.inf,
44: -np.inf,
45: -np.inf,
46: -np.inf,
47: -np.inf,
48: -np.inf,
49: -np.inf,
50: -np.inf,
51: -np.inf,
52: -np.inf,
53: -np.inf,
54: -np.inf,
55: -np.inf,
56: -np.inf,
57: -np.inf,
58: -np.inf,
59: -np.inf,
60: -np.inf,
61: -np.inf,
62: -np.inf,
63: -np.inf,
64: -np.inf,
65: -np.inf,
66: -np.inf,
67: -np.inf,
68: -np.inf,
69: -np.inf,
70: -np.inf,
71: -np.inf,
72: -np.inf,
73: -np.inf,
74: -np.inf,
75: -np.inf,
76: -np.inf,
77: -np.inf,
78: -np.inf,
79: -np.inf,
80: -np.inf,
81: -np.inf,
82: -np.inf,
83: -np.inf,
84: -np.inf,
85: -np.inf,
86: -np.inf,
87: -np.inf,
88: -np.inf,
89: -np.inf,
90: -np.inf,
91: -np.inf,
92: -np.inf,
93: -np.inf,
94: -np.inf,
95: -np.inf,
96: -np.inf,
97: -np.inf,
98: -np.inf,
99: -np.inf,
100: -np.inf,
101: -np.inf,
102: -np.inf,
103: -np.inf,
104: -np.inf,
105: -np.inf,
106: -np.inf,
107: -np.inf,
108: -np.inf,
109: -np.inf,
110: -np.inf,
111: -np.inf,
112: -np.inf,
113: -np.inf,
114: -np.inf,
115: -np.inf,
116: -np.inf,
117: -np.inf,
118: -np.inf,
119: -np.inf,
120: -np.inf,
121: -np.inf,
122: -np.inf,
123: -np.inf,
124: -np.inf,
125: -np.inf,
126: -np.inf,
127: -np.inf,
128: -np.inf,
129: -np.inf,
130: -np.inf,
131: -np.inf,
132: -np.inf,
133: -np.inf,
134: -np.inf,
135: -np.inf,
136: -np.inf,
137: -np.inf,
138: -np.inf,
139: -np.inf,
140: -np.inf,
141: -np.inf,
142: -np.inf,
143: -np.inf,
144: -np.inf,
145: -np.inf,
146: -np.inf,
147: -np.inf,
148: -np.inf,
149: -np.inf,
150: -np.inf,
151: -np.inf,
152: -np.inf,
153: -np.inf,
154: -np.inf,
155: -np.inf,
156: -np.inf,
157: -np.inf,
158: -np.inf,
159: -np.inf,
160: -np.inf,
161: -np.inf,
162: -np.inf,
163: -np.inf,
164: -np.inf,
165: -np.inf,
166: -np.inf,
167: -np.inf,
168: -np.inf,
169: -np.inf,
170: -np.inf,
171: -np.inf,
172: -np.inf,
173: -np.inf,
174: -np.inf,
175: -np.inf,
176: -np.inf,
177: -np.inf,
178: -np.inf,
179: -np.inf,
180: -np.inf,
181: -np.inf,
182: -np.inf,
183: -np.inf,
184: -np.inf,
185: -np.inf,
186: -np.inf,
187: -np.inf,
188: -np.inf,
189: -np.inf,
190: -np.inf,
191: -np.inf,
192: -np.inf,
193: -np.inf,
194: -np.inf,
195: -np.inf,
196: -np.inf,
197: -np.inf,
198: -np.inf,
199: -np.inf,
200: -np.inf,
201: -np.inf,
202: -np.inf,
203: -np.inf,
204: -np.inf,
205: -np.inf,
206: -np.inf,
207: -np.inf,
208: -np.inf,
209: -np.inf,
210: -np.inf,
211: -np.inf,
212: -np.inf,
213: -np.inf,
214: -np.inf,
215: -np.inf,
216: -np.inf,
217: -np.inf,
218: -np.inf,
219: -np.inf,
220: -np.inf,
221: -np.inf,
222: -np.inf,
223: -np.inf,
224: -np.inf,
225: -np.inf,
226: -np.inf,
227: -np.inf,
228: -np.inf,
229: -np.inf,
230: -np.inf,
231: -np.inf,
232: -np.inf,
233: -np.inf,
234: -np.inf,
235: -np.inf,
236: -np.inf,
237: -np.inf,
238: -np.inf,
239: -np.inf,
240: -np.inf,
241: -np.inf,
242: -np.inf,
243: -np.inf,
244: -np.inf,
245: -np.inf,
246: -np.inf,
247: -np.inf,
248: -np.inf,
249: -np.inf,
250: -np.inf,
251: -np.inf,
252: -np.inf,
253: -np.inf,
254: -np.inf,
255: -np.inf,
256: -np.inf,
257: -np.inf,
258: -np.inf,
259: -np.inf,
260: -np.inf,
261: -np.inf,
262: -np.inf,
263: -np.inf,
264: -np.inf,
265: -np.inf,
266: -np.inf,
267: -np.inf,
268: -np.inf,
269: -np.inf,
270: -np.inf,
271: -np.inf,
272: -np.inf,
273: -np.inf,
274: -np.inf,
275: -np.inf,
276: -np.inf,
277: -np.inf,
278: -np.inf,
279: -np.inf,
280: -np.inf,
281: -np.inf,
282: -np.inf,
283: -np.inf,
284: -np.inf,
285: -np.inf,
286: -np.inf,
287: -np.inf,
288: -np.inf,
289: -np.inf,
290: -np.inf,
291: -np.inf,
292: -np.inf,
293: -np.inf,
294: -np.inf,
295: -np.inf,
296: -np.inf,
297: -np.inf,
298: -np.inf,
299: -np.inf,
300: -np.inf,
301: -np.inf,
302: -np.inf,
303: -np.inf,
304: -np.inf,
305: -np.inf,
306: -np.inf,
307: -np.inf,
308: -np.inf,
309: -np.inf,
310: -np.inf,
311: -np.inf,
312: -np.inf,
313: -np.inf,
314: -np.inf,
315: -np.inf,
316: -np.inf,
317: -np.inf,
318: -np.inf,
319: -np.inf,
320: -np.inf,
321: -np.inf,
322: -np.inf,
323: -np.inf,
324: -np.inf,
325: -np.inf,
326: -np.inf,
327: -np.inf,
328: -np.inf,
329: -np.inf,
330: -np.inf,
331: -np.inf,
332: -np.inf,
333: -np.inf,
334: -np.inf,
335: -np.inf,
336: -np.inf,
337: -np.inf,
338: -np.inf,
339: -np.inf,
340: -np.inf,
341: -np.inf,
342: -np.inf,
343: -np.inf,
344: -np.inf,
345: -np.inf,
346: -np.inf,
347: -np.inf,
348: -np.inf,
349: -np.inf,
350: -np.inf,
351: -np.inf,
352: -np.inf,
353: -np.inf,
354: -np.inf,
355: -np.inf,
356: -np.inf,
357: -np.inf,
358: -np.inf,
359: -np.inf,
360: -np.inf,
361: -np.inf,
362: -np.inf,
363: -np.inf,
364: -np.inf,
365: -np.inf,
366: -np.inf,
367: -np.inf,
368: -np.inf,
369: -np.inf,
370: -np.inf,
371: -np.inf,
372: -np.inf,
373: -np.inf,
374: -np.inf,
375: -np.inf,
376: -np.inf,
377: -np.inf,
378: -np.inf,
379: -np.inf,
380: -np.inf,
381: -np.inf,
382: -np.inf,
383: -np.inf,
384: -np.inf,
385: -np.inf,
386: -np.inf,
387: -np.inf,
388: -np.inf,
389: -np.inf,
390: -np.inf,
391: -np.inf,
392: -np.inf,
393: -np.inf,
},
"fcst_upper": {
0: np.inf,
1: np.inf,
2: np.inf,
3: np.inf,
4: np.inf,
5: np.inf,
6: np.inf,
7: np.inf,
8: np.inf,
9: np.inf,
10: np.inf,
11: np.inf,
12: np.inf,
13: np.inf,
14: np.inf,
15: np.inf,
16: np.inf,
17: np.inf,
18: np.inf,
19: np.inf,
20: np.inf,
21: np.inf,
22: np.inf,
23: np.inf,
24: np.inf,
25: np.inf,
26: np.inf,
27: np.inf,
28: np.inf,
29: np.inf,
30: np.inf,
31: np.inf,
32: np.inf,
33: np.inf,
34: np.inf,
35: np.inf,
36: np.inf,
37: np.inf,
38: np.inf,
39: np.inf,
40: np.inf,
41: np.inf,
42: np.inf,
43: np.inf,
44: np.inf,
45: np.inf,
46: np.inf,
47: np.inf,
48: np.inf,
49: np.inf,
50: np.inf,
51: np.inf,
52: np.inf,
53: np.inf,
54: np.inf,
55: np.inf,
56: np.inf,
57: np.inf,
58: np.inf,
59: np.inf,
60: np.inf,
61: np.inf,
62: np.inf,
63: np.inf,
64: np.inf,
65: np.inf,
66: np.inf,
67: np.inf,
68: np.inf,
69: np.inf,
70: np.inf,
71: np.inf,
72: np.inf,
73: np.inf,
74: np.inf,
75: np.inf,
76: np.inf,
77: np.inf,
78: np.inf,
79: np.inf,
80: np.inf,
81: np.inf,
82: np.inf,
83: np.inf,
84: np.inf,
85: np.inf,
86: np.inf,
87: np.inf,
88: np.inf,
89: np.inf,
90: np.inf,
91: np.inf,
92: np.inf,
93: np.inf,
94: np.inf,
95: np.inf,
96: np.inf,
97: np.inf,
98: np.inf,
99: np.inf,
100: np.inf,
101: np.inf,
102: np.inf,
103: np.inf,
104: np.inf,
105: np.inf,
106: np.inf,
107: np.inf,
108: np.inf,
109: np.inf,
110: np.inf,
111: np.inf,
112: np.inf,
113: np.inf,
114: np.inf,
115: np.inf,
116: np.inf,
117: np.inf,
118: np.inf,
119: np.inf,
120: np.inf,
121: np.inf,
122: np.inf,
123: np.inf,
124: np.inf,
125: np.inf,
126: np.inf,
127: np.inf,
128: np.inf,
129: np.inf,
130: np.inf,
131: np.inf,
132: np.inf,
133: np.inf,
134: np.inf,
135: np.inf,
136: np.inf,
137: np.inf,
138: np.inf,
139: np.inf,
140: np.inf,
141: np.inf,
142: np.inf,
143: np.inf,
144: np.inf,
145: np.inf,
146: np.inf,
147: np.inf,
148: np.inf,
149: np.inf,
150: np.inf,
151: np.inf,
152: np.inf,
153: np.inf,
154: np.inf,
155: np.inf,
156: np.inf,
157: np.inf,
158: np.inf,
159: np.inf,
160: np.inf,
161: np.inf,
162: np.inf,
163: np.inf,
164: np.inf,
165: np.inf,
166: np.inf,
167: np.inf,
168: np.inf,
169: np.inf,
170: np.inf,
171: np.inf,
172: np.inf,
173: np.inf,
174: np.inf,
175: np.inf,
176: np.inf,
177: np.inf,
178: np.inf,
179: np.inf,
180: np.inf,
181: np.inf,
182: np.inf,
183: np.inf,
184: np.inf,
185: np.inf,
186: np.inf,
187: np.inf,
188: np.inf,
189: np.inf,
190: np.inf,
191: np.inf,
192: np.inf,
193: np.inf,
194: np.inf,
195: np.inf,
196: np.inf,
197: np.inf,
198: np.inf,
199: np.inf,
200: np.inf,
201: np.inf,
202: np.inf,
203: np.inf,
204: np.inf,
205: np.inf,
206: np.inf,
207: np.inf,
208: np.inf,
209: np.inf,
210: np.inf,
211: np.inf,
212: np.inf,
213: np.inf,
214: np.inf,
215: np.inf,
216: np.inf,
217: np.inf,
218: np.inf,
219: np.inf,
220: np.inf,
221: np.inf,
222: np.inf,
223: np.inf,
224: np.inf,
225: np.inf,
226: np.inf,
227: np.inf,
228: np.inf,
229: np.inf,
230: np.inf,
231: np.inf,
232: np.inf,
233: np.inf,
234: np.inf,
235: np.inf,
236: np.inf,
237: np.inf,
238: np.inf,
239: np.inf,
240: np.inf,
241: np.inf,
242: np.inf,
243: np.inf,
244: np.inf,
245: np.inf,
246: np.inf,
247: np.inf,
248: np.inf,
249: np.inf,
250: np.inf,
251: np.inf,
252: np.inf,
253: np.inf,
254: np.inf,
255: np.inf,
256: np.inf,
257: np.inf,
258: np.inf,
259: np.inf,
260: np.inf,
261: np.inf,
262: np.inf,
263: np.inf,
264: np.inf,
265: np.inf,
266: np.inf,
267: np.inf,
268: np.inf,
269: np.inf,
270: np.inf,
271: np.inf,
272: np.inf,
273: np.inf,
274: np.inf,
275: np.inf,
276: np.inf,
277: np.inf,
278: np.inf,
279: np.inf,
280: np.inf,
281: np.inf,
282: np.inf,
283: np.inf,
284: np.inf,
285: np.inf,
286: np.inf,
287: np.inf,
288: np.inf,
289: np.inf,
290: np.inf,
291: np.inf,
292: np.inf,
293: np.inf,
294: np.inf,
295: np.inf,
296: np.inf,
297: np.inf,
298: np.inf,
299: np.inf,
300: np.inf,
301: np.inf,
302: np.inf,
303: np.inf,
304: np.inf,
305: np.inf,
306: np.inf,
307: np.inf,
308: np.inf,
309: np.inf,
310: np.inf,
311: np.inf,
312: np.inf,
313: np.inf,
314: np.inf,
315: np.inf,
316: np.inf,
317: np.inf,
318: np.inf,
319: np.inf,
320: np.inf,
321: np.inf,
322: np.inf,
323: np.inf,
324: np.inf,
325: np.inf,
326: np.inf,
327: np.inf,
328: np.inf,
329: np.inf,
330: np.inf,
331: np.inf,
332: np.inf,
333: np.inf,
334: np.inf,
335: np.inf,
336: np.inf,
337: np.inf,
338: np.inf,
339: np.inf,
340: np.inf,
341: np.inf,
342: np.inf,
343: np.inf,
344: np.inf,
345: np.inf,
346: np.inf,
347: np.inf,
348: np.inf,
349: np.inf,
350: np.inf,
351: np.inf,
352: np.inf,
353: np.inf,
354: np.inf,
355: np.inf,
356: np.inf,
357: np.inf,
358: np.inf,
359: np.inf,
360: np.inf,
361: np.inf,
362: np.inf,
363: np.inf,
364: np.inf,
365: np.inf,
366: np.inf,
367: np.inf,
368: np.inf,
369: np.inf,
370: np.inf,
371: np.inf,
372: np.inf,
373: np.inf,
374: np.inf,
375: np.inf,
376: np.inf,
377: np.inf,
378: np.inf,
379: np.inf,
380: np.inf,
381: np.inf,
382: np.inf,
383: np.inf,
384: np.inf,
385: np.inf,
386: np.inf,
387: np.inf,
388: np.inf,
389: np.inf,
390: np.inf,
391: np.inf,
392: np.inf,
393: np.inf,
},
}
)
PEYTON_FCST_LINEAR_INVALID_NEG_ONE = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
            111: pd.Timestamp("2012-08-21 00:00:00"),
import os
import json
import numpy as np
import pandas as pd
from abc import ABC, abstractmethod
from scipy.integrate import solve_ivp
from models.noise import build_noise
from models.stimulation import build_stimulator
from models.oscillators import build_oscillators, build_connection, build_sensitivity
class Network(ABC):
"""
Base class for any collection of connected, interacting elements
"""
def __init__(self):
self.nodes = []
self.time_course = None
self.summary = None
self.specification = None
@abstractmethod
def build(self, specification):
pass
@abstractmethod
def build_equation(self):
"""Return a function that returns the vector dy/dt at any time step"""
pass
@abstractmethod
def get_initial(self):
"""Return the initial conditions imposed on this cluster"""
pass
@abstractmethod
def log_solution(self, solution):
for node in self.nodes:
try:
node.ready_log()
except AttributeError:
pass
def solve(self, t_span, max_step, method='RK45'):
equation = self.build_equation()
initial = self.get_initial()
solution = solve_ivp(equation, t_span, initial, max_step=max_step, method=method)
return solution
class Cluster(Network):
"""
A special kind of network intended to be part of a larger collection of networks
"""
def __init__(self):
self.oscillators = []
self.stimulator = None
self.noise = None
super(Cluster, self).__init__()
def build(self, specification):
self.specification = specification
self.oscillators = build_oscillators(specification['oscillators'])
self.stimulator = build_stimulator(specification['stimulator'])
self.noise = build_noise(specification['noise'])
connection = build_connection(specification['connection'])
sensitivity = build_sensitivity(specification['sensitivity'])
for oscillator in self.oscillators:
oscillator.build(len(self.oscillators), connection, sensitivity, self.stimulator, self.noise)
self.nodes.extend(self.oscillators)
self.nodes.append(self.stimulator)
self.nodes.append(self.noise)
def get_initial(self):
return [oscillator.initial_phi for oscillator in self.oscillators]
def build_equation(self):
stim = self.stimulator
noise = self.noise
def equation(t, phases):
stim.update(t)
noise.update(t)
return [
o.equation(t, phases[i], phases)
for i, o in enumerate(self.oscillators)
]
return equation
def log_solution(self, solution):
"""Convert the ode solver solution in a format that is ready to be saved"""
self.time_course = solution.t
phases = np.mod(solution.y.T, 2 * np.pi)
if self.stimulator:
self.stimulator.ready_log(self.time_course)
if self.noise:
self.noise.ready_log(self.time_course)
phase_summary = {'Time': self.time_course}
for i, node in enumerate(self.oscillators):
node.ready_log(self.time_course, phases[:, i])
phase_summary[f'Phase {i}'] = node.summary['Phase']
        self.summary = pd.DataFrame.from_dict(phase_summary)
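# Usage sketch (illustrative, not part of the original module). The file name and
# the structure of the specification are assumptions based on what Cluster.build()
# reads: 'oscillators', 'stimulator', 'noise', 'connection' and 'sensitivity' sections.
if __name__ == "__main__":
    with open("cluster_spec.json") as spec_file:  # hypothetical specification file
        spec = json.load(spec_file)
    cluster = Cluster()
    cluster.build(spec)
    # Integrate the phase equations from t=0 to t=10 with a bounded step size.
    solution = cluster.solve(t_span=(0.0, 10.0), max_step=0.01)
    cluster.log_solution(solution)
    # summary is a DataFrame with a 'Time' column and one 'Phase i' column per oscillator.
    cluster.summary.to_csv("cluster_phases.csv", index=False)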
#!/usr/bin/env python3
""" Base class to analyze a JETSCAPE output file
You should create a user class that inherits from this one. See analyze_events_STAT.py for an example.
The output dir should contain a JETSCAPE output file in parquet format
See README for pre-requisites.
.. codeauthor:: <NAME> <<EMAIL>>, UC Berkeley
"""
from __future__ import print_function
# General
import os
import yaml
import time
from numba import jit
# Analysis
import itertools
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import numpy as np
import ROOT
from pathlib import Path
# Fastjet via python (from external library heppy)
import fjext
from jetscape_analysis.base import common_base
################################################################
class AnalyzeJetscapeEvents_BaseSTAT(common_base.CommonBase):
# ---------------------------------------------------------------
# Constructor
# ---------------------------------------------------------------
def __init__(self, config_file="", input_file="", output_dir="", **kwargs):
super(AnalyzeJetscapeEvents_BaseSTAT, self).__init__(**kwargs)
self.config_file = config_file
self.input_file_hadrons = input_file
self.output_dir = Path(output_dir)
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
with open(self.config_file, 'r') as f:
config = yaml.safe_load(f)
if 'n_event_max' in config:
self.n_event_max = config['n_event_max']
else:
self.n_event_max = -1
# ---------------------------------------------------------------
# Main processing function
# ---------------------------------------------------------------
def analyze_jetscape_events(self):
print('Analyzing events ...')
# Initialize output objects
self.initialize_output_objects()
# Read chunk of events into a dataframe
# Fields: particle_ID, status, E, px, py, pz
df_event_chunk = pd.read_parquet(self.input_file_hadrons)
if self.n_event_max < 0:
self.n_event_max = df_event_chunk.shape[0]
# Iterate through events
self.analyze_event_chunk(df_event_chunk)
# Write analysis task output to ROOT file
self.write_output_objects()
# ---------------------------------------------------------------
# Analyze event chunk
# ---------------------------------------------------------------
def analyze_event_chunk(self, df_event_chunk):
# Loop through events
start = time.time()
for i,event in df_event_chunk.iterrows():
if i % 1000 == 0:
print(f'event: {i} (time elapsed: {time.time() - start} s)')
if i > self.n_event_max:
return
# Store dictionary of all observables for the event
self.observable_dict_event = {}
# Call user-defined function to analyze event
self.analyze_event(event)
# Fill the observables dict to a new entry in the event list
if self.event_has_entries(self.observable_dict_event):
# Fill event cross-section weight
self.observable_dict_event['event_weight'] = event['event_weight']
self.output_event_list.append(self.observable_dict_event)
# Get total cross-section (same for all events at this point)
if i == 0:
self.cross_section_dict['cross_section'] = event['cross_section']
self.cross_section_dict['cross_section_error'] = event['cross_section_error']
# ---------------------------------------------------------------
# Initialize output objects
# ---------------------------------------------------------------
def initialize_output_objects(self):
# Initialize list to store observables
# Each entry in the list stores a dict for a given event
self.output_event_list = []
# Store also the total cross-section (one number per file)
self.cross_section_dict = {}
# ---------------------------------------------------------------
# Save output event list into a dataframe
# ---------------------------------------------------------------
def event_has_entries(self, event_dict):
return bool([obs for obs in event_dict.values() if obs != []])
# ---------------------------------------------------------------
# Save output event list into a dataframe
# ---------------------------------------------------------------
def write_output_objects(self):
# Convert to pandas, and then arrow.
self.output_dataframe = pd.DataFrame(self.output_event_list)
table = pa.Table.from_pandas(self.output_dataframe)
# Write to parquet
# Determine the types for improved compression when writing
# See writing to parquet in the final state hadrons parser for more info.
float_types = [np.float32, np.float64]
float_columns = list(self.output_dataframe.select_dtypes(include=float_types).keys())
other_columns = list(self.output_dataframe.select_dtypes(exclude=float_types).keys())
# NOTE: As of 27 April 2021, this doesn't really work right because too many columns
# are of the "object" type. We may need to revise the output format to optimize
# the output size.
print(f"float_columns: {float_columns}")
print(f"other_columns: {other_columns}")
pq.write_table(
table, self.output_dir / self.output_file, compression="zstd",
use_dictionary=other_columns,
use_byte_stream_split=float_columns,
)
print(self.output_dataframe)
# Write cross-section to separate file
        cross_section_dataframe = pd.DataFrame(self.cross_section_dict, index=[0])
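# ---------------------------------------------------------------
# Illustrative sketch (not part of the original file): a minimal user class
# inheriting from AnalyzeJetscapeEvents_BaseSTAT. The observable name and the
# output file name below are assumptions; the base class only requires that a
# subclass define analyze_event() and set self.output_file before writing.
# ---------------------------------------------------------------
class AnalyzeJetscapeEvents_Example(AnalyzeJetscapeEvents_BaseSTAT):

    def __init__(self, config_file="", input_file="", output_dir="", **kwargs):
        super(AnalyzeJetscapeEvents_Example, self).__init__(
            config_file=config_file, input_file=input_file, output_dir=output_dir, **kwargs)
        self.output_file = "observables_example.parquet"  # consumed by write_output_objects()

    def analyze_event(self, event):
        # Record one toy observable per event: the number of final-state entries,
        # taken here as the length of the particle_ID array in the event row.
        self.observable_dict_event["n_particles"] = [len(event["particle_ID"])]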
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data loading and preprocessing functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import os
import zipfile
from PIL import Image
import numpy as np
import pandas as pd
from six.moves import urllib
from sklearn import preprocessing
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1.keras import backend
from tensorflow.compat.v1.keras import datasets
from sklearn.model_selection import train_test_split
import dvrl_utils
def load_tabular_data(data_name, dict_no, noise_rate):
"""Loads Adult Income and Blog Feedback datasets.
This module loads the two tabular datasets and saves train.csv, valid.csv and
test.csv files under data_files directory.
UCI Adult data link: https://archive.ics.uci.edu/ml/datasets/Adult
UCI Blog data link: https://archive.ics.uci.edu/ml/datasets/BlogFeedback
If noise_rate > 0.0, adds noise on the datasets.
Then, saves train.csv, valid.csv, test.csv on './data_files/' directory
Args:
data_name: 'adult' or 'blog'
dict_no: training and validation set numbers
noise_rate: label corruption ratio
Returns:
noise_idx: indices of noisy samples
"""
# Loads datasets from links
uci_base_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/'
# Adult Income dataset
if data_name == 'adult':
train_url = uci_base_url + 'adult/adult.data'
test_url = uci_base_url + 'adult/adult.test'
data_train = pd.read_csv(train_url, header=None)
data_test = pd.read_csv(test_url, skiprows=1, header=None)
df = pd.concat((data_train, data_test), axis=0)
# Column names
df.columns = ['Age', 'WorkClass', 'fnlwgt', 'Education', 'EducationNum',
'MaritalStatus', 'Occupation', 'Relationship', 'Race',
'Gender', 'CapitalGain', 'CapitalLoss', 'HoursPerWeek',
'NativeCountry', 'Income']
# Creates binary labels
df['Income'] = df['Income'].map({' <=50K': 0, ' >50K': 1,
' <=50K.': 0, ' >50K.': 1})
# Changes string to float
df.Age = df.Age.astype(float)
df.fnlwgt = df.fnlwgt.astype(float)
df.EducationNum = df.EducationNum.astype(float)
df.CapitalGain = df.CapitalGain.astype(float)
df.CapitalLoss = df.CapitalLoss.astype(float)
# One-hot encoding
df = pd.get_dummies(df, columns=['WorkClass', 'Education', 'MaritalStatus',
'Occupation', 'Relationship',
'Race', 'Gender', 'NativeCountry'])
# Sets label name as Y
df = df.rename(columns={'Income': 'Y'})
df['Y'] = df['Y'].astype(int)
# Resets index
df = df.reset_index()
df = df.drop(columns=['index'])
# Blog Feedback dataset
elif data_name == 'blog':
resp = urllib.request.urlopen(uci_base_url + '00304/BlogFeedback.zip')
zip_file = zipfile.ZipFile(io.BytesIO(resp.read()))
# Loads train dataset
train_file_name = 'blogData_train.csv'
data_train = pd.read_csv(zip_file.open(train_file_name), header=None)
# Loads test dataset
data_test = []
for i in range(29):
if i < 9:
file_name = 'blogData_test-2012.02.0'+ str(i+1) + '.00_00.csv'
else:
file_name = 'blogData_test-2012.02.'+ str(i+1) + '.00_00.csv'
temp_data = pd.read_csv(zip_file.open(file_name), header=None)
if i == 0:
data_test = temp_data
else:
data_test = pd.concat((data_test, temp_data), axis=0)
for i in range(31):
if i < 9:
file_name = 'blogData_test-2012.03.0'+ str(i+1) + '.00_00.csv'
elif i < 25:
file_name = 'blogData_test-2012.03.'+ str(i+1) + '.00_00.csv'
else:
file_name = 'blogData_test-2012.03.'+ str(i+1) + '.01_00.csv'
temp_data = pd.read_csv(zip_file.open(file_name), header=None)
data_test = pd.concat((data_test, temp_data), axis=0)
df = pd.concat((data_train, data_test), axis=0)
# Removes rows with missing data
df = df.dropna()
    # Sets label name as Y
df.columns = df.columns.astype(str)
df['280'] = 1*(df['280'] > 0)
df = df.rename(columns={'280': 'Y'})
df['Y'] = df['Y'].astype(int)
# Resets index
df = df.reset_index()
df = df.drop(columns=['index'])
# load california housing dataset (./data_files/california_housing_train.csv
# and ./data_files/california_housing_test.csv)
elif data_name == 'cali':
train_url = './data_files/california_housing_train.csv'
test_url = './data_files/california_housing_test.csv'
data_train = pd.read_csv(train_url, header=0)
data_test = pd.read_csv(test_url, header=0)
df = pd.concat((data_train, data_test), axis=0)
# Column names
df.columns = ['longitude', 'latitude', 'housing_median_age', 'total_rooms', 'total_bedrooms',
'population', 'households', 'median_income', 'median_house_value']
df['longitude'] = pd.to_numeric(df['longitude'], downcast="float")
df['latitude'] = pd.to_numeric(df['latitude'], downcast="float")
df['housing_median_age'] = pd.to_numeric(df['housing_median_age'], downcast="float")
df['total_rooms'] = pd.to_numeric(df['total_rooms'], downcast="float")
df['total_bedrooms'] = pd.to_numeric(df['total_bedrooms'], downcast="float")
df['population'] = pd.to_numeric(df['population'], downcast="float")
df['households'] = pd.to_numeric(df['households'], downcast="float")
df['median_income'] = pd.to_numeric(df['median_income'], downcast="float")
df['median_house_value'] = pd.to_numeric(df['median_house_value'], downcast="float")
df['median_house_value'].where(df['median_house_value'] > 200000, 0, inplace=True)
df['median_house_value'].where(df['median_house_value'] <= 200000, 1, inplace=True)
# Sets label name as Y
df = df.rename(columns={'median_house_value': 'Y'})
df['Y'] = df['Y'].astype(int)
# Resets index
df = df.reset_index()
df = df.drop(columns=['index'])
# Extension: load fish dataset
elif data_name == 'fish':
train_url = './data_files/fish.csv'
df = pd.read_csv(train_url, header=0)
df.columns = ['species', 'length', 'weight']
df = df[(df[['length','weight']] != 0).all(axis=1)]
data_train, data_test = train_test_split(df, test_size=0.2)
df = pd.concat((data_train, data_test), axis=0)
# Column names
df.columns = ['species', 'length', 'weight']
df['length'] = pd.to_numeric(df['length'], downcast="float")
df['weight'] = pd.to_numeric(df['weight'], downcast="float")
# One-hot encoding
df = pd.get_dummies(df, columns=['species'])
df['weight'].where(df['weight'] > 31, 0, inplace=True)
df['weight'].where(df['weight'] <= 31, 1, inplace=True)
# Sets label name as Y
df = df.rename(columns={'weight': 'Y'})
df['Y'] = df['Y'].astype(int)
# Resets index
df = df.reset_index()
df = df.drop(columns=['index'])
elif data_name == 'covid':
train_url = './data_files/covid_train.csv'
df = pd.read_csv(train_url, header=0)
df = df[df.Target != 'Fatalities']
df.drop('Id', 1, inplace=True)
df.drop('County', 1,inplace=True)
df.drop('Province_State', 1, inplace=True)
df.drop('Date', 1, inplace=True)
df.drop('Target', 1, inplace=True)
data_train, data_test = train_test_split(df, test_size=0.2)
df = pd.concat((data_train, data_test), axis=0)
# Column names
df['Population'] = pd.to_numeric(df['Population'], downcast="float")
df['Weight'] = pd.to_numeric(df['Weight'], downcast="float")
df['TargetValue'] = pd.to_numeric(df['TargetValue'], downcast="float")
# One-hot encoding
df = pd.get_dummies(df, columns=['Country_Region'])
df['TargetValue'].where(df['TargetValue'] > 13, 0, inplace=True)
df['TargetValue'].where(df['TargetValue'] <= 13, 1, inplace=True)
# Sets label name as Y
df = df.rename(columns={'TargetValue': 'Y'})
df['Y'] = df['Y'].astype(int)
# Resets index
df = df.reset_index()
df = df.drop(columns=['index'])
# Splits train, valid and test sets
train_idx = range(len(data_train))
train = df.loc[train_idx]
test_idx = range(len(data_train), len(df))
test = df.loc[test_idx]
train_idx_final = np.random.permutation(len(train))[:dict_no['train']]
temp_idx = np.random.permutation(len(test))
valid_idx_final = temp_idx[:dict_no['valid']] + len(data_train)
test_idx_final = temp_idx[dict_no['valid']:] + len(data_train)
train = train.loc[train_idx_final]
valid = test.loc[valid_idx_final]
test = test.loc[test_idx_final]
# Adds noise on labels
y_train = np.asarray(train['Y'])
y_train, noise_idx = dvrl_utils.corrupt_label(y_train, noise_rate)
train['Y'] = y_train
# Saves data
if not os.path.exists('data_files'):
os.makedirs('data_files')
train.to_csv('./data_files/train.csv', index=False)
valid.to_csv('./data_files/valid.csv', index=False)
test.to_csv('./data_files/test.csv', index=False)
# Returns indices of noisy samples
return noise_idx
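# Example call (illustrative; the sample counts and noise rate are arbitrary):
#   noise_idx = load_tabular_data('adult', {'train': 1000, 'valid': 400}, noise_rate=0.2)
# This writes ./data_files/train.csv, valid.csv and test.csv and returns the indices
# of the corrupted training labels.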
def load_rossmann_data(dict_no, setting, test_store_type):
"""Loads Rossmann data.
This module loads Rossmann data for a domain adaptation application.
Rossmann data link: https://www.kaggle.com/c/rossmann-store-sales
The users should download 'rossmann-store-sales.zip' from the above link and
save it in './data_files/' directory
Args:
dict_no: the number of source and valid samples
setting: 'train-on-all', 'train-on-rest', or 'train-on-specific'
test_store_type: 'A', 'B', 'C', or 'D'
"""
# Loads datasets
zip_file = zipfile.ZipFile('./data_files/rossmann-store-sales.zip')
train_data = pd.read_csv(zip_file.open('train.csv'))
store_data = pd.read_csv(zip_file.open('store.csv'))
# Extracts features
train_data = train_data[['Store', 'Sales', 'DayOfWeek', 'Customers', 'Open',
'Promo', 'StateHoliday', 'SchoolHoliday']]
store_data = store_data[['Store', 'StoreType', 'Assortment',
'CompetitionDistance', 'CompetitionOpenSinceMonth',
'Promo2', 'Promo2SinceWeek']]
# Data preprocessing
# Fill na to 0
store_data = store_data.fillna(0)
# Converts string to int
train_data['StateHoliday'] = train_data['StateHoliday'].replace(['a', 'b',
'c'], 1)
# One-hot encoding
store_data = pd.get_dummies(store_data)
# Combines store data and train data
data_x = pd.merge(train_data, store_data, on='Store')
# Removes the samples when close
remove_idx = data_x.index[data_x['Sales'] == 0].tolist()
data_x = data_x.drop(remove_idx, axis=0)
# Renames target variable to 'Y'
data_x = data_x.rename(columns={'Sales': 'Y'})
# Defines store types
data_c = data_x[['StoreType_a', 'StoreType_b', 'StoreType_c', 'StoreType_d']]
data_c = data_c.rename(columns={'StoreType_a': 'A', 'StoreType_b': 'B',
'StoreType_c': 'C', 'StoreType_d': 'D'})
# Defines features
data_x = data_x.drop(['StoreType_a', 'StoreType_b',
'StoreType_c', 'StoreType_d'], axis=1)
# Resets index
data_x = data_x.reset_index()
data_c = data_c.reset_index()
data_x = data_x.drop(['index'], axis=1)
data_c = data_c.drop(['index'], axis=1)
# Splits source, valid, and target sets
# Random partitioning
idx = np.random.permutation(len(data_x))
source_idx = idx[:dict_no['source']]
valid_idx = idx[dict_no['source']:(dict_no['source']+dict_no['valid'])]
target_idx = idx[(dict_no['source']+dict_no['valid']):]
x_source = data_x.loc[source_idx]
c_source = data_c.loc[source_idx]
x_valid = data_x.loc[valid_idx]
c_valid = data_c.loc[valid_idx]
x_target = data_x.loc[target_idx]
c_target = data_c.loc[target_idx]
# Selects source dataset based on the setting and test_store_type
if setting == 'train-on-all':
source_sub_idx = c_source.index[c_source[test_store_type] >= 0].tolist()
elif setting == 'train-on-rest':
source_sub_idx = c_source.index[c_source[test_store_type] == 0].tolist()
elif setting == 'train-on-specific':
source_sub_idx = c_source.index[c_source[test_store_type] == 1].tolist()
# Selects valid and target datasets based on test_store_type
valid_sub_idx = c_valid.index[c_valid[test_store_type] == 1].tolist()
target_sub_idx = c_target.index[c_target[test_store_type] == 1].tolist()
# Divides source, valid, and target datasets
source = x_source.loc[source_sub_idx]
valid = x_valid.loc[valid_sub_idx]
target = x_target.loc[target_sub_idx]
source.to_csv('./data_files/source.csv', index=False)
valid.to_csv('./data_files/valid.csv', index=False)
target.to_csv('./data_files/target.csv', index=False)
return
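# Example call (illustrative; the sample counts, setting and store type are arbitrary):
#   load_rossmann_data({'source': 50000, 'valid': 5000}, 'train-on-rest', 'B')
# This writes ./data_files/source.csv, valid.csv and target.csv for the chosen
# domain-adaptation setting and target store type.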
def preprocess_data(normalization,
train_file_name, valid_file_name, test_file_name):
"""Loads datasets, divides features and labels, and normalizes features.
Args:
normalization: 'minmax' or 'standard'
train_file_name: file name of training set
valid_file_name: file name of validation set
test_file_name: file name of testing set
Returns:
x_train: training features
y_train: training labels
x_valid: validation features
y_valid: validation labels
x_test: testing features
y_test: testing labels
col_names: column names
"""
# Loads datasets
  train = pd.read_csv('./data_files/'+train_file_name)
import logging
import os
import pandas as pd
from jade.utils.utils import load_data
from disco.analysis import Analysis, Input
from disco.exceptions import AnalysisRunException
from disco.utils.custom_type import CustomType
from disco.utils.dss_utils import extract_upgrade_results
logger = logging.getLogger(__name__)
class UpgradeCostAnalysis(Analysis):
INPUTS = [
Input("unit_cost_data_file", CustomType(str), "DISCO_cost_database.xlsx")
]
def run(self, output, *args, **kwargs):
# unit_cost_data_file
unit_cost_data_file = self.get_input("unit_cost_data_file").current_value
# relative job paths
job_output = os.path.join(output, self._job_name)
# output_path
post_process_output = os.path.join(job_output, "post_process")
os.makedirs(post_process_output, exist_ok=True)
# upgrade files
project_path = os.path.join(job_output, "pydss_project")
upgrade_files = extract_upgrade_results(project_path, file_ext=".json")
thermal_upgrade_file = upgrade_files["thermal"]
voltage_upgrade_file = upgrade_files["voltage"]
try:
# Cost calculation
thermal_df = self.get_thermal_costs(
thermal_upgrade_file, unit_cost_data_file, post_process_output
)
metadata = load_data(voltage_upgrade_file)
vreg_df = self.get_vreg_costs(
voltage_upgrade_file,
unit_cost_data_file,
metadata["feederhead_basekV"],
)
cap_df = self.get_cap_costs(voltage_upgrade_file, unit_cost_data_file)
# Cost summary
total_costs_df = self.get_total_costs(thermal_df, vreg_df, cap_df)
# Output CSV file
summary_of_upgrade_costs_file = os.path.join(
post_process_output, "summary_of_upgrade_costs.csv"
)
total_costs_df.to_csv(summary_of_upgrade_costs_file, index=False)
# total_costs_df.to_feather(output_path + 'summary_of_upgrade_costs.feather')
self._add_to_results(
"summary_of_upgrade_costs", summary_of_upgrade_costs_file
)
except AnalysisRunException:
logger.exception("Unexcepted UpgradeCostAnalysis Error.")
raise
finally:
if os.path.exists(thermal_upgrade_file):
os.remove(thermal_upgrade_file)
if os.path.exists(voltage_upgrade_file):
os.remove(voltage_upgrade_file)
def indiv_line_cost(self, upgrade_df, unit_cost_lines):
"""Function to calculate costs of upgrading each individual line that is overloaded.
Returns a dataframe with columns containing the line ID's and cost to upgrade.
"""
# Dictionary used to convert between different length units and meters, which are used for all the calculations.
# OpenDSS can output results in any of these lengths.
        # Each factor is expressed as "units per meter", so a length is converted to
        # meters below by dividing by the factor (e.g. length_in_ft / 3.28084).
        len_unit_mult = {
            "mi": 0.000621371,  # fixed from 1609.34 (meters per mile), which would break the division below
            "kft": 0.00328084,
            "km": 0.001,
            "ft": 3.28084,
            "in": 39.3701,
            "cm": 100,
        }
line_costs_df = pd.DataFrame()
for k in upgrade_df.keys():
# print(k)
if "Line." in k:
                new_line_len = upgrade_df[k]["new"][1]["length"]
                # Upgraded lines and new lines run along the existing circuit, so the length is the same for both.
if upgrade_df[k]["new"][0] > 0:
# print(k)
new_line_len_unit = upgrade_df[k]["new"][1]["length_unit"]
if new_line_len_unit == "m":
new_line_len_m = new_line_len
else:
new_line_len_m = new_line_len / len_unit_mult[new_line_len_unit]
# print('line length is ',new_line_len, new_line_len_unit, 'or', new_line_len_m, 'm')
line_count = upgrade_df[k]["new"][
0
] # count of new lines added to address overload. Often 1, but could be > 1 with severe overloads
new_line_cost_per_line = new_line_len_m * float(
unit_cost_lines[
unit_cost_lines["description"] == "new_line"
].cost_per_m
)
new_line_cost = line_count * new_line_cost_per_line
elif upgrade_df[k]["new"][0] == 0:
new_line_cost = 0
new_line_cost_per_line = 0
elif upgrade_df[k]["new"][0] < 0:
logger.error(
"Error: number of new lines is negative: %s",
upgrade_df[k]["new"][0],
)
raise AnalysisRunException(
"Error: number of new lines is negative: {}".format(
upgrade_df[k]["new"][0]
)
)
upgraded_line_count = upgrade_df[k]["upgrade"][0]
upgraded_line_cost = (
new_line_cost_per_line * upgraded_line_count
) # TODO: update to take ampacities as an option. X data currently does not have sufficient resolution
dict_k = {
"id": [k],
"new_equip_cost": [new_line_cost],
"upgraded_equip_cost": [upgraded_line_cost],
}
df_k = pd.DataFrame.from_dict(dict_k)
line_costs_df = line_costs_df.append(df_k)
return line_costs_df
def get_xfmr_unit_costs(self, kva, unit_cost_xfmrs):
unit_cost = unit_cost_xfmrs[unit_cost_xfmrs["rated_kva"] == kva].total_cost
return unit_cost
def indiv_xfmr_costs(self, upgrade_df, unit_cost_xfmrs):
"""Function to calculate costs of upgrading each individual transformers that is overloaded.
Returns a dataframe with columns containing the transformer ID's and cost to upgrade.
"""
xfmr_costs_df = pd.DataFrame()
rated_kva_list = [float(x) for x in unit_cost_xfmrs["rated_kva"]]
for k in upgrade_df.keys():
if "Transformer." in k:
if (
upgrade_df[k]["new"][0] > 0
): # TODO: make functions for getting new equipment and upgraded equipment costs to make cleaner
# print(k)
new_xfmr_kva = upgrade_df[k]["new"][1]["wdg_kvas"][
0
] # TODO: decide how to handle oh vs ug for LA100
new_xfmr_count = upgrade_df[k]["new"][
0
] # count of new transformers added to address overload.
if new_xfmr_kva > 5000: # TODO: update later?
new_unit_cost = unit_cost_xfmrs[
unit_cost_xfmrs["system"] == "substation"
].install_cost
elif not (new_xfmr_kva in rated_kva_list):
closest_kva = min(
unit_cost_xfmrs["rated_kva"],
key=lambda x: abs(x - new_xfmr_kva),
)
new_unit_cost = unit_cost_xfmrs[
unit_cost_xfmrs["rated_kva"] == closest_kva
].install_cost
else:
new_unit_cost = unit_cost_xfmrs[
unit_cost_xfmrs["rated_kva"] == new_xfmr_kva
].install_cost
# print(k)
# print('new unit cost is:', new_unit_cost)
# print('new_xfmr_count is:', new_xfmr_count)
# print('new_xfmr_kva is:',new_xfmr_kva)
new_xfmr_cost = new_unit_cost * new_xfmr_count
new_xfmr_cost = new_xfmr_cost.iloc[0]
elif upgrade_df[k]["new"][0] == 0:
new_xfmr_cost = 0
elif upgrade_df[k]["new"][0] < 0:
logger.exception(
"Error: number of new transformers is negative: %s",
upgrade_df[k]["new"][0],
)
raise AnalysisRunException(
"Error: number of new transformers is negative: {}".format(
upgrade_df[k]["new"][0]
)
)
if upgrade_df[k]["upgrade"][0] > 0:
# print(upgrade_df[k]['upgrade'][1][0]['kva'][0])
upgrade_xfmr_kva = float(upgrade_df[k]["upgrade"][1][0]["kva"][0])
upgrade_xfmr_count = upgrade_df[k]["upgrade"][0]
if upgrade_xfmr_kva > 5000: # TODO: update later?
upgrade_xfmr_unit_cost = (
unit_cost_xfmrs[
unit_cost_xfmrs["system"] == "substation"
].install_cost
+ unit_cost_xfmrs[
unit_cost_xfmrs["system"] == "substation"
].remove_cost
)
elif not (upgrade_xfmr_kva in rated_kva_list):
closest_kva = min(
unit_cost_xfmrs["rated_kva"],
key=lambda x: abs(x - upgrade_xfmr_kva),
)
upgrade_xfmr_unit_cost = (
unit_cost_xfmrs[
unit_cost_xfmrs["rated_kva"] == closest_kva
].install_cost
+ unit_cost_xfmrs[
unit_cost_xfmrs["rated_kva"] == closest_kva
].remove_cost
)
else:
upgrade_xfmr_unit_cost = (
unit_cost_xfmrs[
unit_cost_xfmrs["rated_kva"] == upgrade_xfmr_kva
].install_cost
+ unit_cost_xfmrs[
unit_cost_xfmrs["rated_kva"] == upgrade_xfmr_kva
].remove_cost
)
upgrade_xfmr_cost = upgrade_xfmr_unit_cost * upgrade_xfmr_count
upgrade_xfmr_cost = upgrade_xfmr_cost.iloc[0]
elif upgrade_df[k]["upgrade"][0] == 0:
upgrade_xfmr_cost = 0
elif upgrade_df[k]["upgrade"][0] < 0:
logger.error(
"Error: number of upgraded transformers is negative: %s",
upgrade_df[k]["upgrade"][0],
)
raise AnalysisRunException(
"Error: number of upgraded transformers is negative: {}".format(
upgrade_df[k]["upgrade"][0]
)
)
else:
upgrade_xfmr_cost = None
logger.warning(
"Warning: unintentified error. Assigning upgrade_xfmr_cost to None"
)
# print(k)
dict_k = {
"id": [k],
"new_equip_cost": [new_xfmr_cost],
"upgraded_equip_cost": [upgrade_xfmr_cost],
}
df_k = pd.DataFrame.from_dict(dict_k)
xfmr_costs_df = xfmr_costs_df.append(df_k)
return xfmr_costs_df
def get_cap_costs(self, upgrade_file, unit_cost_data_file):
"""
        Note: we currently never add new capacitors to integrate PV; we may want them
        to accommodate new load or EVs. Right now the only capacitor changes are new
        controllers or control-setting changes.
"""
        # pd.read_json() works if the data looks like {'a': [1, 2], 'b': [3, 4]}, because the values are indexed.
        # It fails on purely scalar values, like {'a': 1, 'b': 2}; in that case use typ="series".
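        # e.g. pd.read_json('{"a": [1, 2]}')          -> DataFrame indexed 0..1
        #      pd.read_json('{"a": 1}', typ="series") -> Series, then .to_frame("Value")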
try:
upgrade_df = pd.read_json(upgrade_file)
except ValueError:
upgrade_df = pd.read_json(upgrade_file, typ="series").to_frame("Value")
if upgrade_df.empty:
cap_dict = {
"type": ["new capacitor controller", "capacitor setting changes"],
"count": [0, 0],
"total_cost_usd": [0, 0],
}
cap_cost_df = pd.DataFrame.from_dict(cap_dict)
return cap_cost_df
cap_cols = upgrade_df.columns.str.contains("Capacitor")
cap_df = upgrade_df[upgrade_df.columns[cap_cols]]
if cap_df.empty:
cap_dict = {
"type": ["new capacitor controller", "capacitor setting changes"],
"count": [0, 0],
"total_cost_usd": [0, 0],
}
            cap_cost_df = pd.DataFrame.from_dict(cap_dict)
import pandas as pd
from gutenberg.acquire import load_etext
from gutenberg.query import get_metadata
from gutenberg.cleanup import strip_headers
from top_1k import book_list
from gutenberg._domain_model.exceptions import UnknownDownloadUriException
import chapterize
# from gutenberg.acquire import get_metadata_cache
# cache = get_metadata_cache()
# cache.populate()
# text = strip_headers(load_etext(2701)).strip()
# print(text) # prints 'MOBY DICK; OR THE WHALE\n\nBy <NAME> ...'
df = pd.DataFrame(columns=['book_num', 'title', 'author'])
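# Illustrative continuation (not from the original script): one way the imports above
# could be combined to fill `df`. `book_list` is assumed to be an iterable of Project
# Gutenberg e-text numbers exported by top_1k.
rows = []
for book_num in book_list:
    try:
        text = strip_headers(load_etext(book_num)).strip()
    except UnknownDownloadUriException:
        # Some e-texts have no downloadable plain-text version; skip them.
        continue
    title = next(iter(get_metadata('title', book_num)), '')
    author = next(iter(get_metadata('author', book_num)), '')
    rows.append({'book_num': book_num, 'title': title, 'author': author})
df = pd.concat([df, pd.DataFrame(rows)], ignore_index=True)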
# pylint: disable-msg=E1101,W0612
from __future__ import with_statement # for Python 2.5
from datetime import datetime, time, timedelta
import sys
import os
import unittest
import nose
import numpy as np
from pandas import (Index, Series, TimeSeries, DataFrame, isnull,
date_range, Timestamp)
from pandas import DatetimeIndex, Int64Index, to_datetime
from pandas.core.daterange import DateRange
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
from pandas.tseries.index import bdate_range, date_range
import pandas.tseries.tools as tools
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
import pandas.lib as lib
import cPickle as pickle
import pandas.core.datetools as dt
from numpy.random import rand
from pandas.util.testing import assert_frame_equal
import pandas.util.py3compat as py3compat
from pandas.core.datetools import BDay
import pandas.core.common as com
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest
try:
import pytz
except ImportError:
pass
class TestTimeZoneSupport(unittest.TestCase):
def setUp(self):
_skip_if_no_pytz()
def test_utc_to_local_no_modify(self):
rng = date_range('3/11/2012', '3/12/2012', freq='H', tz='utc')
rng_eastern = rng.tz_convert('US/Eastern')
# Values are unmodified
self.assert_(np.array_equal(rng.asi8, rng_eastern.asi8))
self.assert_(rng_eastern.tz == pytz.timezone('US/Eastern'))
def test_localize_utc_conversion(self):
# Localizing to time zone should:
# 1) check for DST ambiguities
# 2) convert to UTC
rng = date_range('3/10/2012', '3/11/2012', freq='30T')
converted = rng.tz_localize('US/Eastern')
expected_naive = rng + offsets.Hour(5)
self.assert_(np.array_equal(converted.asi8, expected_naive.asi8))
# DST ambiguity, this should fail
rng = date_range('3/11/2012', '3/12/2012', freq='30T')
self.assertRaises(Exception, rng.tz_localize, 'US/Eastern')
def test_tz_localize_dti(self):
from pandas.tseries.offsets import Hour
dti = DatetimeIndex(start='1/1/2005', end='1/1/2005 0:00:30.256',
freq='L')
dti2 = dti.tz_localize('US/Eastern')
dti_utc = DatetimeIndex(start='1/1/2005 05:00',
end='1/1/2005 5:00:30.256', freq='L',
tz='utc')
self.assert_(np.array_equal(dti2.values, dti_utc.values))
dti3 = dti2.tz_convert('US/Pacific')
self.assert_(np.array_equal(dti3.values, dti_utc.values))
dti = DatetimeIndex(start='11/6/2011 1:59',
end='11/6/2011 2:00', freq='L')
self.assertRaises(pytz.AmbiguousTimeError, dti.tz_localize,
'US/Eastern')
dti = DatetimeIndex(start='3/13/2011 1:59', end='3/13/2011 2:00',
freq='L')
self.assertRaises(pytz.AmbiguousTimeError, dti.tz_localize,
'US/Eastern')
def test_utc_box_timestamp_and_localize(self):
rng = date_range('3/11/2012', '3/12/2012', freq='H', tz='utc')
rng_eastern = rng.tz_convert('US/Eastern')
tz = pytz.timezone('US/Eastern')
expected = tz.normalize(rng[-1])
stamp = rng_eastern[-1]
self.assertEquals(stamp, expected)
self.assertEquals(stamp.tzinfo, expected.tzinfo)
# right tzinfo
rng = date_range('3/13/2012', '3/14/2012', freq='H', tz='utc')
rng_eastern = rng.tz_convert('US/Eastern')
self.assert_('EDT' in repr(rng_eastern[0].tzinfo))
def test_timestamp_tz_convert(self):
strdates = ['1/1/2012', '3/1/2012', '4/1/2012']
idx = DatetimeIndex(strdates, tz='US/Eastern')
conv = idx[0].tz_convert('US/Pacific')
expected = idx.tz_convert('US/Pacific')[0]
self.assertEquals(conv, expected)
def test_pass_dates_convert_to_utc(self):
strdates = ['1/1/2012', '3/1/2012', '4/1/2012']
idx = DatetimeIndex(strdates)
conv = idx.tz_convert('US/Eastern')
fromdates = DatetimeIndex(strdates, tz='US/Eastern')
self.assert_(conv.tz == fromdates.tz)
self.assert_(np.array_equal(conv.values, fromdates.values))
def test_field_access_localize(self):
strdates = ['1/1/2012', '3/1/2012', '4/1/2012']
rng = DatetimeIndex(strdates, tz='US/Eastern')
self.assert_((rng.hour == 0).all())
def test_with_tz(self):
tz = pytz.timezone('US/Central')
# just want it to work
start = datetime(2011, 3, 12, tzinfo=pytz.utc)
dr = bdate_range(start, periods=50, freq=datetools.Hour())
self.assert_(dr.tz is pytz.utc)
# DateRange with naive datetimes
dr = bdate_range('1/1/2005', '1/1/2009', tz=pytz.utc)
dr = bdate_range('1/1/2005', '1/1/2009', tz=tz)
# normalized
central = dr.tz_convert(tz)
self.assert_(central.tz is tz)
self.assert_(central[0].tz is tz)
# datetimes with tzinfo set
dr = bdate_range(datetime(2005, 1, 1, tzinfo=pytz.utc),
'1/1/2009', tz=pytz.utc)
self.assertRaises(Exception, bdate_range,
datetime(2005, 1, 1, tzinfo=pytz.utc),
'1/1/2009', tz=tz)
def test_tz_localize(self):
dr = bdate_range('1/1/2009', '1/1/2010')
dr_utc = bdate_range('1/1/2009', '1/1/2010', tz=pytz.utc)
localized = dr.tz_localize(pytz.utc)
self.assert_(np.array_equal(dr_utc, localized))
def test_with_tz_ambiguous_times(self):
tz = pytz.timezone('US/Eastern')
rng = bdate_range(datetime(2009, 1, 1), datetime(2010, 1, 1))
# regular no problem
self.assert_(rng.tz_validate())
# March 13, 2011, spring forward, skip from 2 AM to 3 AM
dr = date_range(datetime(2011, 3, 13, 1, 30), periods=3,
freq=datetools.Hour())
self.assertRaises(pytz.AmbiguousTimeError, dr.tz_localize, tz)
# after dst transition, it works
dr = date_range(datetime(2011, 3, 13, 3, 30), periods=3,
freq=datetools.Hour(), tz=tz)
# November 6, 2011, fall back, repeat 2 AM hour
dr = date_range(datetime(2011, 11, 6, 1, 30), periods=3,
freq=datetools.Hour())
self.assertRaises(pytz.AmbiguousTimeError, dr.tz_localize, tz)
# UTC is OK
dr = date_range(datetime(2011, 3, 13), periods=48,
freq=datetools.Minute(30), tz=pytz.utc)
# test utility methods
def test_infer_tz(self):
eastern = pytz.timezone('US/Eastern')
utc = pytz.utc
_start = datetime(2001, 1, 1)
_end = datetime(2009, 1, 1)
start = eastern.localize(_start)
end = eastern.localize(_end)
assert(tools._infer_tzinfo(start, end) is eastern)
assert(tools._infer_tzinfo(start, None) is eastern)
assert(
|
tools._infer_tzinfo(None, end)
|
pandas.tseries.tools._infer_tzinfo
|
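The blank above calls the internal helper pandas.tseries.tools._infer_tzinfo from an old pandas release. A minimal sketch of the time-zone behaviour these tests exercise, using only the modern public API (not the internal helper):

import pandas as pd

# Hourly range in UTC, then the same instants viewed in US/Eastern.
rng = pd.date_range('3/11/2012', '3/12/2012', freq='h', tz='UTC')
rng_eastern = rng.tz_convert('US/Eastern')

# tz_convert relabels the timestamps without changing the underlying instants.
assert (rng == rng_eastern).all()
print(rng_eastern[0])  # first stamp rendered in Eastern time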
from __future__ import division
from builtins import str
from builtins import object
__copyright__ = "Copyright 2015 Contributing Entities"
__license__ = """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import numpy as np
import pandas as pd
from .Error import NetworkInputError, NotImplementedError, UnexpectedError
from .Logger import FastTripsLogger
from .Util import Util
class Route(object):
"""
Route class.
One instance represents all of the Routes.
Stores route information in :py:attr:`Route.routes_df` and agency information in
:py:attr:`Route.agencies_df`. Each are instances of :py:class:`pandas.DataFrame`.
Fare information is in :py:attr:`Route.fare_attrs_df`, :py:attr:`Route.fare_rules_df` and
:py:attr:`Route.fare_transfer_rules_df`.
"""
#: File with fasttrips routes information (this extends the
#: `gtfs routes <https://github.com/osplanning-data-standards/GTFS-PLUS/blob/master/files/routes.md>`_ file).
#: See `routes_ft specification <https://github.com/osplanning-data-standards/GTFS-PLUS/blob/master/files/routes_ft.md>`_.
INPUT_ROUTES_FILE = "routes_ft.txt"
#: gtfs Routes column name: Unique identifier
ROUTES_COLUMN_ROUTE_ID = "route_id"
#: gtfs Routes column name: Short name
ROUTES_COLUMN_ROUTE_SHORT_NAME = "route_short_name"
#: gtfs Routes column name: Long name
ROUTES_COLUMN_ROUTE_LONG_NAME = "route_long_name"
#: gtfs Routes column name: Route type
ROUTES_COLUMN_ROUTE_TYPE = "route_type"
#: gtfs Routes column name: Agency ID
ROUTES_COLUMN_AGENCY_ID = "agency_id"
#: fasttrips Routes column name: Mode
ROUTES_COLUMN_MODE = "mode"
#: fasttrips Routes column name: Proof of Payment
ROUTES_COLUMN_PROOF_OF_PAYMENT = "proof_of_payment"
# ========== Added by fasttrips =======================================================
#: fasttrips Routes column name: Mode number
ROUTES_COLUMN_ROUTE_ID_NUM = "route_id_num"
#: fasttrips Routes column name: Mode number
ROUTES_COLUMN_MODE_NUM = "mode_num"
#: fasttrips Routes column name: Mode type
ROUTES_COLUMN_MODE_TYPE = "mode_type"
#: Value for :py:attr:`Route.ROUTES_COLUMN_MODE_TYPE` column: access
MODE_TYPE_ACCESS = "access"
#: Value for :py:attr:`Route.ROUTES_COLUMN_MODE_TYPE` column: egress
MODE_TYPE_EGRESS = "egress"
#: Value for :py:attr:`Route.ROUTES_COLUMN_MODE_TYPE` column: transit
MODE_TYPE_TRANSIT = "transit"
#: Value for :py:attr:`Route.ROUTES_COLUMN_MODE_TYPE` column: transfer
MODE_TYPE_TRANSFER = "transfer"
#: Access mode numbers start from here
MODE_NUM_START_ACCESS = 101
#: Egress mode numbers start from here
MODE_NUM_START_EGRESS = 201
#: Route mode numbers start from here
MODE_NUM_START_ROUTE = 301
#: File with fasttrips fare attributes information (this *substitutes rather than extends* the
#: `gtfs fare_attributes <https://github.com/osplanning-data-standards/GTFS-PLUS/blob/master/files/fare_attributes_ft.md>`_ file).
#: See `fare_attributes_ft specification <https://github.com/osplanning-data-standards/GTFS-PLUS/blob/master/files/fare_attributes_ft.md>`_.
INPUT_FARE_ATTRIBUTES_FILE = "fare_attributes_ft.txt"
# fasttrips Fare attributes column name: Fare Period
FARE_ATTR_COLUMN_FARE_PERIOD = "fare_period"
# fasttrips Fare attributes column name: Price
FARE_ATTR_COLUMN_PRICE = "price"
# fasttrips Fare attributes column name: Currency Type
FARE_ATTR_COLUMN_CURRENCY_TYPE = "currency_type"
# fasttrips Fare attributes column name: Payment Method
FARE_ATTR_COLUMN_PAYMENT_METHOD = "payment_method"
# fasttrips Fare attributes column name: Transfers (number permitted on this fare)
FARE_ATTR_COLUMN_TRANSFERS = "transfers"
# fasttrips Fare attributes column name: Transfer duration (integer length of time in seconds before the transfer expires; omit or leave empty if transfers do not expire)
FARE_ATTR_COLUMN_TRANSFER_DURATION = "transfer_duration"
#: File with fasttrips fare periods information
#: See `fare_rules_ft specification <https://github.com/osplanning-data-standards/GTFS-PLUS/blob/master/files/fare_rules_ft.md>`_.
INPUT_FARE_PERIODS_FILE = "fare_periods_ft.txt"
#: fasttrips Fare rules column name: Fare ID
FARE_RULES_COLUMN_FARE_ID = "fare_id"
#: GTFS fare rules column name: Route ID
FARE_RULES_COLUMN_ROUTE_ID = ROUTES_COLUMN_ROUTE_ID
#: GTFS fare rules column name: Origin Zone ID
FARE_RULES_COLUMN_ORIGIN_ID = "origin_id"
#: GTFS fare rules column name: Destination Zone ID
FARE_RULES_COLUMN_DESTINATION_ID = "destination_id"
#: GTFS fare rules column name: Contains ID
FARE_RULES_COLUMN_CONTAINS_ID = "contains_id"
#: fasttrips Fare rules column name: Fare class
FARE_RULES_COLUMN_FARE_PERIOD = FARE_ATTR_COLUMN_FARE_PERIOD
#: fasttrips Fare rules column name: Start time for the fare. A DateTime
FARE_RULES_COLUMN_START_TIME = "start_time"
#: fasttrips Fare rules column name: End time for the fare rule. A DateTime.
FARE_RULES_COLUMN_END_TIME = "end_time"
# ========== Added by fasttrips =======================================================
#: fasttrips Fare rules column name: Fare ID num
FARE_RULES_COLUMN_FARE_ID_NUM = "fare_id_num"
#: fasttrips Fare rules column name: Route ID num
FARE_RULES_COLUMN_ROUTE_ID_NUM = ROUTES_COLUMN_ROUTE_ID_NUM
#: fasttrips fare rules column name: Origin Zone ID number
FARE_RULES_COLUMN_ORIGIN_ID_NUM = "origin_id_num"
#: fasttrips fare rules column name: Destination ID number
FARE_RULES_COLUMN_DESTINATION_ID_NUM = "destination_id_num"
#: File with fasttrips fare transfer rules information.
#: See `fare_transfer_rules specification <https://github.com/osplanning-data-standards/GTFS-PLUS/blob/master/files/fare_transfer_rules_ft.md>`_.
INPUT_FARE_TRANSFER_RULES_FILE = "fare_transfer_rules_ft.txt"
#: fasttrips Fare transfer rules column name: From Fare Class
FARE_TRANSFER_RULES_COLUMN_FROM_FARE_PERIOD = "from_fare_period"
#: fasttrips Fare transfer rules column name: To Fare Class
FARE_TRANSFER_RULES_COLUMN_TO_FARE_PERIOD = "to_fare_period"
#: fasttrips Fare transfer rules column name: Transfer type?
FARE_TRANSFER_RULES_COLUMN_TYPE = "transfer_fare_type"
#: fasttrips Fare transfer rules column name: Transfer amount (discount or fare)
FARE_TRANSFER_RULES_COLUMN_AMOUNT = "transfer_fare"
#: Value for :py:attr:`Route.FARE_TRANSFER_RULES_COLUMN_TYPE`: transfer discount
TRANSFER_TYPE_TRANSFER_DISCOUNT = "transfer_discount"
#: Value for :py:attr:`Route.FARE_TRANSFER_RULES_COLUMN_TYPE`: free transfer
TRANSFER_TYPE_TRANSFER_FREE = "transfer_free"
#: Value for :py:attr:`Route.FARE_TRANSFER_RULES_COLUMN_TYPE`: transfer fare cost
TRANSFER_TYPE_TRANSFER_COST = "transfer_cost"
#: Valid options for :py:attr:`Route.FARE_TRANSFER_RULES_COLUMN_TYPE`
TRANSFER_TYPE_OPTIONS = [TRANSFER_TYPE_TRANSFER_DISCOUNT,
TRANSFER_TYPE_TRANSFER_FREE,
TRANSFER_TYPE_TRANSFER_COST]
#: File with route ID, route ID number correspondence (and fare id num)
OUTPUT_ROUTE_ID_NUM_FILE = "ft_intermediate_route_id.txt"
#: File with fare id num, fare id, fare class, price, xfers
OUTPUT_FARE_ID_FILE = "ft_intermediate_fare.txt"
#: File with fare transfer rules
OUTPUT_FARE_TRANSFER_FILE = "ft_intermediate_fare_transfers.txt"
#: File with mode, mode number correspondence
OUTPUT_MODE_NUM_FILE = "ft_intermediate_supply_mode_id.txt"
def __init__(self, input_archive, output_dir, gtfs, today, stops):
"""
Constructor. Reads the gtfs data from the transitfeed schedule, and the additional
fast-trips routes data from the input file in *input_archive*.
"""
self.output_dir = output_dir
self.routes_df = gtfs.routes
FastTripsLogger.info("Read %7d %15s from %25d %25s" %
(len(self.routes_df), 'date valid route', len(gtfs.routes), 'total routes'))
# Read the fast-trips supplemental routes data file
routes_ft_df = gtfs.get(Route.INPUT_ROUTES_FILE)
# verify required columns are present
routes_ft_cols = list(routes_ft_df.columns.values)
assert(Route.ROUTES_COLUMN_ROUTE_ID in routes_ft_cols)
assert(Route.ROUTES_COLUMN_MODE in routes_ft_cols)
# verify no routes_ids are duplicated
if routes_ft_df.duplicated(subset=[Route.ROUTES_COLUMN_ROUTE_ID]).sum()>0:
error_msg = "Found %d duplicate %s in %s" % (routes_ft_df.duplicated(subset=[Route.ROUTES_COLUMN_ROUTE_ID]).sum(),
Route.ROUTES_COLUMN_ROUTE_ID, Route.INPUT_ROUTES_FILE)
FastTripsLogger.fatal(error_msg)
FastTripsLogger.fatal("\nDuplicates:\n%s" % \
str(routes_ft_df.loc[routes_ft_df.duplicated(subset=[Route.ROUTES_COLUMN_ROUTE_ID])]))
raise NetworkInputError(Route.INPUT_ROUTES_FILE, error_msg)
# Join to the routes dataframe
self.routes_df = pd.merge(left=self.routes_df, right=routes_ft_df,
how='left',
on=Route.ROUTES_COLUMN_ROUTE_ID)
# Get the mode list
self.modes_df = self.routes_df[[Route.ROUTES_COLUMN_MODE]].drop_duplicates().reset_index(drop=True)
self.modes_df[Route.ROUTES_COLUMN_MODE_NUM] = self.modes_df.index + Route.MODE_NUM_START_ROUTE
self.modes_df[Route.ROUTES_COLUMN_MODE_TYPE] = Route.MODE_TYPE_TRANSIT
# Join to mode numbering
self.routes_df = Util.add_new_id(self.routes_df, Route.ROUTES_COLUMN_MODE, Route.ROUTES_COLUMN_MODE_NUM,
self.modes_df, Route.ROUTES_COLUMN_MODE, Route.ROUTES_COLUMN_MODE_NUM)
# Route IDs are strings. Create a unique numeric route ID.
self.route_id_df = Util.add_numeric_column(self.routes_df[[Route.ROUTES_COLUMN_ROUTE_ID]],
id_colname=Route.ROUTES_COLUMN_ROUTE_ID,
numeric_newcolname=Route.ROUTES_COLUMN_ROUTE_ID_NUM)
FastTripsLogger.debug("Route ID to number correspondence\n" + str(self.route_id_df.head()))
FastTripsLogger.debug(str(self.route_id_df.dtypes))
self.routes_df = self.add_numeric_route_id(self.routes_df,
id_colname=Route.ROUTES_COLUMN_ROUTE_ID,
numeric_newcolname=Route.ROUTES_COLUMN_ROUTE_ID_NUM)
FastTripsLogger.debug("=========== ROUTES ===========\n" + str(self.routes_df.head()))
FastTripsLogger.debug("\n"+str(self.routes_df.dtypes))
FastTripsLogger.info("Read %7d %15s from %25s, %25s" %
(len(self.routes_df), "routes", "routes.txt", Route.INPUT_ROUTES_FILE))
self.agencies_df = gtfs.agency
FastTripsLogger.debug("=========== AGENCIES ===========\n" + str(self.agencies_df.head()))
FastTripsLogger.debug("\n"+str(self.agencies_df.dtypes))
FastTripsLogger.info("Read %7d %15s from %25s" %
(len(self.agencies_df), "agencies", "agency.txt"))
self.fare_attrs_df = gtfs.fare_attributes
FastTripsLogger.debug("=========== FARE ATTRIBUTES ===========\n" + str(self.fare_attrs_df.head()))
FastTripsLogger.debug("\n"+str(self.fare_attrs_df.dtypes))
FastTripsLogger.info("Read %7d %15s from %25s" %
(len(self.fare_attrs_df), "fare attributes", "fare_attributes.txt"))
# substitute fasttrips fare attributes
self.fare_attrs_df = gtfs.get(Route.INPUT_FARE_ATTRIBUTES_FILE)
if not self.fare_attrs_df.empty:
# verify required columns are present
fare_attrs_cols = list(self.fare_attrs_df.columns.values)
assert(Route.FARE_ATTR_COLUMN_FARE_PERIOD in fare_attrs_cols)
assert(Route.FARE_ATTR_COLUMN_PRICE in fare_attrs_cols)
assert(Route.FARE_ATTR_COLUMN_CURRENCY_TYPE in fare_attrs_cols)
assert(Route.FARE_ATTR_COLUMN_PAYMENT_METHOD in fare_attrs_cols)
assert(Route.FARE_ATTR_COLUMN_TRANSFERS in fare_attrs_cols)
if Route.FARE_ATTR_COLUMN_TRANSFER_DURATION not in fare_attrs_cols:
self.fare_attrs_df[Route.FARE_ATTR_COLUMN_TRANSFER_DURATION] = np.nan
FastTripsLogger.debug("===> REPLACED BY FARE ATTRIBUTES FT\n" + str(self.fare_attrs_df.head()))
FastTripsLogger.debug("\n"+str(self.fare_attrs_df.dtypes))
FastTripsLogger.info("Read %7d %15s from %25s" %
(len(self.fare_attrs_df), "fare attributes", Route.INPUT_FARE_ATTRIBUTES_FILE))
#: fares are by fare_period rather than by fare_id
self.fare_by_class = True
else:
self.fare_by_class = False
# Fare rules (map routes to fare_id)
self.fare_rules_df = gtfs.fare_rules
if len(self.fare_rules_df) > 0:
self.fare_ids_df = Util.add_numeric_column(self.fare_rules_df[[Route.FARE_RULES_COLUMN_FARE_ID]],
id_colname=Route.FARE_RULES_COLUMN_FARE_ID,
numeric_newcolname=Route.FARE_RULES_COLUMN_FARE_ID_NUM)
self.fare_rules_df = pd.merge(left =self.fare_rules_df,
right =self.fare_ids_df,
how ="left")
else:
self.fare_ids_df = pd.DataFrame()
# optionally reverse those with origin/destinations if configured
from .Assignment import Assignment
if Assignment.FARE_ZONE_SYMMETRY:
FastTripsLogger.debug("applying FARE_ZONE_SYMMETRY to %d fare rules" % len(self.fare_rules_df))
# select only those with an origin and destination
reverse_fare_rules = self.fare_rules_df.loc[ pd.notnull(self.fare_rules_df[Route.FARE_RULES_COLUMN_ORIGIN_ID])&
pd.notnull(self.fare_rules_df[Route.FARE_RULES_COLUMN_DESTINATION_ID]) ].copy()
# FastTripsLogger.debug("reverse_fare_rules 1 head()=\n%s" % str(reverse_fare_rules.head()))
# reverse them
reverse_fare_rules.rename(columns={Route.FARE_RULES_COLUMN_ORIGIN_ID : Route.FARE_RULES_COLUMN_DESTINATION_ID,
Route.FARE_RULES_COLUMN_DESTINATION_ID : Route.FARE_RULES_COLUMN_ORIGIN_ID},
inplace=True)
# FastTripsLogger.debug("reverse_fare_rules 2 head()=\n%s" % str(reverse_fare_rules.head()))
# join them to eliminate dupes
reverse_fare_rules = pd.merge(left =reverse_fare_rules,
right =self.fare_rules_df,
how ="left",
on =[Route.FARE_RULES_COLUMN_FARE_ID,
Route.FARE_RULES_COLUMN_FARE_ID_NUM,
Route.FARE_RULES_COLUMN_ROUTE_ID,
Route.FARE_RULES_COLUMN_ORIGIN_ID,
Route.FARE_RULES_COLUMN_DESTINATION_ID,
Route.FARE_RULES_COLUMN_CONTAINS_ID],
indicator=True)
# dupes exist in both -- drop those
reverse_fare_rules = reverse_fare_rules.loc[ reverse_fare_rules["_merge"]=="left_only"]
reverse_fare_rules.drop(["_merge"], axis=1, inplace=True)
# add them to fare rules
self.fare_rules_df = pd.concat([self.fare_rules_df, reverse_fare_rules])
FastTripsLogger.debug("fare rules with symmetry %d head()=\n%s" % (len(self.fare_rules_df), str(self.fare_rules_df.head())))
# sort by fare ID num so zone-to-zone and their reverse are together
if len(self.fare_rules_df) > 0:
self.fare_rules_df.sort_values(by=[Route.FARE_RULES_COLUMN_FARE_ID_NUM], inplace=True)
fare_rules_ft_df = gtfs.get(Route.INPUT_FARE_PERIODS_FILE)
if not fare_rules_ft_df.empty:
# verify required columns are present
fare_rules_ft_cols = list(fare_rules_ft_df.columns.values)
assert(Route.FARE_RULES_COLUMN_FARE_ID in fare_rules_ft_cols)
assert(Route.FARE_RULES_COLUMN_FARE_PERIOD in fare_rules_ft_cols)
assert(Route.FARE_RULES_COLUMN_START_TIME in fare_rules_ft_cols)
assert(Route.FARE_RULES_COLUMN_END_TIME in fare_rules_ft_cols)
# Split fare classes so they don't overlap
fare_rules_ft_df = self.remove_fare_period_overlap(fare_rules_ft_df)
# join to fare rules dataframe
self.fare_rules_df = pd.merge(left=self.fare_rules_df, right=fare_rules_ft_df,
how='left',
on=Route.FARE_RULES_COLUMN_FARE_ID)
# add route id numbering if applicable
if Route.FARE_RULES_COLUMN_ROUTE_ID in list(self.fare_rules_df.columns.values):
self.fare_rules_df = self.add_numeric_route_id(self.fare_rules_df,
Route.FARE_RULES_COLUMN_ROUTE_ID,
Route.FARE_RULES_COLUMN_ROUTE_ID_NUM)
# add origin zone numbering if applicable
if (Route.FARE_RULES_COLUMN_ORIGIN_ID in list(self.fare_rules_df.columns.values)) and \
(pd.notnull(self.fare_rules_df[Route.FARE_RULES_COLUMN_ORIGIN_ID]).sum() > 0):
self.fare_rules_df = stops.add_numeric_stop_zone_id(self.fare_rules_df,
Route.FARE_RULES_COLUMN_ORIGIN_ID,
Route.FARE_RULES_COLUMN_ORIGIN_ID_NUM)
# add destination zone numbering if applicable
if (Route.FARE_RULES_COLUMN_DESTINATION_ID in list(self.fare_rules_df.columns.values)) and \
(pd.notnull(self.fare_rules_df[Route.FARE_RULES_COLUMN_DESTINATION_ID]).sum() > 0):
self.fare_rules_df = stops.add_numeric_stop_zone_id(self.fare_rules_df,
Route.FARE_RULES_COLUMN_DESTINATION_ID,
Route.FARE_RULES_COLUMN_DESTINATION_ID_NUM)
# origin_id and destination_id should either both be present or both be absent
# (having only one of the two is unlikely)
if Route.FARE_RULES_COLUMN_ORIGIN_ID not in list(self.fare_rules_df.columns.values):
error_str = "Fast-trips only supports both origin_id and destination_id or neither in fare rules"
FastTripsLogger.fatal(error_str)
raise NotImplementedError(error_str)
# check for each row, either both are present or neither -- use xor, or ^
xor_id = self.fare_rules_df.loc[
|
pd.isnull(self.fare_rules_df[Route.FARE_RULES_COLUMN_ORIGIN_ID])
|
pandas.isnull
|
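The blank above uses pandas.isnull to build the xor mask over origin/destination IDs. A self-contained sketch with hypothetical fare-rule rows (the column names follow the constants above, the values are invented):

import numpy as np
import pandas as pd

fare_rules = pd.DataFrame({
    "fare_id": ["a", "b", "c"],
    "origin_id": ["Z1", np.nan, "Z3"],
    "destination_id": [np.nan, np.nan, "Z4"],
})

# pd.isnull gives boolean masks; xor flags rows where exactly one of the two IDs is missing.
xor_mask = pd.isnull(fare_rules["origin_id"]) ^ pd.isnull(fare_rules["destination_id"])
print(fare_rules.loc[xor_mask])  # only fare "a" has exactly one missing ID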
import datetime
from .functions import read_json, aggregate_surveys_no_config
import glob
import json
import logging
import math
import numpy as np
import os
import pandas as pd
import pytz
from typing import List
def convert_time_to_date(submit_time, day, time):
"""
Takes a single array of timings and a single day
Args:
submit_time(datetime):
date in week for which we want to extract another date and time
day(int):
desired day of week
time(list):
List of survey timings (seconds of the day) from the configuration surveys information
"""
# Convert inputted desired day into an integer between 0 and 6
day = day % 7
# Get the days of the given week using the dow of the given submit day
dow = submit_time.weekday()
days = [submit_time + datetime.timedelta(days=i) for i in range(0 - dow, 7 - dow)]
time = [str(datetime.timedelta(seconds=t)) for t in time]
time = [t.split(':') for t in time]
time = [[int(p) for p in t] for t in time]
# Reset the time-of-day component of each day before applying the survey timings
# https://stackoverflow.com/questions/26882499/reset-time-part-of-a-pandas-timestamp
# print(time)
days = [d - pd.offsets.Micro(0) for d in days]
days = [[d.replace(hour=t[0], minute=t[1], second=t[2], microsecond=0) for t in time] for d in days]
return days[day]
def generate_survey_times(time_start, time_end, timings=[], survey_type='weekly'):
"""
Takes a start time and an end time and generates a schedule of all surveys sent in that time frame for the given survey type
Args:
time_start(str):
The first date for which we want to generate survey times
time_end(str):
The last date for which we want to generate survey times
timings(list):
list of survey timings, directly from the configuration file survey information
survey_type(str):
What type of survey schedule to generate times for
NOTE: As of now this only works for weekly surveys
Returns:
surveys(list):
A list of all survey times that occur between the time_start and time_end per the given survey timings schedule
"""
if survey_type not in ['weekly', 'absolute', 'relative']:
raise ValueError('Incorrect type of survey. Ensure this is weekly, absolute, or relative.')
# Get the number of weeks between start and end time
t_start = pd.Timestamp(time_start)
t_end = pd.Timestamp(time_end)
weeks = pd.Timedelta(t_end - t_start).days
# Get ceiling number of weeks
weeks = math.ceil(weeks / 7.0)
# Roll dates
t_lag = list(np.roll(np.array(timings, dtype="object"), -1))
# for each week, generate the survey times and append to a list
start_dates = [t_start + datetime.timedelta(days=7 * i) for i in range(weeks)]  # use the parsed Timestamp, not the raw string
surveys = []
for s in start_dates:
# Get the starting day of week
# dow_s = s.weekday()
for i, t in enumerate(t_lag):
if len(t) > 0:
surveys.extend(convert_time_to_date(s, day=i, time=t))
return surveys
def gen_survey_schedule(config_path, time_start, time_end, beiwe_ids):
"""
Args:
config_path(str):
File path to study configuration file
time_start(str):
The first date of the survey data
time_end(str):
The last date of the survey data
beiwe_ids(list):
List of users in study for which we are generating a survey schedule
Returns:
times_sur(DataFrame):
DataFrame with a line for every survey deployed to every user in the study for the given time range
"""
# List of surveys
surveys = read_json(config_path)['surveys']
# For each survey create a list of survey times
times_sur = []
for u_id in beiwe_ids:
for i, s in enumerate(surveys):
if s['timings']:
s_times = generate_survey_times(time_start, time_end, timings=s['timings'])
# Add in relative and absolute survey timings here
###
tbl = pd.DataFrame(s_times, columns=['delivery_time'])
# Create the "next" time column too, which indicates the next time the survey will be deployed
tbl['next_delivery_time'] = tbl.delivery_time.shift(-1)
tbl['id'] = i
tbl['beiwe_id'] = u_id
# Get all question IDs for the survey
qs = [q['question_id'] for q in s['content'] if 'question_id' in q.keys()]
if len(qs) > 0:
q_ids = pd.DataFrame({'question_id': qs})
tbl =
|
pd.merge(tbl, q_ids, how='cross')
|
pandas.merge
|
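The completion above is pandas.merge with how='cross' (pandas >= 1.2), pairing every delivery time with every question ID. A hedged, self-contained sketch with made-up values:

import pandas as pd

tbl = pd.DataFrame({"delivery_time": pd.to_datetime(["2021-01-04 18:00", "2021-01-11 18:00"])})
q_ids = pd.DataFrame({"question_id": ["q1", "q2"]})

# Cross merge: no join key, every row of tbl is combined with every row of q_ids.
tbl = pd.merge(tbl, q_ids, how="cross")
print(len(tbl))  # 2 delivery times x 2 questions = 4 rows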
import os
import collections
from os.path import join
import numpy as np
import pandas as pd
from itertools import chain
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import ShuffleSplit, GridSearchCV
from sklearn.metrics import (mean_absolute_error, mean_squared_error,
explained_variance_score, r2_score)
from ukbb_variables import (brain_dmri_fa, brain_dmri_icvf,
brain_dmri_isovf, brain_dmri_l1,
brain_dmri_l2, brain_dmri_l3,
brain_dmri_md, brain_dmri_mo,
brain_dmri_od, brain_smri_plus,
fluid_intelligence, neuroticism)
path_to_csv = '/storage/local/kdadi/work/rs_study/experiments/UKBB/ukb9543.csv'
path_to_matrices = '/storage/local/kdadi/work/data/UKBB/rfMRI_tangent_matrix_dim100'
path_to_merge_brain = '/storage/local/kdadi/work/rs_study/experiments/UKBB/para/roadmap/ukb_add1_merge_brain.csv'
X_iterate = zip([brain_dmri_fa, brain_dmri_icvf, brain_dmri_isovf, brain_dmri_l1,
brain_dmri_l2, brain_dmri_l3, brain_dmri_md, brain_dmri_mo,
brain_dmri_od, brain_smri_plus, fluid_intelligence, neuroticism],
['fa', 'icvf', 'isovf', 'l1', 'l2', 'l3', 'md', 'mo', 'od',
'smri', 'Fluid \n intelligence', 'Neuroticism'])
columns = []
for i in X_iterate:
columns.extend(i[0].keys())
columns.extend(['eid'])
ukbb = pd.read_csv(path_to_csv, usecols=['20016-2.0', 'eid',
'20127-0.0'])
y = ukbb[['eid', '20016-2.0']].dropna()
new_ukbb = pd.DataFrame(ukbb, index=y.index)
new_ukbb = new_ukbb.drop(columns=['20016-2.0'], errors='ignore')
# Random splitting of data to train our model
X_train, X_test, y_train, y_test = train_test_split(
new_ukbb, y, test_size=0.5, random_state=0)
X_train = X_train[['eid', '20127-0.0']].dropna()
X_test = X_test[['eid', '20127-0.0']].dropna()
merged_data =
|
pd.read_csv(path_to_merge_brain, usecols=columns)
|
pandas.read_csv
|
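The blank above is pandas.read_csv restricted to the needed columns via usecols. A fully self-contained sketch that stands in for the UKBB csv (the column names are taken from the snippet, the data is fabricated):

import io
import pandas as pd

csv_text = "eid,20016-2.0,20127-0.0,unused\n1,6.0,3.0,x\n2,7.5,4.0,y\n"
merged_data = pd.read_csv(io.StringIO(csv_text), usecols=["eid", "20016-2.0", "20127-0.0"])
print(merged_data.columns.tolist())  # the 'unused' column is never loaded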
import pandas as pd
import pybedtools as pbt
def read_variants(fns, remove=['DBSNP'], keep_only=True,
min_tumor_f=0.1, min_tumor_cov=14,
min_normal_cov=8):
"""Read muTect results from the list of files fns
Parameters
----------
fns : list
List of MuTect output files.
remove : list
    List of site types for column "dbsnp_site" to remove.
keep_only : boolean
    If True, only keep variants with 'KEEP' in "judgement" column.
    Otherwise, keep all variants.
min_tumor_f : float between 0 and 1
    Minimum tumor allelic fraction.
min_tumor_cov : int > 0
    Minimum coverage of the variant in the tumor.
min_normal_cov : int > 0
    Minimum coverage of the variant in the normal.
Returns
-------
variants : pandas.DataFrame
    Pandas DataFrame summarizing variant calling results.
"""
variants = []
for i, f in enumerate(fns):
# If keep_only, use awk to only grab those lines for big speedup.
if keep_only:
from numpy import dtype
import subprocess
res = subprocess.check_output(
'awk \'$35 == "KEEP"\' {}'.format(f), shell=True)
if res.strip() != '':
columns = [u'contig', u'position', u'context', u'ref_allele',
u'alt_allele', u'tumor_name', u'normal_name',
u'score', u'dbsnp_site', u'covered', u'power',
u'tumor_power', u'normal_power', u'total_pairs',
u'improper_pairs', u'map_Q0_reads', u't_lod_fstar',
u'tumor_f', u'contaminant_fraction',
u'contaminant_lod', u't_ref_count', u't_alt_count',
u't_ref_sum', u't_alt_sum', u't_ref_max_mapq',
u't_alt_max_mapq', u't_ins_count', u't_del_count',
u'normal_best_gt', u'init_n_lod', u'n_ref_count',
u'n_alt_count', u'n_ref_sum', u'n_alt_sum',
u'judgement']
tdf = pd.DataFrame(
[x.split('\t') for x in res.strip().split('\n')],
columns=columns)
tdf = tdf.convert_objects(convert_numeric=True)
else:
tdf = pd.DataFrame(columns=columns)
tdf['contig'] = tdf.contig.astype(object)
else:
tdf = pd.read_table(f, index_col=None, header=0, skiprows=1,
low_memory=False,
dtype={'contig':object})
for t in remove:
tdf = tdf[tdf.dbsnp_site != t]
tdf = tdf[tdf.tumor_f > min_tumor_f]
tdf = tdf[tdf.t_ref_count + tdf.t_alt_count > min_tumor_cov]
tdf = tdf[tdf.n_ref_count + tdf.n_alt_count > min_normal_cov]
variants.append(tdf)
variants =
|
pd.concat(variants)
|
pandas.concat
|
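The completion above is pandas.concat, stacking the per-file variant tables built in the loop. A minimal sketch with invented rows:

import pandas as pd

variants = [
    pd.DataFrame({"contig": ["1"], "position": [1000], "tumor_f": [0.25]}),
    pd.DataFrame({"contig": ["2"], "position": [2000], "tumor_f": [0.40]}),
]
variants = pd.concat(variants, ignore_index=True)
print(variants.shape)  # (2, 3): one row per input frame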
import json
import pandas as pd
import os
import time
configPath = os.path.join(os.getcwd(), 'config.json')
with open(configPath, 'r') as f:
config = json.load(f)
files = []
for (dirpath, dirnames, filenames) in os.walk(config['csvFolder']):
for f in filenames:
if f.lower().endswith(('.csv')):
files.append({ 'uri': os.path.join(dirpath, f), 'filename': f })
resultFile = None
baseCSV = pd.read_csv("/home/kelvin/Desktop/WCC-1.csv", skip_blank_lines=True)
baseCSV['Date'] = baseCSV['Date'] + ' ' + baseCSV['Time']
baseCSV['Date'] = pd.to_datetime(baseCSV['Date'])
baseCSV = baseCSV.set_index('Date')
# print(baseCSV)
resultFile = baseCSV
for csv in files:
print(csv)
csvConfig = config['defaultSettings']
if csv['filename'] in config['csvFiles']:
csvConfig = config['csvFiles'][csv['filename']]
df = pd.read_csv(csv['uri'], skip_blank_lines=True)
df['Date'] = df['Report Timings:'] + ' ' + df['All Hours']
df['Date'] =
|
pd.to_datetime(df['Date'], format='%d/%m/%Y %H:%M:%S')
|
pandas.to_datetime
|
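The blank above is pandas.to_datetime with an explicit day-first format string. A small self-contained sketch (the dates are made up):

import pandas as pd

df = pd.DataFrame({"Date": ["01/02/2020 13:30:00", "02/02/2020 14:00:00"]})
df["Date"] = pd.to_datetime(df["Date"], format="%d/%m/%Y %H:%M:%S")
print(df["Date"].dt.month.tolist())  # [2, 2] -- parsed as 1 Feb and 2 Feb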
# This file considers the control group of South Horizons & Lei Tung , Ocean Park & Wong Chuk Hang as
# two areas
import pandas as pd
import numpy as np
import csv
import os
from collections import Counter
from statsmodels.stats.outliers_influence import variance_inflation_factor
import pytz
from datetime import datetime
from dateutil.relativedelta import relativedelta
import data_paths
from transit_non_transit_comparision.before_and_after_final_tpu import pos_percent_minus_neg_percent
import utils
# statistics
import statsmodels.formula.api as smf
# Hong Kong and Shanghai share the same time zone.
# Hence, we transform the utc time in our dataset into Shanghai time
time_zone_hk = pytz.timezone('Asia/Shanghai')
october_1_start = datetime(2016, 10, 1, 0, 0, 0, tzinfo=time_zone_hk)
october_31_end = datetime(2016, 10, 31, 23, 59, 59, tzinfo=time_zone_hk)
december_1_start = datetime(2016, 12, 1, 0, 0, 0, tzinfo=time_zone_hk)
december_31_end = datetime(2016, 12, 31, 23, 59, 59, tzinfo=time_zone_hk)
## TPU selection before the first review
# kwun_tong_line_treatment_tpu_set = {'243', '245', '236', '213'}
# kwun_tong_line_control_tpu_set = {'247', '234', '242', '212', '235'}
# south_horizons_lei_tung_treatment_tpu_set = {'174'}
# south_horizons_lei_tung_control_tpu_set = {'172', '182'}
# ocean_park_wong_chuk_hang_treatment_tpu_set = {'175'}
# ocean_park_wong_chuk_hang_control_tpu_set = {'184', '183', '182'}
kwun_tong_line_treatment_tpu_set = {'236', '243', '245'}
kwun_tong_line_control_tpu_set = {'247', '234', '242', '212', '235'}
south_horizons_lei_tung_treatment_tpu_set = {'174'}
south_horizons_lei_tung_control_tpu_set = {'172', '181', '182'}
ocean_park_wong_chuk_hang_treatment_tpu_set = {'175', '176'}
ocean_park_wong_chuk_hang_control_tpu_set = {'184', '183', '182', '181'}
kwun_tong_line_treatment_tpu_group_set = {'236', '243', '245'}
kwun_tong_line_control_tpu_group_set = {'247', '234', '242', '212', '235'}
south_horizons_treatment_tpu_group_set = {'174'}
south_horizons_control_tpu_group_set = {'172', '181 - 182'}
ocean_park_treatment_tpu_group_set = {'175 - 176'}
ocean_park_control_tpu_group_set = {'181 - 182', '183 - 184'}
class StudyArea(object):
def __init__(self, area_name, open_month):
assert open_month in ['Oct', 'Dec'], "The open month should be either 'Oct' or 'Dec'."
self.area_name = area_name
if open_month == 'Oct':
self.open_start_date = october_1_start
self.open_end_date = october_31_end
else:
self.open_start_date = december_1_start
self.open_end_date = december_31_end
def transform_string_time_to_datetime(string):
"""
:param string: the string which records the time of the posted tweets
:return: a datetime object which could get access to the year, month, day easily
"""
assert isinstance(string, str)
datetime_object = datetime.strptime(string, '%Y-%m-%d %H:%M:%S+08:00')
final_time_object = datetime_object.replace(tzinfo=time_zone_hk)
return final_time_object
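# Hedged illustration (not part of the original script): tweet timestamps look like
# '2016-10-23 12:01:30+08:00'; the helper parses them and re-attaches the HK time zone.
_example_time = transform_string_time_to_datetime('2016-10-23 12:01:30+08:00')
# _example_time.year == 2016 and _example_time.tzinfo is time_zone_hk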
def compute_corr(dataframe: pd.DataFrame, select_column_list: list, corr_method: str = 'spearman'):
"""
Compute the correlation between variables in a pandas dataframe
:param dataframe: a pandas dataframe saving the dependent and independent variables
:param select_column_list: a list saving the considered column names
:param corr_method: the correlation method. In this study, the default is set to 'spearman'
:return: a pandas dataframe presenting the correlation coefficient between the interested variables
"""
dataframe_copy = dataframe.copy()
if 'T_Post' not in dataframe_copy:
dataframe_copy['T_Post'] = dataframe_copy.apply(lambda row: row['Treatment'] * row['Post'], axis=1)
else:
pass
select_column_list.append('T_Post')
dataframe_select = dataframe_copy[select_column_list]
return dataframe_select.corr(method=corr_method).round(2)
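# Hedged usage sketch (invented data, not from the study): compute_corr expects the DID
# frame to already carry 'Treatment' and 'Post'; it adds the T_Post interaction itself.
_did_demo = pd.DataFrame({'Treatment': [0, 0, 1, 1, 0, 1],
                          'Post': [0, 1, 0, 1, 1, 0],
                          'Sentiment': [0.10, 0.20, 0.15, 0.40, 0.25, 0.10]})
print(compute_corr(_did_demo, ['Treatment', 'Post', 'Sentiment']))  # 4x4 Spearman matrix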
def compute_vif(dataframe, select_column_list):
"""
Compute the Variance Inflation Factor (VIF) between interested variables
:param dataframe: a pandas dataframe saving the dependent and independent variables
:param select_column_list: a list saving the considered column names
:return: a pandas dataframe saving the VIF values between the interested variables
"""
dataframe_copy = dataframe.copy()
if 'T_Post' not in dataframe_copy:
dataframe_copy['T_Post'] = dataframe_copy.apply(lambda row: row['Treatment'] * row['Post'], axis=1)
else:
pass
select_column_list.append('T_Post')
X = dataframe_copy[select_column_list]
# VIF dataframe
vif_data = pd.DataFrame()
vif_data["feature"] = X.columns
# calculating VIF for each feature
vif_data["VIF"] = [variance_inflation_factor(X.values, i) for i in range(len(X.columns))]
return vif_data
def add_post_variable(string, opening_start_date, opening_end_date, check_window=0):
"""
Add the value of the POST variable in the DID analysis.
In this case, we assume that the introduction of MTR stations immediately changes the tweet sentiment and tweet
activity
:param string: the time string
:param opening_start_date: the opening date of the studied station
:param opening_end_date: the end date of the opening month of the studied station
:param check_window: the month window size used to check the temporal effect of the studied station
:return: the post variable based on the time of one tweet
"""
time_object = transform_string_time_to_datetime(string)
if check_window == 0:
if time_object > opening_end_date:
return 1
elif time_object < opening_start_date:
return 0
else:
return 'not considered'
else:
left_time_range = opening_start_date - relativedelta(months=check_window)
right_time_range = opening_end_date + relativedelta(months=check_window)
if left_time_range <= time_object < opening_start_date:
return 0
elif opening_end_date < time_object <= right_time_range:
return 1
else:
return 'not considered'
def add_post_variable_lag_effect(string, opening_start_date, opening_end_date, lag_effect_month=0):
"""
Add the value of the POST variable in the DID analysis (Consider the lag effect)
In this case, we believe that the operation of MTR stations does not immediately change the tweet sentiment and
tweet activity. The lag effect exists.
:param string: the time string
:param opening_start_date: the opening date of the studied station
:param opening_end_date: the end date of the opening month of the studied station
:param lag_effect_month: the number of lag effect months
:return: the post variable based on the time of one tweet
"""
time_object = transform_string_time_to_datetime(string)
if lag_effect_month == np.inf:
if time_object > opening_end_date:
return 1
elif time_object < opening_start_date:
return 0
else:
return 'not considered'
else:
left_time_range = opening_start_date - relativedelta(months=12)
right_time_range = opening_end_date + relativedelta(months=lag_effect_month + 12)
opening_end_date = opening_end_date + relativedelta(months=lag_effect_month)
if left_time_range <= time_object < opening_start_date:
return 0
elif opening_end_date < time_object <= right_time_range:
return 1
else:
return 'not considered'
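# Hedged sanity check (dates invented, not from the dataset): with the default window,
# tweets before the opening month code Post = 0, tweets after it code Post = 1, and
# tweets inside the opening month are marked 'not considered' and dropped later.
assert add_post_variable('2016-09-15 10:00:00+08:00', october_1_start, october_31_end) == 0
assert add_post_variable('2016-11-15 10:00:00+08:00', october_1_start, october_31_end) == 1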
def get_population_one_area_combined(dataframe: pd.DataFrame, treatment_census_data, control_census_data):
"""
Get the population data based on treatment and control setting
:param dataframe: a pandas dataframe containing the data for DID analysis
:param treatment_census_data: the census dataframe for treatment area
:param control_census_data: the census dataframe for control area
:return: dataframe containing the population
"""
assert 'T_i_t' in dataframe, 'The dataframe should have treatment and control indicator'
assert 'Post' in dataframe, 'The dataframe should have post or not indicator'
dataframe_copy = dataframe.copy()
population_list = []
for index, row in dataframe_copy.iterrows():
if row['T_i_t'] == 1 and row['Post'] == 1:
select_columns = [col for col in treatment_census_data if '2016' in col] + ['SmallTPU']
select_dataframe = treatment_census_data[select_columns]
population_list.append(sum(select_dataframe['population_2016']))
elif row['T_i_t'] == 1 and row['Post'] == 0:
select_columns = [col for col in treatment_census_data if '2011' in col] + ['SmallTPU']
select_dataframe = treatment_census_data[select_columns]
population_list.append(sum(select_dataframe['population_2011']))
elif row['T_i_t'] == 0 and row['Post'] == 1:
select_columns = [col for col in control_census_data if '2016' in col] + ['SmallTPU']
select_dataframe = control_census_data[select_columns]
population_list.append(sum(select_dataframe['population_2016']))
elif row['T_i_t'] == 0 and row['Post'] == 0:
select_columns = [col for col in control_census_data if '2011' in col] + ['SmallTPU']
select_dataframe = control_census_data[select_columns]
population_list.append(sum(select_dataframe['population_2011']))
else:
raise ValueError('Something wrong with the T and Post variables...')
dataframe_copy['Population'] = population_list
dataframe_copy['Population_log'] = dataframe_copy.apply(lambda row: np.log(row['Population']), axis=1)
return dataframe_copy
def get_population_one_area_seperate(dataframe: pd.DataFrame, census_dict: dict):
"""
Get the population data based on treatment and control setting
:param dataframe: a pandas dataframe containing the data for DID analysis
:param census_dict: dictionary saving the population and median income information
:return: dataframe containing the population
"""
assert 'T_i_t' in dataframe, 'The dataframe should have treatment and control indicator'
dataframe['Population_log'] = dataframe.apply(lambda row: np.log(census_dict[row['SmallTPU']][0]), axis=1)
return dataframe
def get_median_income_one_area_combined(dataframe: pd.DataFrame, treatment_census_data, control_census_data):
"""
Get the median income data based on treatment and control setting
:param dataframe: a pandas dataframe containing the data for DID analysis
:param treatment_census_data: the census dataframe for treatment area
:param control_census_data: the census dataframe for control area
:return: dataframe containing the median income
"""
assert 'T_i_t' in dataframe, 'The dataframe should have treatment and control indicator'
assert 'Post' in dataframe, 'The dataframe should have post or not indicator'
dataframe_copy = dataframe.copy()
median_income_list = []
for index, row in dataframe_copy.iterrows():
if row['T_i_t'] == 1 and row['Post'] == 1:
select_columns = [col for col in treatment_census_data if '2016' in col] + ['SmallTPU']
select_dataframe = treatment_census_data[select_columns]
median_income_list.append(utils.weighted_average(group=select_dataframe,
value_col='m_income_2016',
weight_col='population_2016'))
elif row['T_i_t'] == 1 and row['Post'] == 0:
select_columns = [col for col in treatment_census_data if '2011' in col] + ['SmallTPU']
select_dataframe = treatment_census_data[select_columns]
median_income_list.append(utils.weighted_average(group=select_dataframe,
value_col='m_income_2011',
weight_col='population_2011'))
elif row['T_i_t'] == 0 and row['Post'] == 1:
select_columns = [col for col in control_census_data if '2016' in col] + ['SmallTPU']
select_dataframe = control_census_data[select_columns]
median_income_list.append(utils.weighted_average(group=select_dataframe,
value_col='m_income_2016',
weight_col='population_2016'))
elif row['T_i_t'] == 0 and row['Post'] == 0:
select_columns = [col for col in control_census_data if '2011' in col] + ['SmallTPU']
select_dataframe = control_census_data[select_columns]
median_income_list.append(utils.weighted_average(group=select_dataframe,
value_col='m_income_2011',
weight_col='population_2011'))
else:
raise ValueError('Something wrong with the T and Post variables...')
dataframe_copy['Median_Income'] = median_income_list
dataframe_copy['Median_Income_log'] = dataframe_copy.apply(lambda row: np.log(row['Median_Income']), axis=1)
return dataframe_copy
def get_median_income_one_area_seperate(dataframe: pd.DataFrame, census_dict: dict):
"""
Get the median income data based on treatment and control setting
:param dataframe: a pandas dataframe containing the data for DID analysis
:param census_dict: dictionary saving the population and median income information
:return: dataframe containing the median income
"""
assert 'T_i_t' in dataframe, 'The dataframe should have treatment and control indicator'
dataframe['Median_Income_log'] = dataframe.apply(lambda row: np.log(census_dict[row['SmallTPU']][1]), axis=1)
return dataframe
def get_population_three_areas_combined(dataframe: pd.DataFrame, tpu_info_dataframe: pd.DataFrame):
"""
Get the population data based on treatment and control setting
:param dataframe: a pandas dataframe containing the data for DID analysis
:param tpu_info_dataframe: a pandas dataframe saving the TPU info information
:return: dataframe containing the population
"""
result_population_list = []
dataframe_copy = dataframe.copy()
for index, row in dataframe_copy.iterrows():
if (row['T_i_t'] == 1) and (row['Area_name'] == 'kwun_tong') and (row['Post'] == 0):
select_columns = [col for col in tpu_info_dataframe if '2011' in col] + ['SmallTPU']
select_dataframe = tpu_info_dataframe[select_columns]
select_rows = select_dataframe.loc[select_dataframe['SmallTPU'].isin(
kwun_tong_line_treatment_tpu_group_set)]
result_population_list.append(sum(select_rows['population_2011']))
elif (row['T_i_t'] == 1) and (row['Area_name'] == 'kwun_tong') and (row['Post'] == 1):
select_columns = [col for col in tpu_info_dataframe if '2016' in col] + ['SmallTPU']
select_dataframe = tpu_info_dataframe[select_columns]
select_rows = select_dataframe.loc[select_dataframe['SmallTPU'].isin(
kwun_tong_line_treatment_tpu_group_set)]
result_population_list.append(sum(select_rows['population_2016']))
elif (row['T_i_t'] == 0) and (row['Area_name'] == 'kwun_tong') and (row['Post'] == 0):
select_columns = [col for col in tpu_info_dataframe if '2011' in col] + ['SmallTPU']
select_dataframe = tpu_info_dataframe[select_columns]
select_rows = select_dataframe.loc[select_dataframe['SmallTPU'].isin(kwun_tong_line_control_tpu_group_set)]
result_population_list.append(sum(select_rows['population_2011']))
elif (row['T_i_t'] == 0) and (row['Area_name'] == 'kwun_tong') and (row['Post'] == 1):
select_columns = [col for col in tpu_info_dataframe if '2016' in col] + ['SmallTPU']
select_dataframe = tpu_info_dataframe[select_columns]
select_rows = select_dataframe.loc[select_dataframe['SmallTPU'].isin(kwun_tong_line_control_tpu_group_set)]
result_population_list.append(sum(select_rows['population_2016']))
elif (row['T_i_t'] == 1) and (row['Area_name'] == 'south_horizons') and (row['Post'] == 0):
select_columns = [col for col in tpu_info_dataframe if '2011' in col] + ['SmallTPU']
select_dataframe = tpu_info_dataframe[select_columns]
select_rows = select_dataframe.loc[
select_dataframe['SmallTPU'].isin(south_horizons_treatment_tpu_group_set)]
result_population_list.append(sum(select_rows['population_2011']))
elif (row['T_i_t'] == 1) and (row['Area_name'] == 'south_horizons') and (row['Post'] == 1):
select_columns = [col for col in tpu_info_dataframe if '2016' in col] + ['SmallTPU']
select_dataframe = tpu_info_dataframe[select_columns]
select_rows = select_dataframe.loc[
select_dataframe['SmallTPU'].isin(south_horizons_treatment_tpu_group_set)]
result_population_list.append(sum(select_rows['population_2016']))
elif (row['T_i_t'] == 0) and (row['Area_name'] == 'south_horizons') and (row['Post'] == 0):
select_columns = [col for col in tpu_info_dataframe if '2011' in col] + ['SmallTPU']
select_dataframe = tpu_info_dataframe[select_columns]
select_rows = select_dataframe.loc[select_dataframe['SmallTPU'].isin(south_horizons_control_tpu_group_set)]
result_population_list.append(sum(select_rows['population_2011']))
elif (row['T_i_t'] == 0) and (row['Area_name'] == 'south_horizons') and (row['Post'] == 1):
select_columns = [col for col in tpu_info_dataframe if '2016' in col] + ['SmallTPU']
select_dataframe = tpu_info_dataframe[select_columns]
select_rows = select_dataframe.loc[select_dataframe['SmallTPU'].isin(south_horizons_control_tpu_group_set)]
result_population_list.append(sum(select_rows['population_2016']))
elif (row['T_i_t'] == 1) and (row['Area_name'] == 'ocean_park') and (row['Post'] == 0):
select_columns = [col for col in tpu_info_dataframe if '2011' in col] + ['SmallTPU']
select_dataframe = tpu_info_dataframe[select_columns]
select_rows = select_dataframe.loc[select_dataframe['SmallTPU'].isin(ocean_park_treatment_tpu_group_set)]
result_population_list.append(sum(select_rows['population_2011']))
elif (row['T_i_t'] == 1) and (row['Area_name'] == 'ocean_park') and (row['Post'] == 1):
select_columns = [col for col in tpu_info_dataframe if '2016' in col] + ['SmallTPU']
select_dataframe = tpu_info_dataframe[select_columns]
select_rows = select_dataframe.loc[select_dataframe['SmallTPU'].isin(ocean_park_treatment_tpu_group_set)]
result_population_list.append(sum(select_rows['population_2016']))
elif (row['T_i_t'] == 0) and (row['Area_name'] == 'ocean_park') and (row['Post'] == 0):
select_columns = [col for col in tpu_info_dataframe if '2011' in col] + ['SmallTPU']
select_dataframe = tpu_info_dataframe[select_columns]
select_rows = select_dataframe.loc[select_dataframe['SmallTPU'].isin(ocean_park_control_tpu_group_set)]
result_population_list.append(sum(select_rows['population_2011']))
elif (row['T_i_t'] == 0) and (row['Area_name'] == 'ocean_park') and (row['Post'] == 1):
select_columns = [col for col in tpu_info_dataframe if '2016' in col] + ['SmallTPU']
select_dataframe = tpu_info_dataframe[select_columns]
select_rows = select_dataframe.loc[select_dataframe['SmallTPU'].isin(ocean_park_control_tpu_group_set)]
result_population_list.append(sum(select_rows['population_2016']))
else:
raise ValueError('Something wrong with the area name...')
dataframe_copy['Population'] = result_population_list
dataframe_copy['Population_log'] = dataframe_copy.apply(lambda row: np.log(row['Population']), axis=1)
return dataframe_copy
def get_median_income_three_areas_combined(dataframe: pd.DataFrame, tpu_info_dataframe: pd.DataFrame):
"""
Get the median income data based on treatment and control setting
:param dataframe: a pandas dataframe containing the data for DID analysis
:param tpu_info_dataframe: the pandas dataframe for tpu census data
:return: dataframe containing the median income
"""
median_income_list = []
dataframe_copy = dataframe.copy()
for index, row in dataframe_copy.iterrows():
if (row['T_i_t'] == 1) and (row['Area_name'] == 'kwun_tong') and (row['Post'] == 0):
select_columns = [col for col in tpu_info_dataframe if '2011' in col] + ['SmallTPU']
select_dataframe = tpu_info_dataframe[select_columns]
select_rows = select_dataframe.loc[
select_dataframe['SmallTPU'].isin(kwun_tong_line_treatment_tpu_group_set)]
median_income_list.append(utils.weighted_average(select_rows,
value_col='m_income_2011', weight_col='population_2011'))
elif (row['T_i_t'] == 1) and (row['Area_name'] == 'kwun_tong') and (row['Post'] == 1):
select_columns = [col for col in tpu_info_dataframe if '2016' in col] + ['SmallTPU']
select_dataframe = tpu_info_dataframe[select_columns]
select_rows = select_dataframe.loc[
select_dataframe['SmallTPU'].isin(kwun_tong_line_treatment_tpu_group_set)]
median_income_list.append(utils.weighted_average(select_rows,
value_col='m_income_2016', weight_col='population_2016'))
elif (row['T_i_t'] == 0) and (row['Area_name'] == 'kwun_tong') and (row['Post'] == 0):
select_columns = [col for col in tpu_info_dataframe if '2011' in col] + ['SmallTPU']
select_dataframe = tpu_info_dataframe[select_columns]
select_rows = select_dataframe.loc[select_dataframe['SmallTPU'].isin(kwun_tong_line_control_tpu_group_set)]
median_income_list.append(utils.weighted_average(select_rows,
value_col='m_income_2011', weight_col='population_2011'))
elif (row['T_i_t'] == 0) and (row['Area_name'] == 'kwun_tong') and (row['Post'] == 1):
select_columns = [col for col in tpu_info_dataframe if '2016' in col] + ['SmallTPU']
select_dataframe = tpu_info_dataframe[select_columns]
select_rows = select_dataframe.loc[select_dataframe['SmallTPU'].isin(kwun_tong_line_control_tpu_group_set)]
median_income_list.append(utils.weighted_average(select_rows,
value_col='m_income_2016', weight_col='population_2016'))
elif (row['T_i_t'] == 1) and (row['Area_name'] == 'south_horizons') and (row['Post'] == 0):
select_columns = [col for col in tpu_info_dataframe if '2011' in col] + ['SmallTPU']
select_dataframe = tpu_info_dataframe[select_columns]
select_rows = select_dataframe.loc[
select_dataframe['SmallTPU'].isin(south_horizons_treatment_tpu_group_set)]
median_income_list.append(utils.weighted_average(select_rows,
value_col='m_income_2011', weight_col='population_2011'))
elif (row['T_i_t'] == 1) and (row['Area_name'] == 'south_horizons') and (row['Post'] == 1):
select_columns = [col for col in tpu_info_dataframe if '2016' in col] + ['SmallTPU']
select_dataframe = tpu_info_dataframe[select_columns]
select_rows = select_dataframe.loc[
select_dataframe['SmallTPU'].isin(south_horizons_treatment_tpu_group_set)]
median_income_list.append(utils.weighted_average(select_rows,
value_col='m_income_2016', weight_col='population_2016'))
elif (row['T_i_t'] == 0) and (row['Area_name'] == 'south_horizons') and (row['Post'] == 0):
select_columns = [col for col in tpu_info_dataframe if '2011' in col] + ['SmallTPU']
select_dataframe = tpu_info_dataframe[select_columns]
select_rows = select_dataframe.loc[select_dataframe['SmallTPU'].isin(south_horizons_control_tpu_group_set)]
median_income_list.append(utils.weighted_average(select_rows,
value_col='m_income_2011', weight_col='population_2011'))
elif (row['T_i_t'] == 0) and (row['Area_name'] == 'south_horizons') and (row['Post'] == 1):
select_columns = [col for col in tpu_info_dataframe if '2016' in col] + ['SmallTPU']
select_dataframe = tpu_info_dataframe[select_columns]
select_rows = select_dataframe.loc[select_dataframe['SmallTPU'].isin(south_horizons_control_tpu_group_set)]
median_income_list.append(utils.weighted_average(select_rows,
value_col='m_income_2016', weight_col='population_2016'))
elif (row['T_i_t'] == 1) and (row['Area_name'] == 'ocean_park') and (row['Post'] == 0):
select_columns = [col for col in tpu_info_dataframe if '2011' in col] + ['SmallTPU']
select_dataframe = tpu_info_dataframe[select_columns]
select_rows = select_dataframe.loc[select_dataframe['SmallTPU'].isin(ocean_park_treatment_tpu_group_set)]
median_income_list.append(utils.weighted_average(select_rows,
value_col='m_income_2011', weight_col='population_2011'))
elif (row['T_i_t'] == 1) and (row['Area_name'] == 'ocean_park') and (row['Post'] == 1):
select_columns = [col for col in tpu_info_dataframe if '2016' in col] + ['SmallTPU']
select_dataframe = tpu_info_dataframe[select_columns]
select_rows = select_dataframe.loc[select_dataframe['SmallTPU'].isin(ocean_park_treatment_tpu_group_set)]
median_income_list.append(utils.weighted_average(select_rows,
value_col='m_income_2016', weight_col='population_2016'))
elif (row['T_i_t'] == 0) and (row['Area_name'] == 'ocean_park') and (row['Post'] == 0):
select_columns = [col for col in tpu_info_dataframe if '2011' in col] + ['SmallTPU']
select_dataframe = tpu_info_dataframe[select_columns]
select_rows = select_dataframe.loc[select_dataframe['SmallTPU'].isin(ocean_park_control_tpu_group_set)]
median_income_list.append(utils.weighted_average(select_rows,
value_col='m_income_2011', weight_col='population_2011'))
elif (row['T_i_t'] == 0) and (row['Area_name'] == 'ocean_park') and (row['Post'] == 1):
select_columns = [col for col in tpu_info_dataframe if '2016' in col] + ['SmallTPU']
select_dataframe = tpu_info_dataframe[select_columns]
select_rows = select_dataframe.loc[select_dataframe['SmallTPU'].isin(ocean_park_control_tpu_group_set)]
median_income_list.append(utils.weighted_average(select_rows,
value_col='m_income_2016', weight_col='population_2016'))
else:
raise ValueError('Something wrong with the area name...')
dataframe_copy['Median_Income'] = median_income_list
dataframe_copy['Median_Income_log'] = dataframe_copy.apply(lambda row: np.log(row['Median_Income']), axis=1)
return dataframe_copy
def build_dataframe_based_on_set(datapath: str, tpu_set: set, selected_user_set: set):
"""
Build the dataframes based on the given tpu set
:param datapath: the datapath saving the tweets posted in each tpu
:param tpu_set: a python set saving the considered tpu names
:param selected_user_set: a set containing the id of users we are interested in
:return: a pandas dataframe saving the tweets posted in the considered tpus
"""
tpu_name_list = []
dataframe_list = []
for tpu in tpu_set:
tpu_name_list.append(tpu)
dataframe = pd.read_csv(os.path.join(datapath, tpu, tpu + '_data.csv'), encoding='utf-8', dtype='str',
quoting=csv.QUOTE_NONNUMERIC)
dataframe['user_id_str'] = dataframe.apply(lambda row: np.int64(float(row['user_id_str'])), axis=1)
dataframe_select = dataframe.loc[dataframe['user_id_str'].isin(selected_user_set)]
dataframe_list.append(dataframe_select)
combined_dataframe = pd.concat(dataframe_list, axis=0)
return combined_dataframe
def build_regress_data_three_areas_combined(kwun_tong_treatment, kwun_tong_control, south_horizons_treatment,
south_horizons_control, ocean_park_treatment, ocean_park_control,
tpu_info_dataframe, check_window_value=0, consider_lag_effect=True):
"""
Build dataframes for the combined DID analysis based on treatment & control dataframes of each station
:param kwun_tong_treatment: the dataframe saving tweets for kwun tong treatment area
:param kwun_tong_control: the dataframe saving tweets for kwun tong control area
:param south_horizons_treatment: the dataframe saving tweets for south horizons treatment area
:param south_horizons_control: the dataframe saving tweets for south horizons control area
:param ocean_park_treatment: the dataframe saving tweets for ocean park treatment area
:param ocean_park_control: the dataframe saving tweets for ocean park control area
:param tpu_info_dataframe: the dataframe saving the census data for each tpu setting
:param check_window_value: the month window we consider when doing the DID analysis
:param consider_lag_effect: whether consider the lag effect or not
:return: a combined dataframe which could be used for combined DID analysis
"""
result_dataframe = pd.DataFrame()
# build the treatment control binary variable
kwun_tong_treatment['T_i_t'] = [1] * kwun_tong_treatment.shape[0]
kwun_tong_treatment['Area_num'] = [1] * kwun_tong_treatment.shape[0]
kwun_tong_treatment['Area_name'] = ['kwun_tong'] * kwun_tong_treatment.shape[0]
kwun_tong_control['T_i_t'] = [0] * kwun_tong_control.shape[0]
kwun_tong_control['Area_num'] = [2] * kwun_tong_control.shape[0]
kwun_tong_control['Area_name'] = ['kwun_tong'] * kwun_tong_control.shape[0]
south_horizons_treatment['T_i_t'] = [1] * south_horizons_treatment.shape[0]
south_horizons_treatment['Area_num'] = [3] * south_horizons_treatment.shape[0]
south_horizons_treatment['Area_name'] = ['south_horizons'] * south_horizons_treatment.shape[0]
south_horizons_control['T_i_t'] = [0] * south_horizons_control.shape[0]
south_horizons_control['Area_num'] = [4] * south_horizons_control.shape[0]
south_horizons_control['Area_name'] = ['south_horizons'] * south_horizons_control.shape[0]
ocean_park_treatment['T_i_t'] = [1] * ocean_park_treatment.shape[0]
ocean_park_treatment['Area_num'] = [5] * ocean_park_treatment.shape[0]
ocean_park_treatment['Area_name'] = ['ocean_park'] * ocean_park_treatment.shape[0]
ocean_park_control['T_i_t'] = [0] * ocean_park_control.shape[0]
ocean_park_control['Area_num'] = [6] * ocean_park_control.shape[0]
ocean_park_control['Area_name'] = ['ocean_park'] * ocean_park_control.shape[0]
# add the post variable
dataframe_list = [kwun_tong_treatment, kwun_tong_control, south_horizons_treatment,
south_horizons_control, ocean_park_treatment, ocean_park_control]
start_months = ['Oct', 'Oct', 'Dec', 'Dec', 'Dec', 'Dec']
area_names = ['Whampoa Treatment', 'Whampoa Control',
'South Horizons Treatment', 'South Horizons Control',
'Ocean Park Treatment', 'Ocean Park Control']
for area_name, tweet_dataframe, start_month in zip(area_names, dataframe_list, start_months):
study_obj = StudyArea(area_name=area_name, open_month=start_month)
print('Adding the Post variable for {}'.format(study_obj.area_name))
if consider_lag_effect:
tweet_dataframe['Post'] = tweet_dataframe.apply(
lambda row_lag: add_post_variable_lag_effect(row_lag['hk_time'],
opening_start_date=study_obj.open_start_date,
opening_end_date=study_obj.open_end_date,
lag_effect_month=check_window_value), axis=1)
else:
tweet_dataframe['Post'] = tweet_dataframe.apply(
lambda row: add_post_variable(row['hk_time'],
opening_start_date=study_obj.open_start_date,
opening_end_date=study_obj.open_end_date,
check_window=check_window_value), axis=1)
# Create the tweet dataframe containing the tweets with year_month information
combined_dataframe = pd.concat(dataframe_list, axis=0, sort=True)
combined_dataframe = combined_dataframe.reset_index(drop=True)
combined_dataframe_without_not_considered = combined_dataframe.loc[combined_dataframe['Post'] != 'not considered']
combined_data_copy = combined_dataframe_without_not_considered.copy()
combined_data_copy['month_plus_year'] = combined_data_copy.apply(
lambda row: str(int(float(row['year']))) + '_' + str(int(float(row['month']))), axis=1)
combined_data_copy['sentiment_vader_percent'] = combined_data_copy.apply(
lambda row: int(float(row['sentiment_vader_percent'])), axis=1)
# Construct the data for the difference in difference analysis
result_dataframe_copy = result_dataframe.copy()
area_name_list = []
time_list = []
t_i_t_list = []
post_list = []
sentiment_list = []
positive_list, neutral_list, negative_list = [], [], []
sentiment_dict = {}
pos_dict, neutral_dict, neg_dict = {}, {}, {}
for _, dataframe in combined_data_copy.groupby(['month_plus_year', 'T_i_t', 'Post', 'Area_name']):
time = str(list(dataframe['month_plus_year'])[0])
t_i_t = str(list(dataframe['T_i_t'])[0])
post = str(list(dataframe['Post'])[0])
area_name = list(dataframe['Area_name'])[0]
sentiment_dict[time + '+' + t_i_t + '+' + post + '+' + area_name] = pos_percent_minus_neg_percent(dataframe)
pos_dict[time + '+' + t_i_t + '+' + post + '+' + area_name] = dataframe.loc[
dataframe['sentiment_vader_percent'] == 2].shape[0]
neutral_dict[time + '+' + t_i_t + '+' + post + '+' + area_name] = dataframe.loc[
dataframe['sentiment_vader_percent'] == 1].shape[0]
neg_dict[time + '+' + t_i_t + '+' + post + '+' + area_name] = dataframe.loc[
dataframe['sentiment_vader_percent'] == 0].shape[0]
for key in list(sentiment_dict.keys()):
# don't consider the tweets posted in 2016_10(for Whampoa and Ho Man Tin) or 2016_12(for other stations)
info_list = key.split('+')
if info_list[0] not in ['2016_10', '2016_12']:
time_list.append(info_list[0])
t_i_t_list.append(int(info_list[1]))
post_list.append(int(info_list[2]))
area_name_list.append(info_list[3])
sentiment_list.append(sentiment_dict[key])
positive_list.append(pos_dict[key])
neutral_list.append(neutral_dict[key])
negative_list.append(neg_dict[key])
result_dataframe_copy['Time'] = time_list
result_dataframe_copy['T_i_t'] = t_i_t_list
result_dataframe_copy['Area_name'] = area_name_list
result_dataframe_copy['Post'] = post_list
result_dataframe_copy['Sentiment'] = sentiment_list
result_dataframe_copy['Positive_count'] = positive_list
result_dataframe_copy['Neutral_count'] = neutral_list
result_dataframe_copy['Negative_count'] = negative_list
result_dataframe_copy['Activity'] = result_dataframe_copy['Positive_count'] + \
result_dataframe_copy['Neutral_count'] + \
result_dataframe_copy['Negative_count']
result_dataframe_copy['log_Activity'] = result_dataframe_copy.apply(
lambda row: np.log(row['Activity']), axis=1)
result_dataframe_copy['month'] = result_dataframe_copy.apply(lambda row: int(row['Time'][5:]), axis=1)
dataframe_with_population = get_population_three_areas_combined(result_dataframe_copy,
tpu_info_dataframe=tpu_info_dataframe)
regres_dataframe = get_median_income_three_areas_combined(dataframe_with_population,
tpu_info_dataframe=tpu_info_dataframe)
# Don't consider last two months of tweet data for South Horizons & Lei Tung
remove_mask = (regres_dataframe['Area_name'] == 'south_horizons') & (
regres_dataframe['Time'].isin(['2018_11', '2018_12']))
regres_dataframe_select = regres_dataframe.loc[~remove_mask]
# Make sure that we only consider months having both treatment and control for three areas
month_counter = Counter(regres_dataframe_select['Time'])
not_considered_months = [month for month in month_counter if month_counter[month] != 6]
final_dataframe = regres_dataframe_select.loc[~regres_dataframe_select['Time'].isin(not_considered_months)]
return final_dataframe.reset_index(drop=True)
def build_regress_data_three_areas_seperate(kwun_tong_treatment, kwun_tong_control, south_horizons_treatment,
south_horizons_control, ocean_park_treatment, ocean_park_control,
tpu_info_dataframe, check_window_value=0):
"""
    Build dataframes for the TPU-level (separate) DID analysis based on the treatment & control dataframes of each station
:param kwun_tong_treatment: the dataframe saving tweets for kwun tong treatment area
:param kwun_tong_control: the dataframe saving tweets for kwun tong control area
:param south_horizons_treatment: the dataframe saving tweets for south horizons treatment area
:param south_horizons_control: the dataframe saving tweets for south horizons control area
:param ocean_park_treatment: the dataframe saving tweets for ocean park treatment area
:param ocean_park_control: the dataframe saving tweets for ocean park control area
:param tpu_info_dataframe: the dataframe saving the census data for each tpu setting
:param check_window_value: the month window we consider when doing the DID analysis
    :return: a combined dataframe which could be used for the TPU-level (separate) DID analysis
"""
result_dataframe = pd.DataFrame()
treatment_set = set(list(kwun_tong_line_treatment_tpu_set) + list(south_horizons_lei_tung_treatment_tpu_set) +
list(ocean_park_wong_chuk_hang_treatment_tpu_set))
control_set = set(list(kwun_tong_line_control_tpu_set) + list(south_horizons_lei_tung_control_tpu_set) +
list(ocean_park_wong_chuk_hang_control_tpu_set))
print('The treatment set is: {}'.format(treatment_set))
print('The control set is: {}'.format(control_set))
# build the treatment control binary variable
kwun_tong_treatment['T_i_t'] = [1] * kwun_tong_treatment.shape[0]
kwun_tong_treatment['Area_name'] = ['Whampoa & Ho Man Tin'] * kwun_tong_treatment.shape[0]
kwun_tong_control['T_i_t'] = [0] * kwun_tong_control.shape[0]
kwun_tong_control['Area_name'] = ['Whampoa & Ho Man Tin'] * kwun_tong_control.shape[0]
south_horizons_treatment['T_i_t'] = [1] * south_horizons_treatment.shape[0]
south_horizons_treatment['Area_name'] = ['South Horizons & Lei Tung'] * south_horizons_treatment.shape[0]
south_horizons_control['T_i_t'] = [0] * south_horizons_control.shape[0]
south_horizons_control['Area_name'] = ['South Horizons & Lei Tung'] * south_horizons_control.shape[0]
ocean_park_treatment['T_i_t'] = [1] * ocean_park_treatment.shape[0]
ocean_park_treatment['Area_name'] = ['Ocean Park & Wong Chuk Hang'] * ocean_park_treatment.shape[0]
ocean_park_control['T_i_t'] = [0] * ocean_park_control.shape[0]
ocean_park_control['Area_name'] = ['Ocean Park & Wong Chuk Hang'] * ocean_park_control.shape[0]
# add the post variable
kwun_tong_treatment['Post'] = kwun_tong_treatment.apply(
lambda row: add_post_variable_lag_effect(row['hk_time'], opening_start_date=october_1_start,
opening_end_date=october_31_end, lag_effect_month=check_window_value),
axis=1)
kwun_tong_control['Post'] = kwun_tong_control.apply(
lambda row: add_post_variable_lag_effect(row['hk_time'], opening_start_date=october_1_start,
opening_end_date=october_31_end, lag_effect_month=check_window_value),
axis=1)
south_horizons_treatment['Post'] = south_horizons_treatment.apply(
lambda row: add_post_variable_lag_effect(row['hk_time'], opening_start_date=december_1_start,
opening_end_date=december_31_end, lag_effect_month=check_window_value),
axis=1)
south_horizons_control['Post'] = south_horizons_control.apply(
lambda row: add_post_variable_lag_effect(row['hk_time'], opening_start_date=december_1_start,
opening_end_date=december_31_end, lag_effect_month=check_window_value),
axis=1)
ocean_park_treatment['Post'] = ocean_park_treatment.apply(
lambda row: add_post_variable_lag_effect(row['hk_time'], opening_start_date=december_1_start,
opening_end_date=december_31_end, lag_effect_month=check_window_value),
axis=1)
ocean_park_control['Post'] = ocean_park_control.apply(
lambda row: add_post_variable_lag_effect(row['hk_time'], opening_start_date=december_1_start,
opening_end_date=december_31_end, lag_effect_month=check_window_value),
axis=1)
# Construct the dictionary having the census data for the treatment area and control area
tpu_info_dataframe['SmallTPU'] = tpu_info_dataframe.apply(lambda row: str(row['SmallTPU']), axis=1)
    census_dict = {}  # SmallTPU -> [population_2011, population_2016, m_income_2011, m_income_2016]
for _, row in tpu_info_dataframe.iterrows():
census_dict[row['SmallTPU']] = [row['population_2011'], row['population_2016'],
row['m_income_2011'], row['m_income_2016']]
# Create the tweet dataframe containing the tweets with year_month information
dataframe_list = [kwun_tong_treatment, kwun_tong_control, south_horizons_treatment,
south_horizons_control, ocean_park_treatment, ocean_park_control]
combined_dataframe = pd.concat(dataframe_list, axis=0, sort=True)
combined_dataframe = combined_dataframe.reset_index(drop=True)
combined_dataframe_without_not_considered = combined_dataframe.loc[combined_dataframe['Post'] != 'not considered']
combined_data_copy = combined_dataframe_without_not_considered.copy()
combined_data_copy['month_plus_year'] = combined_data_copy.apply(
lambda row: str(int(float(row['year']))) + '_' + str(int(float(row['month']))), axis=1)
combined_data_copy['sentiment_vader_percent'] = combined_data_copy.apply(
lambda row: int(float(row['sentiment_vader_percent'])), axis=1)
# Construct the data for the difference in difference analysis
result_dataframe_copy = result_dataframe.copy()
tpu_list, area_name_list = [], []
time_list = []
t_i_t_list = []
post_list = []
sentiment_list = []
positive_list, neutral_list, negative_list = [], [], []
sentiment_dict = {}
pos_dict, neutral_dict, neg_dict = {}, {}, {}
for _, dataframe in combined_data_copy.groupby(['TPU_cross_sectional', 'month_plus_year', 'T_i_t', 'Post',
'Area_name']):
if dataframe.shape[0] != 0:
time = str(list(dataframe['month_plus_year'])[0])
t_i_t = str(list(dataframe['T_i_t'])[0])
post = str(list(dataframe['Post'])[0])
tpu_info = str(list(dataframe['TPU_cross_sectional'])[0])
area_name = str(list(dataframe['Area_name'])[0])
sentiment_dict[time + '+' + t_i_t + '+' + post + '+' + tpu_info + '+' + area_name] = \
pos_percent_minus_neg_percent(dataframe)
pos_dict[time + '+' + t_i_t + '+' + post + '+' + tpu_info + '+' + area_name] = dataframe.loc[
dataframe['sentiment_vader_percent'] == 2].shape[0]
neutral_dict[time + '+' + t_i_t + '+' + post + '+' + tpu_info + '+' + area_name] = dataframe.loc[
dataframe['sentiment_vader_percent'] == 1].shape[0]
neg_dict[time + '+' + t_i_t + '+' + post + '+' + tpu_info + '+' + area_name] = dataframe.loc[
dataframe['sentiment_vader_percent'] == 0].shape[0]
for key in list(sentiment_dict.keys()):
# don't consider the tweets posted in 2016_10(for Whampoa and Ho Man Tin) or 2016_12(for other stations)
info_list = key.split('+')
if info_list[0] not in ['2016_10', '2016_12']:
time_list.append(info_list[0])
t_i_t_list.append(int(info_list[1]))
post_list.append(int(info_list[2]))
tpu_list.append(str(info_list[3]))
area_name_list.append(str(info_list[4]))
sentiment_list.append(sentiment_dict[key])
positive_list.append(pos_dict[key])
neutral_list.append(neutral_dict[key])
negative_list.append(neg_dict[key])
result_dataframe_copy['TPU'] = tpu_list
result_dataframe_copy['Area_name'] = area_name_list
result_dataframe_copy['Time'] = time_list
result_dataframe_copy['Treatment'] = t_i_t_list
result_dataframe_copy['Post'] = post_list
result_dataframe_copy['Sentiment'] = sentiment_list
result_dataframe_copy['month'] = result_dataframe_copy.apply(lambda row: int(row['Time'][5:]), axis=1)
result_dataframe_copy['Positive_count'] = positive_list
result_dataframe_copy['Neutral_count'] = neutral_list
result_dataframe_copy['Negative_count'] = negative_list
result_dataframe_copy['Activity'] = result_dataframe_copy['Positive_count'] + \
result_dataframe_copy['Neutral_count'] + \
result_dataframe_copy['Negative_count']
# Add the population, median income and tpu index information
population_list, median_income_list = [], []
for index, row in result_dataframe_copy.iterrows():
if row['Post'] == 0: # Use the census 2011 data
population_list.append(census_dict[row['TPU']][0])
median_income_list.append(census_dict[row['TPU']][2])
else: # Use the census 2016 data
population_list.append(census_dict[row['TPU']][1])
median_income_list.append(census_dict[row['TPU']][3])
result_dataframe_copy['Population'] = population_list
result_dataframe_copy['Median_Income'] = median_income_list
return result_dataframe_copy
def build_regress_dataframe_for_one_station_combined(treatment_dataframe, control_dataframe,
station_open_month_start, station_open_month_end,
open_year_plus_month, tpu_info_dataframe,
check_window_value=0, check_area_name=None,
consider_lag_effect=True):
"""
Build the dataframe for one influenced area
:param treatment_dataframe: the tweet dataframe for treatment area
:param control_dataframe: the tweet dataframe for control area
:param station_open_month_start: the starting time of the month when the studied station opens
:param station_open_month_end: the ending time of the month when the studied station opens
:param open_year_plus_month: the month plus year information
:param tpu_info_dataframe: the dataframe saving the census data for each tpu setting
:param check_window_value: the window size for DID analysis
    :param check_area_name: the name of the study area
    :param consider_lag_effect: whether to consider the lag effect or not
    :return: a pandas dataframe which could be used for the following DID analysis
"""
# check the date
assert open_year_plus_month in ['2016_10', '2016_12']
result_dataframe = pd.DataFrame()
# build the T_i_t variable
ones_list = [1] * treatment_dataframe.shape[0]
treatment_dataframe['T_i_t'] = ones_list
zeros_list = [0] * control_dataframe.shape[0]
control_dataframe['T_i_t'] = zeros_list
# build the post variable
if consider_lag_effect:
treatment_dataframe['Post'] = treatment_dataframe.apply(
lambda row: add_post_variable_lag_effect(row['hk_time'], opening_start_date=station_open_month_start,
opening_end_date=station_open_month_end,
lag_effect_month=check_window_value), axis=1)
control_dataframe['Post'] = control_dataframe.apply(
lambda row: add_post_variable_lag_effect(row['hk_time'], opening_start_date=station_open_month_start,
opening_end_date=station_open_month_end,
lag_effect_month=check_window_value), axis=1)
else:
treatment_dataframe['Post'] = treatment_dataframe.apply(
lambda row: add_post_variable(row['hk_time'], opening_start_date=station_open_month_start,
opening_end_date=station_open_month_end,
check_window=check_window_value), axis=1)
control_dataframe['Post'] = control_dataframe.apply(
lambda row: add_post_variable(row['hk_time'], opening_start_date=station_open_month_start,
opening_end_date=station_open_month_end,
check_window=check_window_value), axis=1)
# Check the distribution of T_i_t and POST variables
print('Check the post variable distribution of treatment group: {}'.format(
Counter(treatment_dataframe['Post'])))
print('Check the T_i_t variable distribution of treatment group: {}'.format(
Counter(treatment_dataframe['T_i_t'])))
print('Check the post variable distribution of control group: {}'.format(
Counter(control_dataframe['Post'])))
print('Check the T_i_t variable distribution of control group: {}'.format(
Counter(control_dataframe['T_i_t'])))
# Construct the dictionary having the census data for the treatment area and control area
treatment_dataframe['TPU_cross_sectional'] = treatment_dataframe['TPU_cross_sectional'].astype(str)
control_dataframe['TPU_cross_sectional'] = control_dataframe['TPU_cross_sectional'].astype(str)
treatment_set = set(treatment_dataframe['TPU_cross_sectional'])
control_set = set(control_dataframe['TPU_cross_sectional'])
tpu_info_dataframe['SmallTPU'] = tpu_info_dataframe.apply(lambda row: str(row['SmallTPU']), axis=1)
treatment_info_data = tpu_info_dataframe.loc[tpu_info_dataframe['SmallTPU'].isin(treatment_set)]
control_info_data = tpu_info_dataframe.loc[tpu_info_dataframe['SmallTPU'].isin(control_set)]
# Construct the dataframe for the DID regression analysis
combined_dataframe = pd.concat([treatment_dataframe, control_dataframe], axis=0)
combined_dataframe = combined_dataframe.reset_index(drop=True)
# We don't consider the tweets posted on the open month of the MTR stations
combined_dataframe_without_not_considered = combined_dataframe.loc[combined_dataframe['Post'] != 'not considered']
combined_data_copy = combined_dataframe_without_not_considered.copy()
combined_data_copy['month_plus_year'] = combined_data_copy.apply(
lambda row: str(int(float(row['year']))) + '_' + str(int(float(row['month']))), axis=1)
sentiment_dict = {}
activity_dict = {}
activity_dict_log = {}
for _, dataframe in combined_data_copy.groupby(['month_plus_year', 'T_i_t', 'Post']):
time = str(list(dataframe['month_plus_year'])[0])
t_i_t = str(list(dataframe['T_i_t'])[0])
post = str(list(dataframe['Post'])[0])
sentiment_dict[time + '+' + t_i_t + '+' + post] = pos_percent_minus_neg_percent(dataframe)
activity_dict[time + '+' + t_i_t + '+' + post] = dataframe.shape[0]
activity_dict_log[time + '+' + t_i_t + '+' + post] = np.log(dataframe.shape[0])
result_dataframe_copy = result_dataframe.copy()
t_i_t_list = []
time_list, post_list = [], []
sentiment_list = []
activity_list, activity_log_list = [], []
for key in list(sentiment_dict.keys()):
# don't consider the tweets posted in 2016_10(for Whampoa and Ho Man Tin) or 2016_12(for other stations)
info_list = key.split('+')
if info_list[0] != open_year_plus_month:
time_list.append(info_list[0])
t_i_t_list.append(int(info_list[1]))
post_list.append(int(info_list[2]))
sentiment_list.append(sentiment_dict[key])
activity_list.append(activity_dict[key])
activity_log_list.append(activity_dict_log[key])
result_dataframe_copy['Time'] = time_list
result_dataframe_copy['month'] = result_dataframe_copy.apply(lambda row: int(row['Time'][5:]), axis=1)
result_dataframe_copy['T_i_t'] = t_i_t_list
result_dataframe_copy['Post'] = post_list
result_dataframe_copy['Sentiment'] = sentiment_list
result_dataframe_copy['Activity'] = activity_list
result_dataframe_copy['log_Activity'] = activity_log_list
dataframe_with_population = get_population_one_area_combined(result_dataframe_copy,
treatment_census_data=treatment_info_data,
control_census_data=control_info_data)
final_dataframe = get_median_income_one_area_combined(dataframe_with_population,
treatment_census_data=treatment_info_data,
control_census_data=control_info_data)
    if check_area_name and 'south_horizons' in check_area_name:  # For South Horizons & Lei Tung, do not consider the last two months
final_dataframe = final_dataframe.loc[~final_dataframe['Time'].isin(['2018_11', '2018_12'])]
return final_dataframe.reset_index(drop=True)
def output_did_result(ols_model, variable_list: list, time_window):
"""
Create a pandas dataframe saving the DID regression analysis result
:param ols_model: a linear model containing the regression result.
type: statsmodels.regression.linear_model.RegressionResultsWrapper
:param variable_list: a list of interested variable names
:param time_window: the time window
:return: a pandas dataframe saving the regression coefficient, pvalues, standard errors, aic,
number of observations, adjusted r squared
"""
coef_dict = ols_model.params.to_dict() # coefficient dictionary
std_error_dict = ols_model.bse.to_dict() # standard error dictionary
pval_dict = ols_model.pvalues.to_dict() # pvalues dictionary
    num_observs = int(ols_model.nobs)  # number of observations (np.int is removed in recent NumPy)
    aic_val = round(ols_model.aic, 2)  # aic value
    adj_rsquared = round(ols_model.rsquared_adj, 3)  # adjusted r squared
info_index = ['Num', 'AIC', 'Adjusted R2']
index_list = variable_list + info_index
for variable in variable_list:
assert variable in coef_dict, 'Something wrong with variable name!'
coef_vals = []
for variable in variable_list:
coef_val = coef_dict[variable]
std_val = std_error_dict[variable]
p_val = pval_dict[variable]
if p_val <= 0.01:
coef_vals.append('{}***({})'.format(round(coef_val, 4), round(std_val, 3)))
elif 0.01 < p_val <= 0.05:
coef_vals.append('{}**({})'.format(round(coef_val, 4), round(std_val, 3)))
elif 0.05 < p_val <= 0.1:
coef_vals.append('{}*({})'.format(round(coef_val, 4), round(std_val, 3)))
else:
coef_vals.append('{}({})'.format(round(coef_val, 4), round(std_val, 3)))
    coef_vals.extend([num_observs, aic_val, adj_rsquared])
    result_data = pd.DataFrame()
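
# A minimal sketch of the significance-star convention used in output_did_result above
# (assuming the usual thresholds: *** for p <= 0.01, ** for p <= 0.05, * for p <= 0.1);
# the helper name below is hypothetical and only for illustration.
def _format_coefficient(coef_val, std_val, p_val):
    stars = '***' if p_val <= 0.01 else '**' if p_val <= 0.05 else '*' if p_val <= 0.1 else ''
    return '{}{}({})'.format(round(coef_val, 4), stars, round(std_val, 3))

# For example, _format_coefficient(0.1234, 0.031, 0.003) returns '0.1234***(0.031)'.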
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pytest
import ibis
from ibis.expr import datatypes as dt
from ibis.expr import schema as sch
pytestmark = pytest.mark.pandas
@pytest.mark.parametrize(
('column', 'expected_dtype'),
[
([True, False, False], dt.boolean),
(np.int8([-3, 9, 17]), dt.int8),
(np.uint8([3, 0, 16]), dt.uint8),
(np.int16([-5, 0, 12]), dt.int16),
(np.uint16([5569, 1, 33]), dt.uint16),
(np.int32([-12, 3, 25000]), dt.int32),
(np.uint32([100, 0, 6]), dt.uint32),
(np.uint64([666, 2, 3]), dt.uint64),
(np.int64([102, 67228734, -0]), dt.int64),
(np.float32([45e-3, -0.4, 99.0]), dt.float),
(np.float64([-3e43, 43.0, 10000000.0]), dt.double),
(['foo', 'bar', 'hello'], dt.string),
(
[
pd.Timestamp('2010-11-01 00:01:00'),
pd.Timestamp('2010-11-01 00:02:00.1000'),
pd.Timestamp('2010-11-01 00:03:00.300000'),
],
dt.timestamp,
),
(
pd.date_range('20130101', periods=3, tz='US/Eastern'),
dt.Timestamp('US/Eastern'),
),
(
[
pd.Timedelta('1 days'),
pd.Timedelta('-1 days 2 min 3us'),
pd.Timedelta('-2 days +23:57:59.999997'),
],
dt.Interval('ns'),
),
(pd.Series(['a', 'b', 'c', 'a']).astype('category'), dt.Category()),
],
)
def test_infer_simple_dataframe(column, expected_dtype):
    df = pd.DataFrame({'col': column})
import pandas as pd
import scipy.sparse
from cirrocumulus.dotplot_aggregator import DotPlotAggregator
from cirrocumulus.embedding_aggregator import EmbeddingAggregator, get_basis
from cirrocumulus.feature_aggregator import FeatureAggregator
from cirrocumulus.ids_aggregator import IdsAggregator
from cirrocumulus.unique_aggregator import UniqueAggregator
def apply_filter(df, data_filter):
keep_expr = get_filter_expr(df, data_filter)
return df[keep_expr] if keep_expr is not None else df
def get_filter_expr(df, data_filter):
if df is None:
raise ValueError('df is None')
keep_expr = None
if data_filter is not None:
user_filters = data_filter.get('filters', [])
combine_filters = data_filter.get('combine', 'and')
for filter_obj in user_filters:
field = filter_obj[0]
op = filter_obj[1]
value = filter_obj[2]
if isinstance(field, dict): # selection box
selected_points_basis = get_basis(field['basis'], field.get('nbins'),
field.get('agg'), field.get('ndim', '2'), field.get('precomputed', False))
if 'points' in value:
p = value['points']
field = selected_points_basis['full_name'] if selected_points_basis[
'nbins'] is not None else 'index'
if field == 'index':
keep = df.index.isin(p)
else:
keep = df[field].isin(p)
else:
keep = None
for p in value['path']:
if 'z' in p: # 3d
selection_keep = \
(df[selected_points_basis['coordinate_columns'][0]] >= p['x']) & \
(df[selected_points_basis['coordinate_columns'][0]] <= p['x'] + p['width']) & \
(df[selected_points_basis['coordinate_columns'][1]] >= p['y']) & \
(df[selected_points_basis['coordinate_columns'][1]] <= p['y'] + p[
'height']) & \
(df[selected_points_basis['coordinate_columns'][2]] >= p['z']) & \
(df[selected_points_basis['coordinate_columns'][2]] <= p['z'] + p['depth'])
else:
selection_keep = \
(df[selected_points_basis['coordinate_columns'][0]] >= p['x']) & \
(df[selected_points_basis['coordinate_columns'][0]] <= p['x'] + p['width']) & \
(df[selected_points_basis['coordinate_columns'][1]] >= p['y']) & \
(df[selected_points_basis['coordinate_columns'][1]] <= p['y'] + p['height'])
keep = selection_keep | keep if keep is not None else selection_keep
elif field == '__index':
import numpy as np
keep = np.zeros(len(df), dtype=bool)
keep[value] = True
else:
series = df[field]
if op == 'in':
keep = (series.isin(value)).values
elif op == '>':
keep = (series > value)
elif op == '=':
keep = (series == value)
elif op == '<':
keep = (series < value)
elif op == '!=':
keep = (series != value)
elif op == '>=':
keep = (series >= value)
elif op == '<=':
keep = (series <= value)
else:
raise ValueError('Unknown filter')
if scipy.sparse.issparse(keep):
keep = keep.toarray().flatten()
if hasattr(keep, 'sparse'):
keep = keep.sparse.to_dense()
if isinstance(keep, pd.Series):
keep = keep.values
if keep_expr is not None:
if combine_filters == 'and':
keep_expr = keep_expr & keep
else:
keep_expr = keep_expr | keep
else:
keep_expr = keep
return keep_expr
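
# A minimal usage sketch of the filter structure consumed by get_filter_expr/apply_filter
# (the column names and values below are hypothetical, not part of the cirrocumulus API):
example_data_filter = {
    'combine': 'and',  # or 'or'
    'filters': [
        ['louvain', 'in', ['1', '2']],  # categorical membership
        ['n_genes', '>', 500],          # numeric comparison
    ],
}
# filtered_df = apply_filter(df, example_data_filter)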
def precomputed_summary(dataset_api, dataset, obs_measures, var_measures, dimensions):
if '__count' in var_measures:
var_measures.remove('__count')
return dataset_api.read_precomputed_stats(dataset, obs_keys=dimensions + obs_measures, var_keys=var_measures)
def precomputed_grouped_stats(dataset_api, dataset, var_measures, dimensions):
if (len(var_measures)) > 0 and len(dimensions) > 0:
return dataset_api.read_precomputed_grouped_stats(dataset,
var_keys=var_measures, obs_keys=dimensions)
return []
def precomputed_embedding(dataset_api, dataset, basis, obs_measures, var_measures,
dimensions):
if (len(obs_measures) + len(var_measures) + len(dimensions)) == 0:
obs_measures = ['__count']
return dataset_api.read_precomputed_basis(dataset, obs_keys=obs_measures + dimensions, var_keys=var_measures,
basis=basis)
def get_var_name_type(key):
index = key.find('/')
if index == -1:
return key, 'X'
else:
key_type = key[0:index]
name = key[index + 1:]
return name, key_type
def get_type_to_measures(measures):
type2measures = dict(X=[], obs=[])
for measure in measures:
name, key_type = get_var_name_type(measure)
type_measures = type2measures.get(key_type)
if type_measures is None:
type_measures = []
type2measures[key_type] = type_measures
type_measures.append(name)
return type2measures
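# For example (hypothetical keys): get_var_name_type('obs/n_genes') returns ('n_genes', 'obs'),
# get_var_name_type('FOXP3') returns ('FOXP3', 'X'), so
# get_type_to_measures(['obs/n_genes', 'FOXP3']) gives {'X': ['FOXP3'], 'obs': ['n_genes']}.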
def check_bin_input(nbins):
if nbins is not None:
nbins = int(nbins)
nbins = min(1000, nbins)
if nbins <= 0:
nbins = None
return nbins
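# For example: check_bin_input('2500') returns 1000 (capped), check_bin_input('-5') returns None,
# and check_bin_input(None) returns None.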
def handle_export_dataset_filters(dataset_api, dataset, data_filters):
import json
reformatted_filters = []
filter_names = []
for data_filter_obj in data_filters:
filter_value = json.loads(data_filter_obj['value'])
filter_names.append(data_filter_obj['name'])
reformatted_filters.append(filter_value)
df = get_df(dataset_api, dataset, measures=['obs/index'], data_filters=reformatted_filters)
    result_df = pd.DataFrame(index=df['index'])
from __future__ import print_function
import sys
import pandas as pd
from pyranges.pyranges import PyRanges
from pyranges.version import __version__
def read_bed(f, output_df=False, nrows=None):
columns = "Chromosome Start End Name Score Strand ThickStart ThickEnd ItemRGB BlockCount BlockSizes BlockStarts".split(
)
if f.endswith(".gz"):
import gzip
first_start = gzip.open(f).readline().split()[1]
else:
first_start = open(f).readline().split()[1]
header = None
try:
int(first_start)
except ValueError:
header = 0
df = pd.read_csv(
f,
dtype={
"Chromosome": "category",
"Strand": "category"
},
nrows=nrows,
header=header,
sep="\t")
df.columns = columns[:df.shape[1]]
if not output_df:
return PyRanges(df)
else:
return df
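# Usage sketch (hypothetical file path):
#   gr = read_bed('peaks.bed')                  # returns a PyRanges object
#   df = read_bed('peaks.bed', output_df=True)  # returns the underlying pandas DataFrame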
def read_bam(f, output_df=False, mapq=0, required_flag=0, filter_flag=1540):
try:
import bamread
except ModuleNotFoundError:
print("bamread must be installed to read bam. Use `conda install -c bioconda bamread` or `pip install bamread` to install it.")
sys.exit(1)
df = bamread.read_bam(f, mapq, required_flag, filter_flag)
if output_df:
return df
else:
return PyRanges(df)
def _fetch_gene_transcript_exon_id(attribute, annotation):
no_quotes = attribute.str.replace('"', '').str.replace("'", "")
df = no_quotes.str.extract(
"gene_id.?(.+?);(?:.*transcript_id.?(.+?);)?(?:.*exon_number.?(.+?);)?(?:.*exon_id.?(.+?);)?",
expand=True) # .iloc[:, [1, 2, 3]]
df.columns = "GeneID TranscriptID ExonNumber ExonID".split()
# df.loc[:, "ExonNumber"] = df.ExonNumber.astype(int)
if annotation == "ensembl":
newdf = []
for c in "GeneID TranscriptID ExonID".split():
        r = df[c].astype(str).str.extract(r'(\d+)').astype(float)
newdf.append(r)
newdf = pd.concat(newdf, axis=1)
newdf.insert(2, "ExonNumber", df["ExonNumber"])
df = newdf
return df
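# For example, an attribute string such as
#   gene_id "ENSG000001"; transcript_id "ENST000001"; exon_number "2"; exon_id "ENSE000001";
# yields one row with GeneID='ENSG000001', TranscriptID='ENST000001', ExonNumber='2',
# ExonID='ENSE000001'; with annotation == "ensembl" the IDs are further reduced to their
# numeric part and cast to float (e.g. 'ENSG000001' -> 1.0).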
def skiprows(f):
try:
import gzip
fh = gzip.open(f)
for i, l in enumerate(fh):
if l.decode()[0] != "#":
break
except (OSError, TypeError): # not a gzipped file, or StringIO
fh = open(f)
for i, l in enumerate(fh):
if l[0] != "#":
break
fh.close()
return i
def read_gtf(f, full=True, annotation=None, output_df=False, nrows=None):
_skiprows = skiprows(f)
if full:
return read_gtf_full(f, annotation, output_df, nrows, _skiprows)
else:
return read_gtf_restricted(f, annotation, output_df, nrows, _skiprows)
def read_gtf_full(f, annotation=None, output_df=False, nrows=None, skiprows=0):
"""seqname - name of the chromosome or scaffold; chromosome names can be given with or without the 'chr' prefix. Important note: the seqname must be one used within Ensembl, i.e. a standard chromosome name or an Ensembl identifier such as a scaffold ID, without any additional content such as species or assembly. See the example GFF output below.
# source - name of the program that generated this feature, or the data source (database or project name)
feature - feature type name, e.g. Gene, Variation, Similarity
start - Start position of the feature, with sequence numbering starting at 1.
end - End position of the feature, with sequence numbering starting at 1.
score - A floating point value.
strand - defined as + (forward) or - (reverse).
# frame - One of '0', '1' or '2'. '0' indicates that the first base of the feature is the first base of a codon, '1' that the second base is the first base of a codon, and so on..
attribute - A semicolon-separated list of tag-value pairs, providing additional information about each feature."""
dtypes = {
"Chromosome": "category",
"Feature": "category",
"Strand": "category"
}
names = "Chromosome Source Feature Start End Score Strand Frame Attribute".split(
)
# names = "Chromosome Start End Score Strand Source Feature Frame Attribute".split()
df_iter = pd.read_csv(
f,
sep="\t",
header=None,
names=names,
dtype=dtypes,
chunksize=int(1e5),
skiprows=skiprows,
nrows=nrows)
dfs = []
for df in df_iter:
extra = to_rows(df.Attribute)
df = df.drop("Attribute", axis=1)
ndf = pd.concat([df, extra], axis=1, sort=False)
dfs.append(ndf)
df = pd.concat(dfs, sort=False)
if not output_df:
return PyRanges(df)
else:
return df
def to_rows(anno):
rowdicts = []
for l in anno:
l = l.replace('"', '').replace(";", "").split()
rowdicts.append({k: v for k, v in zip(*([iter(l)] * 2))})
return pd.DataFrame.from_dict(rowdicts).set_index(anno.index)
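# For example, a GTF attribute string such as
#   gene_id "ENSG1"; transcript_id "ENST1"; exon_number "1";
# becomes the row {'gene_id': 'ENSG1', 'transcript_id': 'ENST1', 'exon_number': '1'}
# after the quotes/semicolons are stripped and the tokens are zipped pairwise above.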
def read_gtf_restricted(f,
annotation=None,
output_df=False,
skiprows=0,
nrows=None):
"""seqname - name of the chromosome or scaffold; chromosome names can be given with or without the 'chr' prefix. Important note: the seqname must be one used within Ensembl, i.e. a standard chromosome name or an Ensembl identifier such as a scaffold ID, without any additional content such as species or assembly. See the example GFF output below.
# source - name of the program that generated this feature, or the data source (database or project name)
feature - feature type name, e.g. Gene, Variation, Similarity
start - Start position of the feature, with sequence numbering starting at 1.
end - End position of the feature, with sequence numbering starting at 1.
score - A floating point value.
strand - defined as + (forward) or - (reverse).
# frame - One of '0', '1' or '2'. '0' indicates that the first base of the feature is the first base of a codon, '1' that the second base is the first base of a codon, and so on..
attribute - A semicolon-separated list of tag-value pairs, providing additional information about each feature."""
dtypes = {
"Chromosome": "category",
"Feature": "category",
"Strand": "category"
}
df_iter = pd.read_csv(
f,
sep="\t",
comment="#",
usecols=[0, 2, 3, 4, 5, 6, 8],
header=None,
names="Chromosome Feature Start End Score Strand Attribute".split(),
dtype=dtypes,
chunksize=int(1e5),
nrows=nrows)
dfs = []
for df in df_iter:
# Since Start is 1-indexed
df.Start -= 1
if sum(df.Score == ".") == len(df):
cols_to_concat = "Chromosome Start End Strand Feature".split()
else:
cols_to_concat = "Chromosome Start End Strand Feature Score".split(
)
extract = _fetch_gene_transcript_exon_id(df.Attribute, annotation)
extract.columns = "GeneID TranscriptID ExonNumber ExonID".split()
extract.ExonNumber = extract.ExonNumber.astype(float)
df = pd.concat([df[cols_to_concat], extract], axis=1, sort=False)
dfs.append(df)
df = pd.concat(dfs, sort=False)
if not output_df:
return PyRanges(df)
else:
return df
def to_rows_gff3(anno):
rowdicts = []
for l in list(anno):
l = ( it.split("=") for it in l.split(";") )
rowdicts.append({k: v for k, v in l})
return pd.DataFrame.from_dict(rowdicts).set_index(anno.index)
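# For example, a GFF3 attribute string such as
#   ID=gene:ENSG1;Name=BRCA2;biotype=protein_coding
# is split on ';' and '=' into {'ID': 'gene:ENSG1', 'Name': 'BRCA2', 'biotype': 'protein_coding'}.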
def read_gff3(f, annotation=None, output_df=False, nrows=None, skiprows=0):
"""seqid - name of the chromosome or scaffold; chromosome names can be given with or without the 'chr' prefix. Important note: the seq ID must be one used within Ensembl, i.e. a standard chromosome name or an Ensembl identifier such as a scaffold ID, without any additional content such as species or assembly. See the example GFF output below.
source - name of the program that generated this feature, or the data source (database or project name)
type - type of feature. Must be a term or accession from the SOFA sequence ontology
start - Start position of the feature, with sequence numbering starting at 1.
end - End position of the feature, with sequence numbering starting at 1.
score - A floating point value.
strand - defined as + (forward) or - (reverse).
phase - One of '0', '1' or '2'. '0' indicates that the first base of the feature is the first base of a codon, '1' that the second base is the first base of a codon, and so on..
attributes - A semicolon-separated list of tag-value pairs, providing additional information about each feature. Some of these tags are predefined, e.g. ID, Name, Alias, Parent - see the GFF documentation for more details."""
dtypes = {
"Chromosome": "category",
"Feature": "category",
"Strand": "category"
}
names = "Chromosome Source Feature Start End Score Strand Frame Attribute".split(
)
df_iter = pd.read_csv(
f,
comment="#",
sep="\t",
header=None,
names=names,
dtype=dtypes,
chunksize=int(1e5),
skiprows=skiprows,
nrows=nrows)
dfs = []
for df in df_iter:
extra = to_rows_gff3(df.Attribute.astype(str))
df = df.drop("Attribute", axis=1)
ndf = pd.concat([df, extra], axis=1, sort=False)
dfs.append(ndf)
    df = pd.concat(dfs, sort=False)
import pandas as pd
c1 = pd.read_csv('machine/Calling/Sensors_1.csv')
c2 = pd.read_csv('machine/Calling/Sensors_2.csv')
c3 = pd.read_csv('machine/Calling/Sensors_3.csv')
c4 = pd.read_csv('machine/Calling/Sensors_4.csv')
c5 = pd.read_csv('machine/Calling/Sensors_5.csv')
c6 = pd.read_csv('machine/Calling/Sensors_6.csv')
c7 = pd.read_csv('machine/Calling/Sensors_7.csv')
c8 = pd.read_csv('machine/Calling/Sensors_8.csv')
c9 = pd.read_csv('machine/Calling/Sensors_9.csv')
c10 = pd.read_csv('machine/Calling/Sensors_10.csv')
calling = pd.concat([c1,c2,c3,c4,c5,c6,c7,c8,c9,c10], axis = 0)
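# A more compact equivalent of the ten reads above (a sketch, assuming the same
# machine/Calling/Sensors_<i>.csv naming pattern):
#   calling = pd.concat(
#       [pd.read_csv('machine/Calling/Sensors_{}.csv'.format(i)) for i in range(1, 11)],
#       axis=0)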
t1 = pd.read_csv('machine/Texting/Sensors_1.csv')
t2 = pd.read_csv('machine/Texting/Sensors_2.csv')
t3 = pd.read_csv('machine/Texting/Sensors_3.csv')
t4 = pd.read_csv('machine/Texting/Sensors_4.csv')
t5 = pd.read_csv('machine/Texting/Sensors_5.csv')
t6 = pd.read_csv('machine/Texting/Sensors_6.csv')
import icd9
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal
#
# Test without dates
#
cat1 = ["12345", "54321"]
cat2 = ["44", "323"]
full = {"group1": cat1}
init = {"group2": cat2}
counter = icd9.Counter(codes_full=full, codes_initial=init)
chunk = pd.DataFrame()
chunk['id'] = [1, 2, 3, 4, 5]
chunk['code'] = ["12345", "12345", "32", "441", "54321"]
counter.update(chunk, 'id')
expected = pd.DataFrame([[1, 0], [1, 0], [0, 0], [0, 1], [1, 0]],
index=[1, 2, 3, 4, 5], dtype=np.float64,
columns=["group1 [N]", "group2 [N]"])
counter.table = counter.table.loc[:, expected.columns]
assert_frame_equal(counter.table, expected)
chunk = pd.DataFrame()
chunk['id'] = [1, 2, 5, 6, 6]
chunk['code'] = ["12345", "440", "32", "441", "54321"]
counter.update(chunk, 'id')
expected = pd.DataFrame([[2, 0], [1, 1], [0, 0], [0, 1], [1, 0], [1, 1]],
index=[1, 2, 3, 4, 5, 6], dtype=np.float64,
columns=["group1 [N]", "group2 [N]"])
# The columns need to be in the same order.
counter.table = counter.table.loc[:, expected.columns]
assert_frame_equal(counter.table, expected)
#
# Test with dates
#
cat1 = ["12345"]
cat2 = ["66"]
cat3 = ["44"]
full = {"group1": cat1}
init = {"group1": cat2, "group2": cat3}
dt = icd9.Counter(codes_full=full, codes_initial=init, calculate_dates=True)
chunk = pd.DataFrame()
chunk['id'] = [1, 1, 2, 3, 4, 5]
chunk["code"] = ["12345", "12345", "4424", "99", "12345", "6600"]
chunk["date"] = ["2014-6-1", "2014-4-1", "2014-5-1", "2014-5-1", "2014-3-1", "2014-5-1"]
chunk["date"] = pd.to_datetime(chunk["date"])
dt.update(chunk, 'id', 'date')
chunk = pd.DataFrame()
chunk["id"] = [1, 1, 2, 2, 5, 5, 5, 5]
chunk["code"] = ["66xx", "12345", "99", "12345", "12345", "4400", "66", "663"]
chunk["date"] = ["2014-2-1", "2014-8-1", "2014-5-1", "2014-4-1", "2014-5-1", "2014-6-1", "2014-7-1", "2014-4-1"]
chunk["date"] = pd.to_datetime(chunk["date"])
dt.update(chunk, 'id', 'date')
df = [['index', 'group1 [N]', 'group1 [first]', 'group1 [last]', 'group2 [N]', 'group2 [first]', 'group2 [last]'],
[1, 4, '2014-02-01', '2014-08-01', 0, 'NaT', 'NaT'],
[2, 1, '2014-04-01', '2014-04-01', 1, '2014-05-01', '2014-05-01'],
[3, 0, 'NaT', 'NaT', 0, 'NaT', 'NaT'],
[4, 1, '2014-03-01', '2014-03-01', 0, 'NaT', 'NaT'],
[5, 4, '2014-04-01', '2014-07-01', 1, '2014-06-01', '2014-06-01']]
expected = pd.DataFrame(df[1:], columns=df[0])
import warnings
from typing import Any, Dict, List, Optional, Sequence, Tuple
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from pandas import DataFrame
from xarray import Dataset
from sgkit.stats.association import gwas_linear_regression, linear_regression
from sgkit.typing import ArrayLike
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
# Ignore: DeprecationWarning: Using or importing the ABCs from 'collections'
# instead of from 'collections.abc' is deprecated since Python 3.3,
# and in 3.9 it will stop working
import statsmodels.api as sm
from statsmodels.regression.linear_model import RegressionResultsWrapper
def _generate_test_data(
n: int = 100,
m: int = 10,
p: int = 3,
e_std: float = 0.001,
b_zero_slice: Optional[slice] = None,
seed: Optional[int] = 1,
) -> Tuple[ArrayLike, ArrayLike, ArrayLike, ArrayLike]:
"""Test data simulator for multiple variant associations to a continuous outcome
Outcomes for each variant are simulated separately based on linear combinations
of randomly generated fixed effect covariates as well as the variant itself.
This does not add an intercept term in covariates.
Parameters
----------
n : int, optional
Number of samples
m : int, optional
Number of variants
p : int, optional
Number of covariates
e_std : float, optional
Standard deviation for noise term
b_zero_slice : slice
Variant beta values to zero out, defaults to `slice(m // 2)`
meaning that the first half will all be 0.
Set to `slice(0)` to disable.
Returns
-------
g : (n, m) array-like
Simulated genotype dosage
x : (n, p) array-like
Simulated covariates
bg : (m,) array-like
Variant betas
ys : (m, n) array-like
Outcomes for each column in genotypes i.e. variant
"""
if b_zero_slice is None:
b_zero_slice = slice(m // 2)
rs = np.random.RandomState(seed)
g = rs.uniform(size=(n, m), low=0, high=2)
x = rs.normal(size=(n, p))
bg = rs.normal(size=m)
bg[b_zero_slice or slice(m // 2)] = 0
bx = rs.normal(size=p)
e = rs.normal(size=n, scale=e_std)
# Simulate y values using each variant independently
ys = np.array([g[:, i] * bg[i] + x @ bx + e for i in range(m)])
return g, x, bg, ys
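# Quick sanity check for the simulator above (a sketch using the defaults):
#   g, x, bg, ys = _generate_test_data()
#   assert g.shape == (100, 10) and x.shape == (100, 3)
#   assert bg.shape == (10,) and ys.shape == (10, 100)
#   assert (bg[:5] == 0).all()  # first half of variant betas zeroed by b_zero_slice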
def _generate_test_dataset(**kwargs: Any) -> Dataset:
g, x, bg, ys = _generate_test_data(**kwargs)
data_vars = {}
data_vars["dosage"] = (["variants", "samples"], g.T)
for i in range(x.shape[1]):
data_vars[f"covar_{i}"] = (["samples"], x[:, i])
for i in range(ys.shape[0]):
# Traits are NOT multivariate simulations based on
# values of multiple variants; they instead correspond
# 1:1 with variants such that variant i has no causal
# relationship with trait j where i != j
data_vars[f"trait_{i}"] = (["samples"], ys[i])
attrs = dict(beta=bg, n_trait=ys.shape[0], n_covar=x.shape[1])
return xr.Dataset(data_vars, attrs=attrs) # type: ignore[arg-type]
@pytest.fixture(scope="module")
def ds() -> Dataset:
return _generate_test_dataset()
def _sm_statistics(
ds: Dataset, i: int, add_intercept: bool
) -> RegressionResultsWrapper:
X = []
# Make sure first independent variable is variant
X.append(ds["dosage"].values[i])
for v in [c for c in list(ds.keys()) if c.startswith("covar_")]:
X.append(ds[v].values)
if add_intercept:
X.append(np.ones(ds.dims["samples"]))
X = np.stack(X).T
y = ds[f"trait_{i}"].values
return sm.OLS(y, X, hasconst=True).fit()
def _get_statistics(
ds: Dataset, add_intercept: bool, **kwargs: Any
) -> Tuple[DataFrame, DataFrame]:
df_pred: List[Dict[str, Any]] = []
df_true: List[Dict[str, Any]] = []
for i in range(ds.dims["variants"]):
dsr = gwas_linear_regression(
ds,
dosage="dosage",
traits=[f"trait_{i}"],
add_intercept=add_intercept,
**kwargs,
)
res = _sm_statistics(ds, i, add_intercept)
df_pred.append(
dsr.to_dataframe() # type: ignore[no-untyped-call]
.rename(columns=lambda c: c.replace("variant_", ""))
.iloc[i]
.to_dict()
)
        # First result in statsmodels RegressionResultsWrapper for
# [t|p]values will correspond to variant (not covariate/intercept)
df_true.append(dict(t_value=res.tvalues[0], p_value=res.pvalues[0]))
    return pd.DataFrame(df_pred), pd.DataFrame(df_true)
import pandas as pd
from pandas.tseries.offsets import DateOffset
import configparser
import fire
import os
import math
import numpy as np
import qlib
from qlib.data import D
import matplotlib
matplotlib.use('Agg')  # select the non-interactive backend before pyplot is imported
import matplotlib.pyplot as plt
from sklearn.metrics.pairwise import cosine_similarity
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from src.util import getLatestFile, getFolderNameInConfig
def analyzeHistoricalValue(ifUseNewIssues = True, ifUseOldIssues = True, ifUseWatchList = False, ifUseAdjustFactorToLatestDay = False, ifPrintFundCode = False):
'''
Args:
        ifUseNewIssues: whether to include funds whose available history is shorter than daysRangeToAnalyze
        ifUseOldIssues: whether to include funds whose available history is longer than daysRangeToAnalyze
        ifUseWatchList: whether to only plot the funds listed in config/watchlist.txt
        ifUseAdjustFactorToLatestDay: whether to use the adjustFactorToLatestDay generated by trainGBDT.py
        ifPrintFundCode: whether to print the fund codes on the figure; if so, the image will be larger
'''
print ("------------------------ Begin to analyze historical value... ------------------------")
# read config file
cf = configparser.ConfigParser()
cf.read("config/config.ini")
# offset of days
numberOfYears = int(cf.get("Parameter", "numberOfYears"))
numberOfMonths = int(cf.get("Parameter", "numberOfMonths"))
numberOfDays = int(cf.get("Parameter", "numberOfDays"))
minDaysRange = int(cf.get("Parameter", "minDaysRange"))
daysRangeInOneYear = int(cf.get("Parameter", "daysRangeInOneYear"))
if ifUseAdjustFactorToLatestDay:
dfAdjustFactorToLatestDay = pd.read_csv(cf.get("Analyze", "pathOfDfAdjustFactorToLatestDay"), dtype={'Unnamed: 0':object})
# read watchlist
watchlist = []
for line in open("./config/watchlist.txt", "r"): # ['110011', '161028', '110020', '180003', '006479', '007994', '001015']
watchlist.append(line.split("\n")[0])
# we should ignore some strange funds
ignorelist = []
for line in open("./config/ignorelist.txt", "r"): # ['009317', '009763', '009764']
ignorelist.append(line.split("\n")[0])
# qlib init
qlib.init(provider_uri='data/bin')
# use one fund be the standard of trading day
calendar = D.calendar(freq='day')
lastDay = calendar[-1] # 2021-02-10 00:00:00
firstDay = lastDay - DateOffset(years=numberOfYears, months=numberOfMonths, days=numberOfDays) # 2018-02-10 00:00:00
# exclude the influence of days without trading
calendarBetweenFirstDayAndLastDay = D.calendar(freq='day', start_time=firstDay, end_time=lastDay)
firstDayToAnalyze = calendarBetweenFirstDayAndLastDay[0]
lastDayToAnalyze = calendarBetweenFirstDayAndLastDay[-1]
daysRangeToAnalyze = (lastDayToAnalyze - firstDayToAnalyze).days # 1094
count = 0
riskListForOldIssues = []
returnListForOldIssues = []
fundCodeListForOldIssues = []
riskListForNewIssues = []
returnListForNewIssues = []
fundCodeListForNewIssues = []
instruments = D.instruments(market='all')
for file in D.list_instruments(instruments=instruments, as_list=True):
fundCode = file.split("_")[0] # 000001
# exclude some funds
if fundCode in ignorelist:
continue
if ifUseWatchList and fundCode not in watchlist:
continue
if count % 100 == 0:
print ("\ncount = %s\tfundCode = %s" % (count, fundCode)) # 180003
try:
# read file and remove empty line
df = D.features([file], [
'$AccumulativeNetAssetValue',
'($AccumulativeNetAssetValue - Ref($AccumulativeNetAssetValue, 1)) / Ref($AccumulativeNetAssetValue, 1)'
], start_time=firstDayToAnalyze, end_time=lastDayToAnalyze)
df.columns = [
'AccumulativeNetAssetValue',
'GrowthRatio'
]
#df = df.unstack(level=0)
df["datetime"] = df.index.levels[1]
            # abandon the values before the date when GrowthRatio is too large (abs >= 1.0)
df["AbsoluteGrowthRatio"] = df["GrowthRatio"].abs()
if df[df["AbsoluteGrowthRatio"] > 1].shape[0] > 0:
df = df.loc[0:df[df["AbsoluteGrowthRatio"] > 1].first_valid_index() - 1]
# reset the index
df = df.dropna(axis=0, subset=['datetime', 'GrowthRatio']).reset_index(drop=True)
# like http://fundf10.eastmoney.com/jjjz_010476.html, the return in 30 days is 26%, so the annualized return is too high
if df.shape[0] <= minDaysRange:
continue
# count the days between first day and last day
day = df['datetime']
            # TODO: how about fund 519858, which traded on 2018-01-28 (Sunday)
            firstDayInThisFund = day[day.first_valid_index()]  # 2018-02-12 00:00:00, 2018-02-10 is Saturday
lastDayInThisFund = day[day.last_valid_index()] # 2021-02-10 00:00:00
daysRange = (lastDayInThisFund - firstDayInThisFund).days # 1094
# get the value in important days
earliestNetValue = df[df['datetime'] == firstDayInThisFund]["AccumulativeNetAssetValue"].tolist()[0] # 3.49
lastestNetValue = df[df['datetime'] == lastDayInThisFund]["AccumulativeNetAssetValue"].tolist()[0] # 4.046
            # standardize the risk within one year
            # assume the value is a list like (0, 1, 0, 1, ...), so the growth ratio is a list like (1, -1, 1, -1, ...)
            # set ddof to 0 to standardize the risk by n, not (n - 1); then the std is 1, not related to daysRange
riskCurrent = df["GrowthRatio"].std(ddof=0)
returnCurrent = (lastestNetValue-earliestNetValue)/earliestNetValue/daysRange*daysRangeInOneYear
if not ifUseNewIssues:
if (firstDayInThisFund - firstDayToAnalyze).days > 0:
continue
else:
# use latest value to reflect the true percentage gain
                # this is worthwhile if the fund rose rapidly recently but had little change over the long preceding period
if ifUseAdjustFactorToLatestDay:
if (firstDayInThisFund - firstDayToAnalyze).days > 0:
# if the fund code locates in dfAdjustFactorToLatestDay, adjust the latest value and days range
adjustedFactor = dfAdjustFactorToLatestDay[fundCode]
adjustedFactor = adjustedFactor[adjustedFactor.first_valid_index()] # 0.987561058590916
lastestNetValue = lastestNetValue * adjustedFactor
returnCurrent = (lastestNetValue-earliestNetValue)/earliestNetValue/daysRangeToAnalyze*daysRangeInOneYear
# new issues
if (firstDayInThisFund - firstDayToAnalyze).days > 0:
riskListForNewIssues.append(riskCurrent)
returnListForNewIssues.append(returnCurrent)
fundCodeListForNewIssues.append(fundCode)
else:
riskListForOldIssues.append(riskCurrent)
returnListForOldIssues.append(returnCurrent)
fundCodeListForOldIssues.append(fundCode)
count += 1
except Exception as e:
print ("fundCode = %s\terror = %s" % (fundCode, e))
continue
if not ifUseWatchList and ifPrintFundCode:
plt.figure(figsize=(10, 10))
if ifUseOldIssues:
plt.scatter(riskListForOldIssues, returnListForOldIssues, c='k')
if ifUseNewIssues:
plt.scatter(riskListForNewIssues, returnListForNewIssues, c='k')
plt.xlabel("Risk")
plt.ylabel("Annualized return")
ax = plt.gca()
# no line in right and top border
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
if ifPrintFundCode:
if ifUseOldIssues:
for i in range(len(fundCodeListForOldIssues)):
x = riskListForOldIssues[i]
y = returnListForOldIssues[i]
fundCode = fundCodeListForOldIssues[i]
plt.text(x, y, fundCode, fontsize=10)
if ifUseNewIssues:
for i in range(len(fundCodeListForNewIssues)):
x = riskListForNewIssues[i]
y = returnListForNewIssues[i]
fundCode = fundCodeListForNewIssues[i]
plt.text(x, y, fundCode, fontsize=10)
nameOfPicture = "risk_return"
nameOfPicture = nameOfPicture + "_watchlist" if ifUseWatchList else nameOfPicture + "_noWatchlist"
nameOfPicture = nameOfPicture + "_useNewIssues" if ifUseNewIssues else nameOfPicture + "_notUseNewIssues"
nameOfPicture = nameOfPicture + "_useOldIssues" if ifUseOldIssues else nameOfPicture + "_notUseOldIssues"
nameOfPicture = nameOfPicture + "_useAdjustFactor" if ifUseAdjustFactorToLatestDay else nameOfPicture + "_notUseAdjustFactor"
plt.savefig("./image/%s.png" % nameOfPicture)
print ("------------------------ Done. ------------------------")
def getAverageSlopeForFundsInSameRange(ifUseAdjustFactorToLatestDay=True):
'''
    in the return-risk figure, the return is roughly proportional to the risk in most cases,
    so we can use the slope (return / risk) as a feature of each fund; to summarize the funds
    whose trading-day counts fall in the same range, we use the average slope of that range.
    '''
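    # Worked example of the slope feature (net values and days taken from the inline comments
    # below, assuming daysRangeInOneYear = 365; the std is hypothetical): a fund going from an
    # accumulative net value of 3.49 to 4.046 over 1094 days has an annualized return of
    # (4.046 - 3.49) / 3.49 / 1094 * 365, about 0.053; with a daily growth-ratio std of 0.005,
    # the slope return / risk is about 10.6.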
print ("------------------------ Begin to get average slope for funds in same range... ------------------------")
# read config file
cf = configparser.ConfigParser()
cf.read("config/config.ini")
# offset of days
numberOfYears = int(cf.get("Parameter", "numberOfYears"))
numberOfMonths = int(cf.get("Parameter", "numberOfMonths"))
numberOfDays = int(cf.get("Parameter", "numberOfDays"))
minDaysRange = int(cf.get("Parameter", "minDaysRange"))
daysRangeInOneYear = int(cf.get("Parameter", "daysRangeInOneYear"))
# qlib init
qlib.init(provider_uri='data/bin')
# use one fund be the standard of trading day
calendar = D.calendar(freq='day')
lastDay = calendar[-1] # 2021-02-10 00:00:00
firstDay = lastDay - DateOffset(years=numberOfYears, months=numberOfMonths, days=numberOfDays) # 2018-02-10 00:00:00
# exclude the influence of days without trading
calendarBetweenFirstDayAndLastDay = D.calendar(freq='day', start_time=firstDay, end_time=lastDay)
firstDayToAnalyze = calendarBetweenFirstDayAndLastDay[0]
lastDayToAnalyze = calendarBetweenFirstDayAndLastDay[-1]
daysRangeToAnalyze = (lastDayToAnalyze - firstDayToAnalyze).days # 1094
divideNumber = int(cf.get("Analyze", "divideNumber"))
# if use adjustFactorToLatestDay generated by trainGBDT.py
if ifUseAdjustFactorToLatestDay:
dfAdjustFactorToLatestDay = pd.read_csv(cf.get("Analyze", "pathOfDfAdjustFactorToLatestDay"), dtype={'Unnamed: 0':object})
dictOfSlopeInCountNetValue = {}
dictOfReturnInCountNetValue = {}
dictOfRiskInCountNetValue = {}
count = 0
instruments = D.instruments(market='all')
for file in D.list_instruments(instruments=instruments, as_list=True):
fundCode = file.split("_")[0] # 000001
if count % 100 == 0:
print ("count = %s\tfundCode = %s" % (count, fundCode)) # 180003
try:
# read file and remove empty line
df = D.features([file], [
'$AccumulativeNetAssetValue',
'($AccumulativeNetAssetValue - Ref($AccumulativeNetAssetValue, 1)) / Ref($AccumulativeNetAssetValue, 1)'
], start_time=firstDayToAnalyze, end_time=lastDayToAnalyze)
df.columns = [
'AccumulativeNetAssetValue',
'GrowthRatio'
]
#df = df.unstack(level=0)
df["datetime"] = df.index.levels[1]
            # abandon the values before the date when GrowthRatio is too large (abs >= 1.0)
df["AbsoluteGrowthRatio"] = df["GrowthRatio"].abs()
if df[df["AbsoluteGrowthRatio"] > 1].shape[0] > 0:
df = df.loc[0:df[df["AbsoluteGrowthRatio"] > 1].first_valid_index() - 1]
# reset the index
df = df.dropna(axis=0, subset=['datetime', 'GrowthRatio']).reset_index(drop=True)
# like http://fundf10.eastmoney.com/jjjz_010476.html, the return in 30 days is 26%, so the annualized return is too high
if df.shape[0] <= minDaysRange:
continue
# count the days between first day and last day
day = df['datetime']
            # TODO: how about fund 519858, which traded on 2018-01-28 (Sunday)
            firstDayInThisFund = day[day.first_valid_index()]  # 2018-02-12 00:00:00, 2018-02-10 is Saturday
lastDayInThisFund = day[day.last_valid_index()] # 2021-02-10 00:00:00
daysRange = (lastDayInThisFund - firstDayInThisFund).days # 1094
# get the value in important days
earliestNetValue = df[df['datetime'] == firstDayInThisFund]["AccumulativeNetAssetValue"].tolist()[0] # 3.49
lastestNetValue = df[df['datetime'] == lastDayInThisFund]["AccumulativeNetAssetValue"].tolist()[0] # 4.046
            # standardize the risk within one year
            # assume the value is a list like (0, 1, 0, 1, ...), so the growth ratio is a list like (1, -1, 1, -1, ...)
            # set ddof to 0 to standardize the risk by n, not (n - 1); then the std is 1, not related to daysRange
            riskCurrent = df["GrowthRatio"].std(ddof=0)
            returnCurrent = (lastestNetValue-earliestNetValue)/earliestNetValue/daysRange*daysRangeInOneYear
# use latest value to reflect the true percentage gain
            # this is worthwhile if the fund rose rapidly recently but had little change over the long preceding period
if ifUseAdjustFactorToLatestDay:
if (firstDayInThisFund - firstDayToAnalyze).days > 0:
# if the fund code locates in dfAdjustFactorToLatestDay, adjust the latest value and days range
adjustedFactor = dfAdjustFactorToLatestDay[fundCode]
adjustedFactor = adjustedFactor[adjustedFactor.first_valid_index()] # 0.987561058590916
lastestNetValue = lastestNetValue * adjustedFactor
returnCurrent = (lastestNetValue-earliestNetValue)/earliestNetValue/daysRangeToAnalyze*daysRangeInOneYear
slope = returnCurrent / riskCurrent # 28.136361711631576
# TODO: exclude 005337
if math.isnan(slope):
continue
# count them in period, not a single day
approximateDaysRange = daysRange // divideNumber * divideNumber
if approximateDaysRange not in dictOfSlopeInCountNetValue.keys():
dictOfSlopeInCountNetValue[approximateDaysRange] = []
dictOfSlopeInCountNetValue[approximateDaysRange].append(slope)
if approximateDaysRange not in dictOfReturnInCountNetValue.keys():
dictOfReturnInCountNetValue[approximateDaysRange] = []
dictOfReturnInCountNetValue[approximateDaysRange].append(returnCurrent)
if approximateDaysRange not in dictOfRiskInCountNetValue.keys():
dictOfRiskInCountNetValue[approximateDaysRange] = []
dictOfRiskInCountNetValue[approximateDaysRange].append(riskCurrent)
count += 1
except Exception as e:
print ("fundCode = %s\terror = %s" % (fundCode, e))
continue
# ------------------------ Plot Return/Risk ------------------------
plt.xlabel("Count of trading days")
plt.ylabel("Return/Risk")
ax = plt.gca()
# no line in right and top border
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
for key in dictOfSlopeInCountNetValue.keys():
n = len(dictOfSlopeInCountNetValue[key]) # Number of observations
mean = sum(dictOfSlopeInCountNetValue[key]) / n # Mean of the data
deviations = [(x - mean) ** 2 for x in dictOfSlopeInCountNetValue[key]] # Square deviations
standardDeviation = math.sqrt(sum(deviations) / n) # standard deviation
plt.errorbar(key, mean, standardDeviation, c='k', marker='+')
nameOfReturnRisk = "averageSlopeForReturnRisk_%s" % divideNumber
if ifUseAdjustFactorToLatestDay:
nameOfReturnRisk += "_useAdjustFactor"
else:
nameOfReturnRisk += "_notUseAdjustFactor"
plt.savefig("./image/%s.png" % nameOfReturnRisk)
# ------------------------ Plot Return ------------------------
plt.clf()
plt.xlabel("Count of trading days")
plt.ylabel("Return")
ax = plt.gca()
# no line in right and top border
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
listOfMean = []
for key in dictOfReturnInCountNetValue.keys():
n = len(dictOfReturnInCountNetValue[key]) # Number of observations
mean = sum(dictOfReturnInCountNetValue[key]) / n # Mean of the data
listOfMean.append(mean)
deviations = [(x - mean) ** 2 for x in dictOfReturnInCountNetValue[key]] # Square deviations
standardDeviation = math.sqrt(sum(deviations) / n) # standard deviation
plt.errorbar(key, mean, standardDeviation, c='k', marker='+')
nameOfReturn = "averageReturn_%s" % divideNumber
# get the standard deviation of mean
standardDeviationOfReturn = np.std(listOfMean, ddof = 0)
print ("standardDeviationOfReturn = %s" % standardDeviationOfReturn)
if ifUseAdjustFactorToLatestDay:
nameOfReturn += "_useAdjustFactor"
else:
nameOfReturn += "_notUseAdjustFactor"
plt.savefig("./image/%s.png" % nameOfReturn)
# ------------------------ Plot Risk ------------------------
plt.clf()
plt.xlabel("Count of trading days")
plt.ylabel("Risk")
ax = plt.gca()
# no line in right and top border
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
for key in dictOfRiskInCountNetValue.keys():
n = len(dictOfRiskInCountNetValue[key]) # Number of observations
mean = sum(dictOfRiskInCountNetValue[key]) / n # Mean of the data
deviations = [(x - mean) ** 2 for x in dictOfRiskInCountNetValue[key]] # Square deviations
standardDeviation = math.sqrt(sum(deviations) / n) # standard deviation
plt.errorbar(key, mean, standardDeviation, c='k', marker='+')
nameOfRisk = "averageRisk_%s" % divideNumber
if ifUseAdjustFactorToLatestDay:
nameOfRisk += "_useAdjustFactor"
else:
nameOfRisk += "_notUseAdjustFactor"
plt.savefig("./image/%s.png" % nameOfRisk)
print ("------------------------ Done. ------------------------")
def getDfMerge():
print ("------------------------ Begin to get dfMerge... ------------------------")
# read config file
cf = configparser.ConfigParser()
cf.read("config/config.ini")
# offset of days
numberOfYears = int(cf.get("Parameter", "numberOfYears"))
numberOfMonths = int(cf.get("Parameter", "numberOfMonths"))
numberOfDays = int(cf.get("Parameter", "numberOfDays"))
    # use one fund as the standard for trading days
calendar = D.calendar(freq='day')
lastDay = calendar[-1] # 2021-02-10 00:00:00
firstDay = lastDay - DateOffset(years=numberOfYears, months=numberOfMonths, days=numberOfDays) # 2018-02-10 00:00:00
# exclude the influence of days without trading
calendarBetweenFirstDayAndLastDay = D.calendar(freq='day', start_time=firstDay, end_time=lastDay)
firstDayToAnalyze = calendarBetweenFirstDayAndLastDay[0]
lastDayToAnalyze = calendarBetweenFirstDayAndLastDay[-1]
count = 0
instruments = D.instruments(market='all')
for file in D.list_instruments(instruments=instruments, as_list=True):
fundCode = file.split("_")[0]
if count <= 700:
count += 1
continue
if count % 100 == 0:
print ("count = %s\tfundCode = %s" % (count, fundCode))
# read file and remove empty line
df = D.features([file], [
'$AccumulativeNetAssetValue'
], start_time=firstDayToAnalyze, end_time=lastDayToAnalyze)
df.columns = [
"AccumulativeNetAssetValue_%s" % fundCode
]
#df = df.unstack(level=0)
try:
df["datetime"] = df.index.levels[1]
except:
continue
# reset the index
df = df.dropna(axis=0, subset=['datetime']).reset_index(drop=True)
try:
dfMerge = pd.merge(dfMerge, df, on=['datetime'], how='outer')
except:
dfMerge = df
count += 1
dfMerge.to_csv("data/dfMerge.csv")
print ("------------------------ Done. ------------------------")
return dfMerge
def getCorrelationMatrixForOneFund(ifGetCorrFromFile = True, ifGetDfMergeFromFile = True, fundCodeToAnalyze="110011"):
print ("------------------------ Begin to get Pearson's correlation matrix for fund '%s'... ------------------------" % fundCodeToAnalyze)
# qlib init
qlib.init(provider_uri='data/bin')
if ifGetCorrFromFile:
if not os.path.exists("data/corr.csv"):
ifGetCorrFromFile = False
if not ifGetCorrFromFile:
if ifGetDfMergeFromFile:
if not os.path.exists("data/dfMerge.csv"):
ifGetDfMergeFromFile = False
if ifGetDfMergeFromFile:
dfMerge = pd.read_csv("data/dfMerge.csv", index_col=0)
else:
dfMerge = getDfMerge()
dfMerge = dfMerge.drop(labels='datetime',axis=1)
# count correlation
corr = dfMerge.corr()
corr.to_csv("data/corr.csv")
else:
corr = pd.read_csv("data/corr.csv", index_col=0)
print ("corr = %s" % corr)
corrFund = corr["AccumulativeNetAssetValue_%s" % fundCodeToAnalyze].dropna(axis=0)
dictOfCorr = {}
minNumber = 0.98
nameFund = "%s" % fundCodeToAnalyze
instruments = D.instruments(market='all')
for file in D.list_instruments(instruments=instruments, as_list=True):
fund = file.split("_")[0]
if fund == nameFund:
continue
nameDf = "AccumulativeNetAssetValue_%s" % fund
try:
corrSingle = float("%.1f" % corrFund[nameDf])
if corrSingle not in dictOfCorr:
dictOfCorr[corrSingle] = 1
else:
dictOfCorr[corrSingle] += 1
except:
continue
# show it in image
plt.figure(figsize=(10, 5))
plt.ylim((0, 3000))
ax = plt.gca()
# no line in right and top border
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
plt.xlabel("Correlation")
plt.ylabel("Count")
for key in sorted(dictOfCorr.keys()):
        if not math.isnan(key):
if key == minNumber:
plt.bar("<=%s" % str(key), dictOfCorr[key], width=0.8, fc='k')
else:
plt.bar(str(key), dictOfCorr[key], width=0.8, fc='k')
plt.savefig("./image/correlation_%s.png" % nameFund)
print ("------------------------ Done. ------------------------")
def getCorrelationMatrixForAllFunds(ifGetCorrFromFile = True, ifGetDfMergeFromFile = True):
print ("Begin to get Pearson's correlation matrix for all funds...")
# qlib init
qlib.init(provider_uri='data/bin')
if ifGetCorrFromFile:
if not os.path.exists("data/corr.csv"):
ifGetCorrFromFile = False
if not ifGetCorrFromFile:
if ifGetDfMergeFromFile:
if not os.path.exists("data/dfMerge.csv"):
ifGetDfMergeFromFile = False
if ifGetDfMergeFromFile:
dfMerge = pd.read_csv("data/dfMerge.csv", index_col=0)
else:
dfMerge = getDfMerge()
dfMerge = dfMerge.drop(labels='datetime',axis=1)
# count correlation
corr = dfMerge.corr()
corr.to_csv("data/corr.csv")
else:
corr = pd.read_csv("data/corr.csv", index_col=0)
print (corr)
dictOfMaxCorr = {}
minNumber = 0.9
instruments = D.instruments(market='all')
for file in D.list_instruments(instruments=instruments, as_list=True):
fund = file.split("_")[0]
nameDf = "AccumulativeNetAssetValue_%s" % fund
        # nameDf may not exist in corr
try:
corrSingle = corr[nameDf].dropna(axis=0)
corrWithoutSelf = corrSingle.drop(labels=nameDf, axis=0)
except:
continue
maxCorr = float(corrWithoutSelf.max())
maxCorr = float("%.2f" % maxCorr)
if maxCorr <= minNumber:
maxCorr = minNumber
if maxCorr not in dictOfMaxCorr:
dictOfMaxCorr[maxCorr] = 1
else:
dictOfMaxCorr[maxCorr] += 1
print (dictOfMaxCorr)
# show it in image
plt.figure(figsize=(15, 5))
plt.ylim((0, 5000))
ax = plt.gca()
# no line in right and top border
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
plt.xlabel("Maximum correlation")
plt.ylabel("Count")
for key in sorted(dictOfMaxCorr.keys()):
        if not math.isnan(key):
if key == minNumber:
plt.bar("<=%s" % str(key), dictOfMaxCorr[key], width=0.8, fc='k')
else:
plt.bar(str(key), dictOfMaxCorr[key], width=0.8, fc='k')
plt.savefig("./image/maximum_correlation.png")
print ("END.")
def getAllElementsInPortfolio():
print ("------------------------ Begin to get all elements in portfolio... ------------------------")
# read config file
cf = configparser.ConfigParser()
cf.read("config/config.ini")
folderOfPortfolio = getFolderNameInConfig("folderOfPortfolio") # the folder to save the portfolio
pathOfDfMergeFullElements = cf.get("Analyze", "pathOfDfMergeFullElements")
count = 0
countAllElements = 0
for file in os.listdir(folderOfPortfolio):
if count % 100 == 0:
print ("count = %s\tfile = %s" % (count, file))
pathFund = os.path.join(folderOfPortfolio, file)
df =
|
pd.read_csv(pathFund)
|
pandas.read_csv
|
import warnings
import numpy as np
import datetime as dt
import os
import json
import pandas as pd
from datetimerange import DateTimeRange
import dateparser
OPERAND_MAPPING_DICT = {
">": 5,
">=": 4,
"=": 3,
"<=": 2,
"<": 1
}
def check_valid_signal(x):
"""Check whether signal is valid, i.e. an array_like numeric, or raise errors.
Parameters
----------
x :
array_like, array of signal
Returns
-------
"""
if isinstance(x, dict) or isinstance(x, tuple):
raise ValueError("Expected array_like input, instead found {"
"0}:".format(type(x)))
if len(x) == 0:
raise ValueError("Empty signal")
types = []
for i in range(len(x)):
types.append(str(type(x[i])))
type_unique = np.unique(np.array(types))
if len(type_unique) != 1 and (type_unique[0].find("int") != -1 or
type_unique[0].find("float") != -1):
raise ValueError("Invalid signal: Expect numeric array, instead found "
"array with types {0}: ".format(type_unique))
if type_unique[0].find("int") == -1 and type_unique[0].find("float") == -1:
raise ValueError("Invalid signal: Expect numeric array, instead found "
"array with types {0}: ".format(type_unique))
return True
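# Illustrative usage sketch (added for clarity; the _demo_* helper below is not part of the original module).
# A plain numeric list passes the check, while dicts and empty signals raise ValueError.
def _demo_check_valid_signal():
    assert check_valid_signal([0.5, 0.6, 0.7]) is True  # numeric array-like passes
    for bad_input in ({"a": 1}, []):
        try:
            check_valid_signal(bad_input)  # dicts and empty signals are rejected
        except ValueError:
            pass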
def calculate_sampling_rate(timestamps):
"""
Parameters
----------
    timestamps : array_like of timestamps, float (unit second)
Returns
-------
float : sampling rate
"""
if isinstance(timestamps[0], float):
timestamps_second = timestamps
else:
try:
v_parse_datetime = np.vectorize(parse_datetime)
timestamps = v_parse_datetime(timestamps)
timestamps_second = []
timestamps_second.append(0)
for i in range(1, len(timestamps)):
timestamps_second.append((timestamps[i] - timestamps[
i - 1]).total_seconds())
except Exception:
sampling_rate = None
return sampling_rate
steps = np.diff(timestamps_second)
sampling_rate = round(1 / np.min(steps[steps != 0]))
return sampling_rate
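# Illustrative usage sketch (added for clarity; the _demo_* helper below is not part of the original module).
# With float timestamps in seconds spaced 10 ms apart, the estimated rate is 100 Hz.
def _demo_calculate_sampling_rate():
    ts = [i * 0.01 for i in range(100)]  # 100 samples, 10 ms apart
    assert calculate_sampling_rate(ts) == 100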
def generate_timestamp(start_datetime, sampling_rate, signal_length):
"""
Parameters
----------
start_datetime :
sampling_rate : float
signal_length : int
Returns
-------
list : list of timestamps with length equal to signal_length.
"""
number_of_seconds = (signal_length - 1) / sampling_rate
if start_datetime is None:
start_datetime = dt.datetime.now()
end_datetime = start_datetime + dt.timedelta(seconds=number_of_seconds)
time_range = DateTimeRange(start_datetime, end_datetime)
timestamps = []
for value in time_range.range(dt.timedelta(seconds=1 / sampling_rate)):
timestamps.append(value)
return timestamps
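# Illustrative usage sketch (added for clarity; the _demo_* helper below is not part of the original module).
# Five samples at 1 Hz starting from an explicit datetime.
def _demo_generate_timestamp():
    start = dt.datetime(2021, 1, 1)
    stamps = generate_timestamp(start, sampling_rate=1.0, signal_length=5)
    print(len(stamps), stamps[0], stamps[-1])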
def parse_datetime(string, type='datetime'):
"""
A simple dateparser that detects common datetime formats
Parameters
----------
string : str
a date string in format as denoted below.
Returns
-------
datetime.datetime
datetime object of a time.
"""
# some common formats.
date_formats = ['%Y-%m-%d',
'%d-%m-%Y',
'%d.%m.%Y',
'%Y.%m.%d',
'%d %b %Y',
'%Y/%m/%d',
'%d/%m/%Y']
datime_formats = ['%Y-%m-%d %H:%M:%S.%f',
'%d-%m-%Y %H:%M:%S.%f',
'%d.%m.%Y %H:%M:%S.%f',
'%Y.%m.%d %H:%M:%S.%f',
'%d %b %Y %H:%M:%S.%f',
'%Y/%m/%d %H:%M:%S.%f',
'%d/%m/%Y %H:%M:%S.%f',
'%Y-%m-%d %I:%M:%S.%f',
'%d-%m-%Y %I:%M:%S.%f',
'%d.%m.%Y %I:%M:%S.%f',
'%Y.%m.%d %I:%M:%S.%f',
'%d %b %Y %I:%M:%S.%f',
'%Y/%m/%d %I:%M:%S.%f',
'%d/%m/%Y %I:%M:%S.%f']
if type == 'date':
formats = date_formats
if type == 'datetime':
formats = datime_formats
for f in formats:
try:
return dt.datetime.strptime(string, f)
except:
pass
try:
return dateparser.parse(string)
except:
raise ValueError('Datetime string must be of standard Python format '
'(https://docs.python.org/3/library/time.html), '
'e.g., `%d-%m-%Y`, eg. `24-01-2020`')
def get_moving_average(q, w):
q_padded = np.pad(q, (w // 2, w - 1 - w // 2), mode='edge')
convole = np.convolve(q_padded, np.ones(w) / w, 'valid')
return convole
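# Illustrative usage sketch (added for clarity; the _demo_* helper below is not part of the original module).
# Edge padding keeps the smoothed output the same length as the input.
def _demo_get_moving_average():
    q = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    smoothed = get_moving_average(q, w=3)
    assert len(smoothed) == len(q)
    assert abs(smoothed[2] - 3.0) < 1e-9  # centre values are plain 3-point means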
def parse_rule(name, source):
assert os.path.isfile(source) is True, 'Source file not found'
with open(source) as json_file:
all = json.load(json_file)
try:
sqi = all[name]
except:
raise Exception("SQI {0} not found".format(name))
rule_def, boundaries, label_list = update_rule(sqi['def'],
is_update=False)
return rule_def, \
boundaries, \
label_list
def update_rule(rule_def, threshold_list=[], is_update=True):
if rule_def is None or is_update:
all_rules = []
else:
all_rules = list(np.copy(rule_def))
for threshold in threshold_list:
all_rules.append(threshold)
df = sort_rule(all_rules)
df = decompose_operand(df.to_dict('records'))
boundaries = np.sort(df["value"].unique())
inteveral_label_list = get_inteveral_label_list(df, boundaries)
value_label_list = get_value_label_list(df, boundaries, inteveral_label_list)
label_list = []
for i in range(len(value_label_list)):
label_list.append(inteveral_label_list[i])
label_list.append(value_label_list[i])
label_list.append(inteveral_label_list[-1])
return all_rules, boundaries, label_list
def sort_rule(rule_def):
df = pd.DataFrame(rule_def)
df["value"] = pd.to_numeric(df["value"])
df['operand_order'] = df['op'].map(OPERAND_MAPPING_DICT)
df.sort_values(by=['value', 'operand_order'],
inplace=True,
ascending=[True, True],
ignore_index=True)
return df
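# Illustrative usage sketch (added for clarity; the _demo_* helper and the rule dicts below are hypothetical).
# Rules are ordered by threshold value first and then by operand precedence from OPERAND_MAPPING_DICT.
def _demo_sort_rule():
    rules = [
        {"op": ">", "value": "10", "label": "accept"},
        {"op": "<", "value": "10", "label": "reject"},
        {"op": ">=", "value": "5", "label": "accept"},
    ]
    print(sort_rule(rules)[["op", "value", "operand_order"]])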
def decompose_operand(rule_dict):
df = pd.DataFrame(rule_dict)
df["value"] =
|
pd.to_numeric(df["value"])
|
pandas.to_numeric
|
# Data up-to-date as of April 25, 2018
#!/usr/bin/env python
import pandas as pd
from bs4 import BeautifulSoup
import urllib2
from re import sub
from decimal import Decimal
the_numbers_data = pd.DataFrame(columns=['date','name', 'budget', 'domestic', 'worldwide'])
for j in range (0, 56):
page_num = j*100 + 1
url = 'https://www.the-numbers.com/movie/budgets/all/' + str(page_num)
page = urllib2.urlopen(url).read()
soup = BeautifulSoup(page, "lxml")
raw_data = []
tds = soup.find('table').find_all('td')
for i in range (0, 100):
temp = []
temp.append(str(tds[6*i+1].contents[0].contents[0]))
temp.append(tds[6*i+2].contents[0].contents[0].contents[0].encode('utf-8'))
temp.append(int(Decimal(sub(r'[^\d.]', '', tds[6*i+3].contents[0]))))
temp.append(int(Decimal(sub(r'[^\d.]', '', tds[6*i+4].contents[0]))))
temp.append(int(Decimal(sub(r'[^\d.]', '', tds[6*i+5].contents[0]))))
raw_data.append(temp)
if j == 55 and i == 18:
break
this_page_df =
|
pd.DataFrame(raw_data, columns=['date','name', 'budget', 'domestic', 'worldwide'])
|
pandas.DataFrame
|
#!/usr/bin/env python3
import os
import sys
import random
import time
from random import seed, randint
import argparse
import platform
from datetime import datetime
import imp
import subprocess
import glob
import re
from helperFunctions.myFunctions_helper import *
import numpy as np
import pandas as pd
import fileinput
from itertools import product
from Bio.PDB.PDBParser import PDBParser
from Bio.PDB import PDBList
from pdbfixer import PDBFixer
from simtk.openmm.app import PDBFile
# compute cross Q for every pdb pair in one folder
# parser = argparse.ArgumentParser(description="Compute cross q")
# parser.add_argument("-m", "--mode",
# type=int, default=1)
# args = parser.parse_args()
def getFromTerminal(CMD):
return subprocess.Popen(CMD,stdout=subprocess.PIPE,shell=True).communicate()[0].decode()
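# Illustrative usage sketch (added for clarity; the _demo_* helper below is not part of the original file).
# Runs a shell command and returns its stdout decoded to a string.
def _demo_getFromTerminal():
    assert getFromTerminal("echo hello").strip() == "hello"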
def read_hydrophobicity_scale(seq, isNew=False):
seq_dataFrame = pd.DataFrame({"oneLetterCode":list(seq)})
HFscales = pd.read_table("~/opt/small_script/Whole_residue_HFscales.txt")
if not isNew:
# Octanol Scale
# new and old difference is at HIS.
code = {"GLY" : "G", "ALA" : "A", "LEU" : "L", "ILE" : "I",
"ARG+" : "R", "LYS+" : "K", "MET" : "M", "CYS" : "C",
"TYR" : "Y", "THR" : "T", "PRO" : "P", "SER" : "S",
"TRP" : "W", "ASP-" : "D", "GLU-" : "E", "ASN" : "N",
"GLN" : "Q", "PHE" : "F", "HIS+" : "H", "VAL" : "V",
"M3L" : "K", "MSE" : "M", "CAS" : "C"}
else:
code = {"GLY" : "G", "ALA" : "A", "LEU" : "L", "ILE" : "I",
"ARG+" : "R", "LYS+" : "K", "MET" : "M", "CYS" : "C",
"TYR" : "Y", "THR" : "T", "PRO" : "P", "SER" : "S",
"TRP" : "W", "ASP-" : "D", "GLU-" : "E", "ASN" : "N",
"GLN" : "Q", "PHE" : "F", "HIS0" : "H", "VAL" : "V",
"M3L" : "K", "MSE" : "M", "CAS" : "C"}
HFscales_with_oneLetterCode = HFscales.assign(oneLetterCode=HFscales.AA.str.upper().map(code)).dropna()
data = seq_dataFrame.merge(HFscales_with_oneLetterCode, on="oneLetterCode", how="left")
return data
def create_zim(seqFile, isNew=False):
a = seqFile
seq = getFromTerminal("cat " + a).rstrip()
data = read_hydrophobicity_scale(seq, isNew=isNew)
z = data["DGwoct"].values
np.savetxt("zim", z, fmt="%.2f")
def expand_grid(dictionary):
return pd.DataFrame([row for row in product(*dictionary.values())],
columns=dictionary.keys())
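# Illustrative usage sketch (added for clarity; the _demo_* helper below is not part of the original file).
# expand_grid builds the Cartesian product of the value lists, one column per key.
def _demo_expand_grid():
    grid = expand_grid({"force": [0.0, 0.1], "temp": [300, 350]})
    assert len(grid) == 4  # 2 x 2 combinations
    assert list(grid.columns) == ["force", "temp"]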
def duplicate_pdb(From, To, offset_x=0, offset_y=0, offset_z=0, new_chain="B"):
with open(To, "w") as out:
with open(From, "r") as f:
for line in f:
tmp = list(line)
atom = line[0:4]
atomSerialNumber = line[6:11]
atomName = line[12:16]
atomResidueName = line[17:20]
chain = line[21]
residueNumber = line[22:26]
# change chain A to B
# new_chain = "B"
tmp[21] = new_chain
if atom == "ATOM":
x = float(line[30:38])
y = float(line[38:46])
z = float(line[46:54])
                    # shift the coordinates by the given offsets
new_x = x + offset_x
new_y = y + offset_y
new_z = z + offset_z
tmp[30:38] = "{:8.3f}".format(new_x)
tmp[38:46] = "{:8.3f}".format(new_y)
tmp[46:54] = "{:8.3f}".format(new_z)
a = "".join(tmp)
out.write(a)
def compute_native_contacts(coords, MAX_OFFSET=4, DISTANCE_CUTOFF=9.5):
native_coords = np.array(coords)
a= native_coords[:,np.newaxis]
dis = np.sqrt(np.sum((a - native_coords)**2, axis=2))
n = len(dis)
remove_band = np.eye(n)
for i in range(1, MAX_OFFSET):
remove_band += np.eye(n, k=i)
remove_band += np.eye(n, k=-i)
dis[remove_band==1] = np.max(dis)
native_contacts = dis < DISTANCE_CUTOFF
return native_contacts.astype("int")
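# Illustrative usage sketch (added for clarity; the _demo_* helper and its toy coordinates are hypothetical).
# A chain folded back on itself gains contacts between the two strands, while pairs
# within MAX_OFFSET residues of each other in sequence are always masked out.
def _demo_compute_native_contacts():
    strand_out = [[3.8 * i, 0.0, 0.0] for i in range(5)]
    strand_back = [[3.8 * (9 - i), 5.0, 0.0] for i in range(5, 10)]
    contacts = compute_native_contacts(strand_out + strand_back)
    assert contacts.shape == (10, 10)
    assert contacts[0][9] == 1  # first and last beads sit 5 A apart across the turn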
def compute_contacts(coords, native_contacts, DISTANCE_CUTOFF=9.5):
native_coords = np.array(coords)
a= native_coords[:,np.newaxis]
dis = np.sqrt(np.sum((a - native_coords)**2, axis=2))
    contacts = dis < DISTANCE_CUTOFF
    contacts = contacts*native_contacts  # remove non-native contacts
    return np.sum(contacts, axis=1).astype("float")
def compute_localQ_init(MAX_OFFSET=4, DISTANCE_CUTOFF=9.5):
from pathlib import Path
home = str(Path.home())
struct_id = '2xov'
filename = os.path.join(home, "opt/pulling/2xov.pdb")
p = PDBParser(PERMISSIVE=1)
s = p.get_structure(struct_id, filename)
chains = s[0].get_list()
# import pdb file
native_coords = []
for chain in chains:
dis = []
all_res = []
for res in chain:
is_regular_res = res.has_id('CA') and res.has_id('O')
res_id = res.get_id()[0]
if (res.get_resname()=='GLY'):
native_coords.append(res['CA'].get_coord())
elif (res_id==' ' or res_id=='H_MSE' or res_id=='H_M3L' or res_id=='H_CAS') and is_regular_res:
native_coords.append(res['CB'].get_coord())
else:
print('ERROR: irregular residue at %s!' % res)
exit()
native_contacts_table = compute_native_contacts(native_coords, MAX_OFFSET, DISTANCE_CUTOFF)
return native_contacts_table
def compute_localQ(native_contacts_table, pre=".", ii=-1, MAX_OFFSET=4, DISTANCE_CUTOFF=9.5):
native_contacts = np.sum(native_contacts_table, axis=1).astype("float")
dump = read_lammps(os.path.join(pre, f"dump.lammpstrj.{ii}"), ca=False)
localQ_list = []
for atom in dump:
contacts = compute_contacts(np.array(atom), native_contacts_table, DISTANCE_CUTOFF=DISTANCE_CUTOFF)
c = np.divide(contacts, native_contacts, out=np.zeros_like(contacts), where=native_contacts!=0)
localQ_list.append(c)
data = pd.DataFrame(localQ_list)
data.columns = ["Res" + str(i+1) for i in data.columns]
data.to_csv(os.path.join(pre, f"localQ.{ii}.csv"), index=False)
def readPMF_basic(pre):
# perturbation_table = {0:"original", 1:"p_mem",
# 2:"m_mem", 3:"p_lipid",
# 4:"m_lipid", 5:"p_go",
# 6:"m_go", 7:"p_rg", 8:"m_rg"}
perturbation_table = {0:"original", 1:"m_go",
2:"p_go", 3:"m_lipid",
4:"p_lipid", 5:"m_mem",
6:"p_mem", 7:"m_rg", 8:"p_rg"}
pmf_list = {
"perturbation":list(perturbation_table.keys())
}
pmf_list_data = expand_grid(pmf_list)
all_pmf_list = []
for index, row in pmf_list_data.iterrows():
perturbation = row["perturbation"]
if perturbation == 0:
location = pre + f"/pmf-*.dat"
pmf_list = glob.glob(location)
change = "none"
upOrDown = "none"
else:
location = pre + f"/perturbation-{perturbation}-pmf-*.dat"
pmf_list = glob.glob(location)
change = perturbation_table[perturbation].split("_")[-1]
upOrDown = perturbation_table[perturbation].split("_")[0]
# print(location)
name_list = ["f", "df", "e", "s"]
names = ["bin", "x"] + name_list
for location in pmf_list:
# print(location)
temp = re.findall(r'pmf-(\d+)', location)
if len(temp) != 1:
raise ValueError('Not expected to see more than one or none')
else:
temp = temp[0]
data = pd.read_table(location, skiprows=2, sep='\s+', names=names).assign(upOrDown=upOrDown, change=change, temp=temp, perturbation=perturbation_table[perturbation])
all_pmf_list.append(data)
return pd.concat(all_pmf_list).dropna().reset_index()
def make_metadata_3(k=1000.0, temps_list=["450"], i=-1, biasLow=None, biasHigh=None):
print("make metadata")
cwd = os.getcwd()
files = glob.glob(f"../data_{i}/*")
kconstant = k
with open("metadatafile", "w") as out:
for oneFile in sorted(files):
tmp = oneFile.split("/")[-1].replace('.dat', '')
t = tmp.split("_")[1]
bias = tmp.split("_")[3]
if biasLow:
if float(bias) < biasLow:
continue
if biasHigh:
if float(bias) > biasHigh:
continue
# print(tmp)
# if int(float(dis)) > 150:
# continue
if t in temps_list:
target = "../{} {} {} {}\n".format(oneFile, t, kconstant, bias)
out.write(target)
def readPMF(pre, is2d=False, force_list=["0.0", "0.1", "0.2"]):
# perturbation_table = {0:"original", 1:"p_mem",
# 2:"m_mem", 3:"p_lipid",
# 4:"m_lipid", 5:"p_go",
# 6:"m_go", 7:"p_rg", 8:"m_rg"}
perturbation_table = {0:"original", 1:"m_go",
2:"p_go", 3:"m_lipid",
4:"p_lipid", 5:"m_mem",
6:"p_mem", 7:"m_rg", 8:"p_rg"}
pmf_list = {
"perturbation":list(perturbation_table.keys()),
"force":force_list
}
pmf_list_data = expand_grid(pmf_list)
all_pmf_list = []
for index, row in pmf_list_data.iterrows():
force = row["force"]
perturbation = row["perturbation"]
if perturbation == 0:
location = pre + f"/force_{force}/pmf-*.dat"
pmf_list = glob.glob(location)
change = "none"
upOrDown = "none"
else:
location = pre + f"/force_{force}/perturbation-{perturbation}-pmf-*.dat"
pmf_list = glob.glob(location)
change = perturbation_table[perturbation].split("_")[-1]
upOrDown = perturbation_table[perturbation].split("_")[0]
# print(pmf_list)
name_list = ["f", "df", "e", "s"]
if is2d:
names = ["x", "y"] + name_list
else:
names = ["bin", "x"] + name_list
for location in pmf_list:
# print(location)
temp = re.findall(r'pmf-(\d+)', location)
if len(temp) != 1:
raise ValueError('Not expected to see more than one or none')
else:
temp = temp[0]
data = pd.read_table(location, skiprows=2, sep='\s+', names=names).assign(upOrDown=upOrDown, change=change, force=force, temp=temp, perturbation=perturbation_table[perturbation])
all_pmf_list.append(data)
return pd.concat(all_pmf_list).dropna().reset_index()
def readPMF_2(pre, is2d=0, force_list=["0.0", "0.1", "0.2"]):
if is2d:
print("reading 2d pmfs")
else:
print("reading 1d dis, qw and z")
if is2d == 1:
mode_list = ["2d_qw_dis", "2d_z_dis", "2d_z_qw"]
elif is2d == 2:
mode_list = ["quick"]
else:
mode_list = ["1d_dis", "1d_qw", "1d_z"]
all_data_list =[]
for mode in mode_list:
tmp = readPMF(mode, is2d, force_list).assign(mode=mode)
all_data_list.append(tmp)
return pd.concat(all_data_list).dropna().reset_index()
def shrinkage(n=552, shrink_size=6, max_frame=2000, fileName="dump.lammpstrj"):
print("Shrinkage: size: {}, max_frame: {}".format(shrink_size, max_frame))
bashCommand = "wc " + fileName
process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
line_number = int(output.decode("utf-8").split()[0])
print(line_number)
print(line_number/552)
    # each frame spans n lines: 543 atoms plus the dump header lines, 552 in total
    n = 552
count = 0
with open("small.lammpstrj", "w") as out:
with open(fileName, "r") as f:
for i, line in enumerate(f):
if (i // n) % shrink_size == 0:
if count >= max_frame*n:
break
count += 1
out.write(line)
def compute_theta_for_each_helix(output="angles.csv", dumpName="../dump.lammpstrj.0"):
print("This is for 2xov only")
helices_list = [(94,114), (147,168), (171, 192), (200, 217), (226, 241), (250, 269)]
atoms_all_frames = read_lammps(dumpName)
# print(atoms[0])
# print(len(atoms), len(atoms[0]))
# helices_angles_all_frames = []
with open(output, "w") as out:
out.write("Frame, Helix, Angle\n")
for ii, frame in enumerate(atoms_all_frames):
# helices_angles = []
for count, (i, j) in enumerate(helices_list):
# print(i, j)
i = i-91
j = j-91
# end - start
a = np.array(frame[j]) - np.array(frame[i])
b = np.array([0, 0, 1])
angle = a[2]/length(a) # in form of cos theta
# helices_angles.append(angle)
# print(angle)
out.write("{}, {}, {}\n".format(ii, count+1, angle))
# helices_angles_all_frames.append(helices_angles)
def structure_prediction_run(protein):
print(protein)
protocol_list = ["awsemer", "frag", "er"]
do = os.system
cd = os.chdir
cd(protein)
# run = "frag"
for protocol in protocol_list:
do("rm -r " + protocol)
do("mkdir -p " + protocol)
do("cp -r {} {}/".format(protein, protocol))
cd(protocol)
cd(protein)
# do("cp ~/opt/gremlin/protein/{}/gremlin/go_rnativeC* .".format(protein))
do("cp ~/opt/gremlin/protein/{}/raptor/go_rnativeC* .".format(protein))
fileName = protein + "_multi.in"
backboneFile = "fix_backbone_coeff_" + protocol
with fileinput.FileInput(fileName, inplace=True, backup='.bak') as file:
for line in file:
tmp = line.replace("fix_backbone_coeff_er", backboneFile)
print(tmp, end='')
cd("..")
do("run.py -m 0 -n 20 {}".format(protein))
cd("..")
cd("..")
# do("")
def check_and_correct_fragment_memory(fragFile="fragsLAMW.mem"):
with open("tmp.mem", "w") as out:
with open(fragFile, "r") as f:
for i in range(4):
line = next(f)
out.write(line)
for line in f:
gro, _, i, n, _ = line.split()
delete = False
# print(gro, i, n)
# name = gro.split("/")[-1]
with open(gro, "r") as one:
next(one)
next(one)
all_residues = set()
for atom in one:
residue, *_ = atom.split()
# print(residue)
all_residues.add(int(residue))
for test in range(int(i), int(i)+int(n)):
if test not in all_residues:
print("ATTENTION", gro, i, n, "missing:",test)
delete = True
if not delete:
out.write(line)
os.system(f"mv {fragFile} fragsLAMW_back")
os.system(f"mv tmp.mem {fragFile}")
def read_complete_temper_2(n=4, location=".", rerun=-1, qnqc=False, average_z=False, localQ=False, disReal=False, dis_h56=False, goEnergy=False, goEnergy3H=False, goEnergy4H=False):
all_data_list = []
for i in range(n):
file = "lipid.{}.dat".format(i)
lipid = pd.read_csv(location+file)
lipid.columns = lipid.columns.str.strip()
remove_columns = ['Steps']
lipid = lipid.drop(remove_columns, axis=1)
file = "rgs.{}.dat".format(i)
rgs = pd.read_csv(location+file)
rgs.columns = rgs.columns.str.strip()
remove_columns = ['Steps']
rgs = rgs.drop(remove_columns, axis=1)
file = "energy.{}.dat".format(i)
energy = pd.read_csv(location+file)
energy.columns = energy.columns.str.strip()
energy = energy[["AMH-Go", "Membrane", "Rg"]]
file = "addforce.{}.dat".format(i)
dis = pd.read_csv(location+file)
dis.columns = dis.columns.str.strip()
remove_columns = ['Steps', 'AddedForce', 'Dis12', 'Dis34', 'Dis56']
dis.drop(remove_columns, axis=1,inplace=True)
file = "wham.{}.dat".format(i)
wham = pd.read_csv(location+file).assign(Run=i)
wham.columns = wham.columns.str.strip()
remove_columns = ['Rg', 'Tc']
wham = wham.drop(remove_columns, axis=1)
if qnqc:
qc = pd.read_table(location+f"qc_{i}", names=["qc"])[1:].reset_index(drop=True)
qn = pd.read_table(location+f"qn_{i}", names=["qn"])[1:].reset_index(drop=True)
qc2 = pd.read_table(location+f"qc2_{i}", names=["qc2"])[1:].reset_index(drop=True)
wham = pd.concat([wham, qn, qc, qc2],axis=1)
# if average_z:
# z = pd.read_table(location+f"z_{i}.dat", names=["AverageZ"])[1:].reset_index(drop=True)
# wham = pd.concat([wham, z],axis=1)
if disReal:
tmp = pd.read_csv(location+f"distance_{i}.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
# print(tmp)
tmp.columns = tmp.columns.str.strip()
wham = pd.concat([wham, tmp],axis=1)
if dis_h56:
tmp = pd.read_csv(location+f"distance_h56_{i}.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
tmp1 = pd.read_csv(location+f"distance_h12_{i}.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
tmp2 = pd.read_csv(location+f"distance_h34_{i}.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
# print(tmp)
tmp.columns = tmp.columns.str.strip()
tmp1.columns = tmp1.columns.str.strip()
tmp2.columns = tmp2.columns.str.strip()
wham = pd.concat([wham, tmp, tmp1, tmp2],axis=1)
if average_z:
z = pd.read_csv(location+f"z_complete_{i}.dat")[1:].reset_index(drop=True)
z.columns = z.columns.str.strip()
wham = pd.concat([wham, z],axis=1)
if localQ:
all_localQ = pd.read_csv(location+f"localQ.{i}.csv")[1:].reset_index(drop=True)
wham = pd.concat([wham, all_localQ], axis=1)
if goEnergy:
tmp = pd.read_csv(location+f"Go_{i}/goEnergy.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
# print(tmp)
tmp.columns = tmp.columns.str.strip()
wham = pd.concat([wham, tmp],axis=1)
if goEnergy3H:
nEnergy = pd.read_csv(location+f"Go_3helix_{i}/goEnergy.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
# print(tmp)
nEnergy.columns = nEnergy.columns.str.strip()
wham = pd.concat([wham, nEnergy],axis=1)
if goEnergy4H:
nEnergy = pd.read_csv(location+f"Go_4helix_{i}/goEnergy.dat")[1:].reset_index(drop=True).drop('Steps', axis=1)
# print(tmp)
nEnergy.columns = nEnergy.columns.str.strip()
wham = pd.concat([wham, nEnergy],axis=1)
data = pd.concat([wham, dis, energy, rgs, lipid], axis=1)
# lipid = lipid[["Steps","Lipid","Run"]]
all_data_list.append(data)
data = pd.concat(all_data_list)
file = f"../log{rerun}/log.lammps"
temper = pd.read_table(location+file, skiprows=2, sep=' ')
temper = temper.melt(id_vars=['Step'], value_vars=['T' + str(i) for i in range(n)], value_name="Temp", var_name="Run")
temper["Run"] = temper["Run"].str[1:].astype(int)
temper["Temp"] = "T" + temper["Temp"].astype(str)
# print(temper)
# print(wham)
t2 = temper.merge(data, how='inner', left_on=["Step", "Run"], right_on=["Steps", "Run"]).sort_values('Step').drop('Steps', axis=1)
# print(t2)
t3 = t2.assign(TotalE=t2.Energy + t2.Lipid)
return t3.sort_values(["Step", "Run"]).reset_index(drop=True)
def process_complete_temper_data_3(pre, data_folder, folder_list, rerun=-1, end=-1, n=12, bias="dis", qnqc=False, average_z=False, disReal=False, dis_h56=False, localQ=False, goEnergy=False, goEnergy3H=False, goEnergy4H=False, label=""):
print("process temp data")
dateAndTime = datetime.today().strftime('%d_%h_%H%M%S')
for folder in folder_list:
simulation_list = glob.glob(pre+folder+f"/simulation/{bias}_*")
print(pre+folder+f"/simulation/{bias}_*")
os.system("mkdir -p " + pre+folder+"/data")
        # this only considers rerun >= 0; for the case rerun = -1, move log.lammps to log0
for i in range(rerun, end, -1):
all_data_list = []
for one_simulation in simulation_list:
bias_num = one_simulation.split("_")[-1]
print(bias_num, "!")
location = one_simulation + f"/{i}/"
print(location)
data = read_complete_temper_2(location=location, n=n, rerun=i, qnqc=qnqc, average_z=average_z, localQ=localQ, disReal=disReal, dis_h56=dis_h56, goEnergy=goEnergy, goEnergy3H=goEnergy3H, goEnergy4H=goEnergy4H)
print(data.shape)
# remove_columns = ['Step', "Run"]
# data = data.drop(remove_columns, axis=1)
all_data_list.append(data.assign(BiasTo=bias_num))
data = pd.concat(all_data_list).reset_index(drop=True)
# if localQ:
# print("hi")
# else:
# data.to_csv(os.path.join(pre, folder, f"data/rerun_{i}.csv"))
# complete_data_list.append(data)
# temps = list(dic.keys())
# complete_data = pd.concat(complete_data_list)
name = f"rerun_{2*i}_{dateAndTime}.feather"
data = data.reset_index(drop=True)
data.query(f'Step > {2*i}e7 & Step <= {2*i+1}e7').reset_index(drop=True).to_feather(pre+folder+"/" + name)
os.system("cp "+pre+folder+"/" + name + " "+data_folder+label+name)
name = f"rerun_{2*i+1}_{dateAndTime}.feather"
data = data.reset_index(drop=True)
data.query(f'Step > {2*i+1}e7 & Step <= {2*i+2}e7').reset_index(drop=True).to_feather(pre+folder+"/" + name)
os.system("cp "+pre+folder+"/" + name + " "+data_folder+label+name)
def move_data4(data_folder, freeEnergy_folder, folder_list, temp_dict_mode=1, sub_mode_name="", kmem=0.2, klipid=0.1, kgo=0.1, krg=0.2, sample_range_mode=0, biasName="dis", qnqc=False, average_z=0, chosen_mode=0):
print("move data")
# dic = {"T_defined":300, "T0":350, "T1":400, "T2":450, "T3":500, "T4":550, "T5":600, "T6":650, "T7":700, "T8":750, "T9":800, "T10":900, "T11":1000}
if temp_dict_mode == 1:
dic = {"T0":280, "T1":300, "T2":325, "T3":350, "T4":375, "T5":400, "T6":450, "T7":500, "T8":550, "T9":600, "T10":650, "T11":700}
if temp_dict_mode == 2:
dic = {"T0":280, "T1":290, "T2":300, "T3":315, "T4":335, "T5":355, "T6":380, "T7":410, "T8":440, "T9":470, "T10":500, "T11":530}
if temp_dict_mode == 3:
dic = {"T0":280, "T1":290, "T2":300, "T3":310, "T4":320, "T5":335, "T6":350, "T7":365, "T8":380, "T9":410, "T10":440, "T11":470}
if temp_dict_mode == 4:
dic = {"T0":300, "T1":335, "T2":373, "T3":417, "T4":465, "T5":519, "T6":579, "T7":645, "T8":720, "T9":803, "T10":896, "T11":1000}
# read in complete.feather
data_list = []
for folder in folder_list:
tmp = pd.read_feather(data_folder + folder +".feather")
data_list.append(tmp)
data = pd.concat(data_list)
os.system("mkdir -p "+freeEnergy_folder+"/"+sub_mode_name+f"/data_{sample_range_mode}")
for bias, oneBias in data.groupby("BiasTo"):
for tempSymbol, oneTempAndBias in oneBias.groupby("Temp"):
temp = dic[tempSymbol]
if float(temp) > 800:
continue
print(f"t_{temp}_{biasName}_{bias}.dat")
if sample_range_mode == 0:
queryCmd = 'Step > 0 & Step <= 1e7'
if sample_range_mode == 1:
queryCmd = 'Step > 1e7 & Step <= 2e7'
elif sample_range_mode == 2:
queryCmd ='Step > 2e7 & Step <= 3e7'
elif sample_range_mode == 3:
queryCmd ='Step > 3e7 & Step <= 4e7'
elif sample_range_mode == 4:
queryCmd ='Step > 4e7 & Step <= 5e7'
elif sample_range_mode == 5:
queryCmd ='Step > 5e7 & Step <= 6e7'
elif sample_range_mode == 6:
queryCmd ='Step > 6e7 & Step <= 7e7'
elif sample_range_mode == 7:
queryCmd ='Step > 7e7 & Step <= 8e7'
elif sample_range_mode == -1:
queryCmd ='Step > 4e7 & Step <= 6e7'
if sample_range_mode == -2:
tmp = oneTempAndBias.reset_index(drop=True)
else:
tmp = oneTempAndBias.query(queryCmd).reset_index()
if average_z < 5:
chosen_list = ["TotalE", "Qw", "Distance"]
elif average_z == 5:
chosen_list = ["TotalE", "Qw", "DisReal"]
chosen_list += ["z_h6"]
if average_z == 1:
chosen_list += ["abs_z_average"]
if average_z == 2 or average_z == 3:
chosen_list += ["z_h6"]
if average_z == 3:
chosen_list += ["DisReal"]
if average_z == 4:
tmp["z_h5_and_h6"] = tmp["z_h5"] + tmp["z_h6"]
chosen_list += ["z_h5_and_h6"]
chosen_list += ["DisReal"]
if average_z == 6:
chosen_list = ["TotalE", "Qw", "DisReal"]
tmp["z_h5_and_h6"] = tmp["z_h5"] + tmp["z_h6"]
chosen_list += ["z_h5_and_h6"]
chosen_list += ["z_h5"]
chosen_list += ["z_h6"]
chosen_list += ["Dis_h56"]
if average_z == 7:
chosen_list = ["TotalE", "Qw", "DisReal"]
tmp["z_h56"] = tmp["z_h5"] + tmp["z_h6"]
tmp["z_h14"] = tmp["z_h1"] + tmp["z_h2"] + tmp["z_h3"] + tmp["z_h4"]
chosen_list += ["z_h14"]
chosen_list += ["z_h56"]
chosen_list += ["z_h5"]
chosen_list += ["z_h6"]
chosen_list += ["Dis_h12"]
chosen_list += ["Dis_h34"]
chosen_list += ["Dis_h56"]
if chosen_mode == 0:
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_perturb_mem_p=tmp.TotalE + kmem*tmp.Membrane,
TotalE_perturb_mem_m=tmp.TotalE - kmem*tmp.Membrane,
TotalE_perturb_lipid_p=tmp.TotalE + klipid*tmp.Lipid,
TotalE_perturb_lipid_m=tmp.TotalE - klipid*tmp.Lipid,
TotalE_perturb_go_p=tmp.TotalE + kgo*tmp["AMH-Go"],
TotalE_perturb_go_m=tmp.TotalE - kgo*tmp["AMH-Go"],
TotalE_perturb_rg_p=tmp.TotalE + krg*tmp.Rg,
TotalE_perturb_rg_m=tmp.TotalE - krg*tmp.Rg)
if chosen_mode == 1:
chosen_list += ["Res" + str(i+1) for i in range(181)]
chosen = tmp[chosen_list]
if chosen_mode == 2:
chosen_list += ["Res" + str(i+1) for i in range(181)]
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_perturb_go_m=tmp.TotalE - kgo*tmp["AMH-Go"],
TotalE_perturb_go_p=tmp.TotalE + kgo*tmp["AMH-Go"],
TotalE_perturb_lipid_m=tmp.TotalE - klipid*tmp.Lipid,
TotalE_perturb_lipid_p=tmp.TotalE + klipid*tmp.Lipid,
TotalE_perturb_mem_m=tmp.TotalE - kmem*tmp.Membrane,
TotalE_perturb_mem_p=tmp.TotalE + kmem*tmp.Membrane,
TotalE_perturb_rg_m=tmp.TotalE - krg*tmp.Rg,
TotalE_perturb_rg_p=tmp.TotalE + krg*tmp.Rg)
# print(tmp.count())
if chosen_mode == 3:
chosen_list += ["AMH-Go", "Lipid", "Membrane", "Rg"]
chosen = tmp[chosen_list]
if chosen_mode == 4:
chosen_list += ["Dis_h56"]
chosen = tmp[chosen_list]
if chosen_mode == 5:
chosen_list += ["Dis_h56"]
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_perturb_go_m=tmp.TotalE/10,
TotalE_perturb_go_p=0,
Go=tmp["AMH-Go"])
if chosen_mode == 6:
chosen_list += ["Dis_h56"]
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_1=tmp.TotalE + 0.1*tmp.AMH,
TotalE_2=tmp.TotalE + 0.2*tmp.AMH,
TotalE_3=tmp.TotalE + 0.5*tmp.AMH,
TotalE_4=tmp.TotalE + tmp.AMH,
TotalE_5=tmp.AMH)
if chosen_mode == 7:
chosen_list += ["Dis_h56"]
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_1=tmp.TotalE + 0.1*tmp.AMH_3H,
TotalE_2=tmp.TotalE + 0.2*tmp.AMH_3H,
TotalE_3=tmp.TotalE + 0.5*tmp.AMH_3H,
TotalE_4=tmp.TotalE + tmp.AMH_3H,
TotalE_5=tmp.TotalE + 0.1*tmp.AMH,
TotalE_6=tmp.TotalE + 0.2*tmp.AMH)
if chosen_mode == 8:
# chosen_list += ["Dis_h56"]
chosen_list += ["z_average"]
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_1=tmp.TotalE + 0.1*tmp.AMH_4H,
TotalE_2=tmp.TotalE + 0.2*tmp.AMH_4H,
TotalE_3=tmp.TotalE + 0.5*tmp.AMH_4H,
TotalE_4=tmp.TotalE + 0.1*tmp.AMH_3H,
TotalE_5=tmp.TotalE + 0.2*tmp.AMH_3H,
TotalE_6=tmp.TotalE + 0.5*tmp.AMH_3H)
if chosen_mode == 9:
# chosen_list += ["Dis_h56"]
chosen_list += ["z_average"]
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_1=tmp.TotalE + 0.1*tmp.AMH_4H,
TotalE_2=tmp.TotalE + 0.2*tmp.AMH_4H,
TotalE_3=tmp.TotalE + 0.5*tmp.AMH_4H)
chosen = chosen.assign(TotalE_perturb_1go_m=chosen.TotalE_2 - kgo*tmp["AMH-Go"],
TotalE_perturb_1go_p=chosen.TotalE_2 + kgo*tmp["AMH-Go"],
TotalE_perturb_2lipid_m=chosen.TotalE_2 - tmp.Lipid,
TotalE_perturb_2lipid_p=chosen.TotalE_2 + tmp.Lipid,
TotalE_perturb_3mem_m=chosen.TotalE_2 - tmp.Membrane,
TotalE_perturb_3mem_p=chosen.TotalE_2 + tmp.Membrane,
TotalE_perturb_4rg_m=chosen.TotalE_2 - tmp.Rg,
TotalE_perturb_4rg_p=chosen.TotalE_2 + tmp.Rg,
TotalE_perturb_5go=tmp["AMH-Go"],
TotalE_perturb_5lipid=tmp.Lipid,
TotalE_perturb_5mem=tmp.Membrane,
TotalE_perturb_5rg=tmp.Rg)
if chosen_mode == 10:
# chosen_list += ["Dis_h56"]
chosen_list += ["z_average"]
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_1=tmp.TotalE + 0.1*tmp.AMH_4H,
TotalE_2=tmp.TotalE + 0.2*tmp.AMH_4H,
TotalE_3=tmp.TotalE + 0.5*tmp.AMH_4H)
chosen = chosen.assign(TotalE_perturb_1lipid_m1=chosen.TotalE_2 - 0.1*tmp.Lipid,
TotalE_perturb_1lipid_p1=chosen.TotalE_2 + 0.1*tmp.Lipid,
TotalE_perturb_2lipid_m2=chosen.TotalE_2 - 0.2*tmp.Lipid,
TotalE_perturb_2lipid_p2=chosen.TotalE_2 + 0.2*tmp.Lipid,
TotalE_perturb_3lipid_m3=chosen.TotalE_2 - 0.3*tmp.Lipid,
TotalE_perturb_3lipid_p3=chosen.TotalE_2 + 0.3*tmp.Lipid,
TotalE_perturb_4lipid_m4=chosen.TotalE_2 - 0.5*tmp.Lipid,
TotalE_perturb_4lipid_p4=chosen.TotalE_2 + 0.5*tmp.Lipid,
TotalE_perturb_5go=tmp["AMH-Go"],
TotalE_perturb_5lipid=tmp.Lipid,
TotalE_perturb_5mem=tmp.Membrane,
TotalE_perturb_5rg=tmp.Rg)
if chosen_mode == 11:
# chosen_list += ["Dis_h56"]
chosen_list += ["z_average"]
chosen = tmp[chosen_list]
chosen = chosen.assign(TotalE_1=tmp.TotalE + 1.1*0.1*tmp.AMH_4H + 0.1*tmp["AMH-Go"],
TotalE_2=tmp.TotalE + 1.1*0.2*tmp.AMH_4H + 0.1*tmp["AMH-Go"],
TotalE_3=tmp.TotalE + 1.1*0.5*tmp.AMH_4H + 0.1*tmp["AMH-Go"])
chosen = chosen.assign(TotalE_perturb_1lipid_m1=chosen.TotalE_2 - 0.1*tmp.Lipid,
TotalE_perturb_1lipid_p1=chosen.TotalE_2 + 0.1*tmp.Lipid,
TotalE_perturb_2lipid_m2=chosen.TotalE_2 - 0.2*tmp.Lipid,
TotalE_perturb_2lipid_p2=chosen.TotalE_2 + 0.2*tmp.Lipid,
TotalE_perturb_3lipid_m3=chosen.TotalE_2 - 0.1*tmp.Membrane,
TotalE_perturb_3lipid_p3=chosen.TotalE_2 + 0.1*tmp.Membrane,
TotalE_perturb_4lipid_m4=chosen.TotalE_2 - 0.2*tmp.Membrane,
TotalE_perturb_4lipid_p4=chosen.TotalE_2 + 0.2*tmp.Membrane,
TotalE_perturb_5go=tmp["AMH-Go"],
TotalE_perturb_5lipid=tmp.Lipid,
TotalE_perturb_5mem=tmp.Membrane,
TotalE_perturb_5rg=tmp.Rg)
if chosen_mode == 12:
chosen = tmp[chosen_list]
# chosen["z_h56"] = (chosen["z_h5"] + chosen["z_h6"])/2
chosen = chosen.assign(TotalE_2=tmp.TotalE + 0.2*tmp.AMH_4H,
z_h56=(tmp.z_h5 + tmp.z_h6)/2)
if chosen_mode == 13:
chosen_list += ["z_average"]
chosen = tmp[chosen_list]
# chosen["z_h56"] = (chosen["z_h5"] + chosen["z_h6"])/2
force = 0.1
chosen = chosen.assign(TotalE_2=tmp.TotalE + 0.2*tmp.AMH_4H - (tmp.DisReal - 25.1)*force,
TotalE_3=tmp.TotalE - (tmp.DisReal - 25.1)*force,
TotalE_4=tmp.TotalE + 0.2*tmp.AMH_4H,
TotalE_5=tmp.TotalE + 0.2*tmp.AMH_4H - (tmp.DisReal)*force)
chosen.to_csv(freeEnergy_folder+"/"+sub_mode_name+f"/data_{sample_range_mode}/t_{temp}_{biasName}_{bias}.dat", sep=' ', index=False, header=False)
# perturbation_table = {0:"original", 1:"m_go",
# 2:"p_go", 3:"m_lipid",
# 4:"p_lipid", 5:"m_mem",
# 6:"p_mem", 7:"m_rg", 8:"p_rg"}
def compute_average_z(dumpFile, outFile):
# input dump, output z.dat
z_list = []
with open(outFile, "w") as f:
a = read_lammps(dumpFile)
for atoms in a:
b = np.array(atoms)
z = b.mean(axis=0)[2]
z_list.append(z)
f.write(str(z)+"\n")
def compute_average_z_2(dumpFile, outFile):
# input dump, output z.dat
helices_list = [(94,114), (147,168), (171, 192), (200, 217), (226, 241), (250, 269)]
with open(outFile, "w") as f:
a = read_lammps(dumpFile)
f.write("z_average, abs_z_average, z_h1, z_h2, z_h3, z_h4, z_h5, z_h6\n")
for atoms in a:
b = np.array(atoms)
z = b.mean(axis=0)[2]
f.write(str(z)+ ", ")
z = np.abs(b).mean(axis=0)[2]
f.write(str(z)+ ", ")
for count, (i,j) in enumerate(helices_list):
i = i - 91
j = j - 91
z = np.mean(b[i:j], axis=0)[2]
if count == 5:
f.write(str(z))
else:
f.write(str(z)+ ", ")
f.write("\n")
def read_simulation_2(location=".", i=-1, qnqc=False, average_z=False, localQ=False, disReal=False, **kwargs):
file = "lipid.dat"
lipid = pd.read_csv(location+file)
lipid.columns = lipid.columns.str.strip()
remove_columns = ['Steps']
lipid = lipid.drop(remove_columns, axis=1)
file = "rgs.dat"
rgs = pd.read_csv(location+file)
rgs.columns = rgs.columns.str.strip()
remove_columns = ['Steps']
rgs = rgs.drop(remove_columns, axis=1)
file = "energy.dat"
energy = pd.read_csv(location+file)
energy.columns = energy.columns.str.strip()
energy = energy[["AMH-Go", "Membrane", "Rg"]]
file = "addforce.dat"
dis = pd.read_csv(location+file)
dis.columns = dis.columns.str.strip()
remove_columns = ['Steps', 'AddedForce', 'Dis12', 'Dis34', 'Dis56']
dis.drop(remove_columns, axis=1,inplace=True)
file = "wham.dat"
wham = pd.read_csv(location+file).assign(Run=i)
wham.columns = wham.columns.str.strip()
remove_columns = ['Rg', 'Tc']
wham = wham.drop(remove_columns, axis=1)
if qnqc:
qc = pd.read_table(location+f"qc", names=["qc"])[1:].reset_index(drop=True)
qn = pd.read_table(location+f"qn", names=["qn"])[1:].reset_index(drop=True)
qc2 =
|
pd.read_table(location+f"qc2", names=["qc2"])
|
pandas.read_table
|
import csv
import fnmatch
import numpy as np
from natsort import natsorted
from .utils import getAreaID, getRegionID
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
def getAreaClf(Routes):
#get routes with only 1 zone change
trainData = []
for r in Routes:
interaction = []
firstStop = np.argsort(r.actual)[1]
firstZoneID = r.stops[firstStop].zone_id
firstRegion = getRegionID(firstZoneID)
lastZoneID = ' '
for s in np.argsort(r.actual)[1:]:
currentZone = r.stops[s].zone_id
currentRegion = getRegionID(currentZone)
if currentRegion == firstRegion:
interaction.append(currentZone)
lastZoneID = currentZone
else:
break
sourceZoneID = firstZoneID
dstZoneID = lastZoneID
firstStopArea = getAreaID(sourceZoneID)
lastStopArea = getAreaID(dstZoneID)
regionID = getRegionID(sourceZoneID)
reverse = False
temp = regionID.split('-')
if firstStopArea > lastStopArea:
reverse = True
codeNew = int(str(hash(regionID))[1:5])
trainData.append([temp[0],temp[1],codeNew,reverse,firstStopArea])
trainDF = pd.DataFrame(trainData,columns = ['Code','Number','CodeNew', 'Ascending',
'Area'])
features = ['CodeNew']
X = trainDF[features]
y = trainDF['Ascending']
clf = DecisionTreeClassifier(class_weight='balanced')
clf.fit(X,y)
return clf
def getRegionClf(S1_Routes):
uniqueList = []
for testRoute in S1_Routes:
sequence = list(np.argsort(testRoute.actual))
regionIds = [getRegionID(testRoute.stops[s].zone_id) for s in sequence]
values = list(pd.unique(regionIds))
values.remove(' ')
if len(values) > 1:
uniqueList.append([values[0],values[-1]])
trainData = []
for src, dst in uniqueList:
letter = src.split('-')[0]
if src[0] == dst[0]:
reverse = False
if src > dst:
reverse = True
trainData.append([letter, reverse])
trainDF = pd.DataFrame(trainData, columns=['Code', 'Ascending'])
trainDF['CodeNew'] = [ord(x) - 64 for x in trainDF.Code]
features = ['CodeNew']
X = trainDF[features]
y = trainDF['Ascending']
clf = DecisionTreeClassifier(class_weight='balanced')
clf.fit(X, y)
return clf
def getRouteArea(route,regionCode,reverse = False):
result = []
for s in route.stops:
if getRegionID(s.zone_id) == regionCode:
result.append(getAreaID(s.zone_id))
result = list(set(result))
result = natsorted(result, reverse=reverse)
return result
def getRegionCode(route):
zoneIds = [s.zone_id for s in route.stops]
zoneIds.remove(None)
RegionIds = [getRegionID(z) for z in zoneIds]
RegionIds = pd.unique(RegionIds)
R0 =
|
pd.unique([r[0] for r in RegionIds])
|
pandas.unique
|
# Import all necessary modules.
import basilica
import pandas as pd
import en_core_web_sm
# Decompressing file function.
def decompress_pickle(file):
import bz2
import pickle
import _pickle as cPickle
data = bz2.BZ2File(file)
data = cPickle.load(data)
return data
# Subreddit prediction pickle file.
clf_model = decompress_pickle(r'Models/post_here_model.pbz2')
nlp = en_core_web_sm.load()
# Get word vectors.
def get_word_vectors(docs):
return [nlp(doc).vector for doc in docs]
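# Illustrative usage sketch (added for clarity; the _demo_* helper below is not part of the original file).
# Each document is mapped to the vector of its parsed spaCy Doc.
def _demo_get_word_vectors():
    vectors = get_word_vectors(["first post title", "second post title"])
    assert len(vectors) == 2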
# Returns 1-5 best subreddit based on user inputs.
def subreddit_prediction(title, text, num_pred):
title = pd.Series(title)
text =
|
pd.Series(text)
|
pandas.Series
|
import logging
from pathlib import Path
from time import sleep
from typing import List, Tuple
import finviz
import numpy as np
import pandas as pd
import pandas_market_calendars as mcal
import pendulum
import requests
import yfinance as yf
from airflow.decorators import dag, task
from airflow.models import Variable
from airflow.utils import timezone
from newsapi import NewsApiClient
from newsapi.newsapi_exception import NewsAPIException
from pendulum.date import Date
logger = logging.getLogger(__name__)
# These args will get passed on to each operator
# You can override them on a per-task basis during operator initialization
default_args = {
"owner": "airflow",
"retries": 0,
}
def get_start_end_dates(
exec_date: Date, minimal_period_days: int = 0
) -> Tuple[Date, str, Date, str]:
subtract_days = max(minimal_period_days, int(Variable.get("sp500_recent_days")))
period = exec_date - exec_date.subtract(days=subtract_days)
start_date = period.start.date()
start_date_str = start_date.to_date_string()
end_date = period.end.date()
end_date_str = end_date.to_date_string()
return start_date, start_date_str, end_date, end_date_str
@dag(
default_args=default_args,
schedule_interval="50 7 * * *",
start_date=pendulum.parse("20210715"),
catchup=True,
max_active_runs=1,
)
def sp500_dag():
"""
### Download S&P 500 data
First, determine symbols comprising S&P 500 from Wikipedia.
Second, download recent market data for all symbols from YFinance.
Third, enrich with financial news headlines from finviz.
"""
@task()
def get_ticker_symbols() -> List[str]:
"""
#### Get S&P 500 ticker symbols as available from Wikipedia
Returns a list of ticker symbols, as strings.
"""
wiki_df = pd.read_html(
"https://en.wikipedia.org/wiki/List_of_S%26P_500_companies"
)[0]
# replace . with - in symbols as listed in yfinance
symbols = sorted((s.replace(".", "-") for s in wiki_df.loc[:, "Symbol"]))
# TODO: check below via great expectations etc?
if len(symbols) != len(set(symbols)):
raise ValueError("S&P500 contains duplicated symbols")
return symbols
@task()
def get_ticker_data(sp500_symbols: List[str], **context):
"""
#### Get ticker data from Yfinance
Only market days are fetched and data are stored locally as Parquet files,
partitioned by stock symbol.
"""
start_date, start_date_str, end_date, end_date_str = get_start_end_dates(
context["execution_date"]
)
# determine market days in period
market_cal = mcal.get_calendar("NYSE")
market_days = market_cal.schedule(
start_date=start_date, end_date=end_date
).index
market_days = {Date(year=n.year, month=n.month, day=n.day) for n in market_days}
ticker_interval = f"{int(Variable.get('sp500_ticker_interval_minutes'))}m"
# for each symbol, fetch data and store them as parquet files locally
output_dir = Path(Variable.get("sp500_output_dir")) / "ticker_data"
for s in sp500_symbols:
logger.info(f"Processing {s}")
ticker = yf.Ticker(s)
ticker_df = ticker.history(
start=start_date_str,
end=end_date.add(days=1).to_date_string(), # yf interval is right-open
interval=ticker_interval,
)
# TODO: check above for at least some data for market days via great expectations etc?
# To determine if a day is a market day, use:
# for current_date in period:
# if current_date.date() in market_days:
ticker_df = ticker_df.tz_convert("UTC", level=0)
ticker_df.reset_index(inplace=True)
ticker_df.insert(0, "Symbol", s)
current_out_dir = output_dir / f"Symbol={s}"
current_out_dir.mkdir(parents=True, exist_ok=True)
ticker_df.to_parquet(
current_out_dir / f"{start_date_str}_{end_date_str}.snappy.parquet",
engine="pyarrow",
compression="snappy",
)
sleep(1)
@task()
def get_news_finviz(sp500_symbols: List[str], **context):
"""
#### Get news from finviz
"""
start_date, start_date_str, end_date, end_date_str = get_start_end_dates(
context["execution_date"]
)
# Generate UTC datetime for given start and end days for comparison later
start_datetime = pendulum.datetime(
year=start_date.year,
month=start_date.month,
day=start_date.day,
)
end_datetime = pendulum.datetime(
year=end_date.year,
month=end_date.month,
day=end_date.day,
hour=23,
minute=59,
second=59,
microsecond=999999,
)
# for each symbol, fetch news for each day and store them as parquet
# files locally
output_dir = Path(Variable.get("sp500_output_dir")) / "news" / "finviz"
for s in sp500_symbols:
logger.info(f"Processing {s}")
try:
news_fv = finviz.get_news(s)
except requests.exceptions.HTTPError as e:
logger.warning(f"Failed for {s}")
continue
news_fv_df = pd.DataFrame(news_fv)
# TODO: check above for at least some news returned via great expectations etc?
news_fv_df.columns = ["Datetime", "Title", "URL", "Source"]
news_fv_df.insert(0, "Symbol", s)
news_fv_df.insert(3, "Description", pd.NA)
news_fv_df.insert(5, "Author", pd.NA)
news_fv_df = news_fv_df[
[
"Symbol",
"Datetime",
"Title",
"Description",
"Source",
"Author",
"URL",
]
]
news_fv_df["Datetime"] = pd.to_datetime(news_fv_df["Datetime"])
news_fv_df["Datetime"] = news_fv_df["Datetime"].dt.tz_localize("US/Eastern")
news_fv_df["Datetime"] = news_fv_df["Datetime"].dt.tz_convert("UTC")
news_fv_df = news_fv_df.loc[
(
np.logical_and(
start_datetime <= news_fv_df["Datetime"],
news_fv_df["Datetime"] <= end_datetime,
)
),
:,
]
news_fv_df.sort_values(by=["Datetime"], inplace=True, ascending=True)
news_fv_df.reset_index(inplace=True, drop=True)
current_out_dir = output_dir / f"Symbol={s}"
current_out_dir.mkdir(parents=True, exist_ok=True)
news_fv_df.to_parquet(
current_out_dir / f"{start_date_str}_{end_date_str}.snappy.parquet",
engine="pyarrow",
compression="snappy",
)
sleep(1)
@task()
def get_news_newsapi(sp500_symbols: List[str], **context):
"""
#### Get news from News API
"""
start_date, start_date_str, end_date, end_date_str = get_start_end_dates(
context["execution_date"], minimal_period_days=7
)
# for each symbol, fetch news for each day and store them as parquet
# files locally
output_dir = Path(Variable.get("sp500_output_dir")) / "news" / "newsapi"
client = NewsApiClient(api_key=Variable.get("news_api_key"))
# determine index of symbol fetched last in prev. run or start from scratch
start_index = 0
last_symbol = Variable.get("sp500_newsapi_last_symbol", default_var=None)
if last_symbol is not None:
for i, s in enumerate(sp500_symbols):
if s == last_symbol:
start_index = (i + 1) % len(sp500_symbols)
break
for i in range(len(sp500_symbols)):
ix = (start_index + i) % len(sp500_symbols)
s = sp500_symbols[ix]
logger.info(f"Processing {s}")
try:
now = timezone.utcnow()
news_api = client.get_everything(
q=s, from_param=start_date_str, to=end_date_str, language="en"
)
except NewsAPIException as e:
if e.get_code() == "rateLimited":
return
else:
raise e
news_api_list = []
for a in news_api["articles"]:
source = a["source"]["name"]
author = a["author"]
title = a["title"]
description = a["description"]
url = a["url"]
published = a["publishedAt"]
requested = now
news_api_list.append(
(s, published, title, description, source, author, url, requested)
)
news_api_df = pd.DataFrame(
news_api_list,
columns=[
"Symbol",
"Datetime",
"Title",
"Description",
"Source",
"Author",
"URL",
"Requested_Datetime",
],
)
news_api_df["Datetime"] =
|
pd.to_datetime(news_api_df["Datetime"])
|
pandas.to_datetime
|
"""Code for computing window functions with ibis.
"""
import operator
import re
from collections import OrderedDict
import toolz
import pandas as pd
from pandas.core.groupby import SeriesGroupBy
import ibis.common as com
import ibis.expr.window as win
import ibis.expr.operations as ops
import ibis.pandas.aggcontext as agg_ctx
from ibis.pandas.core import integer_types
from ibis.pandas.dispatch import execute_node
from ibis.pandas.core import execute
from ibis.pandas.execution import util
def _post_process_empty(scalar, index):
result =
|
pd.Series([scalar])
|
pandas.Series
|
import pandas
import time
import sklearn
import numpy as np
import Bio.SeqUtils as SeqUtil
import Bio.Seq as Seq
import azimuth.util
import sys
import Bio.SeqUtils.MeltingTemp as Tm
import pickle
import itertools
def featurize_data(data, learn_options, Y, gene_position, pam_audit=True, length_audit=True, quiet=True):
'''
assumes that data contains the 30mer
returns set of features from which one can make a kernel for each one
'''
all_lens = data['30mer'].apply(len).values
unique_lengths = np.unique(all_lens)
num_lengths = len(unique_lengths)
assert num_lengths == 1, "should only have sequences of a single length, but found %s: %s" % (num_lengths, str(unique_lengths))
if not quiet:
print("Constructing features...")
t0 = time.time()
feature_sets = {}
if learn_options["nuc_features"]:
# spectrum kernels (position-independent) and weighted degree kernels (position-dependent)
get_all_order_nuc_features(data['30mer'], feature_sets, learn_options, learn_options["order"], max_index_to_use=30, quiet=quiet)
check_feature_set(feature_sets)
if learn_options["gc_features"]:
gc_above_10, gc_below_10, gc_count = gc_features(data, length_audit)
feature_sets['gc_above_10'] = pandas.DataFrame(gc_above_10)
feature_sets['gc_below_10'] = pandas.DataFrame(gc_below_10)
feature_sets['gc_count'] = pandas.DataFrame(gc_count)
if learn_options["include_gene_position"]:
# gene_position_columns = ["Amino Acid Cut position", "Percent Peptide", "Nucleotide cut position"]
# gene_position_columns = ["Percent Peptide", "Nucleotide cut position"]
for set in gene_position.columns:
set_name = set
feature_sets[set_name] = pandas.DataFrame(gene_position[set])
feature_sets["Percent Peptide <50%"] = feature_sets["Percent Peptide"] < 50
feature_sets["Percent Peptide <50%"]['Percent Peptide <50%'] = feature_sets["Percent Peptide <50%"].pop("Percent Peptide")
if learn_options["include_gene_effect"]:
print("including gene effect")
gene_names = Y['Target gene']
enc = sklearn.preprocessing.OneHotEncoder()
label_encoder = sklearn.preprocessing.LabelEncoder()
label_encoder.fit(gene_names)
one_hot_genes = np.array(enc.fit_transform(label_encoder.transform(gene_names)[:, None]).todense())
feature_sets["gene effect"] = pandas.DataFrame(one_hot_genes,
columns=["gene_%d" % i for i in range(one_hot_genes.shape[1])], index=gene_names.index)
if learn_options['include_known_pairs']:
feature_sets['known pairs'] = pandas.DataFrame(Y['test'])
if learn_options["include_NGGX_interaction"]:
feature_sets["NGGX"] = NGGX_interaction_feature(data, pam_audit)
if learn_options["include_Tm"]:
feature_sets["Tm"] = Tm_feature(data, pam_audit, learn_options=None)
if learn_options["include_sgRNAscore"]:
feature_sets["sgRNA Score"] =
|
pandas.DataFrame(data["sgRNA Score"])
|
pandas.DataFrame
|
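# The row above completes with pandas.DataFrame. Wrapping one column in a one-column DataFrame,
# as featurize_data does for each feature set (the scores below are invented):
import pandas as pd
data = pd.DataFrame({"sgRNA Score": [0.12, 0.87, 0.45]})
feature = pd.DataFrame(data["sgRNA Score"])  # shape (3, 1), original index preserved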
import numpy as np
import pandas as pd
import argparse
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
import math
import copy
import sklearn
import sklearn.cluster
import random
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
from sklearn.metrics import silhouette_score, davies_bouldin_score,v_measure_score
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import AgglomerativeClustering
from sklearn.decomposition import NMF
from sklearn.decomposition import PCA
import multiprocessing as mp
from functools import partial
from scipy.spatial import distance
import os
from scipy.stats import norm
from scipy.stats import multivariate_normal
from scipy.stats import ttest_ind
from scipy.stats import ks_2samp
from hmmlearn import hmm
from scipy.io import mmread
from scipy.sparse import csr_matrix
import multiprocessing
import warnings
os.environ['NUMEXPR_MAX_THREADS'] = '50'
def jointLikelihoodEnergyLabels_helper(label,data,states,norms):
e = 1e-50
r0 = [x for x in range(data.shape[0]) if states[x,label]==0]
l0 = np.sum(-np.log(np.asarray(norms[0].pdf(data[r0,:])+e)),axis=0)
r1 = [x for x in range(data.shape[0]) if states[x,label]==1]
l1 = np.sum(-np.log(np.asarray(norms[1].pdf(data[r1,:])+e)),axis=0)
r2 = [x for x in range(data.shape[0]) if states[x,label]==2]
l2 = np.sum(-np.log(np.asarray(norms[2].pdf(data[r2,:])+e)),axis=0)
return l0 + l1 + l2
def init_helper(i,data, n_clusters,normal,diff,labels,c):
l = []
for k in range(n_clusters):
pval = ks_2samp(data[i,labels==k],normal[i,:])[1]
mn = np.mean(normal[i,:])
if c[i,k]< mn and pval <= diff:
l.append(0)
elif c[i,k]> mn and pval <= diff:
l.append(2)
else:
l.append(1)
return np.asarray(l).astype(int)
def HMM_helper(inds, data, means, sigmas ,t, num_states, model,normal):
ind_bin,ind_spot,k = inds
data = data[np.asarray(ind_bin)[:, None],np.asarray(ind_spot)]
data2 = np.mean(data,axis=1)
X = np.asarray([[x] for x in data2])
C = np.asarray(model.predict(X))
score = model.score(X)
#bootstrap
b=3
for i in range(b):
inds = random.sample(range(data.shape[1]),int(data.shape[1]*.8+1))
data2 = np.mean(data[:,inds],axis=1)
X = np.asarray([[x] for x in data2])
C2 = np.asarray(model.predict(X))
for j,c in enumerate(C2):
if C[j] != c:
C[j] = 1
return [C,score]
class STARCH:
"""
This is a class implementing a Hidden Markov Random Field (HMRF) for calling Copy Number Aberrations
using spatial relationships and gene adjacencies along chromosomes
"""
def __init__(self,data,normal_spots=[],labels=[],beta_spots=2,n_clusters=3,num_states=3,gene_mapping_file_name='hgTables_hg19.txt',nthreads=0):
"""
The constructor for HMRF_CNA
Parameters:
data (pandas data frame): gene x spot (or cell).
colnames = 2d or 3d indices (eg. 5x18, 5x18x2 if multiple layers).
rownames = HUGO gene name
"""
if nthreads == 0:
nthreads = int(multiprocessing.cpu_count() / 2 + 1)
logger.info('Running with ' + str(nthreads) + ' threads')
logger.info("initializing HMRF...")
self.beta_spots = beta_spots
self.gene_mapping_file_name = gene_mapping_file_name
self.n_clusters = int(n_clusters)
dat,data = self.preload(data)
logger.info(str(self.rows[0:20]))
logger.info(str(len(self.rows)) + ' ' + str(len(self.columns)) + ' ' + str(data.shape))
if isinstance(normal_spots, str):
self.read_normal_spots(normal_spots)
if normal_spots == []:
self.get_normal_spots(data)
else:
self.normal_spots = np.asarray([int(x) for x in normal_spots])
logger.info('normal spots ' + str(len(self.normal_spots)))
dat = self.preprocess_data(data,dat)
logger.info('done preprocessing...')
self.data = self.data * 1000
self.bins = self.data.shape[0]
self.spots = self.data.shape[1]
self.tumor_spots = np.asarray([int(x) for x in range(self.spots) if int(x) not in self.normal_spots])
self.normal = self.data[:,self.normal_spots]
self.data = self.data[:,self.tumor_spots]
self.bins = self.data.shape[0]
self.spots = self.data.shape[1]
self.num_states = int(num_states)
self.normal_state = int((self.num_states-1)/2)
logger.info('getting spot network...')
self.get_spot_network(self.data,self.columns[self.tumor_spots])
if isinstance(labels, str):
self.get_labels(labels)
if len(labels)>0:
self.labels = labels
else:
logger.info('initializing labels...')
self.initialize_labels()
logger.debug('starting labels: '+str(self.labels))
np.fill_diagonal(self.spot_network, 0)
logger.info('getting params...')
for d in range(10 ,20,1):
try:
self.init_params(d/10,nthreads)
break
except:
continue
self.states = np.zeros((self.bins,self.n_clusters))
logger.info('starting means: '+str(self.means))
logger.info('starting cov: '+str(self.sigmas))
logger.info(str(len(self.rows)) + ' ' + str(len(self.columns)) + ' ' + str(self.data.shape))
def to_transpose(self,sep,data):
dat = pd.read_csv(data,sep=sep,header=0,index_col=0)
if 'x' in dat.index.values[0] and 'x' in dat.index.values[1] and 'x' in dat.index.values[2]:
return True
return False
def which_sep(self,data):
dat = np.asarray(pd.read_csv(data,sep='\t',header=0,index_col=0)).size
dat2 = np.asarray(pd.read_csv(data,sep=',',header=0,index_col=0)).size
dat3 = np.asarray(pd.read_csv(data,sep=' ',header=0,index_col=0)).size
if dat > dat2 and dat > dat3:
return '\t'
elif dat2 > dat and dat2 > dat3:
return ','
else:
return ' '
def get_bin_size(self,data,chroms):
for bin_size in range(20,100):
test = self.bin_data2(data[:,self.normal_spots],chroms,bin_size=bin_size,step_size=1)
test = test[test!=0]
logger.debug(str(bin_size)+' mean expression binned ' + str(np.mean(test)))
logger.debug(str(bin_size)+' median expression binned ' + str(np.median(test)))
if np.median(test) >= 10:
break
logger.info('selected bin size: ' + str(bin_size))
return bin_size
def preload(self,l):
if isinstance(l,list): # list of multiple datasets
offset = 0
dats = []
datas = []
for data in l:
dat,data = self.load(data)
datas.append(data)
dats.append(dat)
conserved_genes = []
inds = []
for dat in dats:
inds.append([])
for gene in dats[0].index.values:
inall = True
for dat in dats:
if gene not in dat.index.values:
inall = False
if inall:
conserved_genes.append(gene)
for i,dat in enumerate(dats):
ind = inds[i]
ind.append(np.where(dat.index.values == gene)[0][0])
inds[i] = ind
conserved_genes = np.asarray(conserved_genes)
logger.info(str(conserved_genes))
newdatas = []
newdats = []
for i in range(len(datas)):
data = datas[i]
dat = dats[i]
ind = np.asarray(inds[i])
newdatas.append(data[ind,:])
newdats.append(dat.iloc[ind,:])
for dat in newdats:
spots = np.asarray([[float(y) for y in x.split('x')] for x in dat.columns.values])
for spot in spots:
spot[0] += offset
spots = ['x'.join([str(y) for y in x]) for x in spots]
dat.columns = spots
offset += 100
data = np.concatenate(newdatas,axis=1)
dat = pd.concat(newdats,axis=1)
self.rows = dat.index.values
self.columns = dat.columns.values
else:
dat,data = self.load(l)
return dat,data
def load(self,data):
try:
if isinstance(data, str) and ('.csv' in data or '.tsv' in data or '.txt' in data):
logger.info('Reading data...')
sep = self.which_sep(data)
if self.to_transpose(sep,data):
dat =
|
pd.read_csv(data,sep=sep,header=0,index_col=0)
|
pandas.read_csv
|
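# The row above completes with pandas.read_csv. A minimal sketch of the load pattern used in
# STARCH.load: header row, first column as the row index, separator chosen by which_sep
# (the sample table and comma separator are assumptions):
import io
import pandas as pd
csv_text = "gene,1x1,1x2\nTP53,5,0\nEGFR,2,7\n"
dat = pd.read_csv(io.StringIO(csv_text), sep=",", header=0, index_col=0)
# dat is indexed by gene name with one column per spot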
# encoding=utf-8
""" gs_data centralizes all data import functions such as reading csv's
"""
import pandas as pd
import datetime as dt
from gs_datadict import *
def do_regions(ctydf: pd.DataFrame, mergef: str):
"""
do_regions assigns the FIPS codes missing for multi-county regions, primarily occurring in UT where covid
data is rolled up into 6 multi-county regions. Reading from a reference file, do_regions
assigns a FIPS code and cumulative population for each region, and identifies its member counties
"""
namecol = ctydf.columns.get_loc('County')
state_idx = ctydf.loc[ctydf.State=='Utah'].index.to_list()
nonnull_idx = ctydf.loc[(ctydf.State=='Utah') & (ctydf.FIPS>'')].index.to_list()
# list comprehension finds rows which are missing a county FIPS
null_idx = [x for x in state_idx if x not in nonnull_idx]
merge_df = pd.read_csv(mergef, dtype={'fips0': str,'fips1': str,'fips2': str, 'fips3': str, 'fips4': str,'fips5': str})
# add a column for county population, we'll add region pop here
ctydf['Pop']= [None for x in range(len(ctydf))]
ctydf['Multi_FIPS']= [[] for x in range(len(ctydf))]
for x in null_idx:
this_region = ctydf.iat[x,1]
y = merge_df.loc[merge_df['Region']==this_region].to_dict('list')
ctydf.iat[x,0]= y['fips0'][0]
ctydf.iat[x,4]= y['Lat'][0]
ctydf.iat[x, 5] = y['Long'][0]
ctydf.iat[x, 9] = y['Long_Name'][0]
ctydf.iat[x, 11] = y['Pop'][0]
# make a list of county fips in the region, and add the list in column 'Multi-Fips' in master df
z = [y['fips0'][0], y['fips1'][0]]
if pd.notnull(y['fips2'][0]):
z.append(y['fips2'][0])
if pd.notnull(y['fips3'][0]):
z.append(y['fips3'][0])
if pd.notnull(y['fips4'][0]):
z.append(y['fips4'][0])
if pd.notnull(y['fips5'][0]):
z.append(y['fips5'][0])
ctydf.iat[x, 12] = z
z = []
y = {}
# ALSO need to deal with Dukes and Nantucket region in MA:
y = {'UID':[84070002], 'Region':['Dukes and Nantucket'], 'stFIPS':[25], 'Lat':[41.40674725], 'Long':[-70.68763497],
'Long_Name':['Dukes-Nantucket Region MA'], 'Pop':[28731], 'fips0':['25007'], 'fips1':['25019'],
'name0':['Dukes'], 'name1':['Nantucket'], 'pop0':[17332], 'pop1':[11399]}
state_idx = ctydf.loc[ctydf.State=='Massachusetts'].index.to_list()
nonnull_idx = ctydf.loc[(ctydf.State=='Massachusetts')&(ctydf.FIPS>'')].index.to_list()
null_idx = [x for x in state_idx if x not in nonnull_idx]
x = null_idx[0]
this_region = ctydf.iat[x, 1]
ctydf.iat[x, 0] = y['fips0'][0]
ctydf.iat[x, 4] = y['Lat'][0]
ctydf.iat[x, 5] = y['Long'][0]
ctydf.iat[x, 9] = y['Long_Name'][0]
ctydf.iat[x, 11] = y['Pop'][0]
ctydf.iat[x, 12] = [y['fips0'][0], y['fips1'][0]]
# final one is fixing Kansas City MO, not a rollup, but for some reason it is sometimes an outlier with no FIPS
MO_merge = {'region_pop': [459787],
'region_name': ['Kansas City MO'],
'prior_fips': [29000],
'prior_names': ['Kansas City Missouri']
}
return ctydf
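# do_regions reads its reference file with explicit str dtypes for every fips column; a short
# illustration of why that matters (the codes below are invented for the example):
import io
import pandas as pd
ref_csv = "Region,fips0\nRegion A,49003\nRegion B,09001\n"
pd.read_csv(io.StringIO(ref_csv))["fips0"].tolist()                        # [49003, 9001] -> leading zero lost
pd.read_csv(io.StringIO(ref_csv), dtype={"fips0": str})["fips0"].tolist()  # ['49003', '09001'] -> 5-digit codes kept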
def get_countypop(popfile: str, excludefile: str):
"""
get_countypop builds a Dataframe, indexed on county FIPS, with est 2020 population.
To avoid corrupting data, it then removes any counties which are part of a multi-county
rollup for covid reporting
"""
dfpop = pd.read_csv(popfile, usecols={0,3}, dtype={'FIPS': str,'Pop': int})
dfpop.set_index('FIPS', drop=True, inplace=True, verify_integrity=True) # df has only fips index and Pop field
dfpop.sort_index(inplace=True)
excl = pd.read_csv(excludefile, usecols={7,8,9,10,11,12}, dtype={'fips0': str, 'fips1': str, 'fips2': str,
'fips3': str, 'fips4': str, 'fips5': str})
for x in range(len(excl)):
dfpop.drop(index=excl.iat[x, 0], inplace=True)
dfpop.drop(index=excl.iat[x, 1], inplace=True)
if pd.notnull(excl.iat[x, 2]):
dfpop.drop(index=excl.iat[x, 2], inplace=True)
if pd.notnull(excl.iat[x, 3]):
dfpop.drop(index=excl.iat[x, 3], inplace=True)
if pd.notnull(excl.iat[x, 4]):
dfpop.drop(index=excl.iat[x, 4], inplace=True)
if pd.notnull(excl.iat[x, 5]):
dfpop.drop(index=excl.iat[x, 5], inplace=True)
return dfpop
def get_counties(popdf: pd.DataFrame, jhucov: str, regionf: str):
""" get_counties reads in csv's and joins on 5-digit fips key
:param popf: file containing population for each U.S. county (fips key)
:type popf: str
:param covf: covid data file by county, in this case Johns Hopkins
:type covf: str
:return: df
:rtype: pd.DataFrame
"""
dfcov = pd.read_csv(jhucov, usecols=JHUC_COLNUM, dtype=JHUC_DTYPE, parse_dates=[3],
dayfirst=False, infer_datetime_format=True)
dfcov['Last_Update']= pd.to_datetime(dfcov['Last_Update'], format='%m/%d/%y', exact=True)
dfcov = dfcov.rename(JHUC_RENAM)
# deal with blank county FIPS, primarily in UT, do_regions handles these:
dfcov = do_regions(dfcov, regionf)
dfcov.dropna(inplace=True, subset=['FIPS'])
dfcov.set_index('FIPS', drop=False, inplace=True, verify_integrity=True)
dfcov.sort_index(inplace=True)
df = dfcov.combine_first(popdf)
df['DeathstoPop'] = 100*(df['Deaths'] / df['Pop'])
df['CasestoPop'] = 100*(df['Confirmed'] / df['Pop'])
# cleanup on aisle 'floats with NaN's'
df['DeathstoPop'].fillna(value=0, inplace=True)
df['CasestoPop'].fillna(value=0, inplace=True)
df['DeathstoPop'] = pd.to_numeric(df['DeathstoPop'])
df['CasestoPop'] = pd.to_numeric(df['CasestoPop'])
return df
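# get_counties above merges covid rows with county population via combine_first on the shared
# FIPS index; a small sketch of that join (values are invented):
import pandas as pd
cov = pd.DataFrame({"Confirmed": [10, 25]}, index=["01001", "01003"])
pop = pd.DataFrame({"Pop": [55869, 223234]}, index=["01001", "01003"])
merged = cov.combine_first(pop)  # one frame with both Confirmed and Pop, matched on the FIPS index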
def get_nytcounty(popdf: pd.DataFrame, nytcov: str):
"""
get_nytcounty reads in county-level covid data along with county population data and adds a
column for population
"""
dfnyt = pd.read_csv(nytcov, dtype={'fips':str,'county':str,'state':str}, parse_dates=[1],
dayfirst=False, infer_datetime_format=True)
dfnyt['date']= pd.to_datetime(dfnyt['date'], format='%m/%d/%Y', exact=True)
dfnyt.dropna(inplace=True, subset=['fips'])
dfnyt.set_index(['fips','date'], drop=False, inplace=True, verify_integrity=True)
dfnyt.sort_index(inplace=True)
return dfnyt
def get_statepop(stpopfile: str):
"""
get_statepop builds a Dataframe, indexed on State Fips, with est 2020 population
"""
df_st = pd.read_csv(stpopfile, usecols={1,2}, dtype={'fips':str, 'pop':int})
df_st.set_index('fips', drop=True, inplace=True)
df_st.sort_index()
return df_st
def get_states(df_pop: pd.DataFrame, jhu_stcov: str):
"""
:param df_pop: dataframe built in get county import, contains pop by county (fips)
:type df_pop: pd.DataFrame
:param jhu_stcov: csv file from jhu with state covid data
:type jhu_stcov: fully qualified path/filename
:return: df
:rtype: pd.DataFrame
"""
dfst = pd.read_csv(jhu_stcov, dtype={'fips': str}, parse_dates=[3],
dayfirst=False, infer_datetime_format=True)
dfst['Last_Update'] =
|
pd.to_datetime(dfst['Last_Update'], format='%m/%d/%y', exact=True)
|
pandas.to_datetime
|
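# The row above completes with pandas.to_datetime using an explicit format. A standalone sketch,
# where '%m/%d/%y' expects two-digit years (dates are made up):
import pandas as pd
df = pd.DataFrame({"Last_Update": ["03/22/21", "03/23/21"]})
df["Last_Update"] = pd.to_datetime(df["Last_Update"], format="%m/%d/%y", exact=True)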
from flask import Flask, render_template, request, redirect, url_for, session
import os
import io
import math
from math import sin, cos, sqrt, atan2, radians
from collections import defaultdict
import numpy as np
import pandas as pd
from pandas.io import sql
import pymysql
import pymysql.cursors
import pulp
from pulp import *
import scipy.stats as st
import scipy.optimize as optimize
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.tsa.arima_model import ARIMA
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
# create_engine is needed for the to_sql calls in the facility allocation and forecasting routes
from sqlalchemy import create_engine
#from werkzeug.utils import secure_filename
app = Flask(__name__)
app.secret_key = os.urandom(24)
localaddress="D:\\home\\site\\wwwroot"
localpath=localaddress
os.chdir(localaddress)
@app.route('/')
def index():
return redirect(url_for('home'))
@app.route('/home')
def home():
return render_template('home.html')
@app.route('/demandplanning')
def demandplanning():
return render_template("Demand_Planning.html")
@app.route("/elasticopt",methods = ['GET','POST'])
def elasticopt():
if request.method== 'POST':
start_date =request.form['from']
end_date=request.form['to']
prdct_name=request.form['typedf']
# connection = pymysql.connect(host='localhost',
# user='user',
# password='',
# db='test',
# charset='utf8mb4',
# cursorclass=pymysql.cursors.DictCursor)
#
# x=connection.cursor()
# x.execute("select * from `transcdata`")
# connection.commit()
# datass=pd.DataFrame(x.fetchall())
datass = pd.read_csv("C:\\Users\\1026819\\Downloads\\optimizdata.csv")
# datas = datass[(datass['Week']>=start_date) & (datass['Week']<=end_date )]
datas=datass
df = datas[datas['Product'] == prdct_name]
df=datass
changeData=pd.concat([df['Product_Price'],df['Product_Qty']],axis=1)
changep=[]
changed=[]
for i in range(0,len(changeData)-1):
changep.append(changeData['Product_Price'].iloc[i]-changeData['Product_Price'].iloc[i+1])
changed.append(changeData['Product_Qty'].iloc[i]-changeData['Product_Qty'].iloc[i+1])
cpd=pd.concat([pd.DataFrame(changep),pd.DataFrame(changed)],axis=1)
cpd.columns=['Product_Price','Product_Qty']
sortedpricedata=df.sort_values(['Product_Price'], ascending=[True])
spq=pd.concat([sortedpricedata['Product_Price'],sortedpricedata['Product_Qty']],axis=1).reset_index(drop=True)
x = spq['Product_Price']
y = spq['Product_Qty']
num_bins = 5
# np.histogram returns the same bin edges plt.hist would, without opening a figure,
# so pint/dint hold the price and quantity bin boundaries used below
_, pint = np.histogram(x, num_bins)
_, dint = np.histogram(y, num_bins)
arr= np.zeros(shape=(len(pint),len(dint)))
count=0
for i in range(0, len(pint)):
lbp=pint[i]
if i==len(pint)-1:
ubp=pint[i]+1
else:
ubp=pint[i+1]
for j in range(0, len(dint)):
lbd=dint[j]
if j==len(dint)-1:
ubd=dint[j]+1
else:
ubd=dint[j+1]
print(lbd,ubd)
for k in range(0, len(spq)):
if (spq['Product_Price'].iloc[k]>=lbp\
and spq['Product_Price'].iloc[k]<ubp):
if(spq['Product_Qty'].iloc[k]>=lbd\
and spq['Product_Qty'].iloc[k]<ubd):
count+=1
arr[i][j]+=1
price_range=np.zeros(shape=(len(pint),2))
for j in range(0,len(pint)):
lbp=pint[j]
price_range[j][0]=lbp
if j==len(pint)-1:
ubp=pint[j]+1
price_range[j][1]=ubp
else:
ubp=pint[j+1]
price_range[j][1]=ubp
demand_range=np.zeros(shape=(len(dint),2))
for j in range(0,len(dint)):
lbd=dint[j]
demand_range[j][0]=lbd
if j==len(dint)-1:
ubd=dint[j]+1
demand_range[j][1]=ubd
else:
ubd=dint[j+1]
demand_range[j][1]=ubd
pr=pd.DataFrame(price_range)
pr.columns=['Price','Demand']
dr=pd.DataFrame(demand_range)
dr.columns=['Price','Demand']
priceranges=pr.Price.astype(str).str.cat(pr.Demand.astype(str), sep='-')
demandranges=dr.Price.astype(str).str.cat(dr.Demand.astype(str), sep='-')
price=pd.DataFrame(arr)
price.columns=demandranges
price.index=priceranges
pp=price.reset_index()
global data
data=pd.concat([df['Week'],df['Product_Qty'],df['Product_Price'],df['Comp_Prod_Price'],df['Promo1'],df['Promo2'],df['overallsale']],axis=1)
return render_template('dataview.html',cpd=cpd.values,pp=pp.to_html(index=False),data=data.to_html(index=False),graphdata=data.values,ss=1)
return render_template('dataview.html')
@app.route('/priceelasticity',methods = ['GET','POST'])
def priceelasticity():
return render_template('Optimisation_heatmap_revenue.html')
@app.route("/elasticity",methods = ['GET','POST'])
def elasticity():
if request.method== 'POST':
Price=0
Average_Price=0
Promotions=0
Promotionss=0
if request.form.get('Price'):
Price=1
if request.form.get('Average_Price'):
Average_Price=1
if request.form.get('Promotion_1'):
Promotions=1
if request.form.get('Promotion_2'):
Promotionss=1
Modeldata=pd.DataFrame()
Modeldata['Product_Qty']=data.Product_Qty
lst=[]
for row in data.index:
lst.append(row+1)
Modeldata['Week']=np.log(lst)
if Price == 1:
Modeldata['Product_Price']=data['Product_Price']
if Price == 0:
Modeldata['Product_Price']=0
if Average_Price==1:
Modeldata['Comp_Prod_Price']=data['Comp_Prod_Price']
if Average_Price==0:
Modeldata['Comp_Prod_Price']=0
if Promotions==1:
Modeldata['Promo1']=data['Promo1']
if Promotions==0:
Modeldata['Promo1']=0
if Promotionss==1:
Modeldata['Promo2']=data['Promo2']
if Promotionss==0:
Modeldata['Promo2']=0
diffpriceprodvscomp= (Modeldata['Product_Price']-Modeldata['Comp_Prod_Price'])
promo1=Modeldata.Promo1
promo2=Modeldata.Promo2
week=Modeldata.Week
quantityproduct=Modeldata.Product_Qty
df=pd.concat([quantityproduct,diffpriceprodvscomp,promo1,promo2,week],axis=1)
df.columns=['quantityproduct','diffpriceprodvscomp','promo1','promo2','week']
Model = smf.ols(formula='df.quantityproduct ~ df.diffpriceprodvscomp + df.promo1 + df.promo2 + df.week', data=df)
res = Model.fit()
global intercept,diffpriceprodvscomp_param,promo1_param,promo2_param,week_param
intercept=res.params[0]
diffpriceprodvscomp_param=res.params[1]
promo1_param=res.params[2]
promo2_param=res.params[3]
week_param=res.params[4]
Product_Price_min=0
maxvalue_of_price=int(Modeldata['Product_Price'].max())
Product_Price_max=int(Modeldata['Product_Price'].max())
if maxvalue_of_price==0:
Product_Price_max=1
maxfunction=[]
pricev=[]
weeks=[]
dd=[]
ddl=[]
for vatr in range(0,len(Modeldata)):
weeks.append(lst[vatr])
for Product_Price in range(Product_Price_min,Product_Price_max+1):
function=0
function=(intercept+(Modeldata['Promo1'].iloc[vatr]*promo1_param)+(Modeldata['Promo2'].iloc[vatr]*promo2_param) +
(diffpriceprodvscomp_param*(Product_Price-Modeldata['Comp_Prod_Price'].iloc[vatr]))+(Modeldata['Week'].iloc[vatr]*week_param))
maxfunction.append(function)
dd.append(Product_Price)
ddl.append(vatr)
for Product_Price in range(Product_Price_min,Product_Price_max+1):
pricev.append(Product_Price)
df1=pd.DataFrame(maxfunction)
df2=pd.DataFrame(dd)
df3=pd.DataFrame(ddl)
dfo=pd.concat([df3,df2,df1],axis=1)
dfo.columns=['weeks','prices','Demandfunctions']
demand=[]
for rows in dfo.values:
w=int(rows[0])
p=int(rows[1])
d=int(rows[2])
demand.append([w,p,d])
Co_eff=pd.DataFrame(res.params.values)#intercept
standard_error=pd.DataFrame(res.bse.values)#standard error
p_values=pd.DataFrame(res.pvalues.values)
conf_lower =pd.DataFrame(res.conf_int()[0].values)
conf_higher =pd.DataFrame(res.conf_int()[1].values)
R_square=res.rsquared
atr=['Intercept','DeltaPrice','Promo1','Promo2','Week']
atribute=pd.DataFrame(atr)
SummaryTable=pd.concat([atribute,Co_eff,standard_error,p_values,conf_lower,conf_higher],axis=1)
SummaryTable.columns=['Atributes','Co_eff','Standard_error','P_values','conf_lower','conf_higher']
reshapedf=df1.values.reshape(len(Modeldata),(-Product_Price_min+(Product_Price_max+1)))
dataofmas=pd.DataFrame(reshapedf)
maxv=dataofmas.apply( max, axis=1 )
minv=dataofmas.apply(min,axis=1)
avgv=dataofmas.sum(axis=1)/(-Product_Price_min+(Product_Price_max+1))
wks=pd.DataFrame(weeks)
ddofs=pd.concat([wks,minv,avgv,maxv],axis=1)
dataofmas=pd.DataFrame(reshapedf)
kk=pd.DataFrame()
sums=0
for i in range(0,len(dataofmas.columns)):
sums=sums+i
vv=i*dataofmas[[i]]
kk=pd.concat([kk,vv],axis=1)
dfr=pd.DataFrame(kk)
mrevenue=dfr.apply( max, axis=1 )
prices=dfr.idxmax(axis=1)
wks=pd.DataFrame(weeks)
revenuedf=pd.concat([wks,mrevenue,prices],axis=1)
return render_template('Optimisation_heatmap_revenue.html',revenuedf=revenuedf.values,ddofs=ddofs.values,SummaryTable=SummaryTable.to_html(index=False),ss=1,weeks=weeks,demand=demand,pricev=pricev,R_square=R_square)
@app.route('/inputtomaxm',methods=["GET","POST"])
def inputtomaxm():
return render_template("Optimize.html")
@app.route("/maxm",methods=["GET","POST"])
def maxm():
if request.method=="POST":
week=request.form['TimePeriod']
price_low=request.form['Price_Lower']
price_max=request.form['Price_Upper']
promofirst=request.form['Promotion_1']
promosecond=request.form['Promotion_2']
# week=24
# price_low=6
# price_max=20
# promofirst=1
# promosecond=0
#
# time_period=24
#
# global a
# a=243.226225
# global b
# b=-9.699634
# global d
# d=1.671505
# global pr1
# pr1=21.866260
# global pr2
# pr2=-0.511606
# global cm
# cm=-14.559594
# global s_0
# s_0= 2000
# promo1=1
# promo2=0
time_period=int(week)
global a
a=intercept
global b
b=diffpriceprodvscomp_param
global d
d=week_param
global pr1
pr1=promo1_param
global pr2
pr2=promo2_param
global s_0
s_0= 2000
promo1=int(promofirst)
promo2=int(promosecond)
global comp
comp=np.random.randint(7,15,time_period)
def demand(p, a=a, b=b, d=d, promo1=promo1,promo2_param=promo2,comp=comp, t=np.linspace(1,time_period,time_period)):
""" Return demand given an array of prices p for times t
(see equation 5 above)"""
return a+(b*(p-comp))+(d*t)+(promo1*pr1)+(promo2*pr2)
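# A worked check of the linear demand model implemented above, using assumed values that are not
# taken from the app: with a=200, b=-8, d=1.5, pr1=20, pr2=-0.5, promo1=1, promo2=0, comp=10,
# p=12 and t=3, demand = 200 + (-8)*(12-10) + 1.5*3 + 20*1 + (-0.5)*0 = 200 - 16 + 4.5 + 20 = 208.5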
def objective(p_t, a, b, d,promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
return -1.0 * np.sum( p_t * demand(p_t, a, b, d,promo1,promo2, comp, t) )
def constraint_1(p_t, s_0, a, b, d, promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
""" Inventory constraint. s_0 - np.sum(x_t) >= 0.
This is an inequality constraint. See more below.
"""
return s_0 - np.sum(demand(p_t, a, b, d,promo1,promo2, comp, t))
def constraint_2(p_t):
#""" Positive demand. Another inequality constraint x_t >= 0 """
return p_t
t = np.linspace(1,time_period,time_period)
# Starting values :
b_min=int(price_low)
p_start = b_min * np.ones(len(t))
# bounds on the values :
bmax=int(price_max)
bounds = tuple((0,bmax) for x in p_start)
import scipy.optimize as optimize
# Constraints :
constraints = ({'type': 'ineq', 'fun': lambda x, s_0=s_0: constraint_1(x,s_0, a, b, d,promo1,promo2, comp, t=t)},
{'type': 'ineq', 'fun': lambda x: constraint_2(x)}
)
opt_results = optimize.minimize(objective, p_start, args=(a, b, d,promo1,promo2, comp, t),
method='SLSQP', bounds=bounds, constraints=constraints)
np.sum(opt_results['x'])
opt_price=opt_results['x']
opt_demand=demand(opt_results['x'], a, b, d, promo1,promo2_param, comp, t=t)
weeks=[]
for row in range(1,len(opt_price)+1):
weeks.append(row)
d=pd.DataFrame(weeks).astype(int)
dd=pd.DataFrame(opt_price)
optimumumprice_perweek=pd.concat([d,dd,pd.DataFrame(opt_demand).astype(int)],axis=1)
optimumumprice_perweek.columns=['Week','Price','Demand']
dataval=optimumumprice_perweek
diff=[]
diffs=[]
for i in range(0,len(opt_demand)-1):
valss=opt_demand[i]-opt_demand[i+1]
diff.append(valss)
diffs.append(i+1)
differenceofdemand_df=pd.concat([pd.DataFrame(diffs),pd.DataFrame(diff)],axis=1)
MP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmin()],1)
minimumprice=pd.DataFrame(MP).T
MaxP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmax()],1)
maximumprice=pd.DataFrame(MaxP).T
averageprice=round((optimumumprice_perweek['Price'].sum()/len(optimumumprice_perweek)),2)
MD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmin()],0)
minimumDemand=pd.DataFrame(MD).T
MaxD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmax()],0)
maximumDemand=pd.DataFrame(MaxD).T
averageDemand=round((optimumumprice_perweek['Demand'].sum()/len(optimumumprice_perweek)),0)
totaldemand=round(optimumumprice_perweek['Demand'].sum(),0)
return render_template("Optimize.html",totaldemand=totaldemand,averageDemand=averageDemand,maximumDemand=maximumDemand.values,minimumDemand=minimumDemand.values,averageprice=averageprice,maximumprice=maximumprice.values,minimumprice=minimumprice.values,dataval=dataval.values,differenceofdemand_df=differenceofdemand_df.values,optimumumprice_perweek=optimumumprice_perweek.to_html(index=False),ll=1)
@app.route("/Inventorymanagment",methods=["GET","POST"])
def Inventorymanagment():
return render_template("Inventory_Management.html")
@app.route("/DISTRIBUTION_NETWORK_OPT",methods=["GET","POST"])
def DISTRIBUTION_NETWORK_OPT():
return render_template("DISTRIBUTION_NETWORK_OPTIMIZATION.html")
@app.route("/Procurement_Plan",methods=["GET","POST"])
def Procurement_Plan():
return render_template("Procurement_Planning.html")
#<NAME>
@app.route("/fleetallocation")
def fleetallocation():
return render_template('fleetallocation.html')
@app.route("/reset")
def reset():
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM `input`")
cur.execute("DELETE FROM `output`")
cur.execute("DELETE FROM `Scenario`")
conn.commit()
conn.close()
open(localaddress+'\\static\\demodata.txt', 'w').close()
return render_template('fleetallocation.html')
@app.route("/dalink",methods = ['GET','POST'])
def dalink():
sql = "INSERT INTO `input` (`Route`,`SLoc`,`Ship-to Abb`,`Primary Equipment`,`Batch`,`Prod Dt`,`SW`,`Met Held`,`Heat No`,`Delivery Qty`,`Width`,`Length`,`Test Cut`,`Customer Priority`) VALUES( %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
if request.method == 'POST':
typ = request.form.get('type')
frm = request.form.get('from')
to = request.form.get('to')
if typ and frm and to:
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("SELECT * FROM `inventory_data` WHERE `Primary Equipment` = '" + typ + "' AND `Prod Dt` BETWEEN '" + frm + "' AND '" + to + "'")
res = cur.fetchall()
if len(res)==0:
conn.close()
return render_template('fleetallocation.html',alert='No data available')
sfile = pd.DataFrame(res)
df1 = pd.DataFrame(sfile)
df1['Prod Dt'] =df1['Prod Dt'].astype(object)
for index, i in df1.iterrows():
data = (i['Route'],i['SLoc'],i['Ship-to Abb'],i['Primary Equipment'],i['Batch'],i['Prod Dt'],i['SW'],i['Met Held'],i['Heat No'],i['Delivery Qty'],i['Width'],i['Length'],i['Test Cut'],i['Customer Priority'])
curr.execute(sql,data)
conn.commit()
conn.close()
return render_template('fleetallocation.html',typ=" Equipment type: "+typ,frm="From: "+frm,to=" To:"+to,data = sfile.to_html(index=False))
else:
return render_template('fleetallocation.html',alert ='All input fields are required')
return render_template('fleetallocation.html')
@app.route('/optimise', methods=['GET', 'POST'])
def optimise():
open(localaddress+'\\static\\demodata.txt', 'w').close()
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("DELETE FROM `output`")
conn.commit()
os.system('python optimising.py')
sa=1
cur.execute("SELECT * FROM `output`")
result = cur.fetchall()
if len(result)==0:
say=0
else:
say=1
curr.execute("SELECT * FROM `input`")
sfile = curr.fetchall()
if len(sfile)==0:
conn.close()
return render_template('fleetallocation.html',say=say,sa=sa,alert='No data available')
sfile = pd.DataFrame(sfile)
conn.close()
with open(localaddress+"\\static\\demodata.txt", "r") as f:
content = f.read()
return render_template('fleetallocation.html',say=say,sa=sa,data = sfile.to_html(index=False),content=content)
@app.route("/scenario")
def scenario():
return render_template('scenario.html')
@app.route("/scenario_insert", methods=['GET','POST'])
def scenario_insert():
if request.method == 'POST':
scenario = request.form.getlist("scenario[]")
customer_priority = request.form.getlist("customer_priority[]")
oldest_sw = request.form.getlist("oldest_sw[]")
production_date = request.form.getlist("production_date[]")
met_held_group = request.form.getlist("met_held_group[]")
test_cut_group = request.form.getlist("test_cut_group[]")
sub_grouping_rules = request.form.getlist("sub_grouping_rules[]")
load_lower_bounds = request.form.getlist("load_lower_bounds[]")
load_upper_bounds = request.form.getlist("load_upper_bounds[]")
width_bounds = request.form.getlist("width_bounds[]")
length_bounds = request.form.getlist("length_bounds[]")
description = request.form.getlist("description[]")
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
lngth = len(scenario)
curr.execute("DELETE FROM `scenario`")
if scenario and customer_priority and oldest_sw and production_date and met_held_group and test_cut_group and sub_grouping_rules and load_lower_bounds and load_upper_bounds and width_bounds and length_bounds and description:
say=0
for i in range(lngth):
scenario_clean = scenario[i]
customer_priority_clean = customer_priority[i]
oldest_sw_clean = oldest_sw[i]
production_date_clean = production_date[i]
met_held_group_clean = met_held_group[i]
test_cut_group_clean = test_cut_group[i]
sub_grouping_rules_clean = sub_grouping_rules[i]
load_lower_bounds_clean = load_lower_bounds[i]
load_upper_bounds_clean = load_upper_bounds[i]
width_bounds_clean = width_bounds[i]
length_bounds_clean = length_bounds[i]
description_clean = description[i]
if scenario_clean and customer_priority_clean and oldest_sw_clean and production_date_clean and met_held_group_clean and test_cut_group_clean and sub_grouping_rules_clean and load_lower_bounds_clean and load_upper_bounds_clean and width_bounds_clean and length_bounds_clean:
cur.execute("INSERT INTO `scenario`(scenario, customer_priority, oldest_sw, production_date, met_held_group, test_cut_group, sub_grouping_rules, load_lower_bounds, load_upper_bounds, width_bounds, length_bounds, description) VALUES('"+scenario_clean+"' ,'"+customer_priority_clean+"','"+oldest_sw_clean+"','"+production_date_clean+"','"+met_held_group_clean+"','"+test_cut_group_clean+"', '"+sub_grouping_rules_clean+"','"+load_lower_bounds_clean+"', '"+load_upper_bounds_clean+"','"+width_bounds_clean+"','"+length_bounds_clean+"','"+description_clean+"')")
else:
say = 1
conn.commit()
if(say==0):
alert='All Scenarios inserted'
else:
alert='Some scenarios were not inserted'
return (alert)
conn.close()
return ('All fields are required!')
return ('Failed!!!')
@app.route("/fetch", methods=['GET','POST'])
def fetch():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("SELECT * FROM scenario")
result = cur.fetchall()
if len(result)==0:
conn.close()
return render_template('scenario.html',alert1='No scenarios Available')
result1 = pd.DataFrame(result)
result1 = result1.drop('Sub-grouping rules', axis=1)
conn.close()
return render_template('scenario.html',sdata = result1.to_html(index=False))
return ("Error")
@app.route("/delete", methods=['GET','POST'])
def delete():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM scenario")
conn.commit()
conn.close()
return render_template('scenario.html',alert1="All the scenarios were dropped!")
return ("Error")
@app.route('/papadashboard', methods=['GET', 'POST'])
def papadashboard():
sql1 = "SELECT `Scenario`, MAX(`Wagon-No`) AS 'Wagon Used', COUNT(`Batch`) AS 'Products Allocated', SUM(`Delivery Qty`) AS 'Total Product Allocated', SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', SUM(`Width`)/(MAX(`Wagon-No`)) AS 'Average Width Used' FROM `output` WHERE `Wagon-No`>0 GROUP BY `Scenario`"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curs = conn.cursor()
curs.execute("SELECT `scenario` FROM `scenario`")
sdata = curs.fetchall()
if len(sdata)==0:
conn.close()
return render_template('warning.html',alert='No data available')
cur1 = conn.cursor()
cur1.execute(sql1)
data1 = cur1.fetchall()
if len(data1)==0:
conn.close()
return render_template('warning.html',alert='Infeasible due to Insufficient Load')
cu = conn.cursor()
cu.execute("SELECT `length_bounds`,`width_bounds`,`load_lower_bounds`,`load_upper_bounds` FROM `scenario`")
sdaa = cu.fetchall()
sdaa = pd.DataFrame(sdaa)
asa=list()
for index, i in sdaa.iterrows():
hover = "Length Bound:"+str(i['length_bounds'])+", Width Bound:"+str(i['width_bounds'])+", Load Upper Bound:"+str(i['load_upper_bounds'])+", Load Lower Bound:"+str(i['load_lower_bounds'])
asa.append(hover)
asa=pd.DataFrame(asa)
asa.columns=['Details']
data1 = pd.DataFrame(data1)
data1['Average Width Used'] = data1['Average Width Used'].astype(int)
data1['Total Product Allocated'] = data1['Total Product Allocated'].astype(int)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(float)
data1['Average Load Carried'] = round(data1['Average Load Carried'],2)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(str)
fdata = pd.DataFrame(columns=['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used','Details'])
fdata[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']] = data1[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']]
fdata['Details'] = asa['Details']
fdata = fdata.values
sql11 = "SELECT `Scenario`, SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', COUNT(`Batch`) AS 'Allocated', SUM(`Delivery Qty`) AS 'Load Allocated' FROM `output`WHERE `Wagon-No`>0 GROUP BY `Scenario`"
sql21 = "SELECT COUNT(`Batch`) AS 'Total Allocated' FROM `output` GROUP BY `Scenario`"
sql31 = "SELECT `load_upper_bounds` FROM `scenario`"
conn1 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur11 = conn1.cursor()
cur21 = conn1.cursor()
cur31 = conn1.cursor()
cur11.execute(sql11)
data11 = cur11.fetchall()
data11 = pd.DataFrame(data11)
cur21.execute(sql21)
data21 = cur21.fetchall()
data21 = pd.DataFrame(data21)
cur31.execute(sql31)
data31 = cur31.fetchall()
data31 = pd.DataFrame(data31)
data11['Average Load Carried']=data11['Average Load Carried'].astype(float)
fdata1 = pd.DataFrame(columns=['Scenario','Utilisation Percent','Allocation Percent','Total Load Allocated'])
fdata1['Utilisation Percent'] = round(100*(data11['Average Load Carried']/data31['load_upper_bounds']),2)
data11['Load Allocated']=data11['Load Allocated'].astype(int)
fdata1[['Scenario','Total Load Allocated']]=data11[['Scenario','Load Allocated']]
data11['Allocated']=data11['Allocated'].astype(float)
data21['Total Allocated']=data21['Total Allocated'].astype(float)
fdata1['Allocation Percent'] = round(100*(data11['Allocated']/data21['Total Allocated']),2)
fdata1['Allocation Percent'] = fdata1['Allocation Percent'].astype(str)
fdat1 = fdata1.values
conn1.close()
if request.method == 'POST':
conn2 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn2.cursor()
ata = request.form['name']
cur.execute("SELECT * FROM `output` WHERE `Scenario` = '"+ata+"' ")
ssdata = cur.fetchall()
datasss = pd.DataFrame(ssdata)
data=datasss.replace("Not Allocated", 0)
df=data[['Delivery Qty','Wagon-No','Width','Group-Number']]
df['Wagon-No']=df['Wagon-No'].astype(int)
a=df['Wagon-No'].max()
##bar1
result_array = np.array([])
for i in range (a):
data_i = df[df['Wagon-No'] == i+1]
del_sum_i = data_i['Delivery Qty'].sum()
per_i=[((del_sum_i)/(205000)*100)]
result_array = np.append(result_array, per_i)
result_array1 = np.array([])
for j in range (a):
data_j = df[df['Wagon-No'] == j+1]
del_sum_j = data_j['Width'].sum()
per_util_j=[((del_sum_j)/(370)*100)]
result_array1 = np.append(result_array1, per_util_j)
##pie1
df112 = df[df['Wagon-No'] == 0]
pie1 = df112 ['Width'].sum()
df221 = df[df['Wagon-No'] > 0]
pie11 = df221['Width'].sum()
df1=data[['SW','Group-Number']]
dff1 = df1[data['Wagon-No'] == 0]
da1 =dff1.groupby(['SW']).count()
re11 = np.array([])
res12 = np.append(re11,da1)
da1['SW'] = da1.index
r1 = np.array([])
r12 = np.append(r1, da1['SW'])
df0=data[['Group-Number','Route','SLoc','Ship-to Abb','Wagon-No','Primary Equipment']]
df1=df0.replace("Not Allocated", 0)
f2 = pd.DataFrame(df1)
f2['Wagon-No']=f2['Wagon-No'].astype(int)
####Not-Allocated
f2['Group']=data['Group-Number']
df=f2[['Group','Wagon-No']]
dee = df[df['Wagon-No'] == 0]
deer =dee.groupby(['Group']).count()##Not Allocated
deer['Group'] = deer.index
##Total-Data
f2['Group1']=data['Group-Number']
dfc=f2[['Group1','Wagon-No']]
dfa=pd.DataFrame(dfc)
der = dfa[dfa['Wagon-No'] >= 0]
dear =der.groupby(['Group1']).count()##Wagons >1
dear['Group1'] = dear.index
dear.rename(columns={'Wagon-No': 'Allocated'}, inplace=True)
result = pd.concat([deer, dear], axis=1, join_axes=[dear.index])
resu=result[['Group1','Wagon-No','Allocated']]
result1=resu.fillna(00)
r5 = np.array([])
r6 = np.append(r5, result1['Wagon-No'])
r66=r6[0:73]###Not Allocated
r7 = np.append(r5, result1['Allocated'])
r77=r7[0:73]####total
r8 = np.append(r5, result1['Group1'])
r88=r8[0:73]###group
conn2.close()
return render_template('papadashboard.html',say=1,data=fdata,data1=fdat1,ata=ata,bar1=result_array,bar11=result_array1,pie11=pie1,pie111=pie11,x=r12,y=res12,xname=r88, bar7=r77,bar8=r66)
conn.close()
return render_template('papadashboard.html',data=fdata,data1=fdat1)
@app.route('/facilityallocation')
def facilityallocation():
return render_template('facilityhome.html')
@app.route('/dataimport')
def dataimport():
return render_template('facilityimport.html')
@app.route('/dataimport1')
def dataimport1():
return redirect(url_for('dataimport'))
@app.route('/facility_location')
def facility_location():
return render_template('facility_location.html')
@app.route('/facility')
def facility():
return redirect(url_for('facilityallocation'))
@app.route("/imprt", methods=['GET','POST'])
def imprt():
global customerdata
global factorydata
global Facyy
global Custo
customerfile = request.files['CustomerData'].read()
factoryfile = request.files['FactoryData'].read()
if len(customerfile)==0 or len(factoryfile)==0:
return render_template('facilityhome.html',warning='Data Invalid')
cdat=pd.read_csv(io.StringIO(customerfile.decode('utf-8')))
customerdata=pd.DataFrame(cdat)
fdat=pd.read_csv(io.StringIO(factoryfile.decode('utf-8')))
factorydata=pd.DataFrame(fdat)
Custo=customerdata.drop(['Lat','Long'],axis=1)
Facyy=factorydata.drop(['Lat','Long'],axis=1)
return render_template('facilityimport1.html',loc1=factorydata.values,loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False))
@app.route("/gmap")
def gmap():
custdata=customerdata
Factorydata=factorydata
price=1
#to get distance between customer and factory
#first get the Dimension
#get no of factories
Numberoffact=len(Factorydata)
#get Number of Customer
Numberofcust=len(custdata)
#Get The dist/unit cost
cost=price
#def function for distance calculation
# approximate radius of earth in km
def dist(lati1,long1,lati2,long2,cost):
R = 6373.0
lat1 = radians(lati1)
lon1 = radians(long1)
lat2 = radians(lati2)
lon2 = radians(long2)
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance =round(R * c,2)
return distance*cost
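# Quick sanity check of the haversine helper above (coordinates are illustrative, not app data):
# dist(48.8566, 2.3522, 51.5074, -0.1278, cost=1) is roughly 344 km, about the Paris-London
# great-circle distance, so multiplying by `cost` scales distance into a delivery cost per unit.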
#Create a list for customer and factory
def costtable(custdata,Factorydata):
distance=list()
for lat1,long1 in zip(custdata.Lat, custdata.Long):
for lat2,long2 in zip(Factorydata.Lat, Factorydata.Long):
distance.append(dist(lat1,long1,lat2,long2,cost))
distable=np.reshape(distance, (Numberofcust,Numberoffact)).T
tab=pd.DataFrame(distable,index=[Factorydata.Factory],columns=[custdata.Customer])
return tab
DelCost=costtable(custdata,Factorydata)#return cost table of the customers and factories
#creating Demand Table
demand=np.array(custdata.Demand)
col1=np.array(custdata.Customer)
Demand=pd.DataFrame(demand,col1).T
cols=sorted(col1)
#Creating capacity table
fact=np.array(Factorydata.Capacity)
col2=np.array(Factorydata.Factory)
Capacity=pd.DataFrame(fact,index=col2).T
colo=sorted(col2)
#creating Fixed cost table
fixed_c=np.array(Factorydata.FixedCost)
col3=np.array(Factorydata.Factory)
FixedCost= pd.DataFrame(fixed_c,index=col3)
# Create the 'model' variable to contain the problem data
model = LpProblem("Min Cost Facility Location problem",LpMinimize)
production = pulp.LpVariable.dicts("Production",
((factory, cust) for factory in Capacity for cust in Demand),
lowBound=0,
cat='Integer')
factory_status =pulp.LpVariable.dicts("factory_status", (factory for factory in Capacity),
cat='Binary')
cap_slack =pulp.LpVariable.dicts("capslack",
(cust for cust in Demand),
lowBound=0,
cat='Integer')
model += pulp.lpSum(
[DelCost.loc[factory, cust] * production[factory, cust] for factory in Capacity for cust in Demand]
+ [FixedCost.loc[factory] * factory_status[factory] for factory in Capacity] + 5000000*cap_slack[cust] for cust in Demand)
for cust in Demand:
model += pulp.lpSum(production[factory, cust] for factory in Capacity)+cap_slack[cust] == Demand[cust]
for factory in Capacity:
model += pulp.lpSum(production[factory, cust] for cust in Demand) <= Capacity[factory]*factory_status[factory]
model.solve()
print("Status:", LpStatus[model.status])
for v in model.variables():
print(v.name, "=", v.varValue)
print("Total Cost of Ingredients per can = ", value(model.objective))
# Getting the table for the Factorywise Allocation
def factoryalloc(model,Numberoffact,Numberofcust,listoffac,listofcus):
listj=list()
listk=list()
listcaps=list()
for v in model.variables():
listj.append(v.varValue)
customer=listj[(len(listj)-Numberofcust-Numberoffact):(len(listj)-Numberoffact)]
del listj[(len(listj)-Numberoffact-Numberofcust):len(listj)]
for row in listj:
if row==0:
listk.append(0)
else:
listk.append(1)
x=np.reshape(listj,(Numberoffact,Numberofcust))
y=np.reshape(listk,(Numberoffact,Numberofcust))
FactoryAlloc_table=pd.DataFrame(x,index=listoffac,columns=listofcus)
Factorystatus=pd.DataFrame(y,index=listoffac,columns=listofcus)
return FactoryAlloc_table,Factorystatus,customer
Alltable,FactorystatusTable,ded=factoryalloc(model,Numberoffact,Numberofcust,colo,cols)
Allstatus=list()
dede=pd.DataFrame(ded,columns=['UnSatisfied'])
finaldede=dede[dede.UnSatisfied != 0]
colss=pd.DataFrame(cols,columns=['CustomerLocation'])
fina=pd.concat([colss,finaldede],axis=1, join='inner')
print(fina)
for i in range(len(Alltable)):
for j in range(len(Alltable.columns)):
if (Alltable.loc[Alltable.index[i], Alltable.columns[j]]>0):
all=[Alltable.index[i], Alltable.columns[j], Alltable.loc[Alltable.index[i], Alltable.columns[j]]]
Allstatus.append(all)
Status=pd.DataFrame(Allstatus,columns=['Factory','Customer','Allocation']).astype(str)
#To get the Factory Data
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
#Making Connection to the Database
cur = con.cursor()
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Status.to_sql(con=engine, name='facilityallocation',index=False, if_exists='replace')
cur = con.cursor()
cur1 = con.cursor()
cur.execute("SELECT * FROM `facilityallocation`")
file=cur.fetchall()
dat=pd.DataFrame(file)
lst=dat[['Factory','Customer']]
mlst=[]
names=lst['Factory'].unique().tolist()
for name in names:
lsty=lst.loc[lst.Factory==name]
mlst.append(lsty.values)
data=dat[['Factory','Customer','Allocation']]
sql="SELECT SUM(`Allocation`) AS 'UseCapacity', `Factory` FROM `facilityallocation` GROUP BY `Factory`"
cur1.execute(sql)
file2=cur1.fetchall()
udata=pd.DataFrame(file2)
bdata=factorydata.sort_values(by=['Factory'])
adata=bdata['Capacity']
con.close()
infdata=dat[['Customer','Factory','Allocation']]
infodata=infdata.sort_values(by=['Customer'])
namess=infodata.Customer.unique()
lstyy=[]
for nam in namess:
bb=infodata[infodata.Customer==nam]
comment=bb['Factory']+":"+bb['Allocation']
prin=[nam,str(comment.values).strip('[]')]
lstyy.append(prin)
return render_template('facilityoptimise.html',say=1,lstyy=lstyy,x1=adata.values,x2=udata.values,dat=mlst,loc1=factorydata.values,
loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False),summary=data.to_html(index=False))
#Demand Forecast
@app.route('/demandforecast')
def demandforecast():
return render_template('demandforecast.html')
@app.route("/demandforecastdataimport",methods = ['GET','POST'])
def demandforecastdataimport():
if request.method== 'POST':
global actualforecastdata
flat=request.files['flat'].read()
if len(flat)==0:
return('No Data Selected')
cdat=pd.read_csv(io.StringIO(flat.decode('utf-8')))
actualforecastdata=pd.DataFrame(cdat)
return render_template('demandforecast.html',data=actualforecastdata.to_html(index=False))
@app.route('/demandforecastinput', methods = ['GET', 'POST'])
def demandforecastinput():
if request.method=='POST':
global demandforecastfrm
global demandforecasttoo
global demandforecastinputdata
demandforecastfrm=request.form['from']
demandforecasttoo=request.form['to']
value=request.form['typedf']
demandforecastinputdata=actualforecastdata[(actualforecastdata['Date'] >= demandforecastfrm) & (actualforecastdata['Date'] <= demandforecasttoo)]
if value=='monthly': ##monthly
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
demandforecastinputdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('monthlyforecast'))
if value=='quarterly': ##quarterly
global Quaterdata
dated2 = demandforecastinputdata['Date']
nlst=[]
for var in dated2:
var1 = int(var[5:7])
if var1 >=1 and var1 <4:
varr=var[:4]+'-01-01'
elif var1 >=4 and var1 <7:
varr=var[:4]+'-04-01'
elif var1 >=7 and var1 <10:
varr=var[:4]+'-07-01'
else:
varr=var[:4]+'-10-01'
nlst.append(varr)
nwlst=pd.DataFrame(nlst,columns=['Newyear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=nwlst['Newyear']
Quaterdata=demandforecastinputdata.groupby(['Date']).sum()
Quaterdata=Quaterdata.reset_index()
Quaterdata=Quaterdata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Quaterdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('quarterlyforecast'))
if value=='yearly': ##yearly
global Yeardata
#copydata=demandforecastinputdata
dated1 = demandforecastinputdata['Date']
lst=[]
for var in dated1:
var1 = var[:4]+'-01-01'
lst.append(var1)
newlst=pd.DataFrame(lst,columns=['NewYear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=newlst['NewYear']
Yeardata=demandforecastinputdata.groupby(['Date']).sum()
Yeardata=Yeardata.reset_index()
Yeardata=Yeardata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Yeardata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('yearlyforecast'))
#if value=='weakly': ##weakly
# return redirect(url_for('output4'))
return render_template('demandforecast.html')
@app.route("/monthlyforecast",methods = ['GET','POST'])
def monthlyforecast():
data = pd.DataFrame(demandforecastinputdata)
# container1
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])
# container2
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])
# container3
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])
# container4
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])
# container1
df=a1[['GDP']]
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutput`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutput`")
con.commit()
sql = "INSERT INTO `forecastoutput` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
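# Tiny numeric check of the three error metrics above (numbers assumed): with y_true=[100, 200]
# and y_pred=[110, 190], ME = ((100-110)+(200-190))/2 = 0, MAE = (10+10)/2 = 10, and
# MAPE = (10/110 + 10/190)/2 * 100, approximately 7.2 (note the denominator is the prediction, as coded).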
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Monthly####################################
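# ARIMA(1,0,0) is an AR(1) model fitted per column over the same forecast horizon
# (start_index1..end_index1) as the moving-average block above.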
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((bs-As)/As)*100))
return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
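# The regression block fits a simple linear trend per column: the target is the series
# value and the only feature is its ordinal position (0,1,2,...); the validation split V
# is used to score ME/MAE/MAPE for the TotalDemand column.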
dates=pd.date_range(start_index1,end_index1,freq='M')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='M', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting list of attributes in the data frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now run for each attribute column
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy of the model
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
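# Simple exponential smoothing: result[t] = alpha*y[t] + (1-alpha)*result[t-1] is the
# in-sample smoothed series, and forecastlist rolls the recursion forward for
# predictonterm periods beyond the last observation.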
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
meanerr=ME(data[data.columns.tolist()[i]],pred)
meanaverr=MAE(data[data.columns.tolist()[i]],pred)
mperr=MAPE(data[data.columns.tolist()[i]],pred)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutput',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutput`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('monthly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Monthly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('monthly.html',sayy=1,smt='Monthly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
##quarterly
@app.route("/quarterlyforecast",methods = ['GET','POST'])
def quarterlyforecast():
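# Quarterly counterpart of the monthly forecast route above: same pipeline, but run on
# Quaterdata with a quarterly ('3M') frequency, writing to forecastoutputq/summaryoutputq.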
data = pd.DataFrame(Quaterdata)
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])# container1
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])# container2
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])# container3
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])# container4
# container1
df=a1[['GDP']]/3
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]/3
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutputq`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutputq`")
con.commit()
sql = "INSERT INTO `forecastoutputq` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#index the quarterly data by Date
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Quarterly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((bs-As)/As)*100))
return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='3M')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='3M', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting list of attributes in the data frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now run for each attribute column
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy of the model
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='3M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='3M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
meanerr=ME(data[data.columns.tolist()[i]],pred)
meanaverr=MAE(data[data.columns.tolist()[i]],pred)
mperr=MAPE(data[data.columns.tolist()[i]],pred)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='3M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='3M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutputq',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutputq`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('quarterly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Quarterly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('quarterly.html',sayy=1,smt='Quarterly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
##yearly
@app.route("/yearlyforecast",methods = ['GET','POST'])
def yearlyforecast():
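# Yearly counterpart: same pipeline run on Yeardata with an annual ('A') frequency,
# writing to forecastoutputy/summaryoutputy.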
data = pd.DataFrame(Yeardata)
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])# container1
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])# container2
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])# container3
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])# container4
# container1
df=a1[['GDP']]/12
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date']
vari=[]
for var in tdf:
vari.append(var[:4])
tres11 = vari
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]/12
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutputy`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutputy`")
con.commit()
sql = "INSERT INTO `forecastoutputy` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#index the yearly data by Date
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
dindex=(tdfs.index).strftime("20%y")
tdfs['Date']=(dindex)
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Yearly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((bs-As)/As)*100))
return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='A')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='A', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting list of attributes in the data frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now run for each attribute column
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy of the model
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='A')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='A', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
meanerr=ME(data[data.columns.tolist()[i]],pred)
meanaverr=MAE(data[data.columns.tolist()[i]],pred)
mperr=MAPE(data[data.columns.tolist()[i]],pred)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='A')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='A', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutputy',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutputy`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('yearly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Yearly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('yearly.html',sayy=1,smt='Yearly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
#############################Dashboard#######################################
#yearly
@app.route('/youtgraph', methods = ['GET','POST'])
def youtgraph():
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("SELECT `Model` FROM `forecastoutputy` GROUP BY `Model`")
sfile=cur.fetchall()
global yqst
qlist=pd.DataFrame(sfile)
qlst=qlist['Model'].astype(str)
yqst=qlst.values
con.close()
return render_template('ydashboard.html',qulist=yqst)
@app.route('/youtgraph1', methods = ['GET', 'POST'])
def youtgraph1():
if request.method=='POST':
value=request.form['item']
qconn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
qcur = qconn.cursor()
qcur.execute("SELECT * FROM `demandforecastinputdata`")
qsdata = qcur.fetchall()
qdata = pd.DataFrame(qsdata)
#graph1
adata=qdata['TotalDemand']
x_axis=qdata['Date'].astype(str)
#predictedgraph1
pcur = qconn.cursor()
pcur.execute("SELECT * FROM `forecastoutputy` WHERE `Model`='"+value+"'")
psdata = pcur.fetchall()
edata = pd.DataFrame(psdata)
eedata=edata['TotalDemand'].astype(float)
ldata=eedata.values
nur = qconn.cursor()
nur.execute("SELECT MIN(`Date`) AS 'MIN' FROM `forecastoutputy` WHERE `Model`='"+value+"'")
MIN=nur.fetchone()
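# Pad the predicted series with "null" placeholders for every historical date that
# precedes the first forecast date, so the actual and predicted lines share one
# x-axis in the dashboard chart.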
pdata=[]
i=0
k=0
a="null"
while(x_axis[i]<MIN['MIN']):
pdata.append(a)
i=i+1
k=k+1
ata=np.concatenate((pdata,ldata),axis=0)
#x axis
fcur = qconn.cursor()
fcur.execute("SELECT `Date` FROM `demandforecastinputdata` WHERE `Date`<'"+MIN['MIN']+"'")
fsdata = fcur.fetchall()
indx = pd.DataFrame(fsdata)
indx=indx['Date']
index=np.concatenate((indx,edata['Date'].values),axis=0)
yindx=[]
for var in index:
var1 = var[:4]
yindx.append(var1)
#bargraph
bcur = qconn.cursor()
bcur.execute("SELECT * FROM `forecastoutputy` WHERE `Model`='"+value+"'")
bsdata = bcur.fetchall()
bdata = pd.DataFrame(bsdata)
btdf=bdata['Date'].astype(str)
btre11 = np.array([])
btres11 = np.append(btre11,btdf)
b1tdf1=bdata[['Spain']] #spain
b1tr1 = np.array([])
b1tr11 = np.append(b1tr1, b1tdf1)
b2tdf1=bdata[['Austria']] #austria
b2tr1 = np.array([])
b2tr11 = np.append(b2tr1, b2tdf1)
b3tdf1=bdata[['Japan']] #japan
b3tr1 = np.array([])
b3tr11 = np.append(b3tr1, b3tdf1)
b4tdf1=bdata[['Hungary']] #hungary
b4tr1 = np.array([])
b4tr11 = np.append(b4tr1, b4tdf1)
b5tdf1=bdata[['Germany']] #germany
b5tr1 = np.array([])
b5tr11 = np.append(b5tr1, b5tdf1)
b6tdf1=bdata[['TotalDemand']] #total
b6tr1 = np.array([])
b6tr11 = np.append(b6tr1, b6tdf1)
#comparisonbar
ccur = qconn.cursor()
ccur.execute("SELECT * FROM `forecastoutputy` WHERE `Model`='"+value+"'")
csdata = ccur.fetchall()
cdata = pd.DataFrame(csdata)
ctdf=cdata['Date'].astype(str)
ctre11 = np.array([])
ctres11 = np.append(ctre11,ctdf)
c1tdf1=cdata[['RatioIncrease']] #ratioincrease
c1tr1 = np.array([])
c1tr11 = np.append(c1tr1, c1tdf1)
qcur.execute("SELECT * FROM `summaryerror`")
sdata = qcur.fetchall()
mape = pd.DataFrame(sdata)
qconn.close()
return render_template('ydashboard.html',mon=value,qulist=yqst,mape=mape.to_html(index=False),say=1,pdata=ata,adata=adata.values,x_axis=yindx,frm=len(qdata)-1,to=k,x13=btres11,x14=ctres11,y13=b1tr11,y14=b2tr11,y15=b3tr11,y16=b4tr11,y17=b5tr11,y18=b6tr11,y19=c1tr11)
#monthly
@app.route('/moutgraph', methods = ['GET','POST'])
def moutgraph():
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("SELECT `Model` FROM `forecastoutput` GROUP BY `Model`")
sfile=cur.fetchall()
global mqst
qlist=pd.DataFrame(sfile)
qlst=qlist['Model'].astype(str)
mqst=qlst.values
con.close()
return render_template('mdashboard.html',qulist=mqst)
@app.route('/moutgraph1', methods = ['GET', 'POST'])
def moutgraph1():
if request.method=='POST':
value=request.form['item']
qconn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
qcur = qconn.cursor()
qcur.execute("SELECT * FROM `demandforecastinputdata`")
qsdata = qcur.fetchall()
qdata = pd.DataFrame(qsdata)
#graph1
adata=qdata['TotalDemand']
x_axis=qdata['Date'].astype(str)
#predictedgraph1
pcur = qconn.cursor()
pcur.execute("SELECT * FROM `forecastoutput` WHERE `Model`='"+value+"'")
psdata = pcur.fetchall()
edata = pd.DataFrame(psdata)
eedata=edata['TotalDemand'].astype(float)
ldata=eedata.values
nur = qconn.cursor()
nur.execute("SELECT MIN(`Date`) AS 'MIN' FROM `forecastoutput` WHERE `Model`='"+value+"'")
MIN=nur.fetchone()
pdata=[]
i=0
k=0
a="null"
while(x_axis[i]<MIN['MIN']):
pdata.append(a)
i=i+1
k=k+1
ata=np.concatenate((pdata,ldata),axis=0)
#x axis
fcur = qconn.cursor()
fcur.execute("SELECT `Date` FROM `demandforecastinputdata` WHERE `Date`<'"+MIN['MIN']+"'")
fsdata = fcur.fetchall()
indx = pd.DataFrame(fsdata)
indx=indx['Date'].astype(str).values
index=np.concatenate((indx,edata['Date'].values),axis=0)
#bargraph
bcur = qconn.cursor()
bcur.execute("SELECT * FROM `forecastoutput` WHERE `Model`='"+value+"'")
bsdata = bcur.fetchall()
bdata = pd.DataFrame(bsdata)
btdf=bdata['Date'].astype(str)
btre11 = np.array([])
btres11 = np.append(btre11,btdf)
b1tdf1=bdata[['Spain']] #spain
b1tr1 = np.array([])
b1tr11 = np.append(b1tr1, b1tdf1)
b2tdf1=bdata[['Austria']] #austria
b2tr1 = np.array([])
b2tr11 = np.append(b2tr1, b2tdf1)
b3tdf1=bdata[['Japan']] #japan
b3tr1 = np.array([])
b3tr11 = np.append(b3tr1, b3tdf1)
b4tdf1=bdata[['Hungary']] #hungary
b4tr1 = np.array([])
b4tr11 = np.append(b4tr1, b4tdf1)
b5tdf1=bdata[['Germany']] #germany
b5tr1 = np.array([])
b5tr11 = np.append(b5tr1, b5tdf1)
b6tdf1=bdata[['TotalDemand']] #total
b6tr1 = np.array([])
b6tr11 = np.append(b6tr1, b6tdf1)
#comparisonbar
ccur = qconn.cursor()
ccur.execute("SELECT * FROM `forecastoutput` WHERE `Model`='"+value+"'")
csdata = ccur.fetchall()
cdata = pd.DataFrame(csdata)
ctdf=cdata['Date'].astype(str)
ctre11 = np.array([])
ctres11 = np.append(ctre11,ctdf)
c1tdf1=cdata[['RatioIncrease']] #ratioincrease
c1tr1 = np.array([])
c1tr11 = np.append(c1tr1, c1tdf1)
qcur.execute("SELECT * FROM `summaryerror`")
sdata = qcur.fetchall()
mape = pd.DataFrame(sdata)
qconn.close()
return render_template('mdashboard.html',mon=value,qulist=mqst,mape=mape.to_html(index=False),say=1,pdata=ata,adata=adata.values,x_axis=index,frm=len(qdata)-1,to=k,x13=btres11,x14=ctres11,y13=b1tr11,y14=b2tr11,y15=b3tr11,y16=b4tr11,y17=b5tr11,y18=b6tr11,y19=c1tr11)
#quarterly
@app.route('/qoutgraph', methods = ['GET','POST'])
def qoutgraph():
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("SELECT `Model` FROM `forecastoutputq` GROUP BY `Model`")
sfile=cur.fetchall()
global qst
qlist=pd.DataFrame(sfile)
qlst=qlist['Model'].astype(str)
qst=qlst.values
con.close()
return render_template('qdashboard.html',qulist=qst)
@app.route('/qoutgraph1', methods = ['GET', 'POST'])
def qoutgraph1():
if request.method=='POST':
value=request.form['item']
qconn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
qcur = qconn.cursor()
qcur.execute("SELECT * FROM `demandforecastinputdata`")
qsdata = qcur.fetchall()
qdata = pd.DataFrame(qsdata)
#graph1
adata=qdata['TotalDemand']
x_axis=qdata['Date'].astype(str)
#predictedgraph1
pcur = qconn.cursor()
pcur.execute("SELECT * FROM `forecastoutputq` WHERE `Model`='"+value+"'")
psdata = pcur.fetchall()
edata = pd.DataFrame(psdata)
eedata=edata['TotalDemand'].astype(float)
ldata=eedata.values
nur = qconn.cursor()
nur.execute("SELECT MIN(`Date`) AS 'MIN' FROM `forecastoutputq` WHERE `Model`='"+value+"'")
MIN=nur.fetchone()
pdata=[]
i=0
k=0
a="null"
while(x_axis[i]<MIN['MIN']):
pdata.append(a)
i=i+1
k=k+1
ata=np.concatenate((pdata,ldata),axis=0)
#x axis
fcur = qconn.cursor()
fcur.execute("SELECT `Date` FROM `demandforecastinputdata` WHERE `Date`<'"+MIN['MIN']+"'")
fsdata = fcur.fetchall()
indx = pd.DataFrame(fsdata)
indx=indx['Date'].astype(str).values
index=np.concatenate((indx,edata['Date'].values),axis=0)
#bargraph
bcur = qconn.cursor()
bcur.execute("SELECT * FROM `forecastoutputq` WHERE `Model`='"+value+"'")
bsdata = bcur.fetchall()
bdata = pd.DataFrame(bsdata)
btdf=bdata['Date'].astype(str)
btre11 = np.array([])
btres11 = np.append(btre11,btdf)
b1tdf1=bdata[['Spain']] #spain
b1tr1 = np.array([])
b1tr11 = np.append(b1tr1, b1tdf1)
b2tdf1=bdata[['Austria']] #austria
b2tr1 = np.array([])
b2tr11 = np.append(b2tr1, b2tdf1)
b3tdf1=bdata[['Japan']] #japan
b3tr1 = np.array([])
b3tr11 = np.append(b3tr1, b3tdf1)
b4tdf1=bdata[['Hungary']] #hungary
b4tr1 = np.array([])
b4tr11 = np.append(b4tr1, b4tdf1)
b5tdf1=bdata[['Germany']] #germany
b5tr1 = np.array([])
b5tr11 = np.append(b5tr1, b5tdf1)
b6tdf1=bdata[['TotalDemand']] #total
b6tr1 = np.array([])
b6tr11 = np.append(b6tr1, b6tdf1)
#comparisonbar
ccur = qconn.cursor()
ccur.execute("SELECT * FROM `forecastoutputq` WHERE `Model`='"+value+"'")
csdata = ccur.fetchall()
cdata = pd.DataFrame(csdata)
ctdf=cdata['Date'].astype(str)
ctre11 = np.array([])
ctres11 = np.append(ctre11,ctdf)
c1tdf1=cdata[['RatioIncrease']] #ratioincrease
c1tr1 = np.array([])
c1tr11 = np.append(c1tr1, c1tdf1)
qcur.execute("SELECT * FROM `summaryerror`")
sdata = qcur.fetchall()
mape = pd.DataFrame(sdata)
qconn.close()
return render_template('qdashboard.html',mon=value,qulist=qst,mape=mape.to_html(index=False),say=1,pdata=ata,adata=adata.values,x_axis=index,frm=len(qdata)-1,to=k,x13=btres11,x14=ctres11,y13=b1tr11,y14=b2tr11,y15=b3tr11,y16=b4tr11,y17=b5tr11,y18=b6tr11,y19=c1tr11)
@app.route("/yearlysimulation",methods = ['GET','POST'])
def yearlysimulation():
if request.method == 'POST':
gdp=0
pi=0
ms=0
adv=0
gdp_dis=request.form.get('gdp_dis')
pi_dis=request.form.get('pi_dis')
ms_dis=request.form.get('ms_dis')
adv_dis=request.form.get('adv_dis')
min=request.form.get('min')
max=request.form.get('max')
mue=request.form.get('mue')
sig=request.form.get('sig')
cval=request.form.get('cval')
min1=request.form.get('min1')
max1=request.form.get('max1')
mue1=request.form.get('mue1')
sig1=request.form.get('sig1')
cval1=request.form.get('cval1')
min2=request.form.get('min2')
max2=request.form.get('max2')
mue2=request.form.get('mue2')
sig2=request.form.get('sig2')
cval2=request.form.get('cval2')
min3=request.form.get('min3')
max3=request.form.get('max3')
mue3=request.form.get('mue3')
sig3=request.form.get('sig3')
cval3=request.form.get('cval3')
itr= int(request.form.get('itr'))
frm = request.form.get('from')
sfrm=int(frm[:4])
to = request.form.get('to')
sto=int(to[:4])
kwargs={}
atrtable=[]
if request.form.get('gdp'):
gdp=1
atrtable.append('Gdp')
if gdp_dis == 'gdp_dis1':
min=request.form.get('min')
max=request.form.get('max')
kwargs['Gdp_dis']='Uniform'
kwargs['gdpvalues']=[min,max]
if gdp_dis == 'gdp_dis2':
mue=request.form.get('mue')
sig=request.form.get('sig')
kwargs['Gdp_dis']='Normal'
kwargs['gdpvalues']=[mue,sig]
if gdp_dis == 'gdp_dis3':
kwargs['Gdp_dis']='Random'
pass
if gdp_dis == 'gdp_dis4':
cval=request.form.get('cval')
kwargs['Gdp_dis']='Constant'
kwargs['gdpvalues']=[cval]
if request.form.get('pi'):
pi=1
atrtable.append('Pi')
if pi_dis == 'pi_dis1':
min1=request.form.get('min1')
max1=request.form.get('max1')
kwargs['Pi_dis']='Uniform'
kwargs['pivalues']=[min1,max1]
if pi_dis == 'pi_dis2':
mue1=request.form.get('mue1')
sig1=request.form.get('sig1')
kwargs['Pi_dis']='Normal'
kwargs['pivalues']=[mue1,sig1]
if pi_dis == 'pi_dis3':
kwargs['Pi_dis']='Random'
pass
if pi_dis == 'pi_dis4':
cval1=request.form.get('cval1')
kwargs['Pi_dis']='Constant'
kwargs['pivalues']=[cval1]
if request.form.get('ms'):
ms=1
atrtable.append('Ms')
if ms_dis == 'ms_dis1':
min=request.form.get('min2')
max=request.form.get('max2')
kwargs['Ms_dis']='Uniform'
kwargs['msvalues']=[min2,max2]
if ms_dis == 'ms_dis2':
mue=request.form.get('mue2')
sig=request.form.get('sig2')
kwargs['Ms_dis']='Normal'
kwargs['msvalues']=[mue2,sig2]
if ms_dis == 'ms_dis3':
kwargs['Ms_dis']='Random'
pass
if ms_dis == 'ms_dis4':
cval=request.form.get('cval2')
kwargs['Ms_dis']='Constant'
kwargs['msvalues']=[cval2]
if request.form.get('adv'):
adv=1
atrtable.append('Adv')
if adv_dis == 'adv_dis1':
min=request.form.get('min3')
max=request.form.get('max3')
kwargs['Adv_dis']='Uniform'
kwargs['advvalues']=[min3,max3]
if adv_dis == 'adv_dis2':
mue=request.form.get('mue3')
sig=request.form.get('sig3')
kwargs['Adv_dis']='Normal'
kwargs['advvalues']=[mue3,sig3]
if adv_dis == 'adv_dis3':
kwargs['Adv_dis']='Random'
pass
if adv_dis == 'adv_dis4':
cval=request.form.get('cval3')
kwargs['Adv_dis']='Constant'
kwargs['advvalues']=[cval3]
#print(kwargs)
#print(atrtable)
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `stech` (`gdp` VARCHAR(1),`pi` VARCHAR(1), `ms` VARCHAR(1),`adv` VARCHAR(1),`itr` VARCHAR(5),`sfrm` VARCHAR(10),`sto` VARCHAR(10))")
cur.execute("DELETE FROM `stech`")
con.commit()
cur.execute("INSERT INTO `stech` VALUES('"+str(gdp)+"','"+str(pi)+"','"+str(ms)+"','"+str(adv)+"','"+str(itr)+"','"+str(sfrm)+"','"+str(sto)+"')")
con.commit()
data = pd.DataFrame(Yeardata)
#print(data)
data.columns
xvar=pd.concat([data['GDP'],data['Pi_Exports'],data['Market_Share'],data['Advertisement_Expense']],axis=1)
yvar=pd.DataFrame(data['TotalDemand'])
regr = linear_model.LinearRegression()
regr.fit(xvar,yvar)
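# A multiple linear regression of TotalDemand on GDP, Pi_Exports, Market_Share and
# Advertisement_Expense is fitted on the yearly history; sim() below reuses this
# fitted model to turn simulated driver values into demand predictions.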
# predict=regr.predict(xvar)
#Error Measures
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
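# Monte Carlo simulation: for each of `iteration` runs, sample GDP/Pi/Adv/Ms for every
# simulated year from the distribution chosen on the form (Uniform, Normal, Constant, or
# a default random range), predict demand with the regression fitted above, and score the
# prediction against the years for which actual demand exists.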
def sim(iteration,data,startyear,endyear,atrtable,Gdp_dis=None,gdpvalues=None,Adv_dis=None,advvalues=None,Ms_dis=None,msvalues=None,Pi_dis=None,pivalues=None):
preddata=pd.DataFrame()
simdata=pd.DataFrame()
#Errordf=pd.DataFrame()
Errormsr=pd.DataFrame()
date=pd.date_range(start=pd.datetime(startyear, 1, 1), end=pd.datetime(endyear+1, 1, 1),freq='A')
date=pd.DataFrame(date.strftime("%Y"))
#Fetch the actual demand data for the years that fall inside the simulation range
m=len(date)
Arrayofdates=data['Date']
vari=[]
for var in Arrayofdates:
vari.append(var[:4])
Arrayofdates=pd.DataFrame(vari)
dates=[]
Fetchdata=[]
for i in range(0,m):
years=date.loc[i]
for j in range(0,len(Arrayofdates)):
if int(Arrayofdates.loc[j])==int(years):
da=data['TotalDemand'].loc[j]
Fetchdata.append(da) #actual demand value for this year
dates.extend(years) #years for which actual data exists
for i in range(0,iteration):
df=pd.DataFrame()
#for The Gdp
S='flag'
for row in atrtable:
if row=='Gdp':
S='Gdp'
if S=='Gdp':
for row in Gdp_dis:
if row=='Normal':
gdpdf=pd.DataFrame(np.random.normal(gdpvalues[0],gdpvalues[1],m))
elif row=='Uniform':
gdpdf=pd.DataFrame(np.random.uniform(gdpvalues[0],gdpvalues[1],m))
elif row=='Constant':
gdpdf=pd.DataFrame(np.random.choice([gdpvalues[0]],m))
else:
gdpdf=pd.DataFrame(np.random.uniform(-4,4,m))
else:
gdpdf=pd.DataFrame(np.random.uniform(0,0,m))
# for the pi dataframe
O='flag'
for row in atrtable:
if row=='Pi':
O='Pi'
if O=='Pi':
for row in Pi_dis:
if row=='Normal':
pidf=pd.DataFrame(np.random.normal(pivalues[0],pivalues[1],m))
elif row=='Uniform':
pidf=pd.DataFrame(np.random.uniform(pivalues[0],pivalues[1],m))
elif row=='Constant':
pidf=pd.DataFrame(np.random.choice([pivalues[0]],m))
else:
pidf=pd.DataFrame(np.random.random_integers(80,120,m))
else:
pidf=pd.DataFrame(np.random.uniform(0,0,m))
#for the Adv Dataframe
N='flag'
for row in atrtable:
if row=='Adv':
N='Adv'
if N=='Adv':
for row in Adv_dis:
if row=='Normal':
advdf=pd.DataFrame(np.random.normal(advvalues[0],advvalues[1],m))
elif row=='Uniform':
advdf=pd.DataFrame(np.random.uniform(advvalues[0],advvalues[1],m))
elif row=='Constant':
advdf=pd.DataFrame(np.random.choice([advvalues[0]],m))
else:
advdf=pd.DataFrame(np.random.random_integers(500000,1000000,m))
else:
advdf=pd.DataFrame(np.random.uniform(0,0,m))
#for the Ms dataframe
U='flag'
for row in atrtable:
if row=='Ms':
U='Ms'
if U=='Ms':
for row in Ms_dis:
if row=='Normal':
msdf=pd.DataFrame(np.random.normal(msvalues[0],msvalues[1],m))
elif row=='Uniform':
msdf=pd.DataFrame(np.random.uniform(msvalues[0],msvalues[1],m))
elif row=='Constant':
msdf=pd.DataFrame(np.random.choice([msvalues[0]],m))
else:
msdf=pd.DataFrame(np.random.uniform(0.1,0.5,m))
else:
msdf=pd.DataFrame(np.random.uniform(0,0,m))
#Concatenating All the dataframes for Simulation Data
df=pd.concat([gdpdf,pidf,msdf,advdf],axis=1)
simid=pd.DataFrame(np.random.choice([i+1],m))
dd=pd.concat([simid,gdpdf,pidf,advdf,msdf],axis=1)
dd.columns=['Year','Gdp','Pi','Adv','Ms']
simdata=pd.concat([simdata,dd],axis=0)
#Predicting the Data And store in pred data through onhand Regression Method
dfs=pd.DataFrame(regr.predict(df))
datatable=pd.concat([simid,date,dfs],axis=1)
datatable.columns=['simid','Year','Total_Demand(Tonnes)']
preddata=pd.concat([datatable,preddata],axis=0)
datas=list()
#Get the predicted demand for the years that have actual data
# print(datatable)
for row in dates:
# print(dates)
datas.extend(datatable.loc[datatable['Year'] ==row, 'Total_Demand(Tonnes)'])
kkk=pd.DataFrame(datas)
me=ME(Fetchdata,kkk)
mae=MAE(Fetchdata,kkk)
mape=MAPE(Fetchdata,kkk)
dfe=pd.DataFrame([me,mae,mape],index=['ME','MAE','MAPE']).T
Errormsr=pd.concat([Errormsr,dfe],axis=0).reset_index(drop=True)
return preddata,simdata,Errormsr
preddata,simdata,Errormsr=sim(itr,data,sfrm,sto,atrtable,**kwargs)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
preddata.to_sql(con=engine, name='predicteddata',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
simdata.to_sql(con=engine2, name='simulationdata',index=False, if_exists='replace')
con.commit()
engine3 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Errormsr.to_sql(con=engine3, name='simerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `simerror`")
sdata = cnr.fetchall()
simerror = pd.DataFrame(sdata)
con.close()
return render_template('ysimulation.html',sayy=1,simerror=simerror.to_html(index=False))
return render_template('ysimulation.html')
##PROCUREMENT PLANNING
@app.route('/procurementplanning')
def procurementplanning():
return render_template('vendorselection_criterianumberask.html')
@app.route("/criteriagenerate", methods=['GET','POST'])
def criteriagenerate():
if request.method == 'POST':
global cnmbr
global vnmbr
cnmbr = int(request.form['cnmbr'])
vnmbr = int(request.form['vnmbr'])
if cnmbr == 0 or vnmbr==0:
return render_template('criterianumberask.html',warning='Data Invalid')
cmainlist=[]
global cnames
cnames = []
for i in range (1,cnmbr+1):
lst=[]
name='cname'+str(i)
lst.append(i)
lst.append(name)
cmainlist.append(lst)
cnames.append(name)
vmainlist=[]
global vnames
vnames = []
for i in range (1,vnmbr+1):
lst=[]
name='vname'+str(i)
lst.append(i)
lst.append(name)
vmainlist.append(lst)
vnames.append(name)
return render_template('vendorselection_criteriagenerate.html',cmainlist=cmainlist,vmainlist=vmainlist)
return render_template('vendorselection_criterianumberask.html')
@app.route("/criteriagenerated", methods=['GET','POST'])
def criteriagenerated():
if request.method == 'POST':
global criterianames
criterianames=[]
for name in cnames:
criterianame = request.form[name]
criterianames.append(criterianame)
global vendornames
vendornames=[]
for name in vnames:
vendorname = request.form[name]
vendornames.append(vendorname)
mcrlst=[]
cn=len(criterianames)
k=1
global maincriteriaoption
maincriteriaoption=[]
global maincritriacri
maincritriacri=[]
for i in range(cn-1):
for j in range (i+1,cn):
cri='criteriaorder'+str(k)
opt='coption'+str(k)
crlst=[k,cri,criterianames[i],criterianames[j],opt]
mcrlst.append(crlst)
k=k+1
maincriteriaoption.append(opt)
maincritriacri.append(cri)
mvrlst=[]
vn=len(vendornames)
k=1
global mainvendoroption
mainvendoroption=[]
global mainvendorcri
mainvendorcri=[]
for z in criterianames:
mvrlst1=[]
vcri=[]
vopt=[]
for i in range(vn-1):
for j in range (i+1,vn):
cri='vendororder'+z+str(k)
opt='voption'+z+str(k)
vrlst=[k,cri,vendornames[i],vendornames[j],opt]
mvrlst1.append(vrlst)
k=k+1
vcri.append(cri)
vopt.append(opt)
mvrlst.append(mvrlst1)
mainvendorcri.append(vcri)
mainvendoroption.append(vopt)
return render_template('vendorselection_maincriteria.html',mcrlst=mcrlst,mvrlst=mvrlst)
return render_template('vendorselection_criteriagenerated.html')
def tablecreator(imp,val,crit):
n=len(imp)
for i in range(n):
if imp[i]==1:
val[i]=float(1/val[i])
fdata=pd.DataFrame(columns=[crit],index=[crit])
i=0
k=0
for index in fdata.index:
j=0
for columns in fdata.columns:
if i==j:
fdata[index][columns]=1
if i<j:
fdata[index][columns]=round((float(val[k])),2)
fdata[columns][index]=round((1/val[k]),2)
k=k+1
j=j+1
i=i+1
return fdata
@app.route("/criteriaread", methods=['GET','POST'])
def criteriaread():
if request.method == 'POST':
importances = []
values = []
for name1 in maincritriacri:
imp = int(request.form[name1])
importances.append(imp)
for name2 in maincriteriaoption:
val = int(request.form[name2])
values.append(val)
#global maincriteriadata
maincriteriadata=tablecreator(importances,values,criterianames)
mainimportances=[]
for crioption in mainvendorcri:
importance=[]
for option1 in crioption:
impc = int(request.form[option1])
importance.append(impc)
mainimportances.append(importance)
mainvalues=[]
for vendoroption in mainvendoroption:
vvalues=[]
for option2 in vendoroption:
valuev = int(request.form[option2])
vvalues.append(valuev)
mainvalues.append(vvalues)
maindf=[]
for z in range(len(criterianames)):
df=tablecreator(mainimportances[z],mainvalues[z],vendornames)
maindf.append(df)
dictmain={'crit':maincriteriadata}
names=criterianames
dfs=maindf
dictionary=dict((n,d) for (n,d) in zip(names,dfs))
def ahpmain(dictmain):
global wt_Crit
wt_Crit=[]
key=[]
key=list(dictmain.keys())
for i in key:
Crit=np.dot(dictmain[i],dictmain[i])
row_sum=[]
for j in range(len(Crit)):
row_sum.append(sum(Crit[j]))
wt_Crit.append([s/sum(row_sum) for s in row_sum])
Crit=[]
return wt_Crit
def ahp(dictmain,dictionary):
global output
main= ahpmain(dictmain)
submain= ahpmain(dictionary)
dd=pd.DataFrame(submain).T
df=pd.DataFrame(main).T
output=np.dot(dd,df)
return output,dd
yaxis,dd=ahp(dictmain,dictionary)
yax=pd.DataFrame(yaxis,index=vendornames,columns=['Score']).sort_values('Score',ascending=False).T
ynames=yax.columns
yval=yax.T.values
dd.index=vendornames
dd.columns=names
dd=dd.T
opq23=[]
for column in dd.columns:
opq21=[]
opq22=[]
opq21.append(column)
for val in dd[column]:
opq22.append(val)
opq21.append(opq22)
opq23.append(opq21)
return render_template('vendorselection_ahp_final_output.html',ynames=ynames,yval=yval,dd=opq23,names=names)
return render_template('vendorselection_criteriagenerated.html')
#DETERMINISTIC STARTS
@app.route("/spt")
def spt():
return render_template('SinglePeriod.html')
@app.route("/ppbreak")
def ppbreak():
return render_template('pbreak.html')
@app.route('/pbrk', methods=['GET','POST'])
def pbrk():
return render_template('pbrk.html')
@app.route('/eoq', methods=['GET','POST'])
def eoq():
##Default EOQ inputs (overridden by the form values below)
AnnulaUnitsDemand=100##annual demand for the product (units per year)
FixedCost=500 ##fixed ordering (setup) cost per order
AnnHoldingcost=0.25 ##annual holding cost rate (fraction of unit cost)
UnitCost=445 ##purchase cost per unit
LeadTime=10 ##time b/w initiation and completion of a production process.
SafetyStock=100##extra stock held against demand variability
if request.method == 'POST':
AnnulaUnitsDemand= request.form['AnnulaUnitsDemand']
FixedCost=request.form['FixedCost']
AnnHoldingcost=request.form['AnnHoldingcost']
UnitCost=request.form['UnitCost']
LeadTime=request.form['LeadTime']
SafetyStock=request.form['SafetyStock']
AnnulaUnitsDemand=float(AnnulaUnitsDemand)
FixedCost=float(FixedCost)
AnnHoldingcost=float(AnnHoldingcost)
UnitCost=float(UnitCost)
LeadTime=float(LeadTime)
SafetyStock=float(SafetyStock)
sgap=1
pgap=1
HoldingCost=AnnHoldingcost*UnitCost
EOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost*pgap))*sgap),2)
REOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost*pgap))*sgap),0)
totOrderCost=round((FixedCost*AnnulaUnitsDemand/EOQ),2)
totHoldCost=round(((HoldingCost*EOQ*pgap)/2),2)
TotalCost=round((totOrderCost+totHoldCost),2)
NumOrders=round((AnnulaUnitsDemand/EOQ),2)
OrderTime=round((365/NumOrders),2)
ReorderPoint=round((((AnnulaUnitsDemand/365)*LeadTime)+SafetyStock),0)
count=round((EOQ*.75),0)
qtylist1=[]
hclist=[]
sclist=[]
mtlist=[]
tclist=[]
while (count < EOQ):
qtylist1.append(count)
hclist.append(round((count/2*HoldingCost),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
tclist.append(round((count/2*HoldingCost+AnnulaUnitsDemand/count*FixedCost),2))
count +=2
qtylist1.append(EOQ)
hclist.append(totHoldCost)
sclist.append(totOrderCost)
tclist.append(totHoldCost+totOrderCost)
while (count < (EOQ*2)):
qtylist1.append(count)
hclist.append(round((count/2*HoldingCost),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
tclist.append(round((count/2*HoldingCost+AnnulaUnitsDemand/count*FixedCost),2))
count +=2
val=0
for i in range(len(tclist)):
if(EOQ==qtylist1[i]):
val=i
# sstock=int(math.sqrt((LeadTime^2)+(int(ReorderPoint)^2)))
return render_template('eoq.html',NumOrders=NumOrders,OrderTime=OrderTime,
ReorderPoint=ReorderPoint,HoldCost=totHoldCost,TotalCost=TotalCost,
EOQ=EOQ,REOQ=REOQ,
sclist=sclist,hclist=hclist,tclist=tclist,val=val,qtylist1=qtylist1,
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,LeadTime=LeadTime,
SafetyStock=SafetyStock)
########################EEEEppppppppppQQQQQQ############
########################EEEEppppppppppQQQQQQ############
@app.route('/eproduction', methods=['GET','POST'])
def eproduction():
AnnulaUnitsDemand=100
Prodrate=125
FixedCost=500
AnnHoldingcost=0.1
UnitCost=25000
LeadTime=10
SafetyStock=100
if request.method == 'POST':
AnnulaUnitsDemand= request.form['AnnulaUnitsDemand']
Prodrate=request.form['Prodrate']
FixedCost=request.form['FixedCost']
AnnHoldingcost=request.form['AnnHoldingcost']
UnitCost=request.form['UnitCost']
LeadTime=request.form['LeadTime']
SafetyStock=request.form['SafetyStock']
AnnulaUnitsDemand=int(AnnulaUnitsDemand)
Prodrate=int(Prodrate)
FixedCost=int(FixedCost)
AnnHoldingcost=float(AnnHoldingcost)
UnitCost=int(UnitCost)
LeadTime=int(LeadTime)
SafetyStock=int(SafetyStock)
if(Prodrate<=AnnulaUnitsDemand):
return render_template('eproduction.html',warning='Production rate should not be less than Annual Demand',
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,Prodrate=Prodrate,
LeadTime=LeadTime,SafetyStock=SafetyStock
)
pgap=round((1-(AnnulaUnitsDemand/Prodrate)),2)
HoldingCost=float(AnnHoldingcost*UnitCost)
EOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost*pgap))),2)
REOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost*pgap))),0)
totOrderCost=round((FixedCost*AnnulaUnitsDemand/EOQ),2)
totHoldCost=round(((HoldingCost*EOQ*pgap)/2),2)
TotalCost=round((totOrderCost+totHoldCost),2)
NumOrders=round((AnnulaUnitsDemand/EOQ),2)
OrderTime=round((365/NumOrders),2)
ReorderPoint=round((((AnnulaUnitsDemand/365)*LeadTime)+SafetyStock),0)
count=EOQ*.75
qtylist1=[]
hclist=[]
sclist=[]
mtlist=[]
tclist=[]
while (count < EOQ):
qtylist1.append(int(count))
hclist.append(round((count/2*HoldingCost*pgap),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
tclist.append(round(((count/2*HoldingCost*pgap+AnnulaUnitsDemand/count*FixedCost)),2))
count +=2
qtylist1.append(EOQ)
hclist.append(totHoldCost)
sclist.append(totOrderCost)
tclist.append(totOrderCost+totHoldCost)
while (count < (EOQ*1.7)):
qtylist1.append(int(count))
hclist.append(round((count/2*HoldingCost*pgap),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
tclist.append(round(((count/2*HoldingCost*pgap+AnnulaUnitsDemand/count*FixedCost)),2))
count +=2
val=0
for i in range(len(tclist)):
if(EOQ==qtylist1[i]):
val=i
return render_template('eproduction.html',NumOrders=NumOrders,OrderTime=OrderTime,
ReorderPoint=ReorderPoint,HoldCost=totHoldCost,TotalCost=TotalCost,
EOQ=EOQ,REOQ=REOQ,
sclist=sclist,hclist=hclist,tclist=tclist,val=val,qtylist1=qtylist1,
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,Prodrate=Prodrate,
LeadTime=LeadTime,SafetyStock=SafetyStock
)
######################EEEEppppppppppQQQQQQ############
######################EEEEppppppppppQQQQQQ############
@app.route('/eoq_backorders', methods=['GET','POST'])
def eoq_backorders():
AnnulaUnitsDemand=12000
shortcost=1.1
FixedCost=8000
AnnHoldingcost=0.3
UnitCost=1
LeadTime=10
SafetyStock=100
if request.method == 'POST':
AnnulaUnitsDemand= request.form['AnnulaUnitsDemand']
shortcost=request.form['shortcost']
FixedCost=request.form['FixedCost']
AnnHoldingcost=request.form['AnnHoldingcost']
UnitCost=request.form['UnitCost']
LeadTime=request.form['LeadTime']
SafetyStock=request.form['SafetyStock']
AnnulaUnitsDemand=int(AnnulaUnitsDemand)
shortcost=int(shortcost)
FixedCost=int(FixedCost)
AnnHoldingcost=float(AnnHoldingcost)
UnitCost=int(UnitCost)
LeadTime=int(LeadTime)
SafetyStock=int(SafetyStock)
HoldingCost=float(AnnHoldingcost*UnitCost)
sgap=(shortcost+HoldingCost)/shortcost
EOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/HoldingCost))*(math.sqrt(sgap)),2)
REOQ=round(math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost)*sgap),0)
totbackorder=EOQ*(HoldingCost/(shortcost+HoldingCost))
totOrderCost=round(((FixedCost*AnnulaUnitsDemand)/EOQ),2)
totHoldCost=round(((HoldingCost*((EOQ-totbackorder)**2))/(2*EOQ)),2)
totshortcost=round((shortcost*(totbackorder**2)/(2*EOQ)),2)
TotalCost=round((totOrderCost+totHoldCost+totshortcost),2)
NumOrders=round((AnnulaUnitsDemand/EOQ),2)
OrderTime=round((365/NumOrders),2)
ReorderPoint=round((((AnnulaUnitsDemand/365)*LeadTime)+SafetyStock),0)
count= EOQ*.75
qtylist1=[]
hclist=[]
sclist=[]
mtlist=[]
shlist=[]
tclist=[]
while (count < EOQ):
qtylist1.append(int((count)))
hclist.append(round(((HoldingCost*((count-totbackorder)**2))/(2*count)),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
shlist.append(round((shortcost*((totbackorder)**2)/(2*count)),2))
tclist.append(round(((((HoldingCost*((count-totbackorder)**2))/(2*count))+AnnulaUnitsDemand/count*FixedCost)+shortcost*((totbackorder)**2)/(2*count)),2))
count +=2
qtylist1.append(EOQ)
hclist.append(totHoldCost)
sclist.append(totOrderCost)
shlist.append(totshortcost)
tclist.append(totOrderCost+totshortcost+totHoldCost)
while (count < (EOQ*1.7)):
qtylist1.append(int((count)))
hclist.append(round(((HoldingCost*((count-totbackorder)**2))/(2*count)),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
shlist.append(round((shortcost*((totbackorder)**2)/(2*count)),2))
tclist.append(round(((((HoldingCost*((count-totbackorder)**2))/(2*count))+AnnulaUnitsDemand/count*FixedCost)+shortcost*((totbackorder)**2)/(2*count)),2))
count +=2
val=0
for i in range(len(tclist)):
if(EOQ==qtylist1[i]):
val=i
return render_template('eoq_backorders.html',NumOrders=NumOrders,OrderTime=OrderTime,
ReorderPoint=ReorderPoint,HoldCost=totHoldCost,TotalCost=TotalCost,
EOQ=EOQ,REOQ=REOQ,
shlist=shlist,sclist=sclist,hclist=hclist,tclist=tclist,val=val,qtylist1=qtylist1,
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,shortcost=shortcost,
LeadTime=LeadTime,SafetyStock=SafetyStock)
#################pbreak######################
@app.route("/pbreak_insert", methods=['GET','POST'])
def pbreak_insert():
if request.method == 'POST':
quantity = request.form.getlist("quantity[]")
price = request.form.getlist("price[]")
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_classification',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curr = conn.cursor()
curr.execute("CREATE TABLE IF NOT EXISTS `pbreaktable` (quantity int(8),price int(8))")
curr.execute("DELETE FROM `pbreaktable`")
conn.commit()
say=1
for i in range(len(quantity)):
quantity_clean = quantity[i]
price_clean = price[i]
if quantity_clean and price_clean:
curr.execute("INSERT INTO `pbreaktable`(`quantity`,`price`) VALUES('"+quantity_clean+"','"+price_clean+"')")
conn.commit()
else:
say=0
if say==0:
message="Some values were not inserted!"
else:
message="All values were inserted!"
return(message)
@app.route('/view', methods=['GET','POST'])
def view():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_classification',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curr = conn.cursor()
curr.execute("SELECT * FROM `pbreaktable`")
res = curr.fetchall()
ress=pd.DataFrame(res)
return render_template('pbrk.html',username=username,ress =ress.to_html())
@app.route('/pbreakcalculate', methods=['GET','POST'])
def pbreakcalculate():
AnnulaUnitsDemand=10
FixedCost=1
AnnHoldingcost=0.1
UnitCost=445
LeadTime=10
SafetyStock=100
if request.method == 'POST':
if request.form['AnnulaUnitsDemand']:
AnnulaUnitsDemand= request.form['AnnulaUnitsDemand']
AnnulaUnitsDemand=float(AnnulaUnitsDemand)
if request.form['FixedCost']:
FixedCost=request.form['FixedCost']
FixedCost=float(FixedCost)
if request.form['AnnHoldingcost']:
AnnHoldingcost=request.form['AnnHoldingcost']
AnnHoldingcost=float(AnnHoldingcost)
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_classification',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curr = conn.cursor()
curr.execute("SELECT * FROM `pbreaktable`")
res = curr.fetchall()
ress=pd.DataFrame(res)
conn.close()
datatable=pd.DataFrame(columns=['Quantity','Price','EOQ','TotalCost'])
mainlist=[]
Qu=ress['quantity']
Qm=0
for index, i in ress.iterrows():
tcl=[]
quantity = i['quantity']
price = i['price']
HoldingCost1=AnnHoldingcost*price
eoq1=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost1))),2)
REOQ=round(eoq1,0)
totOrderCost1=round((FixedCost*AnnulaUnitsDemand/eoq1),2)
totHoldCost1=round(((HoldingCost1*eoq1)/2),2)
totalcost1=float(round((totOrderCost1+totHoldCost1),2))
lst=[quantity,price,eoq1,totalcost1]
a=pd.DataFrame(lst).T
a.columns=['Quantity','Price','EOQ','TotalCost']
datatable=pd.concat([datatable,a],ignore_index=True)
name='TotalCost (Price='+str(a['Price'][0])+')'
tcl.append(name)
Qmin=1
Qmax=quantity
qtylist2=[]
tclist1=[]
while (Qmin < Qmax):
qtylist2.append(Qmin)
tclist1.append(round((Qmin/2*totHoldCost1+AnnulaUnitsDemand/Qmin*FixedCost),2))
Qmin +=2
Qmin=Qmax+1
qtylist2.append(eoq1)
tclist1.append(totalcost1)
tcl.append(tclist1)
mainlist.append(tcl)
Eu=datatable['EOQ']
Qu=datatable['Quantity']
Tu=datatable['TotalCost']
minlst=[]
for i in range(len(Eu)):
if i ==0:
if Eu[i]<=Qu[i]:
minlst.append(i)
else:
if Eu[i]<=Qu[i] and Eu[i]>Qu[i-1]:
minlst.append(i)
if len(minlst)==0:
minnval='Solution not feasible'
else:
minval=Tu[minlst[0]]
minnval=Eu[minlst[0]]
for j in minlst:
if Tu[j]<minval:
minval=Tu[j]
minnval=Eu[j]
val1=0
for i in range(len(tclist1)):
if (round(minnval))==qtylist2[i]:
val1=i
minival=round(minval)
minnival=round(minnval)
NumOrders=round((AnnulaUnitsDemand/minnval),2)
OrderTime=round((365/NumOrders),2)
ReorderPoint=round((((AnnulaUnitsDemand/365)*LeadTime)+SafetyStock),0)
return render_template('pbreak.html',
NumOrders=NumOrders,OrderTime=OrderTime,REOQ=REOQ,ReorderPoint=ReorderPoint,
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,LeadTime=LeadTime,
SafetyStock=SafetyStock,minnval=minnval,minval=minval,minival=minival,minnival=minnival,
datatable=datatable.to_html(index=False),mainlist=mainlist,
val1=val1,tclist1=tclist1,qtylist2=qtylist2)
#################Demand problalstic######################
@app.route('/demand', methods=['GET', 'POST'])
def demand():
cost=10
price=12
salvage=2
if request.method == 'POST':
cost=request.form['cost']
price=request.form['price']
salvage=request.form['salvage']
cost=int(cost)
price=int(price)
salvage=int(salvage)
data=pd.read_csv(localpath+"\\Demand.csv")
data = pd.DataFrame(data)
cdf=[]
sum=0
for row in data['Prob']:
sum=sum+row
cdf.append(sum)
cumm_freq=(pd.DataFrame(cdf)).values##y-axis
overcost=cost-salvage
undercost=price-cost
CSl=undercost/(undercost+overcost)
k=[row>CSl for row in cumm_freq]
count=1
for row in k:
if row==False:
count=count+1
demand=(data['Demand']).values
w=data['Demand'].loc[count]##line across x-axis
val=0
for i in range(len(cumm_freq)):
if(w==demand[i]):
val=i
return render_template('demand.html',cost=cost,price=price,salvage=salvage,
cumm_freq=cumm_freq,demand=demand,val=val)
@app.route('/normal', methods=['GET', 'POST'])
def normal():
cost=10
price=12
salvage=9
sd=2
if request.method == 'POST':
cost=request.form['cost']
price=request.form['price']
salvage=request.form['salvage']
cost=int(cost)
price=int(price)
salvage=int(salvage)
data=pd.read_csv(localpath+"\\Demand.csv")
data = pd.DataFrame(data)
overcost1=cost-salvage
undercost1=price-cost
CSl=undercost1/(undercost1+overcost1)
zz=st.norm.ppf(CSl)##x-line
z=float(format(zz, '.2f'))
# Expecteddemand=round(mea+(z*sd))
mean = 0; sd = 1; variance = np.square(sd)
x = np.arange(-4,4,.01)##x-axis
f =(np.exp(-np.square(x-mean)/(2*variance))/(np.sqrt(2*np.pi*variance)))##y-axis (standard normal pdf)
val=0
for i in range(len(f)):
if(z==round((x[i]),2)):
val=i
return render_template('normal.html',x=x,f=f,val=val,cost=cost,price=price,salvage=salvage)
@app.route('/utype', methods=['GET','POST'])
def utype():
cost=10
price=12
salvage=2
mini=1
maxi=10
if request.method == 'POST':
cost=request.form['cost']
price=request.form['price']
salvage=request.form['salvage']
mini=request.form['mini']
maxi=request.form['maxi']
cost=int(cost)
price=int(price)
salvage=int(salvage)
mini=int(mini)
maxi=int(maxi)
data=pd.read_csv(localpath+"\\Demand.csv")
data = pd.DataFrame(data)
overcost=cost-salvage
undercost=price-cost
CSl=undercost/(undercost+overcost)
expdemand1=round(mini+((maxi-mini)*CSl))
# a=[mini,0]
# b=[mini,100]
# c=[maxi,0]
# d=[maxi,100]
# width = c[0] - b[0]
# height = d[1] - a[1]
lims = np.arange(0,maxi,1)
val=0
for i in range(len(lims)):
if(expdemand1==lims[i]):
val=i
return render_template('utype.html',x=lims,f=lims,val=val,cost=cost,price=price,salvage=salvage,mini=mini,maxi=maxi)
@app.route('/outputx', methods=['GET', 'POST'])
def outputx():
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_classification',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("SELECT * FROM `abc`")
all_data = cur.fetchall()
all_data = pd.DataFrame(all_data)
A_ccat=.8
B_ccat=.95
A_ucat=.1
B_ucat=.25
tot_cost=all_data['Cost'].sum()
tot_usage=all_data['Annual Usage'].sum()
all_data['perc_cost']=all_data['Cost']/tot_cost
all_data['perc_usage']=all_data['Annual Usage']/tot_usage
all_data.sort_values(by=['perc_cost'], inplace=True, ascending=False)
sort_data=all_data.reset_index()
sort_data['cum_cperc']=np.nan
sort_data['cum_uperc']=np.nan
sort_data['Class']=''
# Assign cumulative percentages and the ABC class row by row (scalar writes via .at)
for i in range(len(sort_data)):
if(i==0):
sort_data.at[i,'cum_cperc']=sort_data['perc_cost'][i]
sort_data.at[i,'cum_uperc']=sort_data['perc_usage'][i]
# cperc_data.append(all_data['perc_cost'][i])
sort_data.at[i,'Class']='A'
else:
sort_data.at[i,'cum_cperc']=sort_data['perc_cost'][i]+sort_data['cum_cperc'][i-1]
sort_data.at[i,'cum_uperc']=sort_data['perc_usage'][i]+sort_data['cum_uperc'][i-1]
if(sort_data['cum_cperc'][i]<=A_ccat and sort_data['cum_uperc'][i]<=A_ucat):
sort_data.at[i,'Class']='A'
elif(sort_data['cum_cperc'][i]<=B_ccat and sort_data['cum_uperc'][i]<=B_ucat):
sort_data.at[i,'Class']='B'
else:
sort_data.at[i,'Class']='C'
x7=sort_data[['cum_cperc']]
x1=x7*100
x3=np.round(x1)
x2=np.array([])
x5 = np.append(x2,x3)
y7= sort_data[['cum_uperc']]
y1=y7*100
y3=np.round(y1)
y2=np.array([])
y5 = np.append(y2,y3)
###############% of Total cost//
a= sort_data[(sort_data['Class']=='A')][['perc_cost']]
j=a.sum()
k=j*100
pd.DataFrame(k)
kf=k[0]
b= sort_data[(sort_data['Class']=='B')][['perc_cost']]
n=b.sum()
m=n*100
pd.DataFrame(m)
mf=m[0]
c= sort_data[(sort_data['Class']=='C')][['perc_cost']]
o=c.sum()
p=o*100
pd.DataFrame(p)
pf=p[0]
tes=k,m,p
t2 = np.array([])
te2 = np.append(t2,tes)
###################Items // Annual Usage
# z=sort_data[['Product number']]
# z1=z.sum()
f= sort_data[(sort_data['Class']=='A')][['Product number']]
v=f.sum()
pd.DataFrame(v)
vif=v[0]
f1= sort_data[(sort_data['Class']=='B')][['Product number']]
u=f1.sum()
pd.DataFrame(u)
uif=u[0]
f2= sort_data[(sort_data['Class']=='C')][['Product number']]
vf=f2.sum()
pd.DataFrame(vf)
kif=vf[0]
#################% of Total units // Annual Usage
t= sort_data[(sort_data['Class']=='A')][['perc_usage']]
i=t.sum()
p1=i*100
pd.DataFrame(p1)
nf=p1[0]
l= sort_data[(sort_data['Class']=='B')][['perc_usage']]
t=l.sum()
q1=t*100
pd.DataFrame(q1)
qf=q1[0]
u= sort_data[(sort_data['Class']=='C')][['perc_usage']]
w=u.sum()
s1=w*100
pd.DataFrame(s1)
sf=s1[0]
test=p1,q1,s1
tt2 = np.array([])
tte2 = np.append(tt2,test)
#############values//Cost*Annual Usage
sort_data['Value'] = sort_data['Cost'] * sort_data['Annual Usage']
fz= sort_data[(sort_data['Class']=='A')][['Value']]
vz=fz.sum()
pd.DataFrame(vz)
vzz=vz[0]
fz1= sort_data[(sort_data['Class']=='B')][['Value']]
uz=fz1.sum()
pd.DataFrame(uz)
uzf=uz[0]
fz2= sort_data[(sort_data['Class']=='C')][['Value']]
vzf=fz2.sum()
pd.DataFrame(vzf)
kzf=vzf[0]
h=[{'Scenario':'A','Values':vzz,'product number':vif,'perc_usage':nf,'perc_cost ':kf},
{'Scenario':'B','Values':uzf,'product number':uif,'perc_usage':qf,'perc_cost ':mf},
{'Scenario':'C','Values':kzf,'product number':kif,'perc_usage':sf,'perc_cost ':pf}]
df = pd.DataFrame(h)
lo=sort_data[['Product Description','Product number','Cost','Annual Usage','Class']]
cur = conn.cursor()
cur.execute("SELECT * FROM `abc1`")
all_data4 = cur.fetchall()
all_data4 = pd.DataFrame(all_data4)
lolz=all_data4[['Product number','Product Description','Cost','Annual Usage','Average Stay','Average Consumption','Criticality']]
######################FFFFFFFFSSSSSSSSSNNNNNNNNNNNN#########################
######################FFFFFFFFSSSSSSSSSNNNNNNNNNNNN#########################
######################FFFFFFFFSSSSSSSSSNNNNNNNNNNNN#########################
curr = conn.cursor()
curr.execute("SELECT * FROM `fsn`")
all_data1 = curr.fetchall()
all_data1 = pd.DataFrame(all_data1)
F_cat=.2
S_cat=.5
tot_stay=all_data1['Average Stay'].sum()
tot_consupt=all_data1['Average Consumption'].sum()
all_data1['perc_stay']=all_data1['Average Stay']/tot_stay
all_data1['perc_cons']=all_data1['Average Consumption']/tot_consupt
all_data1.sort_values(by=['perc_stay'], inplace=True, ascending=True)
sort_data1=all_data1.reset_index()
sort_data1['cum_stay']=np.nan
sort_data1['cum_cons']=np.nan
sort_data1['Class']=''
# Assign cumulative percentages and the FSN class row by row (scalar writes via .at)
for i in range(len(sort_data1)):
if(i==0):
sort_data1.at[i,'cum_stay']=sort_data1['perc_stay'][i]
sort_data1.at[i,'cum_cons']=sort_data1['perc_cons'][i]
sort_data1.at[i,'Class']='F'
else:
sort_data1.at[i,'cum_stay']=sort_data1['perc_stay'][i]+sort_data1['cum_stay'][i-1]
sort_data1.at[i,'cum_cons']=sort_data1['perc_cons'][i]+sort_data1['cum_cons'][i-1]
if(sort_data1['cum_stay'][i]<=F_cat):
sort_data1.at[i,'Class']='F'
elif(sort_data1['cum_stay'][i]<=S_cat):
sort_data1.at[i,'Class']='S'
else:
sort_data1.at[i,'Class']='N'
x71=sort_data1[['cum_stay']]
x11=x71*100
x31=np.round(x11)
x21=np.array([])
x51 = np.append(x21,x31)
y71= sort_data1[['cum_cons']]
y11=y71*100
y31=np.round(y11)
y21=np.array([])
y51 = np.append(y21,y31)
###############% of Total cost//
a1= sort_data1[(sort_data1['Class']=='F')][['perc_stay']]
j1=a1.sum()
k1=j1*100
pd.DataFrame(k1)
kf1=k1[0]
b1= sort_data1[(sort_data1['Class']=='S')][['perc_stay']]
n1=b1.sum()
m1=n1*100
pd.DataFrame(m1)
mf1=m1[0]
c1= sort_data1[(sort_data1['Class']=='N')][['perc_stay']]
o1=c1.sum()
p1=o1*100
pd.DataFrame(p1)
pf1=p1[0]
tes1=k1,m1,p1
t21 = np.array([])
te21 = np.append(t21,tes1)
###################Items // Annual Usage
# z=sort_data[['Product number']]
# z1=z.sum()
f1= sort_data1[(sort_data1['Class']=='F')][['Product number']]
v1=f1.sum()
pd.DataFrame(v1)
vif1=v1[0]
f11= sort_data1[(sort_data1['Class']=='S')][['Product number']]
u1=f11.sum()
pd.DataFrame(u1)
uif1=u1[0]
f21= sort_data1[(sort_data1['Class']=='N')][['Product number']]
vf1=f21.sum()
pd.DataFrame(vf1)
kif1=vf1[0]
#################% of Total units // Annual Usage
t1= sort_data1[(sort_data1['Class']=='F')][['perc_cons']]
i1=t1.sum()
p11=i1*100
pd.DataFrame(p11)
nf1=p11[0]
l1= sort_data1[(sort_data1['Class']=='S')][['perc_cons']]
t1=l1.sum()
q11=t1*100
pd.DataFrame(q11)
qf1=q11[0]
u1= sort_data1[(sort_data1['Class']=='N')][['perc_cons']]
w1=u1.sum()
s11=w1*100
pd.DataFrame(s11)
sf1=s11[0]
test1=p11,q11,s11
tt21 = np.array([])
tte21 = np.append(tt21,test1)
#############values//Cost*Annual Usage
sort_data1['Value'] = sort_data1['Average Stay'] * sort_data1['Average Consumption']
fz1= sort_data1[(sort_data1['Class']=='F')][['Value']]
vz1=fz1.sum()
pd.DataFrame(vz1)
vzz1=vz1[0]
fz11= sort_data1[(sort_data1['Class']=='S')][['Value']]
uz1=fz11.sum()
pd.DataFrame(uz1)
uzf1=uz1[0]
fz21= sort_data1[(sort_data1['Class']=='N')][['Value']]
vzf1=fz21.sum()
pd.DataFrame(vzf1)
kzf1=vzf1[0]
h1=[{'Scenario':'F','Values':vzz1,'product number':vif1,'perc_cons':nf1,'perc_stay ':kf1},
{'Scenario':'S','Values':uzf1,'product number':uif1,'perc_cons':qf1,'perc_stay ':mf1},
{'Scenario':'N','Values':kzf1,'product number':kif1,'perc_cons':sf1,'perc_stay ':pf1}]
df1 = pd.DataFrame(h1)
lo1=sort_data1[['Product Description','Product number','perc_stay','perc_cons','Class']]
##############VVVVVVVVVEEEEEEEEEEEEDDDDDDDDD#########
##############VVVVVVVVVEEEEEEEEEEEEDDDDDDDDD#########
cur1 = conn.cursor()
cur1.execute("SELECT * FROM `ved`")
all_data2 = cur1.fetchall()
all_data2 = pd.DataFrame(all_data2)
all_data2['values']=all_data2['Class'] + all_data2["Criticality"]
AV= all_data2[(all_data2['values']=='AV')]
AV=AV.index.max()
AE= all_data2[(all_data2['values']=='AE')]
AE= AE.index.max()
AE=np.nan_to_num(AE)
AD= all_data2[(all_data2['values']=='AD')]
AD=AD.index.max()
AD=np.nan_to_num(AD)
BV=all_data2[(all_data2['values']=='BV')]
BV=BV.index.max()
BE=all_data2[(all_data2['values']=='BE')]
BE=BE.index.max()
BD=all_data2[(all_data2['values']=='BD')]
BD=BD.index.max()
BD=np.nan_to_num(BD)
CV=all_data2[(all_data2['values']=='CV')]
CV=CV.index.max()
CV=np.nan_to_num(CV)
CE=all_data2[(all_data2['values']=='CE')]
CE=CE.index.max()
CD=all_data2[(all_data2['values']=='CD')]
CD=CD.index.max()
###############################################
xx71=all_data2[['cum_cperc']]
xx71=xx71.astype(float)
xx11=xx71*100
xx31=xx11.round()
xx21=np.array([])
xx51 = np.append(xx21,xx31)
yy71= all_data2[['cum_uperc']]
yy71=yy71.astype(float)
yy11=yy71*100
yy31=yy11.round(0)
yy21=np.array([])
yy51 = np.append(yy21,yy31)
###############% of Total cost//
aa= all_data2[(all_data2['Criticality']=='V')][['perc_cost']]
jj=aa.sum()
kk=jj*100
#k=pd.DataFrame(k)
kkf=kk[0]
bb= all_data2[(all_data2['Criticality']=='E')][['perc_cost']]
nn=bb.sum()
mm=nn*100
# m=pd.DataFrame(m)
mmf=mm[0]
cc= all_data2[(all_data2['Criticality']=='D')][['perc_cost']]
oo=cc.sum()
pp=oo*100
# p=pd.DataFrame(p)
ppf=pp[0]
ttes=[kk,mm,pp]
ttes=pd.concat(ttes)
th2 = np.array([])
the2 = np.append(th2,ttes)
###################Items // Annual Usage
# z=sort_data[['Product number']]
# z1=z.sum()
ff= all_data2[(all_data2['Criticality']=='V')][['Product number']]
vv=ff.sum()
pd.DataFrame(vv)
vvif=vv[0]
ff1= all_data2[(all_data2['Criticality']=='E')][['Product number']]
uu=ff1.sum()
pd.DataFrame(uu)
uuif=uu[0]
ff2= all_data2[(all_data2['Criticality']=='D')][['Product number']]
vvf=ff2.sum()
pd.DataFrame(vvf)
kkif=vvf[0]
#################% of Total units // Annual Usage
tt= all_data2[(all_data2['Criticality']=='V')][['perc_usage']]
ii=tt.sum()
pp1=ii*100
pd.DataFrame(pp1)
nnf=pp1[0]
ll= all_data2[(all_data2['Criticality']=='E')][['perc_usage']]
tq=ll.sum()
qq1=tq*100
pd.DataFrame(qq1)
qqf=qq1[0]
uw= all_data2[(all_data2['Criticality']=='D')][['perc_usage']]
wu=uw.sum()
sc1=wu*100
pd.DataFrame(sc1)
ssf=sc1[0]
testt=[pp1,qq1,sc1]
testt=pd.concat(testt)
ttt2 = np.array([])
ttte2 = np.append(ttt2,testt)
#############values//Cost*Annual Usage
all_data2['Value'] = all_data2['Cost'] * all_data2['Annual Usage']
fzz= all_data2[(all_data2['Criticality']=='V')][['Value']]
vzz=fzz.sum()
pd.DataFrame(vzz)
vzzz=vzz[0]
fzz1= all_data2[(all_data2['Criticality']=='E')][['Value']]
uzz=fzz1.sum()
pd.DataFrame(uzz)
uzzf=uzz[0]
fzz2= all_data2[(all_data2['Criticality']=='D')][['Value']]
vzzf=fzz2.sum()
pd.DataFrame(vzzf)
kzzf=vzzf[0]
hh=[{'Scenario':'V','Values':vzzz,'product number':vvif,'perc_usage':nnf,'perc_cost ':kkf},
{'Scenario':'E','Values':uzzf,'product number':uuif,'perc_usage':qqf,'perc_cost ':mmf},
{'Scenario':'D','Values':kzzf,'product number':kkif,'perc_usage':ssf,'perc_cost ':ppf}]
dff = pd.DataFrame(hh)
return render_template('inventoryclassification.html',
x=y5,y=x5,
barcost=te2 ,barusage=tte21,
s=df.to_html(index=False),
sam=lo.to_html(index=False),
tale=lolz.to_html(index=False),
x1=x51,y1=y51,
bar1=te21 ,bar2=tte2,
s1=df1.to_html(index=False),
sam1=lo1.to_html(index=False),
xx1=AV,xx2=AE,xx3=AD,
yy1=BV,yy2=BE,yy3=BD,
zz1=CV,zz2=CE,zz3=CD,
bb1=the2 ,bb2=ttte2,
zone1=yy51,zone2=xx51,
sammy=dff.to_html(index=False))
@app.route('/vendormanagement')
def vendormanagement():
return render_template('vendormanagement.html')
@app.route('/vendormanagementimport',methods=['POST','GET'])
def vendormanagementimport():
global vendordata
global vendordataview
db = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
vendordata = pd.read_sql("SELECT * from vendor_management", con=db)
db.close()
vendordata['POdate']=pd.to_datetime(vendordata['POdate'])
vendordata['POdate_year']=vendordata['POdate'].dt.year
vendordataview=vendordata.head(50)
return render_template('vendormanagementview.html',vendordataview=vendordataview.to_html(index=False))
@app.route('/vendormanagementview',methods=['POST','GET'])
def vendormanagementview():
return render_template('vendormanagementview.html',vendordataview=vendordataview.to_html(index=False))
@app.route('/vndrmngmnt1',methods=['POST','GET'])
def vndrmngmnt1():
VENDORID=sorted(vendordata['Vendorid'].unique())
if request.method=='POST':
vendorin=request.form['name1']
def Vendor(VendorId):
datasetcomb34=vendordata[['Vendorid','Vendor_name','Vendor_address','Vendormin_order']][vendordata['Vendorid']== VendorId]
return datasetcomb34.iloc[0,:]
snglvw=Vendor(vendorin)
singleview=pd.DataFrame(snglvw).T
return render_template('vendormanagement1.html',say=1,vendorin=vendorin,VENDORID=VENDORID,singleview=singleview.to_html(index=False))
return render_template('vendormanagement1.html',VENDORID=VENDORID)
@app.route('/vndrmngmnt2',methods=['POST','GET'])
def vndrmngmnt2():
pouyear=sorted(vendordata['POdate_year'].unique())
if request.method == 'POST':
SelectedYear = int(request.form['name1'])
SelectedTop = int(request.form['name2'])
def top10vendorspend(year,top_value):
x=[]
y=[]
gg1=vendordata[(vendordata['POdate_year']==year)].groupby(['POdate_year','Vendorid'])['PO_Value'].sum()
x=gg1.nlargest(top_value).index.get_level_values(1)
y=gg1.nlargest(top_value).values
df=pd.DataFrame({'VendorID':x,'Total':y})
return df
vndrvspnd=top10vendorspend(SelectedYear,SelectedTop)
def top10vendoravgspend(top):
gg3=vendordata.groupby(['POdate_year','Vendorid'])['PO_Value'].mean()
xxx=gg3.nlargest(top).index.get_level_values(1)
yyy=round(gg3.nlargest(top),2).values
df=pd.DataFrame({'VendorID':xxx,'Mean':yyy})
return df
vndrvavgspnd=top10vendoravgspend(SelectedTop)
return render_template('vendormanagement2.html',say=1,SelectedYear=SelectedYear,pouyear=pouyear,vndrval=vndrvspnd.values,vndrvavg=vndrvavgspnd.values)
return render_template('vendormanagement2.html',pouyear=pouyear)
@app.route('/vndrmngmnt3',methods=['POST','GET'])
def vndrmngmnt3():
pouyear=sorted(vendordata['POdate_year'].unique())
if request.method == 'POST':
SelectedYear = int(request.form['name1'])
SelectedTop = int(request.form['name2'])
def top10POvendorvalue(year,top_value):
x=[]
y=[]
gg1=vendordata[(vendordata['POdate_year']==year)].groupby(['POdate_year','Vendorid'])['Inventoryreplenished'].sum()
x=gg1.nlargest(top_value).index.get_level_values(1)
y=gg1.nlargest(top_value).values
df=pd.DataFrame({'VendorId':x,'Total':y})
return df
vndrval=top10POvendorvalue(SelectedYear,SelectedTop)
def top10POvendoravg(top):
gg3=vendordata.groupby(['POdate_year','Vendorid'])['Inventoryreplenished'].mean()
xxx=gg3.nlargest(top).index.get_level_values(1)
yyy=round(gg3.nlargest(top),2).values
df=pd.DataFrame({'VendorID':xxx,'Mean':yyy})
return df
vndrvavg=top10POvendoravg(SelectedTop)
return render_template('vendormanagement3.html',say=1,SelectedYear=SelectedYear,pouyear=pouyear,vndrval=vndrval.values,vndrvavg=vndrvavg.values)
return render_template('vendormanagement3.html',pouyear=pouyear)
@app.route('/vndrmngmnt4',methods=['POST','GET'])
def vndrmngmnt4():
pouyear=sorted(vendordata['POdate_year'].unique())
if request.method == 'POST':
SelectedYear = int(request.form['name1'])
SelectedTop = int(request.form['name2'])
def top10vendorPOcnt(year,top):
x=[]
y=[]
gg1=vendordata[(vendordata['POdate_year']==year)].groupby(['POdate_year','Vendorid'])['POdate_year'].count()
x=gg1.nlargest(top).index.get_level_values(1)
y=gg1.nlargest(top).values
df=pd.DataFrame({'VendorID':x,'Total_count':y})
return df
vndrvavgpoacnt=top10vendorPOcnt(SelectedYear,SelectedTop)
def top10vendorPOavg(top):
g=vendordata.groupby('Vendorid')['POdate_year'].size()
xx=g.nlargest(top).index.get_level_values(0)
yy=g.nlargest(top).values
dfexp7=pd.DataFrame({'VendorID':xx,'Average_count':yy})
return dfexp7
vndrvavgpoavg=top10vendorPOavg(SelectedTop)
return render_template('vendormanagement4.html',say=1,SelectedYear=SelectedYear,pouyear=pouyear,vndrval=vndrvavgpoacnt.values,vndrvavg=vndrvavgpoavg.values)
return render_template('vendormanagement4.html',pouyear=pouyear)
@app.route('/vendorperformanceanalysis')
def vendorperformanceanalysis():
return render_template('vendorperformanceanalysis.html',say=0)
@app.route('/vendorperformanceanalysisdata',methods=['POST','GET'])
def vendorperformanceanalysisdata():
if request.method=='POST':
global wdata
global wtdata
file1 = request.files['file1'].read()
file2 = request.files['file2'].read()
if len(file1)==0 or len(file2)==0:
return render_template('vendorperformanceanalysis.html',say=0,warning='Data Invalid')
data1=pd.read_csv(io.StringIO(file1.decode('utf-8')))
wdata=pd.DataFrame(data1)
data2=pd.read_csv(io.StringIO(file2.decode('utf-8')))
wtdata=pd.DataFrame(data2)
return render_template('vendorperformanceanalysis.html',say=1,data1=data1.to_html(index=False),data2=data2.to_html(index=False))
@app.route('/vendorperformanceanalys',methods=['POST','GET'])
def vendorperformanceanalys():
wt=[]
for ds in wtdata['Weight']:
wt.append(round((float(ds)),2))
treatment=[]
for ds in wtdata['Positive Attribute']:
if ds=='Yes':
treatment.append('+')
else:
treatment.append('-')
def normalize(df,alpha,treatment):
y=df.iloc[:,1:len(list(df))]
for i, j in zip(list(y),treatment):
if j== '-':
y[i]=y[i].min()/y[i]
elif j== '+':
y[i]=y[i]/y[i].max()
for i, t in zip(list(y),wt):
y[i]=y[i]*t
df['Score'] = y.sum(axis=1)
df=df.sort_values('Score', ascending=False)
df['Rank']=df['Score'].rank(ascending=False)
df['Rank']=df['Rank'].astype(int)
return df[['Rank','Vendor']]
dff=normalize(wdata,wt,treatment)
return render_template('vendorperformanceanalysisview.html',say=1,data=dff.to_html(index=False))
@app.route('/purchaseorderallocation')
def purchaseorderallocation():
return render_template('purchaseorderallocation.html')
@app.route('/purchaseorderallocationimport',methods=['POST','GET'])
def purchaseorderallocationimport():
global ddemand1
global dsupply1
global maxy1
global miny1
global Vcost1
global Vrisk1
db = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
ddemand1 = pd.read_sql("SELECT * from opt_demand", con=db)
dsupply1 = pd.read_sql("SELECT * from opt_supply", con=db)
maxy1 = pd.read_sql("SELECT * from opt_maxcapacity", con=db)
miny1 = pd.read_sql("SELECT * from opt_mincapacity", con=db)
Vcost1 = pd.read_sql("SELECT * from opt_vcost", con=db)
Vrisk1 = pd.read_sql("SELECT * from opt_vrisk", con=db)
from functools import partial
from typing import Tuple
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.model_selection import ParameterGrid
from tqdm import tqdm
def gaussian_mean_sampler(reward_mean: float = 0, reward_std: float = 1, size: int = 1):
"""
Sample a mean from a Normal distribution with given mean and std.
"""
return np.random.normal(loc=reward_mean, scale=reward_std, size=size)
def gaussian_reward_sampler(mu: float, reward_std: float = 1, size: int = 1):
"""
Sample a reward from a Normal distribution with given mean and std.
"""
reward = np.random.normal(loc=mu, scale=reward_std, size=size)
if size == 1:
return reward[0]
return reward
class MultiArmedBandit:
"""
Object that defines the multi-armed bandit environment. The environment
has `num_arms` arms, each with a mean reward sampled from
`arm_mean_sampler`, and, each producing a reward, upon pulling by
sampling from `arm_reward_sampler`.
"""
def __init__(
self,
num_arms: int = 10,
arm_mean_sampler: callable = gaussian_mean_sampler,
arm_reward_sampler: callable = gaussian_reward_sampler,
) -> None:
self.arm_mean_sampler = arm_mean_sampler
self.arm_reward_sampler = arm_reward_sampler
self.num_arms = num_arms
self.mean_rewards = arm_mean_sampler(size=num_arms)
def draw_arm(self, idx: int) -> float:
"""Draw arm `idx` and return the observed reward.
Args:
idx (int): index of arm to draw
Returns:
float: observed reward sampled with `arm_reward_sampler`
"""
mu = self.mean_rewards[idx]
return self.arm_reward_sampler(mu)
def visualise(self) -> None:
dfs = []
for arm, mu in enumerate(self.mean_rewards):
dummies = self.arm_reward_sampler(mu, size=1000)
dfs.append(pd.DataFrame({"Arm": arm, "Reward Distribution": dummies}))
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split,TimeSeriesSplit
import matplotlib.pyplot as plt
import pandas as pd
import time
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import AdaBoostRegressor
import plotly.plotly as py
import plotly.graph_objs as go
import plotly
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
# %matplotlib inline
import ssl
import json
import ast
import os
import bitfinex
api_v2 = bitfinex.bitfinex_v2.api_v2()
result = api_v2.candles()
import datetime
import time
from pandas_datareader import data
import matplotlib.pyplot as plt
import pandas as pd
import yfinance as yf
data = yf.download("AR", start="2010-01-03", end="2020-1-11")
data = pd.DataFrame(data)
# data = data.drop(['Adj Close'],axis=1)
print(data.tail())
data = data.to_csv('data/stocks/stocks_portfolio/AR.csv')
print('RETRIEVING DAILY STOCK DATA FOR {}'.format(str('AR')))
data = yf.download("CHK", start="2010-01-03", end="2020-1-11")
data = pd.DataFrame(data)
# data = data.drop(['Adj Close'],axis=1)
print(data.tail())
data = data.to_csv('data/stocks/stocks_portfolio/CHK.csv')
print('RETRIEVING DAILY STOCK DATA FOR {}'.format(str('CHK')))
data = yf.download("PCG", start="2010-01-03", end="2020-1-11")
data = pd.DataFrame(data)
# data = data.drop(['Adj Close'],axis=1)
print(data.tail())
data = data.to_csv('data/stocks/stocks_portfolio/PCG.csv')
print('RETRIEVING DAILY STOCK DATA FOR {}'.format(str('PCG')))
data = yf.download("SPY", start="2010-01-03", end="2020-1-11")
data = pd.DataFrame(data)
# data = data.drop(['Adj Close'],axis=1)
print(data.tail())
data = data.to_csv('data/stocks/stocks_portfolio/SPY.csv')
print('RETRIEVING DAILY STOCK DATA FOR {}'.format(str('SPY')))
data = yf.download("AAPL", start="2010-01-03", end="2020-1-11")
data = pd.DataFrame(data)
# data = data.drop(['Adj Close'],axis=1)
print(data.tail())
data = data.to_csv('data/stocks/stocks_portfolio/AAPL.csv')
print('RETRIEVING DAILY STOCK DATA FOR {}'.format(str('AAPL')))
data = yf.download("EA", start="2010-01-03", end="2020-1-11")
data = pd.DataFrame(data)
# data = data.drop(['Adj Close'],axis=1)
print(data.tail())
data = data.to_csv('data/stocks/stocks_portfolio/EA.csv')
print('RETRIEVING DAILY STOCK DATA FOR {}'.format(str('EA')))
data = yf.download("FB", start="2010-01-03", end="2020-1-11")
data = pd.DataFrame(data)
# data = data.drop(['Adj Close'],axis=1)
print(data.tail())
data = data.to_csv('data/stocks/stocks_portfolio/FB.csv')
print('RETRIEVING DAILY STOCK DATA FOR {}'.format(str('FB')))
data = yf.download("ROKU", start="2010-01-03", end="2020-1-11")
data = pd.DataFrame(data)
#!/usr/bin/env python
# coding: utf-8
# # Creating machine learning models
# ## High level ML project managment
# The work of creating a machine learning model usually falls into the following steps:
#
# 
#
# * The business first needs to define the problem and the potential value that a solution will bring.
# * The second step is to transfer the business problem into a machine learning problem.
# * The third step is to run a lot of experiments: try out many ML algorithms, do feature engineering, debate with your colleagues and present the results.
# * The final step is to decide which model to use and start thinking about deployment.
#
# The deployment part has historically not been the responsibility of an ML practitioner, but this is changing rapidly.
#
# If any problem is too big to overcome in a given step, then the team should go back and rethink the previous step.
# ## Business problem
# 
# Imagine that we are working in a huge analytics company and our new task is to model the probability of Counter Terrorist (**CT** for short) team winning a Counter Strike: Global Offensive (**CSGO** for short) game.
#
# The rules of the game are simple: there are two teams, named terrorists and counter-terrorists, each consisting of 5 players. At the start of the round each player buys weapons, armor and other equipment and the objective is to win the match.
#
# To read more about the game visit the official website: https://blog.counter-strike.net/index.php/about/
#
# This esport is very popular and our analytics company is trying to break into the gaming market with a very accurate model which will be shown on TV, on gaming streams and other places.
# ## Rules of the game
# The ultimate victory of a CSGO match is when a team, either CT or T, earns **16 points**. A point is earned when a match is won.
#
# Match winning criteria:
#
# * A given team eliminates all 5 players of the opposite team.
# * If the terrorists have planted the bomb, then the winning criteria for a CT team is to defuse the bomb and for the T team to win the match the bomb needs to explode.
#
# The maximum number of seconds in a match is **175.00**.
#
# There are 5 CT and 5 T players on match start. Each of them has **100 hit points (HP)** and can buy up to **100 armor** and a helmet.
#
# Players earn in-game dollars during a match which can be spent on weapons, grenades, armor and other accessories.
# ## Machine learning problem
# After the business problem is defined and the rules of the game are clear, we now need to convert the business problem into a machine learning problem.
# If we define:
#
# $$ \mathbb{Y}_{i} \in \{0, 1\}, \forall i = 1, ..., n$$
#
# $$ \mathbb{X}_{i} \in \mathbb{R}^{p}, \forall i = 1, ..., n$$
#
# Where
#
# $i$ - observation i.
#
# $n$ - total number of observations.
#
# $p$ - number of features.
#
# Then we are trying to create a model for the probability of observing the $\mathbb{Y}=1$ event given $\mathbb{X}$:
#
# $$P(\mathbb{Y}=1|\mathbb{X}) \in (0, 1)$$
#
# $\mathbb{Y} = 1$ means that the CT team have won and the $\mathbb{Y} = 0$ means that CT team have lost.
#
# The function $f$ that links $\mathbb{X}$ to $\mathbb{Y}$ is the machine learning model which we are trying to build:
#
# $$ f: \mathbb{X} \rightarrow \mathbb{Y} $$
# Because we are trying to predict an observation falling into one of two classes (CT winning or losing) the machine learning model $f$ can be called a *binary classifier*.
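# As a self-contained toy illustration of what such a binary classifier looks like in code (synthetic data, not the CSGO dataset; the names and imports below exist only for this snippet), a fitted model exposes the estimated $P(\mathbb{Y}=1|\mathbb{X})$ through `predict_proba`:
# In[ ]:
import numpy as np
from sklearn.linear_model import LogisticRegression
# Synthetic data: 100 observations, 3 features, binary response
rng = np.random.default_rng(42)
X_toy = rng.normal(size=(100, 3))
y_toy = (X_toy[:, 0] + rng.normal(scale=0.5, size=100) > 0).astype(int)
# f: X -> Y; predict_proba returns P(Y=0|X) and P(Y=1|X) for each observation
clf_toy = LogisticRegression().fit(X_toy, y_toy)
print(clf_toy.predict_proba(X_toy[:5])[:, 1])  # probabilities in (0, 1)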
# # Python package imports
# The first thing that any developer or ML practitioner does is load up the packages that are installed on their machine.
# In[1]:
# Data reading
import pandas as pd
# Main modeling class
import xgboost as xgb
# Data splitting
from sklearn.model_selection import train_test_split
# Plotting library
import matplotlib.pyplot as plt
import seaborn as sns
# Array math
import numpy as np
# Modeling frameworks
from sklearn.linear_model import LogisticRegression
import xgboost as xgb
# Accuracy metrics
from sklearn.metrics import roc_auc_score, roc_curve
# Hp parameter search
from sklearn.model_selection import ParameterGrid
# Model saving
import pickle
# Operating system functionalities
import os
# JSON saving and loading
import json
# # Reading data
# Finding, cleaning and labelling data is usually a long and painful process. This is not the main emphasis of this book, so let's imagine that we have already spent months creating the beautiful dataset which we will read.
#
# The original dataset can be found here: https://www.kaggle.com/christianlillelund/csgo-round-winner-classification
# In[2]:
# Using pandas to read a csv file
d = pd.read_csv("data/data.csv")
# Printing the shape of data
print(f"Number of observations: {d.shape[0]}")
print(f"Number of features: {d.shape[1]}")
# In[3]:
# Getting the feature names
d.columns.values
# In[4]:
# Displaying a snippet of data
print(d.head())
# A short description about the data from the kaggle source:
#
# *The dataset consists of round snapshots from about 700 demos from high level tournament play in 2019 and 2020. Warmup rounds and restarts have been filtered, and for the remaining live rounds a round snapshot has been recorded every 20 seconds until the round is decided. Following its initial publication, It has been pre-processed and flattened to improve readability and make it easier for algorithms to process. The total number of snapshots is 122411. **Snapshots are i.i.d and should be treated as individual data points**, not as part of a match.*
# The feature that will be used for the creation of the $\mathbb{Y}$ variable is **round_winner**. If CT have won, then the value of $\mathbb{Y}$ will be 1 and 0 otherwise.
# In[5]:
# Creating the Y variable
d['Y'] = [1 if x == 'CT' else 0 for x in d['round_winner']]
# Inspecting the distribution of the classes
distribution = d.groupby('Y', as_index=False).size()
distribution['Y'] = distribution['Y'].astype(str)
distribution['share'] = distribution['size'] / distribution['size'].sum()
plt.bar(
distribution['Y'],
distribution['share'],
edgecolor='black'
)
plt.title("Share of binary responses in data")
plt.ylabel("Share in data")
plt.xlabel("Response value")
plt.show()
# The classes are almost perfectly balanced.
# ## Dropping inconsistencies
# In[6]:
d = d[(d['t_players_alive']<=5) & (d['ct_players_alive']<=5)].copy()
# # Feature engineering
# Feature engineering is the process of using domain knowledge to create additional features from the raw features in data. A lot of experimentation time is spent here and not all the features created end up improving the model. Nevertheless, if we create at least one new feature from the given list of features which improves the performance of our classifier, then we have added immense value to the original dataset without investing in new data collection.
#
# The AI expert <NAME> has proposed that the current ML industry should move from the model centric approach to the data centric approach {cite}`data_centric`:
#
# *"If 80 percent of our work is data preparation, then ensuring data quality is the important work of a machine learning team."*
#
# <NAME> urges practitioners to shift the focus from trying out new models on a fixed dataset to fixing a model and then engineering new features, labelling new data points and doing other data-related experiments.
#
# Regardless of which school of thought wins out, developing new features is paramount in either case.
#
# In[7]:
# Boolean for the planting of the bomb event
d['bomb_planted'] = [1 if x else 0 for x in d['bomb_planted']]
# The differences between the team scores
d['team_score_diff'] = d['ct_score'] - d['t_score']
# Putting the team_score_diff into buckets
cut_bins_score = [-15, -5, 0, 5, 15]
d['team_score_diff'] = pd.cut(d['team_score_diff'], bins=cut_bins_score)
# Calculating the share of remaining health of CT
d['ct_health_share'] = d['ct_health'] / (d['t_health'] + d['ct_health'])
# Calculating the armor share
d['ct_armor_per_player'] = d['ct_armor'] / d['ct_players_alive']
# Total money share owned by CT
d['ct_money_share'] = d['ct_money'] / (d['t_money'] + d['ct_money'])
# Difference between alive CT players and T players
d['ct_players_alive_diff'] = d['ct_players_alive'] - d['t_players_alive']
# Is there a defuse kit in CT team?
d['ct_defuse_kit_present'] = [1 if x > 0 else 0 for x in d['ct_defuse_kits']]
# # Exploratory Data Analysis
# ## Bomb planting event
# In[8]:
# Calculating the probability of winning when a bomb is planted
prob_w = d.groupby(['bomb_planted'])['Y'].agg(['sum', 'size'])
prob_w['prob_of_win_CT'] = prob_w['sum'] / prob_w['size']
# Adding a custom index
prob_w.index = ['bomb not planted', 'bomb planted']
# Plotting the results
plt.bar(
prob_w.index,
prob_w['prob_of_win_CT'],
edgecolor='black'
)
plt.title("Probability of CT winning")
plt.ylabel("Probability")
plt.show()
# In[9]:
print(prob_w)
# As we can see, if a bomb is planted, the probability of winning for the CT squad is more than two times lower than if the bomb is not planted: **0.22** vs **0.52**.
# In[ ]:
# ## Maps
# In[10]:
# Calculating the probability of CT winning on each map
prob_w = d.groupby(['map'])['Y'].agg(['sum', 'size'])
prob_w['prob_of_win_CT'] = prob_w['sum'] / prob_w['size']
# Plotting the results
plt.figure(figsize=(12, 7))
plt.bar(
prob_w.index,
prob_w['prob_of_win_CT'],
edgecolor='black'
)
plt.title("Probability of CT winning")
plt.ylabel("Probability")
plt.axhline(y=0.5, color='r', linestyle='--')
plt.show()
# In[11]:
print(prob_w)
# The map **de_cache** seems to be a clear outlier in the dataset: the CTs are winning on this map in more than 70% of the matches.
# ## Tilting
# The definition of tilting in esports is ***a state of mental or emotional confusion or frustration***. We can measure it by looking at how the current match score in favor of CT influences the probability of winning.
# In[12]:
# Calculating the probability of CT winning by score difference
prob_w = d.groupby(['team_score_diff'])['Y'].agg(['sum', 'size'])
prob_w['prob_of_win_CT'] = prob_w['sum'] / prob_w['size']
# Adjusting the index
prob_w.index = [str(x) for x in prob_w.index]
# Plotting the results
plt.figure(figsize=(10, 6))
plt.bar(
prob_w.index,
prob_w['prob_of_win_CT'],
edgecolor='black'
)
plt.title("Probability of CT winning")
plt.ylabel("Probability")
plt.xlabel("Difference between scores in favor of CT")
plt.axhline(y=0.5, color='r', linestyle='--')
plt.show()
# There is a relationship between the matches won by CT and the probability of winning the current match: the bigger the difference between the match score in favor of CT, the higher the chances of winning.
# ## Health, armor and money influence
# In[13]:
# Plotting the distributions of CT health share
plt.figure(figsize=(10, 6))
plt.hist(
d.loc[d['Y']==1, 'ct_health_share'].values,
alpha=0.5,
label='CT won match',
edgecolor='black',
bins=20
)
plt.hist(
d.loc[d['Y']==0, 'ct_health_share'].values,
alpha=0.5,
label='CT lost match',
edgecolor='black',
bins=20
)
plt.legend()
plt.title("Distribution of CT health share of total HP pool by match win event")
plt.ylabel("Number of matches")
plt.xlabel("Share of total HP pool")
plt.show()
# As our intuition suggested, the larger the share of the total HP pool held by CT, the higher the probability of winning.
# In[14]:
plt.figure(figsize=(10, 6))
sns.kdeplot(
d.loc[d['Y']==1, 'ct_armor_per_player'].values,
shade=True,
linewidth=2,
label = 'CT won match'
)
sns.kdeplot(
d.loc[d['Y']==0, 'ct_armor_per_player'].values,
shade=True,
linewidth=2,
label = 'CT lost match'
)
plt.legend()
plt.title("Distribution of CT armor per player by match win event")
plt.ylabel("Share of matches")
plt.xlabel("Armor per player")
plt.show()
# The density of matches won by CT is concentrated at higher values of armor per player.
# In[15]:
plt.figure(figsize=(10, 6))
plt.hist(
d.loc[d['Y']==1, 'ct_money_share'].values,
alpha=0.5,
label='CT won match',
edgecolor='black',
bins=20
)
plt.hist(
d.loc[d['Y']==0, 'ct_money_share'].values,
alpha=0.5,
label='CT lost match',
edgecolor='black',
bins=20
)
plt.legend()
plt.title("Distribution of all money owned by CT by match win event")
plt.ylabel("Number of matches")
plt.xlabel("Share of total money owned")
plt.show()
# As with health, owning a larger share of the total in-game economy increases the chances of winning a match.
# ## Impact of alive players
# In[16]:
# Calculating the probability of CT winning by the number of alive players on each side
prob_w = d.groupby(['ct_players_alive', 't_players_alive'], as_index=False)['Y'].agg(['sum', 'size'])
prob_w['prob_of_win_CT'] = prob_w['sum'] / prob_w['size']
# Dropping the obvious cases of CT=0 and T=0
prob_w = prob_w[[False if x[0]==0.0 or x[1]==0.0 else True for x in prob_w.index]]
# Creating a dataframe for a heatmap
heatmap_df = pd.DataFrame({
'ct_players_alive': prob_w.index.get_level_values(0),
't_players_alive': prob_w.index.get_level_values(1),
'p': prob_w['prob_of_win_CT']
})
heatmap_df = heatmap_df.pivot(index='ct_players_alive', columns='t_players_alive', values='p')
# Drawing the heatmap
plt.figure(figsize=(8, 8))
sns.heatmap(heatmap_df, linewidths=.5, cmap="YlGnBu")
plt.title("Heatmap of probability to win vs alive players")
plt.show()
# Even a one-player advantage in a CSGO match leads to a huge increase in the probability of winning. The probability to win is highest when many CT players and few T players are alive.
# ## Defusal kit necessity
# If a bomb is planted in the game, the only way to defuse it is with a defusal kit.
# In[17]:
# Calculating the probability of CT winning by defuse kit presence
prob_w = d.groupby(['ct_defuse_kit_present'])['Y'].agg(['sum', 'size'])
prob_w['prob_of_win_CT'] = prob_w['sum'] / prob_w['size']
# Adding a custom index
prob_w.index = ['Defuse kit not present', 'Defuse kit present']
# Plotting the results
plt.bar(
prob_w.index,
prob_w['prob_of_win_CT'],
edgecolor='black'
)
plt.title("Probability of CT winning")
plt.ylabel("Probability")
plt.show()
# In[18]:
prob_w
# Having a defusal kit in a team really proves to be beneficial!
# # Evaluating model performance
# In order to compare algorithms with one another or to measure the impact of new data and features, we need a performance metric (or more than one). One of the most popular metrics for evaluating binary classifiers is the **Area Under the Curve (AUC)** metric. To get a grasp of AUC we first need to work through some intermediate definitions.
# ## Confusion matrix
# In the field of machine learning and specifically the problem of statistical classification, a confusion matrix is a specific table layout that allows visualization of the performance of an algorithm. Each row of the matrix represents the instances in an actual class while each column represents the instances in a predicted class, or vice versa – both variants are found in various textbooks and articles.
#
# 
#
# The abbreviations stand for:
#
# **TP** - True Positives
#
# **FN** - False Negatives
#
# **FP** - False Positives
#
# **TN** - True Negatives
#
# The **actual values** refer to the actual ending of the matches. In our case, if CT have won this is termed as a *positive* and if CT have lost then this is termed as a *negative*. The predicted values refer to the outcome predicted by the machine learning algorithm. Thus:
#
# * If a match is actually won by CT and our algorithm predicted the same, then that observation is a True Positive.
#
# * If a match is actually won by CT but our algorithm predicted that CT lost, then that observation is a False Negative.
#
# * If a match is actually lost by CT but our algorithm predicted that CT won, then that observation is a False Positive.
#
# * If a match is actually lost by CT and our algorithm predicted that CT have lost, then that observation is a True Negative.
#
# A perfect classifier would have only TPs and TNs in the confusion matrix and no FNs and FPs. Most of the time, this is not the case.
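# As an illustrative sketch (not part of the original analysis), the four confusion matrix entries can be computed with scikit-learn's `confusion_matrix`; `y_true` and `y_pred` below are small hypothetical label vectors, not columns of our dataset.
from sklearn.metrics import confusion_matrix

y_true = [1, 0, 1, 1, 0, 0, 1, 0]  # actual outcomes (1 = CT won the match)
y_pred = [1, 0, 0, 1, 0, 1, 1, 0]  # outcomes predicted by some classifier
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
print(f"TP={tp}, FN={fn}, FP={fp}, TN={tn}")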
# ## Model threshold
# Most of the popular ML models do not just output 1 or 0 (meaning that CT have won or lost) given a set of features $\mathbb{X}$. Rather, they output a **probability**. Recall that a binary classifier is just a probability model of the form:
#
# $$ f(\mathbb{X}) = P(\mathbb{Y} = 1| \mathbb{X}) \in (0, 1)$$
#
# So the output of the algorithm can be 0.0148, 0.5897, 0.998 and so on. By default, a label of 1 (CT winning a match) is given to an observation when $f(\mathbb{X}) \geqslant 0.5$. In other words, the threshold **t** = 0.5. In general terms:
#
# $$ y_{predicted} = \begin{cases} 1, & f(\mathbb{X}) \geqslant t \\
# 0, & f(\mathbb{X}) < t \end{cases} t \in (0, 1)$$
#
# Although it is generally advisable to keep the default threshold of 0.5, in some cases a user can vary the threshold to achieve better results.
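# A minimal sketch of applying a custom threshold, assuming a fitted classifier `clf` with a `predict_proba` method and a feature matrix `X` (both hypothetical at this point in the notebook):
t = 0.7
probs = clf.predict_proba(X)[:, 1]      # estimated P(Y = 1 | X) for every observation
y_predicted = (probs >= t).astype(int)  # label 1 only when the probability reaches the threshold t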
# ## Receiver operating characteristic curve (ROC)
# A receiver operating characteristic curve, or **ROC** curve, is a graphical plot that illustrates the performance of a binary classifier as the threshold is varied. It is a 2D plot where the X axis is the **False Positive Rate (FPR)** and the Y axis is the **True Positive Rate (TPR)**. FPR and TPR are defined as follows:
#
# $$FPR = \dfrac{FP}{N}$$
#
# $$TPR = \dfrac{TP}{P}$$
#
# Here **FP** - number of false positives generated by the classifier, **TP** - number of true positives generated by the classifier and **N** and **P** are the total number of "negative" and "positive" class observations in the data respectively.
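# Continuing the illustrative confusion-matrix sketch above (hypothetical `tp`, `fn`, `fp`, `tn` counts), the two rates follow directly from their definitions, since P = TP + FN and N = FP + TN:
tpr = tp / (tp + fn)  # TPR = TP / P, the share of actual CT wins identified correctly
fpr = fp / (fp + tn)  # FPR = FP / N, the share of actual CT losses incorrectly labelled as wins
print(f"TPR={tpr:.2f}, FPR={fpr:.2f}")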
#
# An example ROC plot:
#
# 
#
# Notice that the axis values are in the interval **[0, 1]**. Although it may not look like it, the orange curve is made up of many points connected to form a line (hence the term "curve"). Every point was obtained using a different threshold **t**. We want a classifier whose ROC curve rises as close as possible to the top left corner; the closer the curve is to the bottom right corner, the worse the classifier.
#
# If the curve shoots up rapidly, that means that by adjusting the threshold a little, the true positive rate (the share of "positive" class observations identified correctly) becomes very high while the errors the model makes remain minimal (the FPR stays near zero). Adjusting the threshold further may increase the number of positive class observations identified, but at the cost of increasing the FPR.
#
# To put everything in an interactive way, please watch the video by the great StatQuest team about ROC curves: https://www.youtube.com/watch?v=4jRBRDbJemM
#
# Another great resource on this topic: https://developers.google.com/machine-learning/crash-course/classification/roc-and-auc
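# A minimal sketch of drawing a ROC curve with scikit-learn's `roc_curve`, assuming `y_true` (actual 0/1 outcomes) and `y_score` (predicted probabilities from some classifier) are available:
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve

fpr, tpr, thresholds = roc_curve(y_true, y_score)  # one (FPR, TPR) point per threshold
plt.figure(figsize=(6, 6))
plt.plot(fpr, tpr, label='classifier')
plt.plot([0, 1], [0, 1], linestyle='--', label='random guessing')
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.legend()
plt.show()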
# ## AUC statistic
# The area under the curve (AUC) statistic is the integral of a given ROC curve between the points (0,0) and (1,1):
#
# 
#
# A perfect estimator has an area under the curve of 1.0, while a bad estimator has a value of 0.5 or below. In practice, a classifier with an AUC above 0.8 is considered good and an AUC above 0.9 is considered very good.
#
# For the objective of creating an ML model for the winner of a CSGO match, we will use the AUC statistic as the main measure of the "goodness" of the model.
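# A minimal sketch of computing the AUC statistic with scikit-learn, assuming the same hypothetical `y_true` and `y_score` as in the ROC sketch above:
from sklearn.metrics import roc_auc_score

auc = roc_auc_score(y_true, y_score)
print(f"AUC: {auc:.3f}")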
# # Creating the train, validation and test sets
# When creating machine learning models it is strongly advised to split the data into **train**, **validation** and **test** sets. A good rule of thumb is to use ~80% of the data to train the algorithm, ~10% for parameter tuning and ~10% only for the final performance metric calculation.
#
# All of these datasets are needed to make sure that our model does not **overfit**.
# ## Overfitting problem
# As stated beautifully in the book "Introduction to Statistical Learning"{cite}`stat_learning`:
#
# **"When we overfit the training data, the test performance metrics will be very large because the supposed patterns that the method found in the training data simply don’t exist in the test data. Note that regardless of whether or not overfitting has occurred, we almost always expect the training errors to be smaller than the test errors because most statistical learning methods either directly or indirectly seek to minimize the training errors"**
#
# In other words, if we only use training data when creating ML models, we are partly blind and do not know how the model will perform on unseen data.
#
# As per {cite}`train_val_test`:
#
# **"The training set the largest corpus of your dataset that you reserve for training your model. After training, inference on these images will be taken with a grain of salt, since the model has already had a chance to look at and memorize the correct output."**
#
# **"The validation set is a separate section of your dataset that you will use during training to get a sense of how well your model is doing on images that are not being used in training. During training, it is common to report validation metrics continually after each training epoch <\...\>. You use these metrics to get a sense of when your model has hit the best performance it can reach on your validation set. You may choose to cease training at this point <\...\> As you work on your model, you can continually iterate on your dataset, image augmentations, and model design to increase your model's performance on the validation set."**
#
# **"After all of the training experiments have concluded, you probably have gotten a sense on how your model might do on the validation set. But it is important to remember that the validation set metrics may have influenced you during the creation of the model, and in this sense you might, as a designer, overfit the new model to the validation set. Because the validation set is heavily used in model creation, it is important to hold back a completely separate stronghold of data - the test set. You can run evaluation metrics on the test set at the very end of your project, to get a sense of how well your model will do in production."**
# ## Feature list
# After the feature engineering steps and EDA we can define the final feature list which we will use in our models:
# In[19]:
# Initial list
features = [
'bomb_planted',
'ct_health_share',
'ct_players_alive',
't_players_alive',
'ct_defuse_kit_present',
'ct_helmets',
't_helmets'
]
# **NOTE:** some of the features will be left out because of iterative inspection of model results and EDA.
# In[20]:
# Creating dummy vars for the map feature
map_df = pd.get_dummies(d['map'])
# Map feature names
map_features = map_df.columns.values.tolist()
# Concatenating the map_df to original dataframe
d = pd.concat([d, map_df], axis=1)
# Adding the map features to the original feature list
#features += map_features
# In[21]:
# Creating dummy vars for the team_score_diff features
score_df = pd.get_dummies(d['team_score_diff'])
# Score feature names
score_df.columns = [f"team_score_diff_in_{str(x)}" for x in score_df.columns]
score_features = score_df.columns.values.tolist()
# Concatenating the score_df to the original dataframe
d = pd.concat([d, score_df], axis=1)
# Adding the score features to the original feature list
#features += score_features
# In[22]:
print(f"""Final feature list: \n \n {features} \n \n Number of features: {len(features)}""")
# ## Splitting the original dataset
# We will use 80% of the data to train the model, 10% to validate the model and search for hyperparameters, and 10% will be reserved for the test set.
#
# For reproducibility, we will set a random seed of **123**.
# In[23]:
# Setting the seed
seed = 123
# Subsetting the dataframe to the needed features + the target variable
dsubset = d[features + ['Y']].copy()
# Dropping missing values
dsubset.dropna(inplace=True)
# Resetting the index
dsubset.reset_index(inplace=True, drop=True)
# Splitting into train and test sets
train, test = train_test_split(dsubset, test_size=0.2, random_state=seed)
# Further splitting the test set into test and validation sets
test, val = train_test_split(test, test_size=0.5, random_state=seed)
# In[24]:
print(f"Total number of rows of the dataset: {d.shape[0]}")
print(f"Rows in the train set: {train.shape[0]}")
print(f"Rows in the validation set: {val.shape[0]}")
print(f"Rows in the test set: {test.shape[0]}")
# ## Creating the X and Y matrices
# In[25]:
# Final matrices for training and validating models
train_X, train_Y = train[features], train['Y']
val_X, val_Y = val[features], val['Y']
test_X, test_Y = test[features], test['Y']
# Printing the stats about the distribution of Ys
print(f"Share of CT wins in training: {np.sum(train_Y) / len(train_Y)}")
print(f"Share of CT wins in validation: {np.sum(val_Y) / len(val_Y)}")
print(f"Share of CT wins in testing: {np.sum(test_Y) / len(test_Y)}")
# # ML model creation
# ## Performance metric for a binary classifier
# As was stated in the introduction of this book, when creating an ML model we need to have a performance metric to see how the model is performing and to measure how it improves over time.
#
# One of the most popular choices is the **area under the curve (AUC)** metric. It measures the area below the **receiver operating characteristic (ROC)** curve.
#
# The ROC curve plots the true positive rate (TPR) against the false positive rate (FPR) at different thresholds.
# ## Logistic Regression model
# Logistic regression {cite}`100_page_ml` is used when we want to model the probability:
#
# $$P(\mathbb{Y}|\mathbb{X})$$
#
# The above probability reads as "the probability of $\mathbb{Y}$ given $\mathbb{X}$". In other words, how do the features in $\mathbb{X}$ influence the event of $\mathbb{Y}$?
#
# The full equation for the probability which we will be trying to fit to the given data is:
#
# $$ P(\mathbb{Y}|\mathbb{X}) = \dfrac{1}{1 + e^{-\mathbb{X} \beta }}$$
#
# Where
#
# $\mathbb{X}$ - a feature matrix of $n$ observations and $p$ features.
#
# $\beta$ - a vector of dimensions $p$ x 1.
#
# In other words, we get a coefficient for each feature in the $\mathbb{X}$ matrix.
#
# What is very helpful in logistic regression is that a negative coefficient for a given feature means that increasing the feature $x_{i}$ will lower the probability of a CT win. If a coefficient is positive, then increasing the $x_{i}$ value increases the probability of a CT win.
#
# This simple fact enables quick sanity checks - the logic of the EDA analysis should hold with respect to the coefficient signs and values.
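# A minimal sketch of such a sanity check, assuming a LogisticRegression instance `clf` has already been fitted on the training data (the actual fitting happens in the next section):
coef_table = pd.DataFrame({
    'feature': features,
    'coefficient': clf.coef_[0]
}).sort_values('coefficient')
print(coef_table)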
# ### Fitting logistic regression
# #### Hyperparameter tuning
# We will try to find the best combination of hyperparameters from a given grid using the validation set.
#
# The full list of logistic regression HPs can be found here: https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html?highlight=logistic%20regression#sklearn.linear_model.LogisticRegression
# In[26]:
# Defining a list of hyperparameters
hp_dict = {
'C': [0.1, 0.5, 1, 1.5, 2],
'max_iter': [1000],
'fit_intercept': [True],
'solver': ['liblinear'],
'penalty': ['l1', 'l2']
}
# Creating the hp grid
hp_grid = ParameterGrid(hp_dict)
# In[27]:
# Placeholders for the iteration
auc_val_best = 0
best_hp = {}
results = pd.DataFrame({})
# Iterating through all the parameters and evaluating the results
for i, hp in enumerate(hp_grid):
# Initiating the empty model/classifier
clf = LogisticRegression(**hp)
# Fitting on data
clf.fit(train_X, train_Y)
# Predicting on the validation set
yhat_val = [x[1] for x in clf.predict_proba(val_X)]
# Calculating the AUC metric
auc_val = roc_auc_score(val_Y, yhat_val)
# Adding to the results frame
hp_results =
|
pd.DataFrame(hp, index=[i])
|
pandas.DataFrame
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2021, 2022 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
"""
The module that helps to register custom warnings for the feature types.
Classes
-------
FeatureWarning
The Feature Warning class. Provides functionality to register
warning handlers and invoke them.
Examples
--------
>>> warning = FeatureWarning()
>>> def warning_handler_zeros_count(data):
... return pd.DataFrame(
... [['Zeros', 'Age has 38 zeros', 'Count', 38]],
... columns=['Warning', 'Message', 'Metric', 'Value'])
>>> def warning_handler_zeros_percentage(data):
... return pd.DataFrame(
... [['Zeros', 'Age has 12.2% zeros', 'Percentage', '12.2%']],
... columns=['Warning', 'Message', 'Metric', 'Value'])
>>> warning.register(name="zeros_count", handler=warning_handler_zeros_count)
>>> warning.register(name="zeros_percentage", handler=warning_handler_percentage)
>>> warning.registered()
Warning Handler
----------------------------------------------------------
0 zeros_count warning_handler_zeros_count
1 zeros_percentage warning_handler_zeros_percentage
>>> warning.zeros_percentage(data_series)
Warning Message Metric Value
----------------------------------------------------------------
0 Zeros Age has 12.2% zeros Percentage 12.2%
>>> warning.zeros_count(data_series)
Warning Message Metric Value
----------------------------------------------------------------
0 Zeros Age has 38 zeros Count 38
>>> warning(data_series)
Warning Message Metric Value
----------------------------------------------------------------
0 Zeros Age has 38 zeros Count 38
1 Zeros Age has 12.2% zeros Percentage 12.2%
>>> warning.unregister('zeros_count')
>>> warning(data_series)
Warning Message Metric Value
----------------------------------------------------------------
0 Zeros Age has 12.2% zeros Percentage 12.2%
"""
from typing import Callable
import pandas as pd
from ads.feature_engineering.exceptions import WarningNotFound, WarningAlreadyExists
def _validate_warning_handler(handler: Callable) -> bool:
"""Validates warning handler.
The handler should take a pd.Series as a parameter and return a pd.DataFrame as the result.
The DataFrame should have four columns: Warning, Message, Metric and Value.
Parameters
----------
handler: Callable
The handler to validate.
Returns
-------
bool
True if handler compatible with Feature Warning, False otherwise.
"""
result = True
try:
handler_result = handler(pd.Series([]))
assert isinstance(handler_result, pd.DataFrame)
assert list(handler_result.columns) == ["Warning", "Message", "Metric", "Value"]
except AssertionError:
result = False
return result
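# Illustrative example only (not part of the module API): a handler compatible with the
# validation above takes a pd.Series and returns a DataFrame with the four expected columns, e.g.
#
#     def warning_handler_zeros_count(data: pd.Series) -> pd.DataFrame:
#         zeros = int((data == 0).sum())
#         return pd.DataFrame(
#             [["Zeros", f"Series has {zeros} zeros", "Count", zeros]],
#             columns=["Warning", "Message", "Metric", "Value"],
#         )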
class FeatureWarning:
"""The Feature Warning class.
Provides functionality to register warning handlers and invoke them.
Methods
-------
register(self, name: str, handler: Callable) -> None
Registers a new warning for the feature type.
unregister(self, name: str) -> None
Unregisters warning.
registered(self) -> pd.DataFrame
Gets the list of registered warnings.
Examples
--------
>>> warning = FeatureWarning()
>>> def warning_handler_zeros_count(data):
... return pd.DataFrame(
... [['Zeros', 'Age has 38 zeros', 'Count', 38]],
... columns=['Warning', 'Message', 'Metric', 'Value'])
>>> def warning_handler_zeros_percentage(data):
... return pd.DataFrame(
... [['Zeros', 'Age has 12.2% zeros', 'Percentage', '12.2%']],
... columns=['Warning', 'Message', 'Metric', 'Value'])
>>> warning.register(name="zeros_count", handler=warning_handler_zeros_count)
>>> warning.register(name="zeros_percentage", handler=warning_handler_percentage)
>>> warning.registered()
Warning Handler
----------------------------------------------------------
0 zeros_count warning_handler_zeros_count
1 zeros_percentage warning_handler_zeros_percentage
>>> warning.zeros_percentage(data_series)
Warning Message Metric Value
----------------------------------------------------------------
0 Zeros Age has 12.2% zeros Percentage 12.2%
>>> warning.zeros_count(data_series)
Warning Message Metric Value
----------------------------------------------------------------
0 Zeros Age has 38 zeros Count 38
>>> warning(data_series)
Warning Message Metric Value
----------------------------------------------------------------
0 Zeros Age has 38 zeros Count 38
1 Zeros Age has 12.2% zeros Percentage 12.2%
>>> warning.unregister('zeros_count')
>>> warning(data_series)
Warning Message Metric Value
----------------------------------------------------------------
0 Zeros Age has 12.2% zeros Percentage 12.2%
"""
def __init__(self):
"""Initializes the FeatureWarning."""
self._data = None
self._handlers = {}
def register(self, name: str, handler: Callable, replace: bool = False) -> None:
"""Registers a new warning.
Parameters
----------
name : str
The warning name.
handler: callable
The handler associated with the warning.
replace: bool
The flag indicating if the registered warning should be replaced with the new one.
Returns
-------
None
Nothing
Raises
------
ValueError
If warning name is empty or handler not defined.
TypeError
If handler is not callable.
WarningAlreadyExists
If warning is already registered.
"""
if not name:
raise ValueError("Warning name is not provided.")
if name in self._handlers and not replace:
raise WarningAlreadyExists(name)
if not handler:
raise ValueError("Handler is not provided.")
if not callable(handler):
raise TypeError("Handler should be a function.")
self._handlers[name] = handler
def unregister(self, name: str) -> None:
"""Unregisters warning.
Parameters
-----------
name: str
The name of warning to be unregistered.
Returns
-------
None
Nothing.
Raises
------
ValueError
If warning name is not provided or empty.
WarningNotFound
If warning not found.
"""
if not name:
raise ValueError("Warning name is not provided.")
if name not in self._handlers:
raise WarningNotFound(name)
del self._handlers[name]
def registered(self) -> pd.DataFrame:
"""Gets the list of registered warnings.
Returns
-------
pd.DataFrame
The list of registered warnings in DataFrame format.
Examples
--------
>>> warning.registered()
Warning Handler
-----------------------------------------------------------
0 zeros_count warning_handler_zeros_count
1 zeros_percentage warning_handler_zeros_percentage
"""
result = []
for name, handler in self._handlers.items():
result.append((name, handler.__name__))
return pd.DataFrame(result, columns=["Warning", "Handler"])
def _bind_data(self, data: pd.Series) -> None:
"""Binds data to the feature warning.
Parameters
----------
data: pd.Series
The data to be bound.
"""
self._data = data
def _process(self) -> pd.DataFrame:
"""Invokes the all registered warnings.
Returns
-------
pd.DataFrame
The result of the invoked warning handlers, for example:
Warning Message Metric Value
--------------------------------------------------------
Zeros Age has 38 zeros Count 38
Zeros Age has 12.2% zeros Percentage 12.2%
Raises
------
ValueError
If data is not provided or result of warning has a wrong format.
"""
if self._data is None:
raise ValueError("Data is not provided.")
if not self._handlers:
return None
expected_columns = ["Warning", "Message", "Metric", "Value"]
result_df =
|
pd.DataFrame([], columns=expected_columns)
|
pandas.DataFrame
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import streamlit as st
import pandas as pd
import numpy as np
import geopandas as gpd
from pathlib import Path
from PIL import Image
import altair as alt
import pydeck as pdk
import numpy as np
from api_key import mapbox_key
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
import pydeckmapping
from importlib import reload
reload(pydeckmapping)
from shapely.geometry import Point
from pydeckmapping import build_map
import datetime
import sys
sys.path.append('/Users/david/Dropbox/PhD/Scripts/Spatial analyses')
import pyspace
reload(pyspace)
from pyproj import Transformer
transformer = Transformer.from_crs("epsg:2056", "epsg:4326")
st.image('https://reseau-delta.ch/assets/ci_content/images/logo.png',width = 180)
st.markdown(st.__version__)
st.title("Plateforme d'analyse des données du réseau de soins Delta")
text_intro = """ Les données analysées sur cette plateforme correspondent aux données Delta de l'année {} et portent sur plus de {} patients dont {} à Genève. Il y a {} prescriteurs dont {} MPR Delta, {} distributeurs et {} cercles. """
################################
###########LOAD DATA############
################################
@st.cache(allow_output_mutation=True)
def load_data(path,DATE_COLUMN = None):
"""Load data into DataFrame"""
data = pd.read_csv(path)
lowercase = lambda x: str(x).lower()
data.rename(lowercase, axis='columns', inplace=True)
if DATE_COLUMN is not None:
data[DATE_COLUMN] = pd.to_datetime(data[DATE_COLUMN])
return data
@st.cache(allow_output_mutation=True)
def load_gdf(path):
"""Load data into GeoDataFrame"""
data = gpd.read_file(path)
lowercase = lambda x: str(x).lower()
data.rename(lowercase, axis='columns', inplace=True)
return data
date = '20200318'
#####################################GENERAL DATASETS###########################################
data_folder = Path("../Data").resolve()
buildings_ge = pd.read_pickle('../Data/buildings_ge.pkl')
drug_path = data_folder/'Clean_data'/'{}_drug.csv'.format(date)
geom_path = data_folder/'Clean_data'/'{}_geometries.geojson'.format(date)
patient_path = data_folder/'Clean_data'/'{}_patient.geojson'.format(date)
cercle_path = data_folder/'Clean_data'/'{}_cercle.csv'.format(date)
event_path = data_folder / 'Clean_data'/'{}_event.geojson'.format(date)
mpr_path = data_folder / 'Clean_data'/'{}_mpr.geojson'.format(date)
distributor_path = data_folder/'Clean_data/{}_distributor.geojson'.format(date)
prescriber_path = data_folder / 'Clean_data/{}_prescriber.geojson'.format(date)
provider_path = data_folder / 'Clean_data/{}_provider.geojson'.format(date)
animator_path = data_folder / 'Clean_data/{}_animator.geojson'.format(date)
prestation_path = data_folder/'Clean_data'/'{}_prestation.csv'.format(date)
data_load_state = st.text('Loading data...') # Create a text element and let the reader know the data is loading.
df_geometries = load_gdf(path = geom_path) #Import geometries
gdf_distributor = load_gdf(distributor_path)
gdf_prescriber = load_gdf(prescriber_path)
gdf_provider = load_gdf(provider_path)
gdf_animator = load_gdf(animator_path)
gdf_event = load_gdf(event_path)
gdf_mpr = load_gdf(mpr_path)
df_cercle = load_data(cercle_path)
df_drug = load_data(path = drug_path,DATE_COLUMN = 'delivereddate') #Load drug data
gdf_patient = load_gdf(path = patient_path) # Load patient data
data_load_state.text('Loading data...done!') # Notify the reader that the data was successfully loaded.
atc_data = load_data(path = '../Data/atc_nomenclature.csv') #Load the ATC nomenclature from WHO
df_atc_levels = pd.read_csv('../Data/atc_levels.csv') #Import ATC levels
cantons = gpd.read_file('/Users/david/Dropbox/PhD/Data/Databases/SITG/SHAPEFILE_LV95_LN02/swissBOUNDARIES3D_1_3_TLM_KANTONSGEBIET.shp')
communes = gpd.read_file('/Users/david/Dropbox/PhD/Data/Databases/SITG/SHAPEFILE_LV95_LN02/swissBOUNDARIES3D_1_3_TLM_HOHEITSGEBIET.shp')
communes_ge = communes[communes.KANTONSNUM == 25]
MIP = ['indometacin','acemetacin','ketoprofen','phenylbutazon','piroxicam','meloxicam','etoricoxib','pethidin','chinidin','flecainid','sotalol','nitrofurantoin','zolpidem','estradiol','trimipamine','acémétacine','amiodarone']
#################################################################################################
################################## DATA PREPARATION #############################################
atc_data = atc_data.fillna('')
gdf_mpr['mpr_yy_bth'] = gdf_mpr['mprbirthday'].str.split('.').str[2]
gdf_mpr['mpr_yy_entry'] = gdf_mpr['mprentrydate'].str.split('.').str[2]
gdf_mpr['mpr_yy_exit'] = gdf_mpr['mprexitdate'].str.split('.').str[2]
gdf_mpr = gdf_mpr.drop(['mprentrydate','mprbirthday','mprexitdate'],axis = 1).drop_duplicates()
gdf_mpr[['mpr_yy_bth','mpr_yy_entry','mpr_yy_exit']] = gdf_mpr[['mpr_yy_bth','mpr_yy_entry','mpr_yy_exit']].astype('float')
no_dupli = gdf_mpr.groupby(['id']).mean().reset_index()
no_dupli = no_dupli.drop(['e','n'],axis = 1)
gdf_mpr = gdf_mpr.drop(['mpr_yy_bth','mpr_yy_entry','mpr_yy_exit'],axis = 1).merge(no_dupli, on = 'id').drop_duplicates().reset_index()
gdf_mpr = gdf_mpr[['id','name','mprsex','mpr_yy_bth','mpr_yy_entry','mpr_yy_exit','e','n','geometry']].drop_duplicates(subset = ['id'])
gdf_mpr['age'] = 2018-gdf_mpr.mpr_yy_bth
gdf_mpr.loc[gdf_mpr.age > 200, 'age'] = 65 ###To be changed (better to change in Data Preparation and replace yy_bth before age calculation)
gdf_mpr.loc[gdf_mpr.age < 0,'age'] = np.nan
bins = [30, 45, 60, 75]
gdf_mpr['age_cat'] = pd.cut(gdf_mpr['age'], bins)
dict_atc_levels= dict(zip(df_atc_levels.atc, df_atc_levels.level))
gdf_event_cercle = pd.merge(gdf_event,df_cercle, left_on = 'id',right_on = 'eventid', how = 'left')
uniq_cercle_geom = gdf_event_cercle.drop_duplicates(subset = 'circlename',keep='first').reset_index(drop = True)
# uniq_cercle_geom['longitude'],uniq_cercle_geom['latitude'] = uniq_cercle_geom.to_crs(epsg = 4326).geometry.x,uniq_cercle_geom.to_crs(epsg = 4326).geometry.y
uniq_cercle_geom[['latitude','longitude']]= uniq_cercle_geom.apply(lambda x: transformer.transform(x.e,x.n),axis = 1,result_type = 'expand')
geojson_file_CQ = '../Data/CQ_polygons.geojson'
bins = [0,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100,105,110,115]
gdf_patient['age_cat'] = pd.cut(gdf_patient['age'], bins)
#################################################################################################
################################ PERSONAL ID ##############################################
st.sidebar.markdown('## Insert your personal ID')
st.sidebar.markdown('Example : 40e8ac4dbc023d86a815f8476a1884e4')
personal_id = st.sidebar.text_input('Personal ID')
if personal_id not in df_drug.prescriberid.values:
st.sidebar.markdown('### *Invalid ID*')
else:
st.sidebar.markdown('### *Valid ID*')
colors_id = {}
for i in df_drug.prescriberid.values:
colors_id[i] = 'blue'
if i == personal_id:
colors_id[i] = 'red'
#################################################################################################
################################ INTRODUCTION TEXT ##############################################
text_intro = text_intro.format(2018,gdf_patient.id.nunique(),gdf_patient[gdf_patient.networkname == 'Delta Genève'].id.nunique(),df_drug.prescriberid.nunique(),df_drug[df_drug.mpr_delta == 1].prescriberid.nunique(),df_drug.distributorid.nunique(),52)
st.markdown(text_intro)
st.sidebar.markdown('# *Analyses*')
#################################################################################################
################################ SHOW RAW DATA ##################################################
if st.sidebar.checkbox('Show raw prescription data',key = 'Drug prescription data'):
st.subheader('Raw data')
st.write(df_drug.head())
#################################################################################################
#################################AGE FILTERING ##################################################
st.markdown('### Age filtering')
age_filter = st.slider("Patient Age", 0, 110, (25, 75), step = 5)
patients = gdf_patient[(gdf_patient.age >= age_filter[0])&(gdf_patient.age <= age_filter[1])].id.values
filtered_drug = df_drug[df_drug.patientid.isin(patients)]
filtered_drug = pd.merge(filtered_drug,df_geometries[['id','lat','lon']],how = 'left',left_on = 'patientid',right_on = 'id')
filtered_drug = filtered_drug[filtered_drug.lat.isnull()==False]
filtered_drug = filtered_drug.rename(columns = {'lon' : 'longitude','lat':'latitude'})
filtered_drug['month_delivery'] = filtered_drug['delivereddate'].dt.month_name()
filtered_drug['month_delivery'] = pd.Categorical(filtered_drug['month_delivery'], ['January','February',"March", "April",'May','June','July','August','September','October','November', "December"])
st.markdown('### Time period selection')
start_date = str(st.date_input('Start date', datetime.date(2018, 1, 1)))
end_date = str(st.date_input('End date', filtered_drug.delivereddate.max()))
if start_date < end_date:
st.success('Start date: `%s`\n\nEnd date: `%s`' % (start_date, end_date))
else:
st.error('Error: End date must fall after start date.')
filtered_drug = filtered_drug[(filtered_drug.delivereddate >start_date)&(filtered_drug.delivereddate < end_date)]
#################################################################################################
if st.sidebar.checkbox('Time and space distribution of a drug',key = 'Time space drug A'):
st.sidebar.markdown('## Drug group A')
atc_filter = st.sidebar.text_input('Filter ATC name list').lower() #Add text box input for ATC filtering
atc_on = st.sidebar.selectbox("ATC choices", atc_data[atc_data.nameen.str.contains(atc_filter)]['nameen'].sort_values().unique().tolist(), 0) #Add a select box to choose an ATC among the filtered ones
if atc_on != '':
DATE_COLUMN = 'delivereddate'
atc_level_on = dict_atc_levels[atc_on]
st.subheader('ATC ontology for "%s"' % atc_on)
st.write(filtered_drug[filtered_drug[atc_level_on] == atc_on].filter(regex='atc_').drop_duplicates())
################################# BAR CHART ###################################################
st.subheader('Nombre de prescriptions de %s par mois' % atc_on)
hist_values = pd.DataFrame(filtered_drug[filtered_drug[atc_level_on] == atc_on].month_delivery.value_counts().sort_index()).reset_index()
# st.bar_chart(hist_values,height = 300)
fig = px.bar(hist_values, x='index', y='month_delivery',
hover_data=['month_delivery'], color='month_delivery', color_continuous_scale=[(0, "rgb(222,235,247)"), (0.5, "rgb(158,202,225)"), (1, "rgb(49,130,189)")],
labels={'month_delivery':'Number of prescriptions'}, height=600,width = 780)
st.plotly_chart(fig)
###MAP
# Some number in the range 0-12
month_to_filter = st.slider('Mois', 1, 12, 6)
filtered_drug_atc = filtered_drug[(filtered_drug[DATE_COLUMN].dt.month == month_to_filter)&(filtered_drug[atc_level_on] == atc_on)]
mapstyle = st.sidebar.radio("Mapping style",('Simple', 'Advanced'))
st.markdown('## Geographic distribution')
st.subheader('Carte des prescriptions de %s au %s/2018' % (atc_on,month_to_filter))
if mapstyle == 'Simple':
st.map(filtered_drug_atc)
if mapstyle == 'Advanced':
st.sidebar.markdown('### Map Options')
##MAP OPTIONS
show_buildings = False
show_grid = False
# show_geojson = False
if st.sidebar.checkbox('Show buildings blueprint',key = 'Buildings'):
show_buildings = True
if st.sidebar.checkbox('Show grid layer',key = 'Grid'):
show_grid = True
layers, view_state = build_map(filtered_drug_atc[['longitude','latitude']],buildings_ge,geojson_file_CQ,show_buildings = show_buildings,show_grid = show_grid)
r = pdk.Deck(map_style = 'mapbox://styles/mapbox/light-v9',layers=layers, initial_view_state=view_state,mapbox_key = mapbox_key())
st.pydeck_chart(r)
##########################
grouping_level = st.sidebar.selectbox("See prescribing by:", ['Prescribers','Distributors'], 0,key = 'group level') #Add a select box to choose the grouping level
group_dict = {'Prescribers':'prescriberid','Distributors':'distributorid'}
group_var = group_dict[grouping_level]
##########################
st.sidebar.markdown('## Drug group B')
versus_filter = st.sidebar.text_input('Filter versus name list').lower() #Add text box input for ATC filtering
atc_vs = st.sidebar.selectbox("ATC choices", atc_data[atc_data.nameen.str.contains(versus_filter)]['nameen'].sort_values().unique().tolist(), 0,key = 'atc_choice')
##########################
if atc_on in MIP:
st.markdown('# Médicaments Potentiellement Inadéquats (MIP)')
st.markdown("""Pour la définition des médicaments potentiellement inadéquats, la liste Beers et PRISCUS a été opérationnalisée en fonction des limitations données. Les recommandations font référence aux "personnes âgées". Selon le principe actif et l'affection, la limite d'âge selon les listes peut varier légèrement. Dans ce contexte, le dénominateur commun est l'application des règles à tous les assurés qui ont atteint l'âge de 65 ans au cours du trimestre concerné.
En principe, les deux listes contiennent des catégories similaires pour l'évaluation d'une substance active :
La liste des bières distingue essentiellement 9 critères différents qui définissent la MIP : 1. l'ATC est toujours le MIP
2. seule la préparation à courte durée d'action du médicament est une MIP (donc pas de produits retardés) 3. seules les doses élevées sont des MIP
4. seul l'usage à long terme est PIM
5. pas de MIP en cas de certains diagnostics
6. uniquement les MIP avec une forme de dosage spéciale (par exemple, orale) 7. seuls les médicaments spéciaux sont des MIP
8. seulement PIM si sans utilisation simultanée d'autres substances actives
9. la MIP si des critères cliniques sont présents.
Outre ces classifications, il existe également des médicaments qui doivent répondre à plusieurs critères d'exigence ou combinaisons de conditions pour être considérés comme des MIP. Ces groupes sont les suivants : 2/6, 3/9, 4/9, 6/7, 8/4 et 8/3/4.
Comme il n'y a pas de diagnostics et de critères cliniques dans les données d'Helsana,
les groupes 5 et 9 ne sont pas utilisés, c'est-à-dire que ces MIP
ne peuvent pas être déterminées. Dans le groupe 7 également, aucun médicament
n'est défini pour cette évaluation. Le groupe 8 n'apparaît qu'en combinaison avec d'autres groupes.
Le groupe 4 n'est utilisé qu'avec les bières Version 2015 en combinaison avec les valeurs cliniques.
Le groupe 2 n'est actuellement pas pertinent sur le marché suisse.""")
st.markdown('## Prescription of {} vs {}'.format(atc_on, atc_vs))
if atc_vs != '':
atc_level_vs = dict_atc_levels[atc_vs]
show_by = st.radio("Show by :",('Patient', 'Item'))
if show_by == 'Patient':
if st.sidebar.checkbox('Only Delta MPR',key = 'MPR'):
df_onvs_frac = filtered_drug[[group_var,atc_level_on,'patientid']][(filtered_drug['mpr_delta']==1)&(filtered_drug[atc_level_on] == atc_on)].groupby(group_var).patientid.nunique() / filtered_drug[[group_var,atc_level_vs,'patientid']][(filtered_drug['mpr_delta']==1)&(filtered_drug[atc_level_vs] == atc_vs)].groupby(group_var).patientid.nunique()
else:
df_onvs_frac = filtered_drug[[group_var,atc_level_on,'patientid']][filtered_drug[atc_level_on] == atc_on].groupby(group_var).patientid.nunique() / filtered_drug[[group_var,atc_level_vs,'patientid']][filtered_drug[atc_level_vs] == atc_vs].groupby(group_var).patientid.nunique()
df_onvs_frac = pd.DataFrame(df_onvs_frac.dropna().sort_values()).rename(columns = {'patientid':'fraction'}).reset_index()
title = 'Number of patients that received {} per <br> 1,000 patients that received {}'.format(atc_on, atc_vs)
if show_by == 'Item':
if st.sidebar.checkbox('Only Delta MPR',key = 'MPR'):
df_onvs_frac = filtered_drug[[group_var,atc_level_on]][(filtered_drug['mpr_delta']==1)&(filtered_drug[atc_level_on] == atc_on)].groupby(group_var).count()[atc_level_on] / filtered_drug[[group_var,atc_level_vs,]][(filtered_drug['mpr_delta']==1)&(filtered_drug[atc_level_vs] == atc_vs)].groupby(group_var).count()[atc_level_vs]
else:
df_onvs_frac = filtered_drug[[group_var,atc_level_on]][filtered_drug[atc_level_on] == atc_on].groupby(group_var).count()[atc_level_on] / filtered_drug[[group_var,atc_level_vs]][filtered_drug[atc_level_vs] == atc_vs].groupby(group_var).count()[atc_level_vs]
df_onvs_frac = pd.DataFrame(df_onvs_frac.dropna().sort_values()).rename(columns = {0:'fraction'}).reset_index()
title = 'Items for {} per 1,000 {}'.format(atc_on, atc_vs)
df_onvs_frac['fraction'] = df_onvs_frac['fraction']*1000
fig = go.Figure(data=[go.Bar(x=df_onvs_frac[group_var], y=df_onvs_frac['fraction'])])
fig.update_traces(marker_color=df_onvs_frac[group_var].map(colors_id).values, marker_line_width=0, opacity=1)
xaxis_title = "ID of the {}".format(grouping_level[:-1])
fig.update_layout(xaxis_title=xaxis_title, yaxis_title= 'n', width=780, height=600,title={'text':title})
st.plotly_chart(fig)
if grouping_level == 'Prescribers':
df_onvs_frac_by_cercle = pd.DataFrame(pd.merge(df_onvs_frac,gdf_event_cercle, left_on = 'prescriberid', right_on = 'participantid',how = 'left').groupby('circlename').mean().fraction.sort_values()).reset_index()
fig = px.bar(df_onvs_frac_by_cercle, x='circlename', y='fraction',hover_data=['fraction'], color='fraction',labels={'fraction':'n','circlename':'Name of the Quality Circle'}, width=780, height=600)
st.plotly_chart(fig)
df_onvs_frac_by_cercle = gpd.GeoDataFrame(pd.merge(df_onvs_frac_by_cercle,uniq_cercle_geom[['circlename','longitude','latitude','networkname','geometry']],on = 'circlename',how = 'left'))
ax = df_onvs_frac_by_cercle[df_onvs_frac_by_cercle.networkname == 'Delta Genève'].plot('fraction',legend = True,cmap = 'magma',markersize = 90,figsize = (8,5))
df_onvs_frac_by_cercle[df_onvs_frac_by_cercle.networkname == 'Delta Genève'].apply(lambda x: ax.annotate(s=x.circlename, xy=x.geometry.centroid.coords[0], ha='center',size = 2),axis=1);
communes_ge.plot(ax = ax,alpha = 0.2,color = 'lightgrey')
ax.set_axis_off()
st.pyplot(height = 800,dpi = 800)
# layers, view_state = build_map(df_onvs_frac_by_cercle[['longitude','latitude']],buildings_ge,geojson_file_CQ,show_buildings = False,show_grid = False,show_geojson = True)
# r = pdk.Deck(map_style = 'mapbox://styles/mapbox/light-v9',layers=layers,tooptip = True, initial_view_state=view_state,mapbox_key = mapbox_key())
# st.write(r)
if st.sidebar.checkbox('Generic and original drugs analysis',key = 'Generic drug'):
atc_filter = st.sidebar.text_input('Filter ATC name list',key = 'atc 2').lower() #Add text box input for ATC filtering
atc_on_generic = st.sidebar.selectbox("ATC choices", atc_data[atc_data.nameen.str.contains(atc_filter)]['nameen'].sort_values().unique().tolist(),key = 'atc 2 selectbox') #Add a select box to choose an ATC among the filtered ones
if atc_on_generic != '':
atc_generic_level_on = dict_atc_levels[atc_on_generic]
st.markdown('# Generic drugs usage')
grouping_level = st.sidebar.selectbox("See prescribing by:", ['Prescribers','Distributors'],0,key = 'group level drug') #Add a select box to choose the grouping level
group_dict = {'Prescribers':'prescriberid','Distributors':'distributorid'}
group_var = group_dict[grouping_level]
generic_status = filtered_drug[['drugatcname','druggeneric']].drop_duplicates().dropna()
drugs_with_generic = generic_status[generic_status.drugatcname.duplicated()].sort_values('drugatcname').dropna().drugatcname.unique()
prescriptions_gene_orig = filtered_drug[filtered_drug.drugatcname.isin(drugs_with_generic)]
# drugs_to_study = prescriptions_gene_orig[['drugatcname','atc_lvl1','atc_lvl2','atc_lvl3','atc_lvl4','atc_lvl5']].drop_duplicates()
# st.write(drugs_to_study.head())
prescriptions_gene_orig_nonull = prescriptions_gene_orig[prescriptions_gene_orig.druggeneric.isnull()==False]
ratio_gene_orig = pd.DataFrame(prescriptions_gene_orig_nonull.groupby([group_var,atc_generic_level_on,'druggeneric']).drugname.count()).unstack().reset_index().fillna(0)
st.write(ratio_gene_orig)
ratio_gene_orig.columns = [group_var,atc_generic_level_on,'Générique','Original']
ratio_gene_orig['total'] = ratio_gene_orig[['Générique','Original']].sum(axis = 1)
ratio_gene_orig['perc_generique'] = ((ratio_gene_orig['Générique']/ratio_gene_orig[['Original','Générique']].sum(axis = 1))*100).round(1)
ratio_gene_orig['perc_original'] = ((ratio_gene_orig['Original']/ratio_gene_orig[['Original','Générique']].sum(axis = 1))*100).round(1)
drug_gene_orig_ratio = ratio_gene_orig[ratio_gene_orig[atc_generic_level_on] == atc_on_generic].sort_values('perc_generique')
if grouping_level == 'Prescribers':
prescription_info = pd.merge(gdf_prescriber,drug_gene_orig_ratio,left_on = 'id',right_on = group_var)
if st.sidebar.checkbox('Only Delta MPR',key = 'MPR'):
prescription_info = prescription_info[prescription_info.mpr_delta == 1]
if grouping_level == 'Distributors':
prescription_info = pd.merge(gdf_distributor,drug_gene_orig_ratio,left_on = 'id',right_on = group_var)
max_prescription = int(prescription_info.total.max())
min_prescription = int(prescription_info.total.min())
n_prescri_filter = st.slider("Number of prescriptions delivered by {}".format(grouping_level[:-1]), min_prescription,max_prescription, (min_prescription, max_prescription), step = 5)
prescription_info = prescription_info[(prescription_info.total >= n_prescri_filter[0]) & (prescription_info.total <= n_prescri_filter[1]) ]
####
barplot_option = st.sidebar.selectbox('Show bar plot with: ',('Absolute values', 'Percentages'))
####
if barplot_option == 'Absolute values':
title = 'Number of "Generic" and "Original" {} prescriptions by {}'.format(atc_on_generic,grouping_level)
labels= prescription_info.sort_values('total')[group_var].values
fig = go.Figure(data=[
go.Bar(name='Generic', x=labels, y=prescription_info.sort_values('total').Générique.values),
go.Bar(name='Original', x=labels, y=prescription_info.sort_values('total').Original.values)])
xaxis_title = "ID of the {}".format(grouping_level[:-1])
fig.update_layout(barmode='stack',xaxis_title=xaxis_title, yaxis_title= 'n', width=780, height=600,title={'text':title})
# Change the bar mode
if barplot_option == 'Percentages':
title = 'Percentage of "Generic" and "Original" {} prescriptions by {}'.format(atc_on_generic,grouping_level)
labels= prescription_info[group_var].values
fig = go.Figure(data=[
go.Bar(name='Générique', x=labels, y=prescription_info.sort_values('perc_generique').perc_generique.values),
go.Bar(name='Original', x=labels, y=prescription_info.sort_values('perc_generique').perc_original.values)])
xaxis_title = "ID of the {}".format(grouping_level[:-1])
fig.update_layout(barmode='stack',xaxis_title=xaxis_title, yaxis_title= '%', width=780, height=600,title={'text':title})
# fig.update_traces(marker_color=prescription_info[group_var].map(colors_id).values, marker_line_width=0, opacity=1)
st.plotly_chart(fig)
if grouping_level == 'Prescribers':
drug_gene_orig_per_cq = pd.DataFrame(pd.merge(prescription_info,gdf_event_cercle[['participantid','circlename']].drop_duplicates(), left_on = 'prescriberid', right_on = 'participantid',how = 'left').groupby('circlename').mean().perc_generique.sort_values()).reset_index()
fig = px.bar(drug_gene_orig_per_cq, x='circlename', y='perc_generique',hover_data=['perc_generique'], color='perc_generique',labels={'perc_generique':'%','circlename':'Name of the Quality Circle'}, width=780, height=600)
st.plotly_chart(fig)
prescription_info[['lat','lon']]= prescription_info.apply(lambda x: transformer.transform(x.e,x.n),axis = 1,result_type = 'expand')
prescription_info = pyspace.add_random_noise(prescription_info)
distance = st.slider('Distance',100,2000, value = 1200, step = 100)
prescription_info,weights = pyspace.get_distanceBandW(prescription_info,distance)
getis = pyspace.compute_getis(prescription_info,'perc_generique',weights,9999,0.05,star = True)
colors_cl = {'Cold Spot - p < 0.01':'#2166ac', 'Cold Spot - p < 0.05':'#67a9cf','Cold Spot - p < 0.1':'#d1e5f0', 'Hot Spot - p < 0.01':'#b2182b','Hot Spot - p < 0.05':'#ef8a62','Hot Spot - p < 0.1':'#fddbc7','Not significant':'#bdbdbd'}
prescription_info['perc_generique_G_cl'] = pd.Categorical(prescription_info['perc_generique_G_cl'], ['Cold Spot - p < 0.01','Cold Spot - p < 0.05','Cold Spot - p < 0.1','Hot Spot - p < 0.01','Hot Spot - p < 0.05','Hot Spot - p < 0.1','Not significant'])
px.set_mapbox_access_token(mapbox_key())
specialty = group_var[:-2]+'specialty'
fig = px.scatter_mapbox(prescription_info.sort_values('perc_generique_G_cl'), lat="lat", lon="lon",hover_data = [specialty,'perc_generique'],color = 'perc_generique_G_cl',
color_discrete_map = colors_cl,size = 'perc_generique', size_max=10, zoom=8)
st.plotly_chart(fig)
fig = px.scatter_mapbox(prescription_info.sort_values('perc_generique'), lat="lat", lon="lon",color = 'perc_generique',size = 'perc_generique', size_max=10, zoom=8)
st.plotly_chart(fig)
st.markdown('# Polymedication')
st.markdown("""La polymédication est souvent définie comme l'administration ou la prise de 5 médicaments ou
agents différents ou plus. Plus les médicaments sont combinés, plus le
risque d'effets secondaires indésirables est élevé. Par
exemple, le risque d'interaction est déjà de 38% pour 4 médicaments différents, alors
qu'il passe à 82% s'il y a 7 médicaments différents ou plus (Blozik, et al., 2013).
### Définition de la polymédication dans l'ensemble des données d'analyse
Pour l'identification d'un assuré avec polymédication, les codes ATC réglés
par assuré sont comptés dans un trimestre d'évaluation. Si un code ATC à moins de
7 chiffres est trouvé dans un autre code ATC à plus de chiffres, il n'est compté
qu'une fois par assuré et par trimestre. Pour un drapeau positif de polymédication,
6 codes ATC différents ou plus doivent avoir été comptabilisés avec un assuré.""")
if st.checkbox('Only Delta MPR',key = 'MPR2'):
df_onvs_meanatc_perpatient = pd.DataFrame(filtered_drug[(filtered_drug['mpr_delta']==1)].groupby(['prescriberid','patientid']).drugatcname.nunique().groupby('prescriberid').mean().sort_values()).reset_index()
#################
patient_n_atc = pd.DataFrame(filtered_drug[(filtered_drug['mpr_delta']==1)].groupby(['patientid']).drugatcname.nunique()).reset_index()
patient_n_atc.columns = ['patientid','n_cat']
patient_n_atc = pd.merge(gdf_patient[['id','age_cat']],patient_n_atc, left_on = 'id',right_on = 'patientid', how = 'left').drop('patientid',axis = 1)
patient_n_atc['n_cat']= patient_n_atc['n_cat'].fillna(0)
patient_n_atc.loc[patient_n_atc.n_cat == 0, 'atc_cat'] = '0. No medication'
patient_n_atc.loc[patient_n_atc.n_cat > 4, 'atc_cat'] = '2. 5 or more medications'
patient_n_atc.loc[(patient_n_atc.n_cat < 5)& (patient_n_atc.n_cat > 0), 'atc_cat'] = '1. 1-4 medications'
patient_n_atc['age_cat'] = patient_n_atc['age_cat'].astype(str)
patient_n_atc = pd.DataFrame(patient_n_atc.groupby(['age_cat','atc_cat']).size().mul(100)/patient_n_atc.groupby(['age_cat']).size()).reset_index()
patient_n_atc.columns = ['age_cat','atc_cat','perc']
patient_n_atc['perc'] = patient_n_atc['perc'].round(1)
patient_n_atc['age_cat'] = pd.Categorical(patient_n_atc['age_cat'], ['(0.0, 5.0]','(5.0, 10.0]', '(10.0, 15.0]',
'(15.0, 20.0]', '(20.0, 25.0]', '(25.0, 30.0]', '(30.0, 35.0]',
'(35.0, 40.0]', '(40.0, 45.0]', '(45.0, 50.0]',
'(50.0, 55.0]', '(55.0, 60.0]', '(60.0, 65.0]', '(65.0, 70.0]',
'(70.0, 75.0]', '(75.0, 80.0]', '(80.0, 85.0]', '(85.0, 90.0]',
'(90.0, 95.0]', '(95.0, 100.0]','(100.0, 105.0]', '(105.0, 110.0]', 'nan'])
patient_n_atc = patient_n_atc.sort_values(['age_cat','atc_cat'])
fig = px.bar(patient_n_atc, x="age_cat", y="perc", color='atc_cat', barmode='group',
height=400,title = 'Proportion de personnes assurées en fonction du nombre de médicaments',labels={'age_cat':'Age category','perc':'Percentage (%)'})
st.plotly_chart(fig)
else:
df_onvs_meanatc_perpatient = pd.DataFrame(filtered_drug.groupby(['prescriberid','patientid']).drugatcname.nunique().groupby('prescriberid').mean().sort_values()).reset_index()
title = 'Mean number of unique ATC prescribed by patient for each prescriber'
fig = go.Figure(data=[go.Bar(x=df_onvs_meanatc_perpatient['prescriberid'], y=df_onvs_meanatc_perpatient['drugatcname'])])
fig.update_traces(marker_color='rgb(8,48,107)', marker_line_color='rgb(8,48,107)', marker_line_width=1.5, opacity=1)
fig.add_shape(
# Line Horizontal
type="line",
x0=0,
y0=6,
x1=len(df_onvs_meanatc_perpatient['prescriberid'].values),
y1=6,
line=dict(
color="LightSeaGreen",
width=4,
dash="dashdot",
),
)
xaxis_title = "ID of the Prescriber"
fig.update_layout(xaxis_title=xaxis_title, yaxis_title= 'n', width=780, height=600,title={'text':title})
st.plotly_chart(fig)
##########################
df_onvs_meanatc_perpatient =
|
pd.merge(df_onvs_meanatc_perpatient,gdf_mpr[['id','mprsex','age_cat']], left_on = 'prescriberid',right_on = 'id',how = 'left')
|
pandas.merge
|
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: combine all tables
# Description:
# Author: <NAME>
# Date: 2020/5/12
# -------------------------------------------------------------------------------
import pandas as pd
def combine_all_tables(fruit, place, weather,path):
place['City'] = place['City'].str.upper()
weather['City'] = weather['City'].str.upper()
weather_fruit_merge_df =
|
pd.merge(weather, fruit, on=['City', 'Date'])
|
pandas.merge
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 2 15:46:50 2018
@author: <NAME>
"""
import pandas as pd
import re
import time
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
dataset =
|
pd.read_csv('chatdata.csv',encoding='latin-1')
|
pandas.read_csv
|
"""
Author: <NAME>
Creates an icosphere by repeated subdivision of an icosahedron.
I have naively built it from:
1 - defining vertex locations
2 - finding their neighbouring vertices from a nearest-neighbour search
3 - interpolating half way between vertices and their neighbours to identify the new vertices
4 - repeating
It would be more efficient to define the faces (triangles), and subdivide them
Alternatives?
https://en.wikipedia.org/wiki/Goldberg%E2%80%93Coxeter_construction
https://en.wikipedia.org/wiki/List_of_geodesic_polyhedra_and_Goldberg_polyhedra
http://donhavey.com/blog/tutorials/tutorial-3-the-icosahedron-sphere/
https://github.com/mbrubake/cryoem-cvpr2015/blob/master/quadrature/icosphere.py
https://github.com/brsr/antitile
http://docs.sympy.org/latest/modules/combinatorics/polyhedron.html
https://www.mathworks.com/matlabcentral/fileexchange/50105-icosphere
"""
import numpy as np
import pandas as pd
from mpl_toolkits import mplot3d
from scipy.spatial import cKDTree as KDTree
def sph2cart(R, t, p):
# R,t,p are Radius, theta (colatitude), phi (longitude)
# 0<t<180, 0<p<360
# Calculate the sines and cosines
rad = np.pi/180
s_p = np.sin(p*rad)
s_t = np.sin(t*rad)
c_p = np.cos(p*rad)
c_t = np.cos(t*rad)
# Calculate the x,y,z over the whole grid
X = R*c_p*s_t
Y = R*s_p*s_t
Z = R*c_t
return X, Y, Z
def cart2sph(X, Y, Z):
"""Returns r, t, p with t,p in degrees
"""
rad = np.pi/180
theta = 90 - np.arctan2(Z, np.sqrt(X**2 + Y**2))/rad
phi = np.mod(np.arctan2(Y, X)/rad, 360)
R = np.sqrt(X**2 + Y**2 + Z**2)
return R, theta, phi
def get_nearest_neighbours(p, N, i):
"""Return the nearest N neighbours to a given point, i
Args:
p (DataFrame): vertices dataframe
N (int): integer for number of nearest neighbours to return
i (int): loc within dataframe p
Returns:
a tuple of locs of the nearest neighbours
"""
# p_new will be the returned dataframe
p_new = p.copy()
# calculate distances to other points
vecs = p_new[["x", "y", "z"]] - p[["x", "y", "z"]].loc[i]
dists = vecs.x**2 + vecs.y**2 + vecs.z**2
# merge distances into the p_new
dists = dists.to_frame(name='dist2')
p_new = p_new.join(dists)
p_new.sort_values(by='dist2', inplace=True)
return p_new.iloc[1:N+1]
def matchxyz(xyz0, xyz1, xyz0arr, xyz1arr):
"""Returns True if vector xyz0->xyz1 occurs in arrays of vectors xyz0arr->xyz1arr
"""
for xyz0_, xyz1_ in zip(xyz0arr, xyz1arr):
if np.array_equal(xyz0, xyz0_) and np.array_equal(xyz1, xyz1_):
return True
return False
def get_edgevecs(vertices, fudge=False):
"""Given a set of vertices, find the neighbouring 5 or 6 vertices to each,
return the set of vectors between vertices (which define the edges)
"""
vertices = vertices.copy()
try:
# Remove the previous neighbours as they will be recalculated
vertices = vertices.drop(['neighbours'], axis=1)
except:
pass
kdt = KDTree(list(zip(vertices.x.values,
vertices.y.values,
vertices.z.values)))
# Get 7 nearest neighbours for every vertex (includes itself, i.e. dist 0)
dists, indices = kdt.query(list(zip(vertices.x.values,
vertices.y.values,
vertices.z.values)), k = 7)
# Add the neighbour vertices to the vertex dataframe
# 5 for the original icosahedron vertices
# 6 for the others
locs_origicos = vertices[vertices.iteration == 0].index.values
locs_others = vertices[vertices.iteration != 0].index.values
neighbs5 = pd.DataFrame({'neighbours':indices[:,1:6].tolist()}).loc[locs_origicos]
neighbs6 = pd.DataFrame({'neighbours':indices[:,1:7].tolist()}).loc[locs_others]
neighbs = pd.concat([neighbs5,neighbs6])
vertices = vertices.join(neighbs)
# # New dataframe with the previous iteration's vertices as centres of faces
# faces = vertices[vertices.iteration < vertices.iteration.max()]
# faces['corners'] = np.empty((faces.shape[0]),dtype=list)
# faces['corners'][:] = []
#faces['corners'] =
# Set up all the edge vectors from each vertex's neighbour sets
# E = 3V-6 number of edges, E, from number of vertices, V
if not fudge:
edgevecs = np.zeros((3*vertices.shape[0]-6, 3, 2))
else:
edgevecs = np.zeros((9*vertices.shape[0], 3, 2))
k = 0 # loop counter through edgevecs
for i in range(vertices.shape[0]):
# i runs from 0 to V
# Coordinates of point i:
x0,y0,z0 = vertices.loc[i].x, vertices.loc[i].y, vertices.loc[i].z
for j in vertices.loc[i].neighbours:
# Coordinates of each neighbour:
x1,y1,z1 = vertices.loc[j].x, vertices.loc[j].y, vertices.loc[j].z
# # Add face corners if we are on a face centre
# if i in faces.index.values:
# faces['corners'].loc[i].append([x1,y1,z1])
# Check if p1->p0 already exists in a previous p0->p1
# https://stackoverflow.com/a/33218744
if not (edgevecs == np.array([[x1,x0],[y1,y0],[z1,z0]])).all((1,2)).any():
# Store the vectors
edgevecs[k] = np.array([[x0,x1],[y0,y1],[z0,z1]])
k+=1
x0 = edgevecs[:,0,0]
x1 = edgevecs[:,0,1]
y0 = edgevecs[:,1,0]
y1 = edgevecs[:,1,1]
z0 = edgevecs[:,2,0]
z1 = edgevecs[:,2,1]
edgevecs = pd.DataFrame({'x0':x0,'x1':x1,'y0':y0,'y1':y1,'z0':z0,'z1':z1})
if fudge:
edgevecs = edgevecs.dropna().reset_index().drop(columns="index")
return edgevecs, vertices
def slerp(p0, p1):
"""Spherical linear interpolation to halfway between between p0 and p1
https://en.wikipedia.org/wiki/Slerp
"""
omega = np.arccos(np.dot(p0/np.linalg.norm(p0), p1/np.linalg.norm(p1)))
# print(np.dot(p0,p1))
slerphalfway = (np.sin(omega/2)/np.sin(omega))*(p0+p1)
return slerphalfway
def vertices2dataframe(vertices):
x = vertices[:, 0]
y = vertices[:, 1]
z = vertices[:, 2]
return pd.DataFrame({'x': x, 'y': y, 'z': z})
class Polyhedron(object):
def __init__(self):
self.vertices = pd.DataFrame({'x': [], 'y': [], 'z': []})
self.edgevecs = pd.DataFrame({'x0': [], 'x1': [], 'y0': [],
'y1': [], 'z0': [], 'z1': []})
def _set_vertices(self, verts):
self.vertices.x = verts[:, 0]
self.vertices.y = verts[:, 1]
self.vertices.z = verts[:, 2]
def _get_edgevecs(self, fudge=False):
self.edgevecs, self.vertices = get_edgevecs(self.vertices, fudge=fudge)
def rotate(self, delta_phi):
"""Rotate by delta_phi degrees.
"""
r, t, p = cart2sph(*[self.vertices[i] for i in "xyz"])
p = (p + delta_phi) % 360
x, y, z = sph2cart(r, t, p)
for i, var in zip("xyz", (x, y, z)):
self.vertices[i] = var
self._get_edgevecs()
return self
def get_faces(self):
"""Construct the triagonal faces.
There are duplicate faces in what gets returned
"""
faces = []
# (p, q, r) are indexes within self.vertices
for p in self.vertices.index:
# define all the faces neighbouring point p
# Loop through the points, q, neighbouring p, and identify
# those neighbours, r, of q, which themselves also neighbour p
for q in self.vertices.loc[p].neighbours:
# build "face", an array containing points (p, q, r)
# to define p->q, q->r, r->p
# [[px, py, pz]
# [qx, qy, qz]
# [rx, ry, rz]]
face = np.empty((3, 3))
face[:] = np.nan
if q not in self.vertices.index:
continue
face[0] = self.vertices.loc[p][["x", "y", "z"]].values
face[1] = self.vertices.loc[q][["x", "y", "z"]].values
for r in self.vertices.loc[q].neighbours:
if r not in self.vertices.index:
continue
if r in self.vertices.loc[p].neighbours:
face[2] = self.vertices.loc[r][["x", "y", "z"]].values
# break
faces.append(face)
return faces
def get_dualfaces(self, dual=False):
"""CURRENTLY BROKEN
Construct the hexagonal (plus 12 pentagonal) faces from the next subdivision.
Get the sets of vertices that define the corners of the faces
The faces will be centred on the vertices of the current Polyhedron
"""
pass
def _construct_centroid_polygons(self):
"""Constructs pentagons/hexagons around the grid vertices.
It was meant to be get_dualfaces() but doesn't actually do that...
"""
newpoly = self.subdivide()
verts = newpoly.vertices
facecentres = verts[verts.iteration < verts.iteration.max()]
# faces = [[] for i in range(facecentres.shape[0])]
faces = []
for i in range(facecentres.shape[0]):
locs_neighbs = facecentres.iloc[i].neighbours
neighbs = verts.loc[locs_neighbs]
faces.append(neighbs[['x', 'y', 'z']].values)
# Reorder the vertices in each face so that they can go into a patch
# i.e. that they are ordered from one neighbour to the next
newfaces = []
for f in faces:
fnew = np.zeros_like(f)
fnew[0] = f[0]
ftemp = np.delete(f, 0, axis=0)
for i in range(len(ftemp)):
j = ((ftemp-fnew[i])**2).sum(axis=1).argmin()
fnew[i+1] = ftemp[j]
ftemp = np.delete(ftemp, j, axis=0)
newfaces.append(fnew)
return newfaces
def subdivide(self):
"""Take the edge vectors and subdivide them
to get the new set of vertices, and merge with the first set.
Return a new polyhedron with set vertices and edge vectors"""
x0 = self.edgevecs.x0.values
y0 = self.edgevecs.y0.values
z0 = self.edgevecs.z0.values
x1 = self.edgevecs.x1.values
y1 = self.edgevecs.y1.values
z1 = self.edgevecs.z1.values
p0arr = np.vstack([x0, y0, z0]).T
p1arr = np.vstack([x1, y1, z1]).T
newvertices = []
for p0, p1 in zip(p0arr, p1arr):
newvertices.append(slerp(p0, p1))
newvertices = vertices2dataframe(np.array(newvertices))
# Set the iteration number on the set of new vertices
last_iteration = self.vertices["iteration"].max()
newvertices["iteration"] = last_iteration + 1
# newvertices['origicos'] = False
newvertices =
|
pd.concat((newvertices, self.vertices), ignore_index=True)
|
pandas.concat
|
import pandas as pd
def create_template(part_name, id_code):
"""
Uses the template to create a personalized email.
:param part_name: participant name
:param id_code: id code (no prefix)
:return: tuple of (id_code, email template text)
key_dict = {
'participant_name': str(part_name),
'lab_name': 'Personality Processes and Outcomes Laboratory',
'id_code': 'AGCW' + str(id_code),
'my_name': '<NAME>'
}
template = """Dear {participant_name},
My name is {my_name}, from the {lab_name} at the University of Pittsburgh. You participated in our study Mental Health and Coping During COVID-19. When you participated in that study, you agreed to be contacted for participation in follow up surveys for this study. We are reaching out now to ask you to complete a follow up survey for this study.
Coronavirus/COVID-19 is a stressful experience that is different from anything that most of us have experienced in our lifetime. As researchers, we want to understand the effects of the current crisis on mental health. To answer that question, we need to reach out to people (like you) who participated in research related to mental health before COVID-19, so that we can see how mental health has changed from then to now.
One of the most important parts of answering this question is recruiting a very large and diverse sample of participants who already answered questions about their mental health in the past (before COVID-19). Because of this, we have joined forces with research labs across the United States and abroad to recruit a combined sample of as many as 10,000 people who previously completed studies in our labs and are willing to answer follow-up questions about their mental health now. This will be the first and largest mental health dataset of its kind.
As a thank you for your time, you will have the option to be compensated with a $10 Amazon e-gift card, a $10 Apple i-Tunes electronic gift card, or a $10 Google Play electronic gift card. (Due to terms of service restrictions, Apple i-Tunes and Google Play compensation options are only available to individuals who reside in the United States.) Compensation will be emailed to you from the lab with which you previously had contact.
More detail about the survey is provided in the online consent (link below). You can also feel free to get in touch by emailing the Study Coordinator at <EMAIL> if you have any questions or would like to speak to a researcher directly.
Just like the last survey, to keep your responses strictly confidential, we are sending all participants a personalized study link and ID code. Authorized individuals from the Personality Processes and Outcomes Lab are the only people who will have access to your name, contact information, and any other personal information. We keep this information in a secure and password protected electronic file that is not shared with anyone else.
Your ID code: {id_code}
Please keep this ID code nearby, because we will ask you to enter it a few times during the survey.
Your personalized study link:
https://pitt.co1.qualtrics.com/jfe/form/SV_29LSpFZNx3RDRvD
This link will only work for you, so please do not share it.
If you no longer want to be contacted for this study, please contact the Study Coordinator at <EMAIL>.
Sincerely,
{my_name}, B.Phil.
Department of Psychology
University of Pittsburgh""".format(**key_dict)
return id_code, template
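# Hedged usage sketch (hypothetical participant data, not taken from any study records):
def _example_template():
    code, body = create_template('Jane Doe', '0123')
    # 'code' is the unprefixed id passed in ('0123'); the email body greets
    # 'Jane Doe' and embeds the prefixed ID 'AGCW0123'.
    return code, body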
def full_path(df):
"""
:param df: AAPECS recontacts subset
:return: df of emails and templates
"""
results =
|
pd.DataFrame(columns=['email', 'template'])
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import sys
import os
import matplotlib.pyplot as plt
from sortedcontainers import SortedDict
import warnings
import math
from copy import deepcopy
from tqdm import tqdm
import matplotlib as mpl
from collections import Counter
warnings.filterwarnings('ignore')
pd.options.display.max_rows = 10000
import matplotlib.dates as mdates
from config_default import *
from helpers import *
import platform
from helpers import myround
def get_deal(raw):
active_exec = raw[raw["NO"] == "mm"]
passive_exec = raw[raw["NO"] != "mm"]
bid_deal = pd.concat([active_exec[active_exec["BUYSELL"] == "B"], \
passive_exec[passive_exec["BUYSELL"] == "S"]])
ask_deal = pd.concat([active_exec[active_exec["BUYSELL"] == "S"], \
passive_exec[passive_exec["BUYSELL"] == "B"]])
return bid_deal["TIME"] // 100000, ask_deal["TIME"] // 100000, bid_deal["PRICE"], ask_deal["PRICE"]
def p_l_calc(raw, fair_price, share):
# PL = Bought*(FP-AvgBid) + Sold*(AvgAsk-FP)
active_exec = raw[raw["NO"] == "mm"]
passive_exec = raw[raw["NO"] != "mm"]
bought_vol = sum(active_exec[active_exec["BUYSELL"] == "B"]["VOLUME"]) + \
sum(passive_exec[passive_exec["BUYSELL"] == "S"]["VOLUME"])
sold_vol = sum(active_exec[active_exec["BUYSELL"] == "S"]["VOLUME"]) + \
sum(passive_exec[passive_exec["BUYSELL"] == "B"]["VOLUME"])
if bought_vol >= 1:
avg_bought = (sum(active_exec[active_exec["BUYSELL"] == "B"]["TURNOVER"]) + \
sum(passive_exec[passive_exec["BUYSELL"] == "S"]["TURNOVER"])) / bought_vol
else:
avg_bought = 0
if sold_vol >= 1:
avg_sold = (sum(active_exec[active_exec["BUYSELL"] == "S"]["TURNOVER"]) + \
sum(passive_exec[passive_exec["BUYSELL"] == "B"]["TURNOVER"])) / sold_vol
else:
avg_sold = 0
return round(bought_vol * (fair_price - avg_bought) + sold_vol * (avg_sold - fair_price), 1), \
bought_vol, sold_vol, myround(avg_bought, base=price_step[share]), myround(avg_sold, base=price_step[share])
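# Hedged worked example of the P&L formula documented above (illustrative numbers only,
# not taken from real trade data): buying 10 lots at an average price of 99 and selling
# 8 lots at an average price of 101 against a fair price of 100 gives
#   PL = 10 * (100 - 99) + 8 * (101 - 100) = 18
def _p_l_worked_example():
    bought_vol, avg_bought = 10, 99.0
    sold_vol, avg_sold = 8, 101.0
    fair_price = 100.0
    return bought_vol * (fair_price - avg_bought) + sold_vol * (avg_sold - fair_price)  # -> 18.0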
def algo_stats(raw, fair_price, first_trade, algo, date, trade_log, dom, share="LKOH", bid_ask=False, show_deals=False):
print("Stats for date -", date)
print("Algo Params: ")
print(algo.algo_params)
print()
print("Num of trades - ", raw.shape[0])
print("Algo turnover - ", round(sum(raw["PRICE"] * raw["VOLUME"]), 1))
p_l, bought_vol, sold_vol, avg_bought, avg_sold = p_l_calc(raw, fair_price, share)
print("P&L Gross - ", p_l)
print("P&L Net(with commision) -", round(p_l - sum(raw["PRICE"] * raw["VOLUME"]) * 0.00008, 1))
print("Num of bought - ", bought_vol)
print("Weighted average bought price - ", avg_bought)
print("Num of sold - ", sold_vol)
print("Weighted average sold price - ", avg_sold)
print("Open Price - ", first_trade)
print("Close price - ", dom.trade_log[-1][0])
print("Initial cash - ", algo.first_cash)
print("Total Return - ", round(100 * p_l / algo.first_cash, 2), "%", sep="")
mpl.style.use("seaborn")
fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(20, 12))
axs[0, 0].set_title(
algo.name + ' - Algo Equity in % at ' + str(date)[:4] + "/" + str(date)[4:6] + "/" + str(date)[6:], size=16)
if algo.name[:4] != "Hard":
print("End Cash - ", round(algo.cash))
print("End Equity - ", round(algo.cash + (bought_vol - sold_vol) * fair_price))
print("Max day Drawdown - ", round((min(algo.equity) / algo.first_cash - 1) * 100, 2), "%", sep='')
#print(pd.to_datetime(pd.Series(algo.time).astype(str).str[:4], format="%H%M"))
axs[0, 0].plot(pd.to_datetime(
|
pd.Series(algo.time)
|
pandas.Series
|
import numpy as np
import pandas as pd
import random
from abc import ABC, abstractmethod
from bignmf.models.nmf import NmfBase
# Abstract Class - Do not instantiate this class
class JnmfBase(NmfBase):
"""Base Joint NMF class from which all Joint NMF algorithms inherit from"""
def __init__(self, x: dict, k: int):
"""Initalizes class variables.
Args:
x (dict): Input matrices on which we have to do NMF. Dictionary containing the input matrices as DataFrames.
The common dimension between the matrices should be the row.
k (int): Rank for factorization
"""
super().__init__(k)
if isinstance(list(x.values())[0], pd.DataFrame):
self.column_index={}
self.x={}
self.row_index=list(list(x.values())[0].index)
for key in x:
self.column_index[key] = list(x[key].columns)
self.x[key] = x[key].values
if any(self.row_index != x[key].index):
raise ValueError("Row indices are not uniform")
else:
raise ValueError("Invalid DataType")
self.error = float('inf')
self.eps = np.finfo(list(self.x.values())[0].dtype).eps
def initialize_variables(self):
"""Initializes the consensus variables. It is run before the iterations of the various trials"""
number_of_samples = list(self.x.values())[0].shape[0]
self.consensus_matrix_w = np.zeros((number_of_samples, number_of_samples))
self.consensus_matrix_h = {}
for key in self.x:
number_of_features = self.x[key].shape[1]
self.consensus_matrix_h[key] = np.zeros((number_of_features, number_of_features))
def run(self, trials, iterations, verbose=0):
"""Runs the NMF algorithm for the specified iterations over the specified trials
Args:
trials (int): Number of different trials.
iterations (int): Number of iterations.
verbose (bool): To increase verbosity. Defaults to 0.
"""
self.initialize_variables()
for i in range(0, trials):
self.initialize_wh()
self.wrapper_update(iterations, verbose if i==0 else 0)
self.consensus_matrix_w += self.connectivity_matrix(self.w, axis=1)
for key in self.h:
self.consensus_matrix_h[key] += self.connectivity_matrix(self.h[key], axis=0)
if verbose == 1:
print("\tSuper iteration: %i completed with Error: %f " % (i, self.error))
# Normalization
self.consensus_matrix_w = self.reorder_consensus_matrix(self.consensus_matrix_w / trials)
for key in self.h:
self.consensus_matrix_h[key] /= trials
# Converting values to DataFrames
class_list = ["class-%i" % a for a in list(range(self.k))]
self.w =
|
pd.DataFrame(self.w, index=self.row_index, columns=class_list)
|
pandas.DataFrame
|
"""
Make scatter plot of mass versus period. Optionally, color by discovery method.
Optionally, overplot archetype names.
"""
# Standard imports
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd, numpy as np
import os
from astropy import units as u
import plotly.express as px
import plotly.graph_objects as go
# If you want to run the code, you'll need to do:
# `git clone https://github.com/lgbouma/cdips; cd cdips; python setup.py install`
# `git clone https://github.com/lgbouma/aesthetic; cd aesthetic; python setup.py install`
from cdips.utils import today_YYYYMMDD
from cdips.utils.catalogs import get_nasa_exoplanet_archive_pscomppars
from aesthetic.plot import savefig, format_ax, set_style
# This "VER" string caches the NASA exoplanet archive `ps` table at a
# particular date, in the YYYYMMDD format.
VER = '20210915' # could be today_YYYYMMDD()
def plot_rp_vs_period_scatter(
showlegend=1, colorbydisc=1, showarchetypes=1, showss=1, colorbyage=0,
verbose=0, add_kep1627=0, add_allkep=0, add_plnames=0
):
"""
Plot planetary parameters versus ages. By default, it writes the plots to
'../results/rp_vs_period_scatter/' from wherever you put this script.
(See `outdir` parameter below).
Options (all boolean):
showlegend: whether to overplot a legend.
colorbydisc: whether to color by the discovery method.
showarchetypes: whether to show "Hot Jupiter", "Cold Jupiter" etc
labels for talks.
showss: whether to show the solar system planets.
colorbyage: whether to color the points by their ages.
verbose: if True, prints out more information about the youngest
planets from the NASA exoplanet archive.
add_kep1627: adds a special star for Kepler 1627.
add_allkep: adds special symbols for the recent Kepler systems in open
clusters: 'Kepler-52', 'Kepler-968', 'Kepler-1627', 'KOI-7368'
add_plnames: if True, shows tiny texts for the age-dated planets.
"""
set_style()
#
# Columns are described at
# https://exoplanetarchive.ipac.caltech.edu/docs/API_exoplanet_columns.html
#
ea_df = get_nasa_exoplanet_archive_pscomppars(VER)
#
# In most cases, we will select systems with finite ages (has a value, and
# +/- error bar). We may also want to select on "is transiting", "has
# mass", etc.
#
has_rp_value = ~pd.isnull(ea_df['pl_rade'])
has_rp_errs = (~
|
pd.isnull(ea_df['pl_radeerr1'])
|
pandas.isnull
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats as st
import statsmodels.formula.api as smf
import statsmodels.api as sm
import pingouin as pg
get_ipython().run_line_magic('matplotlib', 'inline')
# In[3]:
df = pd.read_csv('../Data_Files/goggles.csv')
df.head()
# In[6]:
df['genderX'] = df['gender'].replace({'Male':1, 'Female':2})
df['alcoholX'] = df['alcohol'].replace({'None':1, '2 Pints':2,'4 Pints':3})
# In[10]:
df.groupby(['gender', 'alcohol']).describe()['attractiveness']
# In[12]:
from statsmodels.graphics.factorplots import interaction_plot
fig = interaction_plot(df.alcoholX, df.gender, df.attractiveness,
colors=['red','blue'], markers=['D','^'], ms=10)
# In[26]:
_ = sns.lineplot(x='alcohol', y='attractiveness', hue='gender', err_style="bars",sort=False,data=df,style='gender',markers=['D','^'])
# In[28]:
plt.figure(figsize=(8,6))
_ = sns.boxplot(x='alcohol', y='attractiveness', hue='gender', data=df)
# In[41]:
# main effect of alcohol
_ = sns.boxplot(x='alcoholX', y='attractiveness', data=df)
# In[30]:
# main effect of gender
_ = sns.boxplot(x='genderX', y='attractiveness', data=df)
# ## levene test on interaction of variables
# In[32]:
unique_list = [i for i in range(1,7)]
unique_list
# In[33]:
df['interaction'] = 0
for i in range(6):
for j in range(8):
df.at[8*i+j,'interaction'] = unique_list[i]
# In[34]:
df.head()
# In[35]:
# Levene test on interaction variables
pg.homoscedasticity(df, dv='attractiveness',group='interaction')
# #### # A non-significant result like the one we have here, W(5, 42) = 1.425, p = .235, is indicative of the assumption being met.
# In[36]:
m01 = smf.ols('attractiveness~C(genderX)*C(alcoholX)', data=df).fit()
m01.summary()
# #### # https://www.statsmodels.org/devel/examples/notebooks/generated/contrasts.html#examples-notebooks-generated-contrasts--page-root
# ### Planned Contrast
# In[37]:
con1 = [-2,1,1]
con2 = [0,-1,1]
contrast = np.vstack((con1, con2))
contrast_alc = contrast.T
contrast_alc
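# Note on the alcohol contrast weights above (added explanation): with levels ordered
# (None, 2 Pints, 4 Pints), con1 = [-2, 1, 1] compares the no-alcohol condition against
# the average of the two alcohol conditions, and con2 = [0, -1, 1] compares 2 pints
# against 4 pints; the weights in each contrast sum to zero.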
# In[38]:
contrast_gen = np.array([[1,-1]])
contrast_gen =contrast_gen.reshape(2,1)
contrast_gen
# In[50]:
contrast_model = smf.ols('attractiveness~C(genderX,contrast_gen)*C(alcoholX, contrast_alc)', data=df).fit()
contrast_model.summary()
# ## Simple Effect Analysis
# In[42]:
from IPython.display import Image
Image('/home/atrides/Downloads/simpleEffectAnalysis.png')
# In[46]:
Image('/home/atrides/Downloads/contrast_table.png')
# In[43]:
contrast1 = [-2, 1, 1, -2, 1, 1]
contrast2 = [0, -1, 1, 0, -1, 1]
contrast3 = [ 1, 0, 0, -1, 0, 0]
contrast4 = [ 0, 1, 0, 0, -1, 0]
contrast5 = [ 0, 0, -1, 0, 0, 1]
# In[47]:
final_contrast = np.vstack((contrast1, contrast2, contrast3, contrast4, contrast5))
final_contrast = final_contrast.T
final_contrast # according to levels, i.e. 0F, 2F, 4F, 0M, 2M, 4M
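# Note on the simple-effect contrasts above (added explanation): over the six cells
# ordered 0F, 2F, 4F, 0M, 2M, 4M, contrast1 and contrast2 reproduce the alcohol
# contrasts within each gender, while contrast3, contrast4 and contrast5 compare the
# genders at 0, 2 and 4 pints respectively (each row of weights sums to zero).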
# In[51]:
effectAnalysis_model = smf.ols('attractiveness~C(interaction,final_contrast)', data=df).fit()
effectAnalysis_model.summary()
# #### # The resulting output contains the parameter estimates for the five contrasts. Looking at the significance values for each simple effect, it appears that there was no significant difference between men and women when they drank no alcohol, p = .177, or when they drank 2 pints, p = .34, but there was a very significant difference, p < .001, when 4 pints were consumed (which, judging from the interaction graph, reflects the fact that the mean for men is considerably lower than for women)
# ## Post-hoc Tests
# In[52]:
from statsmodels.sandbox.stats.multicomp import MultiComparison
# In[55]:
multicomp = MultiComparison(df['attractiveness'], df['interaction']) # testfunc
# In[56]:
# Bonferroni
com = multicomp.allpairtest(st.ttest_ind, method='bonf')
print(com[0])
# In[58]:
prediction =
|
pd.DataFrame(m01.fittedvalues)
|
pandas.DataFrame
|
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
from distutils.version import LooseVersion
import unittest
import numpy as np
import pandas as pd
from databricks import koalas as ks
from databricks.koalas.exceptions import SparkPandasIndexingError
from databricks.koalas.testing.utils import ComparisonTestBase, ReusedSQLTestCase, compare_both
class BasicIndexingTest(ComparisonTestBase):
@property
def pdf(self):
return pd.DataFrame(
{"month": [1, 4, 7, 10], "year": [2012, 2014, 2013, 2014], "sale": [55, 40, 84, 31]}
)
@compare_both(almost=False)
def test_indexing(self, df):
df1 = df.set_index("month")
yield df1
yield df.set_index("month", drop=False)
yield df.set_index("month", append=True)
yield df.set_index(["year", "month"])
yield df.set_index(["year", "month"], drop=False)
yield df.set_index(["year", "month"], append=True)
yield df1.set_index("year", drop=False, append=True)
df2 = df1.copy()
df2.set_index("year", append=True, inplace=True)
yield df2
self.assertRaisesRegex(KeyError, "unknown", lambda: df.set_index("unknown"))
self.assertRaisesRegex(KeyError, "unknown", lambda: df.set_index(["month", "unknown"]))
for d in [df, df1, df2]:
yield d.reset_index()
yield d.reset_index(drop=True)
yield df1.reset_index(level=0)
yield df2.reset_index(level=1)
yield df2.reset_index(level=[1, 0])
yield df1.reset_index(level="month")
yield df2.reset_index(level="year")
yield df2.reset_index(level=["month", "year"])
yield df2.reset_index(level="month", drop=True)
yield df2.reset_index(level=["month", "year"], drop=True)
self.assertRaisesRegex(
IndexError,
"Too many levels: Index has only 1 level, not 3",
lambda: df1.reset_index(level=2),
)
self.assertRaisesRegex(
IndexError,
"Too many levels: Index has only 1 level, not 4",
lambda: df1.reset_index(level=[3, 2]),
)
self.assertRaisesRegex(KeyError, "unknown.*month", lambda: df1.reset_index(level="unknown"))
self.assertRaisesRegex(
KeyError, "Level unknown not found", lambda: df2.reset_index(level="unknown")
)
df3 = df2.copy()
df3.reset_index(inplace=True)
yield df3
yield df1.sale.reset_index()
yield df1.sale.reset_index(level=0)
yield df2.sale.reset_index(level=[1, 0])
yield df1.sale.reset_index(drop=True)
yield df1.sale.reset_index(name="s")
yield df1.sale.reset_index(name="s", drop=True)
s = df1.sale
self.assertRaisesRegex(
TypeError,
"Cannot reset_index inplace on a Series to create a DataFrame",
lambda: s.reset_index(inplace=True),
)
s.reset_index(drop=True, inplace=True)
yield s
yield df1
# multi-index columns
df4 = df.copy()
df4.columns = pd.MultiIndex.from_tuples(
[("cal", "month"), ("cal", "year"), ("num", "sale")]
)
df5 = df4.set_index(("cal", "month"))
yield df5
yield df4.set_index([("cal", "month"), ("num", "sale")])
self.assertRaises(KeyError, lambda: df5.reset_index(level=("cal", "month")))
yield df5.reset_index(level=[("cal", "month")])
# non-string names
df6 = df.copy()
df6.columns = [10.0, 20.0, 30.0]
df7 = df6.set_index(10.0)
yield df7
yield df6.set_index([10.0, 30.0])
yield df7.reset_index(level=10.0)
yield df7.reset_index(level=[10.0])
df8 = df.copy()
df8.columns = pd.MultiIndex.from_tuples([(10, "month"), (10, "year"), (20, "sale")])
df9 = df8.set_index((10, "month"))
yield df9
yield df8.set_index([(10, "month"), (20, "sale")])
yield df9.reset_index(level=[(10, "month")])
def test_from_pandas_with_explicit_index(self):
pdf = self.pdf
df1 = ks.from_pandas(pdf.set_index("month"))
self.assertPandasEqual(df1.to_pandas(), pdf.set_index("month"))
df2 = ks.from_pandas(pdf.set_index(["year", "month"]))
self.assertPandasEqual(df2.to_pandas(), pdf.set_index(["year", "month"]))
def test_limitations(self):
df = self.kdf.set_index("month")
self.assertRaisesRegex(
ValueError,
"Level should be all int or all string.",
lambda: df.reset_index([1, "month"]),
)
class IndexingTest(ReusedSQLTestCase):
@property
def pdf(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
@property
def kdf(self):
return ks.from_pandas(self.pdf)
@property
def pdf2(self):
return pd.DataFrame(
{0: [1, 2, 3, 4, 5, 6, 7, 8, 9], 1: [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
@property
def kdf2(self):
return ks.from_pandas(self.pdf2)
def test_at(self):
pdf = self.pdf
kdf = self.kdf
# Create the equivalent of pdf.loc[3] as a Koalas Series
# This is necessary because .loc[n] does not currently work with Koalas DataFrames (#383)
test_series = ks.Series([3, 6], index=["a", "b"], name="3")
# Assert invalid signatures raise TypeError
with self.assertRaises(TypeError, msg="Use DataFrame.at like .at[row_index, column_name]"):
kdf.at[3]
with self.assertRaises(TypeError, msg="Use DataFrame.at like .at[row_index, column_name]"):
kdf.at["ab"] # 'ab' is of length 2 but str type instead of tuple
with self.assertRaises(TypeError, msg="Use Series.at like .at[column_name]"):
test_series.at[3, "b"]
# Assert .at for DataFrames
self.assertEqual(kdf.at[3, "b"], 6)
self.assertEqual(kdf.at[3, "b"], pdf.at[3, "b"])
self.assert_eq(kdf.at[9, "b"], np.array([0, 0, 0]))
self.assert_eq(kdf.at[9, "b"], pdf.at[9, "b"])
# Assert .at for Series
self.assertEqual(test_series.at["b"], 6)
self.assertEqual(test_series.at["b"], pdf.loc[3].at["b"])
# Assert multi-character indices
self.assertEqual(
ks.Series([0, 1], index=["ab", "cd"]).at["ab"],
pd.Series([0, 1], index=["ab", "cd"]).at["ab"],
)
# Assert invalid column or index names result in a KeyError like with pandas
with self.assertRaises(KeyError, msg="x"):
kdf.at[3, "x"]
with self.assertRaises(KeyError, msg=99):
kdf.at[99, "b"]
with self.assertRaises(ValueError):
kdf.at[(3, 6), "b"]
with self.assertRaises(KeyError):
kdf.at[3, ("x", "b")]
# Assert setting values fails
with self.assertRaises(TypeError):
kdf.at[3, "b"] = 10
# non-string column names
pdf = self.pdf2
kdf = self.kdf2
# Assert .at for DataFrames
self.assertEqual(kdf.at[3, 1], 6)
self.assertEqual(kdf.at[3, 1], pdf.at[3, 1])
self.assert_eq(kdf.at[9, 1], np.array([0, 0, 0]))
self.assert_eq(kdf.at[9, 1], pdf.at[9, 1])
def test_at_multiindex(self):
pdf = self.pdf.set_index("b", append=True)
kdf = self.kdf.set_index("b", append=True)
# TODO: seems like a pandas' bug in pandas>=1.1.0
if LooseVersion(pd.__version__) < LooseVersion("1.1.0"):
self.assert_eq(kdf.at[(3, 6), "a"], pdf.at[(3, 6), "a"])
self.assert_eq(kdf.at[(3,), "a"], pdf.at[(3,), "a"])
self.assert_eq(list(kdf.at[(9, 0), "a"]), list(pdf.at[(9, 0), "a"]))
self.assert_eq(list(kdf.at[(9,), "a"]), list(pdf.at[(9,), "a"]))
else:
self.assert_eq(kdf.at[(3, 6), "a"], 3)
self.assert_eq(kdf.at[(3,), "a"], np.array([3]))
self.assert_eq(list(kdf.at[(9, 0), "a"]), [7, 8, 9])
self.assert_eq(list(kdf.at[(9,), "a"]), [7, 8, 9])
with self.assertRaises(ValueError):
kdf.at[3, "a"]
def test_at_multiindex_columns(self):
arrays = [np.array(["bar", "bar", "baz", "baz"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "B", "C"], columns=arrays)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.at["B", ("bar", "one")], pdf.at["B", ("bar", "one")])
with self.assertRaises(KeyError):
kdf.at["B", "bar"]
# non-string column names
arrays = [np.array([0, 0, 1, 1]), np.array([1, 2, 1, 2])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "B", "C"], columns=arrays)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.at["B", (0, 1)], pdf.at["B", (0, 1)])
def test_iat(self):
pdf = self.pdf
kdf = self.kdf
# Create the equivalent of pdf.loc[3] as a Koalas Series
# This is necessary because .loc[n] does not currently work with Koalas DataFrames (#383)
test_series = ks.Series([3, 6], index=["a", "b"], name="3")
# Assert invalid signatures raise TypeError
with self.assertRaises(
TypeError,
msg="Use DataFrame.at like .iat[row_interget_position, column_integer_position]",
):
kdf.iat[3]
with self.assertRaises(
ValueError, msg="iAt based indexing on multi-index can only have tuple values"
):
kdf.iat[3, "b"] # 'ab' is of length 2 but str type instead of tuple
with self.assertRaises(TypeError, msg="Use Series.iat like .iat[row_integer_position]"):
test_series.iat[3, "b"]
# Assert .iat for DataFrames
self.assertEqual(kdf.iat[7, 0], 8)
self.assertEqual(kdf.iat[7, 0], pdf.iat[7, 0])
# Assert .iat for Series
self.assertEqual(test_series.iat[1], 6)
self.assertEqual(test_series.iat[1], pdf.loc[3].iat[1])
# Assert invalid column or integer position result in a KeyError like with pandas
with self.assertRaises(KeyError, msg=99):
kdf.iat[0, 99]
with self.assertRaises(KeyError, msg=99):
kdf.iat[99, 0]
with self.assertRaises(ValueError):
kdf.iat[(1, 1), 1]
with self.assertRaises(ValueError):
kdf.iat[1, (1, 1)]
# Assert setting values fails
with self.assertRaises(TypeError):
kdf.iat[4, 1] = 10
def test_iat_multiindex(self):
pdf = self.pdf.set_index("b", append=True)
kdf = self.kdf.set_index("b", append=True)
self.assert_eq(kdf.iat[7, 0], pdf.iat[7, 0])
with self.assertRaises(ValueError):
kdf.iat[3, "a"]
def test_iat_multiindex_columns(self):
arrays = [np.array(["bar", "bar", "baz", "baz"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "B", "C"], columns=arrays)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.iat[1, 3], pdf.iat[1, 3])
with self.assertRaises(KeyError):
kdf.iat[0, 99]
with self.assertRaises(KeyError):
kdf.iat[99, 0]
def test_loc(self):
kdf = self.kdf
pdf = self.pdf
self.assert_eq(kdf.loc[5:5], pdf.loc[5:5])
self.assert_eq(kdf.loc[3:8], pdf.loc[3:8])
self.assert_eq(kdf.loc[:8], pdf.loc[:8])
self.assert_eq(kdf.loc[3:], pdf.loc[3:])
self.assert_eq(kdf.loc[[5]], pdf.loc[[5]])
self.assert_eq(kdf.loc[:], pdf.loc[:])
# TODO?: self.assert_eq(kdf.loc[[3, 4, 1, 8]], pdf.loc[[3, 4, 1, 8]])
# TODO?: self.assert_eq(kdf.loc[[3, 4, 1, 9]], pdf.loc[[3, 4, 1, 9]])
# TODO?: self.assert_eq(kdf.loc[np.array([3, 4, 1, 9])], pdf.loc[np.array([3, 4, 1, 9])])
self.assert_eq(kdf.a.loc[5:5], pdf.a.loc[5:5])
self.assert_eq(kdf.a.loc[3:8], pdf.a.loc[3:8])
self.assert_eq(kdf.a.loc[:8], pdf.a.loc[:8])
self.assert_eq(kdf.a.loc[3:], pdf.a.loc[3:])
self.assert_eq(kdf.a.loc[[5]], pdf.a.loc[[5]])
# TODO?: self.assert_eq(kdf.a.loc[[3, 4, 1, 8]], pdf.a.loc[[3, 4, 1, 8]])
# TODO?: self.assert_eq(kdf.a.loc[[3, 4, 1, 9]], pdf.a.loc[[3, 4, 1, 9]])
# TODO?: self.assert_eq(kdf.a.loc[np.array([3, 4, 1, 9])],
# pdf.a.loc[np.array([3, 4, 1, 9])])
self.assert_eq(kdf.a.loc[[]], pdf.a.loc[[]])
self.assert_eq(kdf.a.loc[np.array([])], pdf.a.loc[np.array([])])
self.assert_eq(kdf.loc[1000:], pdf.loc[1000:])
self.assert_eq(kdf.loc[-2000:-1000], pdf.loc[-2000:-1000])
self.assert_eq(kdf.loc[5], pdf.loc[5])
self.assert_eq(kdf.loc[9], pdf.loc[9])
self.assert_eq(kdf.a.loc[5], pdf.a.loc[5])
self.assert_eq(kdf.a.loc[9], pdf.a.loc[9])
self.assertRaises(KeyError, lambda: kdf.loc[10])
self.assertRaises(KeyError, lambda: kdf.a.loc[10])
# monotonically increasing index test
pdf = pd.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7, 8, 9]}, index=[0, 1, 1, 2, 2, 2, 4, 5, 6])
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.loc[:2], pdf.loc[:2])
self.assert_eq(kdf.loc[:3], pdf.loc[:3])
self.assert_eq(kdf.loc[3:], pdf.loc[3:])
self.assert_eq(kdf.loc[4:], pdf.loc[4:])
self.assert_eq(kdf.loc[3:2], pdf.loc[3:2])
self.assert_eq(kdf.loc[-1:2], pdf.loc[-1:2])
self.assert_eq(kdf.loc[3:10], pdf.loc[3:10])
# monotonically decreasing index test
pdf = pd.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7, 8, 9]}, index=[6, 5, 5, 4, 4, 4, 2, 1, 0])
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.loc[:4], pdf.loc[:4])
self.assert_eq(kdf.loc[:3], pdf.loc[:3])
self.assert_eq(kdf.loc[3:], pdf.loc[3:])
self.assert_eq(kdf.loc[2:], pdf.loc[2:])
self.assert_eq(kdf.loc[2:3], pdf.loc[2:3])
self.assert_eq(kdf.loc[2:-1], pdf.loc[2:-1])
self.assert_eq(kdf.loc[10:3], pdf.loc[10:3])
# test when type of key is string and given value is not included in key
pdf = pd.DataFrame({"a": [1, 2, 3]}, index=["a", "b", "d"])
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.loc["a":"z"], pdf.loc["a":"z"])
# KeyError when index is not monotonic increasing or decreasing
# and specified values don't exist in index
kdf = ks.DataFrame([[1, 2], [4, 5], [7, 8]], index=["cobra", "viper", "sidewinder"])
self.assertRaises(KeyError, lambda: kdf.loc["cobra":"koalas"])
self.assertRaises(KeyError, lambda: kdf.loc["koalas":"viper"])
kdf = ks.DataFrame([[1, 2], [4, 5], [7, 8]], index=[10, 30, 20])
self.assertRaises(KeyError, lambda: kdf.loc[0:30])
self.assertRaises(KeyError, lambda: kdf.loc[10:100])
def test_loc_non_informative_index(self):
pdf = pd.DataFrame({"x": [1, 2, 3, 4]}, index=[10, 20, 30, 40])
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.loc[20:30], pdf.loc[20:30])
pdf = pd.DataFrame({"x": [1, 2, 3, 4]}, index=[10, 20, 20, 40])
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.loc[20:20], pdf.loc[20:20])
def test_loc_with_series(self):
kdf = self.kdf
pdf = self.pdf
self.assert_eq(kdf.loc[kdf.a % 2 == 0], pdf.loc[pdf.a % 2 == 0])
self.assert_eq(kdf.loc[kdf.a % 2 == 0, "a"], pdf.loc[pdf.a % 2 == 0, "a"])
self.assert_eq(kdf.loc[kdf.a % 2 == 0, ["a"]], pdf.loc[pdf.a % 2 == 0, ["a"]])
self.assert_eq(kdf.a.loc[kdf.a % 2 == 0], pdf.a.loc[pdf.a % 2 == 0])
self.assert_eq(kdf.loc[kdf.copy().a % 2 == 0], pdf.loc[pdf.copy().a % 2 == 0])
self.assert_eq(kdf.loc[kdf.copy().a % 2 == 0, "a"], pdf.loc[pdf.copy().a % 2 == 0, "a"])
self.assert_eq(kdf.loc[kdf.copy().a % 2 == 0, ["a"]], pdf.loc[pdf.copy().a % 2 == 0, ["a"]])
self.assert_eq(kdf.a.loc[kdf.copy().a % 2 == 0], pdf.a.loc[pdf.copy().a % 2 == 0])
def test_loc_noindex(self):
kdf = self.kdf
kdf = kdf.reset_index()
pdf = self.pdf
pdf = pdf.reset_index()
self.assert_eq(kdf[["a"]], pdf[["a"]])
self.assert_eq(kdf.loc[:], pdf.loc[:])
self.assert_eq(kdf.loc[5:5], pdf.loc[5:5])
def test_loc_multiindex(self):
kdf = self.kdf
kdf = kdf.set_index("b", append=True)
pdf = self.pdf
pdf = pdf.set_index("b", append=True)
self.assert_eq(kdf.loc[:], pdf.loc[:])
self.assert_eq(kdf.loc[5:5], pdf.loc[5:5])
self.assert_eq(kdf.loc[5:9], pdf.loc[5:9])
self.assert_eq(kdf.loc[5], pdf.loc[5])
self.assert_eq(kdf.loc[9], pdf.loc[9])
# TODO: self.assert_eq(kdf.loc[(5, 3)], pdf.loc[(5, 3)])
# TODO: self.assert_eq(kdf.loc[(9, 0)], pdf.loc[(9, 0)])
self.assert_eq(kdf.a.loc[5], pdf.a.loc[5])
self.assert_eq(kdf.a.loc[9], pdf.a.loc[9])
self.assertTrue((kdf.a.loc[(5, 3)] == pdf.a.loc[(5, 3)]).all())
self.assert_eq(kdf.a.loc[(9, 0)], pdf.a.loc[(9, 0)])
# monotonically increasing index test
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5]},
index=pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "c"), ("y", "d"), ("z", "e")]
),
)
kdf = ks.from_pandas(pdf)
for rows_sel in [
slice(None),
slice("y", None),
slice(None, "y"),
slice(("x", "b"), None),
slice(None, ("y", "c")),
slice(("x", "b"), ("y", "c")),
slice("x", ("y", "c")),
slice(("x", "b"), "y"),
]:
with self.subTest("monotonically increasing", rows_sel=rows_sel):
self.assert_eq(kdf.loc[rows_sel], pdf.loc[rows_sel])
self.assert_eq(kdf.a.loc[rows_sel], pdf.a.loc[rows_sel])
# monotonically increasing first index test
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5]},
index=pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "c"), ("y", "a"), ("z", "e")]
),
)
kdf = ks.from_pandas(pdf)
for rows_sel in [
slice(None),
slice("y", None),
slice(None, "y"),
]:
with self.subTest("monotonically increasing first index", rows_sel=rows_sel):
self.assert_eq(kdf.loc[rows_sel], pdf.loc[rows_sel])
self.assert_eq(kdf.a.loc[rows_sel], pdf.a.loc[rows_sel])
for rows_sel in [
slice(("x", "b"), None),
slice(None, ("y", "c")),
slice(("x", "b"), ("y", "c")),
slice("x", ("y", "c")),
slice(("x", "b"), "y"),
]:
with self.subTest("monotonically increasing first index", rows_sel=rows_sel):
self.assertRaises(KeyError, lambda: kdf.loc[rows_sel])
self.assertRaises(KeyError, lambda: kdf.a.loc[rows_sel])
# not monotonically increasing index test
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5]},
index=pd.MultiIndex.from_tuples(
[("z", "e"), ("y", "d"), ("y", "c"), ("x", "b"), ("x", "a")]
),
)
kdf = ks.from_pandas(pdf)
for rows_sel in [
slice("y", None),
slice(None, "y"),
slice(("x", "b"), None),
slice(None, ("y", "c")),
slice(("x", "b"), ("y", "c")),
slice("x", ("y", "c")),
slice(("x", "b"), "y"),
]:
with self.subTest("monotonically decreasing", rows_sel=rows_sel):
self.assertRaises(KeyError, lambda: kdf.loc[rows_sel])
self.assertRaises(KeyError, lambda: kdf.a.loc[rows_sel])
def test_loc2d_multiindex(self):
kdf = self.kdf
kdf = kdf.set_index("b", append=True)
pdf = self.pdf
pdf = pdf.set_index("b", append=True)
self.assert_eq(kdf.loc[:, :], pdf.loc[:, :])
self.assert_eq(kdf.loc[:, "a"], pdf.loc[:, "a"])
self.assert_eq(kdf.loc[5:5, "a"], pdf.loc[5:5, "a"])
self.assert_eq(kdf.loc[:, "a":"a"], pdf.loc[:, "a":"a"])
self.assert_eq(kdf.loc[:, "a":"c"], pdf.loc[:, "a":"c"])
self.assert_eq(kdf.loc[:, "b":"c"], pdf.loc[:, "b":"c"])
def test_loc2d(self):
kdf = self.kdf
pdf = self.pdf
# index indexer is always regarded as slice for duplicated values
self.assert_eq(kdf.loc[5:5, "a"], pdf.loc[5:5, "a"])
self.assert_eq(kdf.loc[[5], "a"], pdf.loc[[5], "a"])
self.assert_eq(kdf.loc[5:5, ["a"]], pdf.loc[5:5, ["a"]])
self.assert_eq(kdf.loc[[5], ["a"]], pdf.loc[[5], ["a"]])
self.assert_eq(kdf.loc[:, :], pdf.loc[:, :])
self.assert_eq(kdf.loc[3:8, "a"], pdf.loc[3:8, "a"])
self.assert_eq(kdf.loc[:8, "a"], pdf.loc[:8, "a"])
self.assert_eq(kdf.loc[3:, "a"], pdf.loc[3:, "a"])
self.assert_eq(kdf.loc[[8], "a"], pdf.loc[[8], "a"])
self.assert_eq(kdf.loc[3:8, ["a"]], pdf.loc[3:8, ["a"]])
self.assert_eq(kdf.loc[:8, ["a"]], pdf.loc[:8, ["a"]])
self.assert_eq(kdf.loc[3:, ["a"]], pdf.loc[3:, ["a"]])
# TODO?: self.assert_eq(kdf.loc[[3, 4, 3], ['a']], pdf.loc[[3, 4, 3], ['a']])
self.assertRaises(SparkPandasIndexingError, lambda: kdf.loc[3, 3, 3])
self.assertRaises(SparkPandasIndexingError, lambda: kdf.a.loc[3, 3])
self.assertRaises(SparkPandasIndexingError, lambda: kdf.a.loc[3:, 3])
self.assertRaises(SparkPandasIndexingError, lambda: kdf.a.loc[kdf.a % 2 == 0, 3])
self.assert_eq(kdf.loc[5, "a"], pdf.loc[5, "a"])
self.assert_eq(kdf.loc[9, "a"], pdf.loc[9, "a"])
self.assert_eq(kdf.loc[5, ["a"]], pdf.loc[5, ["a"]])
self.assert_eq(kdf.loc[9, ["a"]], pdf.loc[9, ["a"]])
self.assert_eq(kdf.loc[:, "a":"a"], pdf.loc[:, "a":"a"])
self.assert_eq(kdf.loc[:, "a":"d"], pdf.loc[:, "a":"d"])
self.assert_eq(kdf.loc[:, "c":"d"], pdf.loc[:, "c":"d"])
# bool list-like column select
bool_list = [True, False]
self.assert_eq(kdf.loc[:, bool_list], pdf.loc[:, bool_list])
self.assert_eq(kdf.loc[:, np.array(bool_list)], pdf.loc[:, np.array(bool_list)])
pser = pd.Series(bool_list, index=pdf.columns)
self.assert_eq(kdf.loc[:, pser], pdf.loc[:, pser])
pser = pd.Series(list(reversed(bool_list)), index=list(reversed(pdf.columns)))
self.assert_eq(kdf.loc[:, pser], pdf.loc[:, pser])
self.assertRaises(IndexError, lambda: kdf.loc[:, bool_list[:-1]])
self.assertRaises(IndexError, lambda: kdf.loc[:, np.array(bool_list + [True])])
self.assertRaises(SparkPandasIndexingError, lambda: kdf.loc[:, pd.Series(bool_list)])
# non-string column names
kdf = self.kdf2
pdf = self.pdf2
self.assert_eq(kdf.loc[5:5, 0], pdf.loc[5:5, 0])
self.assert_eq(kdf.loc[5:5, [0]], pdf.loc[5:5, [0]])
self.assert_eq(kdf.loc[3:8, 0], pdf.loc[3:8, 0])
self.assert_eq(kdf.loc[3:8, [0]], pdf.loc[3:8, [0]])
self.assert_eq(kdf.loc[:, 0:0], pdf.loc[:, 0:0])
self.assert_eq(kdf.loc[:, 0:3], pdf.loc[:, 0:3])
self.assert_eq(kdf.loc[:, 2:3], pdf.loc[:, 2:3])
def test_loc2d_multiindex_columns(self):
arrays = [np.array(["bar", "bar", "baz", "baz"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "B", "C"], columns=arrays)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.loc["B":"B", "bar"], pdf.loc["B":"B", "bar"])
self.assert_eq(kdf.loc["B":"B", ["bar"]], pdf.loc["B":"B", ["bar"]])
self.assert_eq(kdf.loc[:, "bar":"bar"], pdf.loc[:, "bar":"bar"])
self.assert_eq(kdf.loc[:, "bar":("baz", "one")], pdf.loc[:, "bar":("baz", "one")])
self.assert_eq(
kdf.loc[:, ("bar", "two"):("baz", "one")], pdf.loc[:, ("bar", "two"):("baz", "one")]
)
self.assert_eq(kdf.loc[:, ("bar", "two"):"bar"], pdf.loc[:, ("bar", "two"):"bar"])
self.assert_eq(kdf.loc[:, "a":"bax"], pdf.loc[:, "a":"bax"])
self.assert_eq(
kdf.loc[:, ("bar", "x"):("baz", "a")],
pdf.loc[:, ("bar", "x"):("baz", "a")],
almost=True,
)
pdf = pd.DataFrame(
np.random.randn(3, 4),
index=["A", "B", "C"],
columns=pd.MultiIndex.from_tuples(
[("bar", "two"), ("bar", "one"), ("baz", "one"), ("baz", "two")]
),
)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.loc[:, "bar":"baz"], pdf.loc[:, "bar":"baz"])
self.assertRaises(KeyError, lambda: kdf.loc[:, "bar":("baz", "one")])
self.assertRaises(KeyError, lambda: kdf.loc[:, ("bar", "two"):"bar"])
# bool list-like column select
bool_list = [True, False, True, False]
self.assert_eq(kdf.loc[:, bool_list], pdf.loc[:, bool_list])
self.assert_eq(kdf.loc[:, np.array(bool_list)], pdf.loc[:, np.array(bool_list)])
pser = pd.Series(bool_list, index=pdf.columns)
self.assert_eq(kdf.loc[:, pser], pdf.loc[:, pser])
pser = pd.Series(list(reversed(bool_list)), index=list(reversed(pdf.columns)))
self.assert_eq(kdf.loc[:, pser], pdf.loc[:, pser])
# non-string column names
arrays = [np.array([0, 0, 1, 1]), np.array([1, 2, 1, 2])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "B", "C"], columns=arrays)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.loc["B":"B", 0], pdf.loc["B":"B", 0])
self.assert_eq(kdf.loc["B":"B", [0]], pdf.loc["B":"B", [0]])
self.assert_eq(kdf.loc[:, 0:0], pdf.loc[:, 0:0])
self.assert_eq(kdf.loc[:, 0:(1, 1)], pdf.loc[:, 0:(1, 1)])
self.assert_eq(kdf.loc[:, (0, 2):(1, 1)], pdf.loc[:, (0, 2):(1, 1)])
self.assert_eq(kdf.loc[:, (0, 2):0], pdf.loc[:, (0, 2):0])
self.assert_eq(kdf.loc[:, -1:2], pdf.loc[:, -1:2])
def test_loc2d_with_known_divisions(self):
pdf = pd.DataFrame(
np.random.randn(20, 5), index=list("abcdefghijklmnopqrst"), columns=list("ABCDE")
)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.loc[["a"], "A"], pdf.loc[["a"], "A"])
self.assert_eq(kdf.loc[["a"], ["A"]], pdf.loc[["a"], ["A"]])
self.assert_eq(kdf.loc["a":"o", "A"], pdf.loc["a":"o", "A"])
self.assert_eq(kdf.loc["a":"o", ["A"]], pdf.loc["a":"o", ["A"]])
self.assert_eq(kdf.loc[["n"], ["A"]], pdf.loc[["n"], ["A"]])
self.assert_eq(kdf.loc[["a", "c", "n"], ["A"]], pdf.loc[["a", "c", "n"], ["A"]])
# TODO?: self.assert_eq(kdf.loc[['t', 'b'], ['A']], pdf.loc[['t', 'b'], ['A']])
# TODO?: self.assert_eq(kdf.loc[['r', 'r', 'c', 'g', 'h'], ['A']],
# TODO?: pdf.loc[['r', 'r', 'c', 'g', 'h'], ['A']])
@unittest.skip("TODO: should handle duplicated columns properly")
def test_loc2d_duplicated_columns(self):
pdf = pd.DataFrame(
np.random.randn(20, 5), index=list("abcdefghijklmnopqrst"), columns=list("AABCD")
)
kdf = ks.from_pandas(pdf)
# TODO?: self.assert_eq(kdf.loc[['a'], 'A'], pdf.loc[['a'], 'A'])
# TODO?: self.assert_eq(kdf.loc[['a'], ['A']], pdf.loc[['a'], ['A']])
self.assert_eq(kdf.loc[["j"], "B"], pdf.loc[["j"], "B"])
self.assert_eq(kdf.loc[["j"], ["B"]], pdf.loc[["j"], ["B"]])
# TODO?: self.assert_eq(kdf.loc['a':'o', 'A'], pdf.loc['a':'o', 'A'])
# TODO?: self.assert_eq(kdf.loc['a':'o', ['A']], pdf.loc['a':'o', ['A']])
self.assert_eq(kdf.loc["j":"q", "B"], pdf.loc["j":"q", "B"])
self.assert_eq(kdf.loc["j":"q", ["B"]], pdf.loc["j":"q", ["B"]])
# TODO?: self.assert_eq(kdf.loc['a':'o', 'B':'D'], pdf.loc['a':'o', 'B':'D'])
# TODO?: self.assert_eq(kdf.loc['a':'o', 'B':'D'], pdf.loc['a':'o', 'B':'D'])
# TODO?: self.assert_eq(kdf.loc['j':'q', 'B':'A'], pdf.loc['j':'q', 'B':'A'])
# TODO?: self.assert_eq(kdf.loc['j':'q', 'B':'A'], pdf.loc['j':'q', 'B':'A'])
self.assert_eq(kdf.loc[kdf.B > 0, "B"], pdf.loc[pdf.B > 0, "B"])
# TODO?: self.assert_eq(kdf.loc[kdf.B > 0, ['A', 'C']], pdf.loc[pdf.B > 0, ['A', 'C']])
def test_getitem(self):
pdf = pd.DataFrame(
{
"A": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"B": [9, 8, 7, 6, 5, 4, 3, 2, 1],
"C": [True, False, True] * 3,
},
columns=list("ABC"),
)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf["A"], pdf["A"])
self.assert_eq(kdf[["A", "B"]], pdf[["A", "B"]])
self.assert_eq(kdf[kdf.C], pdf[pdf.C])
self.assertRaises(KeyError, lambda: kdf["X"])
self.assertRaises(KeyError, lambda: kdf[["A", "X"]])
self.assertRaises(AttributeError, lambda: kdf.X)
# not str/unicode
# TODO?: pdf = pd.DataFrame(np.random.randn(10, 5))
# TODO?: kdf = ks.from_pandas(pdf)
# TODO?: self.assert_eq(kdf[0], pdf[0])
# TODO?: self.assert_eq(kdf[[1, 2]], pdf[[1, 2]])
# TODO?: self.assertRaises(KeyError, lambda: pdf[8])
# TODO?: self.assertRaises(KeyError, lambda: pdf[[1, 8]])
# non-string column names
pdf = pd.DataFrame(
{
10: [1, 2, 3, 4, 5, 6, 7, 8, 9],
20: [9, 8, 7, 6, 5, 4, 3, 2, 1],
30: [True, False, True] * 3,
}
)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf[10], pdf[10])
self.assert_eq(kdf[[10, 20]], pdf[[10, 20]])
def test_getitem_slice(self):
pdf = pd.DataFrame(
{
"A": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"B": [9, 8, 7, 6, 5, 4, 3, 2, 1],
"C": [True, False, True] * 3,
},
index=list("abcdefghi"),
)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf["a":"e"], pdf["a":"e"])
self.assert_eq(kdf["a":"b"], pdf["a":"b"])
self.assert_eq(kdf["f":], pdf["f":])
def test_loc_on_numpy_datetimes(self):
pdf = pd.DataFrame(
{"x": [1, 2, 3]}, index=list(map(np.datetime64, ["2014", "2015", "2016"]))
)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.loc["2014":"2015"], pdf.loc["2014":"2015"])
def test_loc_on_pandas_datetimes(self):
pdf = pd.DataFrame(
{"x": [1, 2, 3]}, index=list(map(pd.Timestamp, ["2014", "2015", "2016"]))
)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.loc["2014":"2015"], pdf.loc["2014":"2015"])
@unittest.skip("TODO?: the behavior of slice for datetime")
def test_loc_datetime_no_freq(self):
datetime_index = pd.date_range("2016-01-01", "2016-01-31", freq="12h")
datetime_index.freq = None # FORGET FREQUENCY
pdf = pd.DataFrame({"num": range(len(datetime_index))}, index=datetime_index)
kdf = ks.from_pandas(pdf)
slice_ = slice("2016-01-03", "2016-01-05")
result = kdf.loc[slice_, :]
expected = pdf.loc[slice_, :]
self.assert_eq(result, expected)
@unittest.skip("TODO?: the behavior of slice for datetime")
def test_loc_timestamp_str(self):
pdf = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.date_range("2011-01-01", freq="H", periods=100),
)
kdf = ks.from_pandas(pdf)
# partial string slice
# TODO?: self.assert_eq(pdf.loc['2011-01-02'],
# TODO?: kdf.loc['2011-01-02'])
self.assert_eq(pdf.loc["2011-01-02":"2011-01-05"], kdf.loc["2011-01-02":"2011-01-05"])
# series
# TODO?: self.assert_eq(pdf.A.loc['2011-01-02'],
# TODO?: kdf.A.loc['2011-01-02'])
self.assert_eq(pdf.A.loc["2011-01-02":"2011-01-05"], kdf.A.loc["2011-01-02":"2011-01-05"])
pdf = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.date_range("2011-01-01", freq="M", periods=100),
)
kdf = ks.from_pandas(pdf)
# TODO?: self.assert_eq(pdf.loc['2011-01'], kdf.loc['2011-01'])
# TODO?: self.assert_eq(pdf.loc['2011'], kdf.loc['2011'])
self.assert_eq(pdf.loc["2011-01":"2012-05"], kdf.loc["2011-01":"2012-05"])
self.assert_eq(pdf.loc["2011":"2015"], kdf.loc["2011":"2015"])
# series
# TODO?: self.assert_eq(pdf.B.loc['2011-01'], kdf.B.loc['2011-01'])
# TODO?: self.assert_eq(pdf.B.loc['2011'], kdf.B.loc['2011'])
self.assert_eq(pdf.B.loc["2011-01":"2012-05"], kdf.B.loc["2011-01":"2012-05"])
self.assert_eq(pdf.B.loc["2011":"2015"], kdf.B.loc["2011":"2015"])
@unittest.skip("TODO?: the behavior of slice for datetime")
def test_getitem_timestamp_str(self):
pdf = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.date_range("2011-01-01", freq="H", periods=100),
)
kdf = ks.from_pandas(pdf)
# partial string slice
# TODO?: self.assert_eq(pdf['2011-01-02'],
# TODO?: kdf['2011-01-02'])
self.assert_eq(pdf["2011-01-02":"2011-01-05"], kdf["2011-01-02":"2011-01-05"])
pdf = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.date_range("2011-01-01", freq="M", periods=100),
)
kdf = ks.from_pandas(pdf)
# TODO?: self.assert_eq(pdf['2011-01'], kdf['2011-01'])
# TODO?: self.assert_eq(pdf['2011'], kdf['2011'])
self.assert_eq(pdf["2011-01":"2012-05"], kdf["2011-01":"2012-05"])
self.assert_eq(pdf["2011":"2015"], kdf["2011":"2015"])
@unittest.skip("TODO?: period index can't convert to DataFrame correctly")
def test_getitem_period_str(self):
pdf = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.period_range("2011-01-01", freq="H", periods=100),
)
kdf = ks.from_pandas(pdf)
# partial string slice
# TODO?: self.assert_eq(pdf['2011-01-02'],
# TODO?: kdf['2011-01-02'])
self.assert_eq(pdf["2011-01-02":"2011-01-05"], kdf["2011-01-02":"2011-01-05"])
pdf = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=
|
pd.period_range("2011-01-01", freq="M", periods=100)
|
pandas.period_range
|
import os
import logging
import glob
import numpy as np
import pandas as pd
import tensorflow as tf
import datetime as dt
import multiprocessing as mp
from tqdm.auto import tqdm
from sklearn.preprocessing import MaxAbsScaler
from functools import partial
import joblib
import sys
import warnings
import argparse as arg
tf.get_logger().setLevel(logging.ERROR)
from data_loader import create_ds, set_input_shape_global
from gan import init_gan, train_gan
def sort_timestamps_inner(file):
'''
Inner sort_timestamps method for parallelization.
@param file: file to preprocess
'''
df = pd.read_csv(file)
df.sort_values(['timeStamp'], axis=0, ascending=True, inplace=True, kind='merge')
df.to_csv(file, ',', index=False)
def sort_timestamps(dir, region='Berlin', pbar=None):
'''
Method to sort the data points in each ride file by their timestamps, as some are not in the correct order.
@param dir: path to the data directory with the exported files
@param region: target region of files that should be preprocessed
@param pbar: progress bar
'''
for split in ['train', 'test', 'val']:
file_list = glob.glob(os.path.join(dir, split, region, 'VM2_*.csv'))
with mp.Pool(mp.cpu_count()) as pool:
pool.map(sort_timestamps_inner, file_list)
pbar.update(1) if pbar is not None else print()
def remove_invalid_rides_inner(file):
'''
Inner remove_invalid_rides method for parallelization.
@param file: file to preprocess
'''
df = pd.read_csv(file)
df_cp = df.copy(deep=True)
df_cp['timeStamp'] = df_cp['timeStamp'].diff()
breakpoints = np.where((df_cp['timeStamp'] > 6000).to_numpy())
df_cp.dropna(inplace=True, axis=0)
if len(df_cp) == 0 or len(breakpoints[0]) > 0:
# remove rides where one col is completely empty or timestamp interval is too long
os.remove(file)
def remove_invalid_rides(dir, region='Berlin', pbar=None):
'''
Method to remove rides whose adjacent timestamps differ by more than 6000 ms, since such gaps indicate invalid ride files.
@param dir: path to the data directory with the exported files
@param region: target region of files that should be preprocessed
@param pbar: progress bar
'''
for split in ['train', 'test', 'val']:
file_list = glob.glob(os.path.join(dir, split, region, 'VM2_*.csv'))
with mp.Pool(mp.cpu_count()) as pool:
pool.map(remove_invalid_rides_inner, file_list)
pbar.update(1) if pbar is not None else print()
def remove_sensor_values_from_gps_timestamps_inner(lin_acc_flag, file):
'''
Inner remove_sensor_values_from_gps_timestamps method for parallelization.
@param lin_acc_flag: whether the linear accelerometer data was exported, too
@param file: file to preprocess
'''
# for android data, remove accelerometer and gyroscope sensor data from gps measurements, as the timestamp is rounded to seconds and the original order is not restorable
if os.path.splitext(file)[0][-1] == 'a':
df = pd.read_csv(file)
df_cp = df.copy(deep=True)
df_cp = df_cp[['lat', 'lon', 'acc']].dropna()
df_cp = df.iloc[df_cp.index.values].copy(True)
df_cp[['X', 'Y', 'Z', 'a', 'b', 'c']] = ''
if lin_acc_flag:
df_cp[['XL', 'YL', 'ZL']] = ''
df.iloc[df_cp.index] = df_cp
df.to_csv(file, ',', index=False)
def remove_sensor_values_from_gps_timestamps(dir, region='Berlin', lin_acc_flag=False, pbar=None):
'''
Method to remove the sensor values recorded alongside a gps measurement, as they are often faulty or at least time-delayed.
@param dir: path to the data directory with the exported files
@param region: target region of files that should be preprocessed
@param lin_acc_flag: whether the linear accelerometer data was exported, too
@param pbar: progress bar
'''
for split in ['train', 'test', 'val']:
file_list = glob.glob(os.path.join(dir, split, region, 'VM2_*.csv'))
with mp.Pool(mp.cpu_count()) as pool:
pool.map(partial(remove_sensor_values_from_gps_timestamps_inner, lin_acc_flag), file_list)
pbar.update(1) if pbar is not None else print()
def remove_acc_outliers_inner(lower, upper, file):
'''
Inner remove_acc_outliers method for parallelization.
@param lower: lower border
@param upper: upper border
@param file: file to preprocess
'''
df = pd.read_csv(file)
arr = df[['acc']].to_numpy()
outliers_lower = arr < lower
outliers_upper = arr > upper
outliers = np.logical_or(outliers_lower, outliers_upper)
outliers_bool = np.any(outliers, axis=1)
outlier_rows = np.where(outliers_bool)[0]
if len(outlier_rows) > 0:
# for accuracy outliers, blank out lat and lon (the acc column itself is dropped below)
df.loc[outlier_rows, 'lat'] = ''
df.loc[outlier_rows, 'lon'] = ''
df.drop(columns=['acc'], inplace=True)
df.to_csv(file, ',', index=False)
def remove_acc_outliers(dir, region='Berlin', pbar=None):
'''
Method to remove gps accuracy outliers.
@param dir: path to the data directory with the exported files
@param region: target region of files that should be preprocessed
@param pbar: progress bar
'''
l = []
split = 'train'
for file in glob.glob(os.path.join(dir, split, region, 'VM2_*.csv')):
df = pd.read_csv(file)
df = df[['acc']].dropna()
if df.shape[0] == 0:
os.remove(file)
else:
l.append(df[['acc']].to_numpy())
arr = np.concatenate(l, axis=0)
arr = arr[:, 0]
q25 = np.percentile(arr, 25, axis=0)
q75 = np.percentile(arr, 75, axis=0)
iqr = q75 - q25
cut_off = iqr * 1.5
lower = q25 - cut_off
upper = q75 + cut_off
for split in ['train', 'test', 'val']:
file_list = glob.glob(os.path.join(dir, split, region, 'VM2_*.csv'))
with mp.Pool(mp.cpu_count()) as pool:
pool.map(partial(remove_acc_outliers_inner, lower, upper), file_list)
pbar.update(1) if pbar is not None else print()
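# Hedged worked example of the IQR cut-off used above (synthetic accuracy values, not real rides):
def _iqr_bounds_example():
    acc = np.array([3.0, 4.0, 5.0, 6.0, 8.0, 9.0, 40.0])  # 40.0 is an obvious outlier
    q25, q75 = np.percentile(acc, 25), np.percentile(acc, 75)
    cut_off = (q75 - q25) * 1.5
    # values outside [lower, upper] get their lat/lon blanked by remove_acc_outliers_inner
    return q25 - cut_off, q75 + cut_off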
def calc_vel_delta_inner(file):
'''
Inner calc_vel_delta method for parallelization.
@param file: file to preprocess
'''
df = pd.read_csv(file)
df_cp = df.copy(deep=True)
df_cp[['lat', 'lon', 'timeStamp']] = df_cp[['lat', 'lon', 'timeStamp']].dropna().diff()
# compute lat & lon change per second
df_cp['lat'] = df_cp['lat'].dropna() * 1000 / df_cp['timeStamp'].dropna()
df_cp['lon'] = df_cp['lon'].dropna() * 1000 / df_cp['timeStamp'].dropna()
df[['lat', 'lon']] = df_cp[['lat', 'lon']]
df.to_csv(file, ',', index=False)
def calc_vel_delta(dir, region='Berlin', pbar=None):
'''
Method to calculate the "velocity" data deltas based on the gps longitude and latitude.
@param dir: path to the data directory with the exported files
@param region: target region of files that should be preprocessed
@param pbar: progress bar
'''
for split in ['train', 'test', 'val']:
file_list = glob.glob(os.path.join(dir, split, region, 'VM2_*.csv'))
with mp.Pool(mp.cpu_count()) as pool:
pool.map(calc_vel_delta_inner, file_list)
pbar.update(1) if pbar is not None else print()
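# Hedged mini example of the per-second delta computation above (synthetic coordinates):
def _vel_delta_example():
    df = pd.DataFrame({'lat': [52.5200, 52.5202],
                       'lon': [13.4050, 13.4051],
                       'timeStamp': [1600000000000, 1600000002000]})  # fixes 2000 ms apart
    d = df.diff()
    # lat changes by ~0.0002 deg over 2 s -> ~0.0001 deg/s; lon -> ~0.00005 deg/s
    return d['lat'] * 1000 / d['timeStamp'], d['lon'] * 1000 / d['timeStamp']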
def linear_interpolate(lin_acc_flag, file):
'''
Method to apply linear interpolation on the ride files.
@param lin_acc_flag: whether the linear accelerometer data was exported, too
@param file: file to preprocess.
'''
df = pd.read_csv(file)
# convert timestamp to datetime format
df['timeStamp'] = df['timeStamp'].apply(
lambda x: dt.datetime.utcfromtimestamp(x / 1000))
# set timeStamp col as pandas datetime index
df['timeStamp'] = pd.to_datetime(df['timeStamp'], unit='ms')
df = df.set_index(pd.DatetimeIndex(df['timeStamp']))
# drop all duplicate occurrences of the labels and keep the first occurrence
df = df[~df.index.duplicated(keep='first')]
# interpolation of X, Y, Z, a, b, c via linear interpolation based on timestamp
df['X'].interpolate(method='time', inplace=True)
df['Y'].interpolate(method='time', inplace=True)
df['Z'].interpolate(method='time', inplace=True)
df['a'].interpolate(method='time', inplace=True)
df['b'].interpolate(method='time', inplace=True)
df['c'].interpolate(method='time', inplace=True)
if os.path.splitext(file)[0][-1] == 'a' and lin_acc_flag:
df['XL'].interpolate(method='time', inplace=True)
df['YL'].interpolate(method='time', inplace=True)
df['ZL'].interpolate(method='time', inplace=True)
# interpolation of missing values via padding on the reversed df
df.sort_index(axis=0, ascending=False, inplace=True)
df['lat'].interpolate(method='pad', inplace=True)
df['lon'].interpolate(method='pad', inplace=True)
df.sort_index(axis=0, ascending=True, inplace=True)
# convert timestamp back to unix timestamp format in milliseconds
df['timeStamp'] = df.index.view(np.int64) // 10 ** 6
df.to_csv(file, ',', index=False)
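# Hedged mini example of the time-based interpolation used above (synthetic series):
def _time_interp_example():
    idx = pd.DatetimeIndex(['2021-01-01 00:00:00.000',
                            '2021-01-01 00:00:00.100',
                            '2021-01-01 00:00:00.400'])
    s = pd.Series([0.0, np.nan, 4.0], index=idx)
    # method='time' weights by the actual gaps, so the NaN becomes 1.0 rather than
    # the 2.0 that equally spaced (method='linear') interpolation would give
    return s.interpolate(method='time')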
def equidistant_interpolate(time_interval, lin_acc_flag, file):
'''
Method to apply equidistant interpolation on the ride files.
@param time_interval: interval between adjacent timestamps (only relevant with equidistant interpolation)
@param lin_acc_flag: whether the linear accelerometer data was exported, too
@param file: file to preprocess.
'''
df = pd.read_csv(file)
# floor start_time so that full seconds are included in the new timestamp series (time_interval may be 50, 100, 125 or 200ms)
# this ensures that less original data are thrown away after resampling, as GPS measurements are often at full seconds
start_time = (df['timeStamp'].iloc[0] // time_interval) * time_interval
end_time = df['timeStamp'].iloc[-1]
timestamps_original = df['timeStamp'].values
# new timestamps for equidistant resampling after linear interpolation
timestamps_new = np.arange(start_time, end_time, time_interval)
# throw away new timestamps that are already in the original rows
timestamps_net_new = list(set(timestamps_new) - set(timestamps_original))
# store which original rows to remove later, as they have no equidistant timestamp
removables = list(set(timestamps_original) - set(timestamps_new))
removables = [dt.datetime.utcfromtimestamp(x / 1000) for x in removables]
df_net_new = pd.DataFrame(timestamps_net_new, columns=['timeStamp'])
df = pd.concat([df, df_net_new])
# convert timestamp to datetime format
df['timeStamp'] = df['timeStamp'].apply(
lambda x: dt.datetime.utcfromtimestamp(x / 1000))
# set timeStamp col as pandas datetime index
df['timeStamp'] =
|
pd.to_datetime(df['timeStamp'], unit='ms')
|
pandas.to_datetime
|
import pandas as pd
selectionSundays = {'2002':'03/10/2002','2003':'03/16/2003',
'2004':'03/14/2004','2005':'03/13/2005',
'2006':'03/12/2006','2007':'03/11/2007',
'2008':'03/16/2008','2009':'03/15/2009',
'2010':'03/14/2010','2011':'03/13/2011',
'2012':'03/11/2012','2013':'03/17/2013',
'2014':'03/16/2014','2015':'03/15/2015',
'2016':'03/13/2016','2017':'03/12/2017',
                    '2018':'03/11/2018','2019':'03/17/2019'}
selectionSundayList = ['03/10/2002','03/16/2003','03/14/2004','03/13/2005','03/12/2006','03/11/2007','03/16/2008',
'03/15/2009','03/14/2010','03/13/2011','03/11/2012',
                       '03/17/2013','03/16/2014','03/15/2015','03/13/2016','03/12/2017','03/11/2018', '03/17/2019']
from datetime import timedelta
days_to_subtract=7
d = timedelta(days=days_to_subtract)
# Just a consistent way of processing files. Ignore the fact that the local variables say 2014
def read_data(teams_file,games_file,madness_teams_file):
teams_2014 = pd.read_csv(teams_file,header=None)
teams_2014.columns=["number","name"]
games_2014 = pd.read_csv(games_file,header=None)
games_2014.columns = ["notsure1","date","team1","H_A_N1","points1","team2","H_A_N2","points2"]
team1_names = teams_2014.copy()
team1_names.columns = ["team1","team1_name"]
team1_names.set_index('team1',inplace=True)
games_2014 = games_2014.set_index("team1").join(team1_names,how='inner').reset_index()
team2_names = teams_2014.copy()
team2_names.columns = ["team2","team2_name"]
team2_names.set_index('team2',inplace=True)
games_2014 = games_2014.set_index("team2").join(team2_names,how='inner').reset_index()
games_2014["date"] =
|
pd.to_datetime(games_2014["date"],format="%Y%m%d")
|
pandas.to_datetime
|
import logging
from datetime import datetime
import json
import pandas as pd
from schedule import every
from sqlalchemy import func
from dispatch.decorators import background_task
from dispatch.enums import Visibility
from dispatch.extensions import sentry_sdk
from dispatch.job.models import Job
from dispatch.worker import service as worker_service
from dispatch.worker.models import Worker
from dispatch.location.models import Location
from dispatch.plugins.base import plugins
from dispatch.plugins.kandbox_planner.data_adapter.kplanner_db_adapter import KPlannerDBAdapter
from dispatch.scheduler import scheduler
from dispatch.service import service as service_service
from dispatch.tag import service as tag_service
from dispatch.tag.models import Tag
from .enums import JobPlanningStatus
from .service import calculate_cost, get_all, get_all_by_status, get_all_last_x_hours_by_status
log = logging.getLogger(__name__)
@scheduler.add(every(1).hours, name="job-tagger")
@background_task
def auto_tagger(db_session):
"""Attempts to take existing tags and associate them with jobs."""
tags = tag_service.get_all(db_session=db_session).all()
log.debug(f"Fetched {len(tags)} tags from database.")
tag_strings = [t.name.lower() for t in tags if t.discoverable]
phrases = build_term_vocab(tag_strings)
matcher = build_phrase_matcher("dispatch-tag", phrases)
p = plugins.get(
INCIDENT_PLUGIN_STORAGE_SLUG
) # this may need to be refactored if we support multiple document types
for job in get_all(db_session=db_session).all():
log.debug(f"Processing job. Name: {job.name}")
doc = job.job_document
try:
mime_type = "text/plain"
text = p.get(doc.resource_id, mime_type)
except Exception as e:
log.debug(f"Failed to get document. Reason: {e}")
sentry_sdk.capture_exception(e)
continue
extracted_tags = list(set(extract_terms_from_text(text, matcher)))
matched_tags = (
db_session.query(Tag)
.filter(func.upper(Tag.name).in_([func.upper(t) for t in extracted_tags]))
.all()
)
job.tags.extend(matched_tags)
db_session.commit()
log.debug(f"Associating tags with job. Job: {job.name}, Tags: {extracted_tags}")
@scheduler.add(every(3000).seconds, name="calc_historical_location_features")
@background_task
def calc_historical_location_features(db_session=None):
"""Calculates the cost of all jobs."""
log.debug(
f"calc_historical_location_features - worker started worker.served_location_gmm ......"
)
calc_historical_location_features_real_func(db_session)
def calc_historical_location_features_real_func(db_session=None):
job_loc_df = pd.read_sql(
db_session.query(
Job.scheduled_primary_worker_id,
Job.id.label("job_id"),
Job.scheduled_start_datetime,
Job.scheduled_duration_minutes,
Job.requested_start_datetime,
Job.location_id,
Location.geo_longitude,
Location.geo_latitude,
)
.filter(Job.location_id == Location.id)
.filter(Job.planning_status != JobPlanningStatus.unplanned)
.statement,
db_session.bind,
)
if job_loc_df.count().max() < 1:
# raise ValueError("job_loc_df.count().max() < 1, no data to proceed")
print(
"calc_historical_location_features_real_func: job_loc_df.count().max() < 1, no data to proceed"
)
return
from dispatch.plugins.kandbox_planner.util.kandbox_date_util import (
extract_minutes_from_datetime,
)
job_loc_df["actual_start_minutes"] = job_loc_df.apply(
lambda x: extract_minutes_from_datetime(x["scheduled_start_datetime"]),
axis=1,
)
job_loc_df["days_delay"] = job_loc_df.apply(
lambda x: (x["scheduled_start_datetime"] - x["requested_start_datetime"]).days, axis=1
)
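    # Worked example (hypothetical dates): a job requested for 2021-03-01 10:00 and
    # scheduled to start 2021-03-05 09:00 yields days_delay = 3, since the Timedelta
    # is 3 days 23 hours and .days truncates the partial day.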
loc_gmm_df = (
job_loc_df.groupby(["location_id"])
.agg(
# job_count=pd.NamedAgg(column='location_code', aggfunc='count')
avg_geo_longitude=pd.NamedAgg(column="geo_longitude", aggfunc="mean"),
avg_geo_latitude=pd.NamedAgg(column="geo_latitude", aggfunc="mean"),
std_geo_longitude=pd.NamedAgg(column="geo_longitude", aggfunc="std"),
std_geo_latitude=pd.NamedAgg(column="geo_latitude", aggfunc="std"),
job_count=pd.NamedAgg(column="scheduled_primary_worker_id", aggfunc="count"),
list_scheduled_worker_code=pd.NamedAgg(
column="scheduled_primary_worker_id", aggfunc=list
),
avg_actual_start_minutes=pd.NamedAgg(column="actual_start_minutes", aggfunc="mean"),
avg_actual_duration_minutes=pd.NamedAgg(
column="scheduled_duration_minutes", aggfunc="mean"
),
avg_days_delay=pd.NamedAgg(column="days_delay", aggfunc="mean"),
stddev_days_delay=pd.NamedAgg(column="days_delay", aggfunc="std"),
)
.reset_index()
) # .sort_values(['location_code'], ascending=True)
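    # Sketch of the expected shape of loc_gmm_df (values are hypothetical): one row
    # per location_id, e.g. location_id=7, avg_geo_longitude=13.41,
    # std_geo_longitude=0.002, job_count=4, list_scheduled_worker_code=[19, 19, 19, 18].
    # The worker list is collapsed into a frequency dict with Counter just below.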
from collections import Counter
loc_gmm_df["job_historical_worker_service_dict"] = loc_gmm_df.apply(
lambda x: Counter(x["list_scheduled_worker_code"]),
axis=1,
)
# Then I should get "job_historical_worker_service_dict": {"19": 3, "18":1}
loc_gmm_df["job_history_feature_data"] = loc_gmm_df.apply(
lambda x: {
"mean": {"longitude": x["avg_geo_longitude"], "latitude": x["avg_geo_latitude"]},
"std": {"longitude": x["std_geo_longitude"], "latitude": x["std_geo_latitude"]},
"job_count": x["job_count"], # TODO same as out side, it should be here.!
"job_historical_worker_service_dict": x["job_historical_worker_service_dict"],
},
axis=1,
)
loc_feature_df = loc_gmm_df[
[
"location_id",
"job_history_feature_data",
"job_count",
"avg_actual_start_minutes",
"avg_actual_duration_minutes",
"avg_days_delay",
"stddev_days_delay",
]
]
loc_feature_df.rename(columns={"location_id": "id"}, inplace=True)
loc_update_dict_list = json.loads(loc_feature_df.to_json(orient="records"))
db_session.bulk_update_mappings(
Location,
loc_update_dict_list,
)
# db_session.flush()
db_session.commit()
log.debug(
f"calc_historical_location_features - Finished Location features, now started worker.served_location_gmm. "
)
job_loc_df = job_loc_df[
[
"scheduled_primary_worker_id",
"location_id",
"geo_longitude",
"geo_latitude",
]
]
worker_loc_df = pd.read_sql(
db_session.query(
Worker.id.label("scheduled_primary_worker_id"),
Worker.location_id,
Location.geo_longitude,
Location.geo_latitude,
)
.filter(Worker.location_id == Location.id)
.statement,
db_session.bind,
)
# worker_loc_df.rename(columns={"id": "scheduled_primary_worker_id"}, inplace=True)
"""
job_loc_df = pd.read_sql(
db_session.query(Job).filter(Job.planning_status != JobPlanningStatus.unplanned).statement,
db_session.bind,
)
# TO attach home location for each worker.
worker_loc_df = pd.read_sql(
db_session.query(Worker)
.filter(Job.planning_status != JobPlanningStatus.unplanned)
.statement,
db_session.bind,
)
worker_loc_df.rename(columns={"id": "scheduled_primary_worker_id"}, inplace=True)
job_loc_with_worker_home = pd.concat(
[
visit[["actual_worker_code", "location_code", "geo_longitude", "geo_latitude"]],
worker_df,
]
).copy()
"""
# job_loc_with_worker_home = job_loc_df
job_loc_with_worker_home = pd.concat(
[
job_loc_df,
worker_loc_df,
]
).copy()
log.debug(f"calc_historical_location_features - worker loaded from db ...")
#
worker_gmm_df = (
job_loc_with_worker_home.groupby(["scheduled_primary_worker_id"])
.agg(
# job_count=pd.NamedAgg(column='location_code', aggfunc='count')
avg_geo_longitude=pd.NamedAgg(column="geo_longitude", aggfunc="mean"),
avg_geo_latitude=pd.NamedAgg(column="geo_latitude", aggfunc="mean"),
std_geo_longitude=pd.NamedAgg(column="geo_longitude", aggfunc="std"),
std_geo_latitude=pd.NamedAgg(column="geo_latitude", aggfunc="std"),
job_count=
|
pd.NamedAgg(column="scheduled_primary_worker_id", aggfunc="count")
|
pandas.NamedAgg
|
import xarray as _xr
import pandas as _pd
import numpy as _np
import pathlib as _pl
import traceback as _tb
import datetime as _dt
from email.mime.text import MIMEText as _MIMEText
import smtplib as _smtplib
import pathlib as __pl
import configparser as _cp
import magic as _magic
settings = """
[notify]
email_address = None
smtp = localhost
"""
def generate_config(p2sf):
if not p2sf.parent.is_dir():
p2sf.parent.mkdir()
with open(p2sf, 'w') as raus:
raus.write(settings)
def load_config():
p2sf = __pl.Path.home().joinpath('.ceilopy/config.ini')
if not p2sf.is_file():
generate_config(p2sf)
config = _cp.ConfigParser()
config.read(p2sf)
return config
class CorruptFileError(Exception):
"""Exception raised when File is not whats expected.
"""
def __init__(self, message):
super().__init__(message)
class MissingSerialNumberError(Exception):
"""Exception raised when File does not contain Serial number.
"""
def __init__(self, message):
super().__init__(message)
class SerialNumberMissmatchError(Exception):
"""Exception raised when Files doe not have the same serial number.
"""
def __init__(self, message):
super().__init__(message)
def read_L1(file, parent = None):
if isinstance(file, (str, _pl.Path)):
file = [file]
    assert(isinstance(file, (_pd.Series, list, _np.ndarray))), f'File type not recognized: {type(file)}'
ignore1 = ['name','message_type','version','date_stamp',
'period','tilt_angle',
'cloud_status','cloud_data','status_bits','profile_scale',
'profile_resolution','profile_length']
if not _np.all([_magic.from_file(fn.as_posix()) == 'Hierarchical Data Format (version 5) data' for fn in file]):
fnc = '\n\t'.join([fn.as_posix() for fn in file])
raise CorruptFileError(f'At least one of the following can not be identified as a netcdf file: \n\t {fnc}')
L1 = _xr.open_mfdataset(file, concat_dim = 'timeDim', drop_variables=ignore1)
L1 = L1.assign_coords(time = _pd.to_datetime(L1.time.values, unit = 's'))
for var in L1.variables:
if 'timeDim' in L1[var].dims:
L1[var] = L1[var].swap_dims({'timeDim':'time'})
return L1
# read hist file
##### Read Level3 hist files. #############################################
def read_level3_hist(file, parent = None):
def read_file(fn):
cols = ['CREATEDATE',' CEILOMETER',' CLOUD_STATUS',' CLOUD_1',' CLOUD_2',
' CLOUD_3'] # What columns to keep.
his3 = _pd.read_csv(fn, skiprows=1, header=0, sep=',',
na_values='-9999', index_col=0, parse_dates=True,
infer_datetime_format=True, usecols=cols)
his3.index.rename('time', inplace=True)
his3.columns = [col.strip() for col in his3.columns]
return his3
if isinstance(file, (str, _pl.Path)):
file = [file]
    assert(isinstance(file, (_pd.Series, list, _np.ndarray))), f'File type not recognized: {type(file)}'
df = _pd.concat([read_file(fn) for fn in file], sort = True)
#### testpoint
parent.tp_dfcc = df.copy()
# assert(df.index.duplicated().sum() == 0), 'There are duplicates in the hist file ... I would think this should not be happening. if it does un-comment the following line'
df = df[~df.index.duplicated(keep='first')] # Remove duplicates
return df
class Cl51CloudProdRetriever():
def __init__(self, poutg,
# check_serial = True,
):
self.poutg = poutg
# self.p2fnout = poutg.path2fn_out.unique()[0]
self._product_dataset = None
self.get_serial_numbers()
# if check_serial:
# self.check_serial()
def get_serial_numbers(self):
def get_serial(row):
# Extract serial numbers from files
key = row.file_type
file = row.path2raw
if key in ['L1', 'L2', 'bl']:
serial = file.name[-11:-3] # Extract serial number from L1 filename.
# elif key == 'L3':
# serial = files['L3'][-11:-3]
elif key in ['H2','H3','hist']:
h = _pd.read_csv(file, skiprows=1, header=0, sep=',')
serial = h[' CEILOMETER'][0].strip() # Extract serial number from H2 file.
else:
raise KeyError('File type unknown')
return serial
self.poutg['sn'] = self.poutg.apply(get_serial, axis = 1)
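        # Example of the filename convention assumed by get_serial (hypothetical
        # name): for an L1/L2/bl file such as 'L1_CEIL_A1234567.nc',
        # file.name[-11:-3] yields 'A1234567', the 8 characters before '.nc'.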
def check_serial(self, error_handling = 'raise'):
"""
        Checks if the serial numbers in all the files are the same. In early
        measurements the serial number was not stored ... use error_handling to
        deal with occurring errors.
Parameters
----------
error_handling : str, optional
How to deal with errors. The default is 'raise'.
            raise: raises occurring errors
allow_empty: do not raise an error if serial number is not available
Raises
------
        MissingSerialNumberError
            If at least one file does not contain a serial number.
        SerialNumberMissmatchError
            If the serial numbers found in the files do not match.
        Returns
        -------
        None
"""
sn_series = self.poutg['sn'].copy()
# self.poutg['sn'] = sn_series.copy()
valid = ['raise', 'allow_empty']
        assert(error_handling in valid), f'error_handling got an unexpected value ({error_handling}). Choose from: {valid}'
if error_handling == 'allow_empty':
sn_series = sn_series[sn_series.apply(lambda x: len(x)) != 0]
if sn_series.unique().shape[0] != 1:
if len(sn_series[sn_series.apply(lambda x: len(x)) != 0]) != len(sn_series):
fnj = '\n\t'.join([fn.as_posix() for fn in self.poutg.path2raw])
raise MissingSerialNumberError(f'At least one of the following files is missing a serial number:\n\t{fnj}')
raise SerialNumberMissmatchError(f'Serial numbers ({sn_series.unique()}) do not match')
@property
def product_dataset(self):
if isinstance(self._product_dataset, type(None)):
poutg = self.poutg
L1 = read_L1(poutg[poutg.file_type == 'bl'].path2raw)
dfL1 = L1.rcs_910.to_pandas()
            assert(dfL1.index.duplicated().sum() == 0), "there are duplicates in L1's index, I would think this should not be happening. if it does un-comment the following line"
# dfL1 = dfL1[~dfL1.index.duplicated(keep='first')]
his3 = read_level3_hist(poutg[poutg.file_type == 'hist'].path2raw, parent = self)
##### Clean and resample to 36s ###########################################
# resample to 36s even though L1 files are already at 36 sec because the
# time intervals on the L1 files from BL-View are sometimes off by a second.
# Keeping the limit at 1 step prevents the resample from repeating a nearby
# data point to fill in large gaps in the data.
dfL1 = dfL1.resample('36S').nearest(limit=1)
# The .his files are originally at 16 sec.
his3 = his3.resample('36S').nearest(limit=1)
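            # Illustration (hypothetical timestamps): a sample at 12:00:37 fills the
            # 12:00:36 grid point, but a multi-minute outage stays NaN because
            # nearest(limit=1) fills at most one consecutive grid point from a
            # nearby sample instead of repeating it across the gap.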
            # Do this to fill in any gaps in the data with nans to build complete days.
            # Create a date range of a complete day with 36s intervals with no gaps.
day = _pd.date_range(dfL1.index[0].floor('D'), dfL1.index[-1].ceil('D'),freq='36S')
df = _pd.DataFrame(index=day[:-1])
df.index.rename('time', inplace=True)
# Merge the date range from above with the dataframes to fill in any gaps
            # left to complete a whole day of 36s intervals.
dfx = _pd.merge_ordered(df, dfL1, on='time') # For the L1 file
dfx.set_index('time', drop = True, inplace = True)
dfhis = _pd.merge_ordered(df, his3, on='time')
dfhis.set_index('time', drop = True, inplace = True)
##### Build the Variables and attributes ##################################
var = {} # Create empty variable dictionary.
# L1 file
var['backscatter_profile']=(['time','range'], _np.float32(dfx.values),
{'long_name':'2-D ceilometer signal backscatter profile.',
'units':'10e-9 m^-1 sr^-1',
'comments':'Range-corrected-scattering'})
# Level3 .his files.
var['cloud_status']=(['time'], _np.float32(dfhis['CLOUD_STATUS']),
{'long_name':'Cloud detection status.',
'units':'1',
'flag_values':_np.float32([0,1,2,3,4]),
'flag_0':'No significant backscatter.',
'flag_1':'One cloud layer detected.',
'flag_2':'Two cloud layers detected.',
'flag_3':'Three cloud layers detected.',
'flag_4':'''Full obscuration/vertical visibility mode.
First cloud_base will report vertical visibility
and second cloud_base will report highest signal''',
'comments':'''When cloud_status=4 there is an optically
thick cloud that obscures the signal. Therefore
it is not possible to discern additional cloud
layers above it so the vertical visibility and
highest signal are reported instead.'''})
var['cloud_base']=(['time','cloud_layer'],
_np.float32(dfhis[['CLOUD_1','CLOUD_2','CLOUD_3']].values.tolist()),
{'long_name':'Cloud base heights.',
'units':'m',
'comments':'''A 2D array containing all three cloud bases
at each timestep. -999 if no significant signal.''',
'cloud_base_1':'''First cloud base height or vertical visibility
if cloud_status=4''',
'cloud_base_2':'''Second cloud base height or highest received
signal if cloud_status=4''',
'cloud_base_3':'Third cloud base height'})
##### Create dataset. #####################################################
ds = _xr.Dataset(attrs = {'title':'Ceilometer cloud product',
'version':'1.0',
'institution':'NOAA/GML/GRAD',
'author':'<EMAIL>',
'source':'Vaisala CL51 ceilometer',
'serial_number': poutg.sn.unique()[0],
                                   'input_files': [fn.name for fn in poutg.path2raw],
'Conventions':'CF-1.8',
'comments':'''The data has not undergone any processing
                                        other than what the Vaisala software has applied.
                                        In addition, no QC has been applied other than
a visual inspection for impossible values or
obvious errors.'''
},
coords = {'time':('time', df.index.values,
{'long_name':'Time in UTC',
'comments':'36 second message interval'}),
'range':('range', L1['range'].values,
{'long_name':'Vertical height bins',
'units':'meters'}),
'cloud_layer':('cloud_layer', _np.array([1,2,3]),
{'long_name':'cloud layer',
'units':'1'}),
},
data_vars = var
)
self._product_dataset = ds
return self._product_dataset
class Cl51CloudProdProcessor(object):
def __init__(self,
p2fl_in = '/nfs/grad/Inst/Ceil/SURFRAD/',
p2fl_out = '/nfs/iftp/aftp/g-rad/surfrad/ceilometer/cl51_cloud_prod_lev0',
):
self.p2fl_in = _pl.Path(p2fl_in)
self.hist_file_format= '*LEVEL_3*.his'
self.bl_file_format = 'L1*.nc'
self.p2fl_out = _pl.Path(p2fl_out)
self.fn_format_out = '{site}.cl51.cloud_prod.{date}.nc'
# self.test = test
self._workplan = None
@property
def workplan(self):
if isinstance(self._workplan, type(None)):
def bl2date(row):
if row.path2raw.name.split('_')[-1].split('.')[0].isnumeric():
dt = row.path2raw.name.split('_')[-1].split('.')[0]
else:
dt = row.path2raw.name.split('_')[-2]
return _pd.to_datetime(dt)
workplan =
|
_pd.DataFrame()
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import silhouette_samples, silhouette_score
from scipy.stats import pearsonr, spearmanr
def cluster(n_clusters, layer, output_dir, file_name):
clusterer = AgglomerativeClustering(linkage='ward', n_clusters=n_clusters)
cluster_labels = clusterer.fit_predict(np.array(layer))
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
# silhouette_avg = silhouette_score(layer, cluster_labels)
# print("For n_clusters =", n_clusters,
# "The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(np.array(layer), cluster_labels)
# silh_values.append(sample_silhouette_values)
category = [[n]*332 for n in range(64)]
category = [n for i in category for n in i]
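    # Editor's note on assumptions: this hard-codes 64 categories of 332 stimuli each
    # (64 * 332 rows in `layer`), and `ids` is expected to be a module-level list of
    # the same length defined elsewhere in the experiment setup.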
cluster_df = pd.DataFrame({'ids':ids, 'category':category, 'cluster_labels':cluster_labels, 'silh_values':sample_silhouette_values})
cluster_df.to_csv(output_dir+'cluster_df_'+file_name+'.csv')
return cluster_labels, sample_silhouette_values
output_dir = '/Users/danielmlow/Dropbox/cnn/experiment/stimuli_final/'
dense_final_df = pd.read_csv(output_dir+'whole_dataset/dense_final_filtered_vectors.csv', index_col=0)
conv_1_df =
|
pd.read_csv(output_dir+'whole_dataset/conv_1_filtered_vectors.csv', index_col=0)
|
pandas.read_csv
|
import pandas as pd
import numpy as np
from statsmodels.formula.api import ols
import plotly_express
import plotly.graph_objs as go
from plotly.subplots import make_subplots
# Read in data
batter_data = pd.read_csv("~/Desktop/MLB_FA/Data/fg_bat_data.csv")
del batter_data['Age']
print(len(batter_data))
print(batter_data.head())
pitcher_data = pd.read_csv("~/Desktop/MLB_FA/Data/fg_pitch_data.csv")
del pitcher_data['Age']
print(len(pitcher_data))
print(pitcher_data.head())
salary_data = pd.read_csv("~/Desktop/MLB_FA/Data/salary_data.csv")
print(len(salary_data))
injury_data = pd.read_csv("~/Desktop/MLB_FA/Data/injury_data_use.csv")
# Check for whether there is overlap between injury data and the salary data players
# injury_data_players = injury_data['Player'].unique()
# mutual = salary_data[salary_data['Player'].isin(injury_data_players)] # 945 out of 1135 players included
# excl = salary_data[~salary_data['Player'].isin(injury_data_players)]
# print(len(excl['Player'].unique())) # 129 unique players injury data omitted; use mlb.com trans for these
# Define inflation
def npv(df, rate):
r = rate
df['Salary'] = pd.to_numeric(df['Salary'])
    df['AAV'] = df['Salary'] / df['Years']
df['NPV'] = 0
df['NPV'] = round(df['AAV'] * (1 - (1 / ((1 + r) ** df['Years']))) / r, 2)
return df
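# Worked example of the annuity formula above (hypothetical contract, not real data):
# a 3-year, $30M deal at r = 0.05 has AAV = 10.0 and
# NPV = 10.0 * (1 - 1 / 1.05**3) / 0.05 ~= 27.23,
# i.e. the present value of three yearly AAV payments discounted at 5%.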
salary_data = npv(salary_data, 0.05)
# Lagged metrics to see if there is carryover value / value in continuity
class Metrics:
def lagged_batter(df):
df['WAR'] = pd.to_numeric(df['WAR'])
df['y_n1_war'] = df.groupby("Name")['WAR'].shift(1)
df['y_n2_war'] = df.groupby("Name")['y_n1_war'].shift(1)
df['y_n3_war'] = df.groupby("Name")['y_n2_war'].shift(1)
df['y_n4_war'] = df.groupby("Name")['y_n3_war'].shift(1)
df['y_n5_war'] = df.groupby("Name")['y_n4_war'].shift(1)
df['y_n6_war'] = df.groupby("Name")['y_n5_war'].shift(1)
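        # Example (hypothetical player): WAR by season [2.0, 3.5, 1.0] gives
        # y_n1_war = [NaN, 2.0, 3.5] and y_n2_war = [NaN, NaN, 2.0]; the shift is
        # applied within each Name group, so lags never leak across players.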
df['wOBA'] =
|
pd.to_numeric(df['wOBA'])
|
pandas.to_numeric
|
from datetime import datetime, timedelta
import warnings
import operator
from textwrap import dedent
import numpy as np
from pandas._libs import (lib, index as libindex, tslib as libts,
algos as libalgos, join as libjoin,
Timedelta)
from pandas._libs.lib import is_datetime_array
from pandas.compat import range, u, set_function_name
from pandas.compat.numpy import function as nv
from pandas import compat
from pandas.core.accessor import CachedAccessor
from pandas.core.arrays import ExtensionArray
from pandas.core.dtypes.generic import (
ABCSeries, ABCDataFrame,
ABCMultiIndex,
ABCPeriodIndex, ABCTimedeltaIndex,
ABCDateOffset)
from pandas.core.dtypes.missing import isna, array_equivalent
from pandas.core.dtypes.common import (
_ensure_int64,
_ensure_object,
_ensure_categorical,
_ensure_platform_int,
is_integer,
is_float,
is_dtype_equal,
is_dtype_union_equal,
is_object_dtype,
is_categorical,
is_categorical_dtype,
is_interval_dtype,
is_period_dtype,
is_bool,
is_bool_dtype,
is_signed_integer_dtype,
is_unsigned_integer_dtype,
is_integer_dtype, is_float_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_timedelta64_dtype,
is_hashable,
needs_i8_conversion,
is_iterator, is_list_like,
is_scalar)
from pandas.core.base import PandasObject, IndexOpsMixin
import pandas.core.common as com
from pandas.core import ops
from pandas.util._decorators import (
Appender, Substitution, cache_readonly, deprecate_kwarg)
from pandas.core.indexes.frozen import FrozenList
import pandas.core.dtypes.concat as _concat
import pandas.core.missing as missing
import pandas.core.algorithms as algos
import pandas.core.sorting as sorting
from pandas.io.formats.printing import (
pprint_thing, default_pprint, format_object_summary, format_object_attrs)
from pandas.core.ops import make_invalid_op
from pandas.core.strings import StringMethods
__all__ = ['Index']
_unsortable_types = frozenset(('mixed', 'mixed-integer'))
_index_doc_kwargs = dict(klass='Index', inplace='',
target_klass='Index',
unique='Index', duplicated='np.ndarray')
_index_shared_docs = dict()
def _try_get_item(x):
try:
return x.item()
except AttributeError:
return x
def _make_comparison_op(op, cls):
def cmp_method(self, other):
if isinstance(other, (np.ndarray, Index, ABCSeries)):
if other.ndim > 0 and len(self) != len(other):
raise ValueError('Lengths must match to compare')
# we may need to directly compare underlying
# representations
if needs_i8_conversion(self) and needs_i8_conversion(other):
return self._evaluate_compare(other, op)
if is_object_dtype(self) and self.nlevels == 1:
# don't pass MultiIndex
with np.errstate(all='ignore'):
result = ops._comp_method_OBJECT_ARRAY(op, self.values, other)
else:
# numpy will show a DeprecationWarning on invalid elementwise
# comparisons, this will raise in the future
with warnings.catch_warnings(record=True):
with np.errstate(all='ignore'):
result = op(self.values, np.asarray(other))
# technically we could support bool dtyped Index
# for now just return the indexing array directly
if is_bool_dtype(result):
return result
try:
return Index(result)
except TypeError:
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(cmp_method, name, cls)
def _make_arithmetic_op(op, cls):
def index_arithmetic_method(self, other):
if isinstance(other, (ABCSeries, ABCDataFrame)):
return NotImplemented
elif isinstance(other, ABCTimedeltaIndex):
# Defer to subclass implementation
return NotImplemented
other = self._validate_for_numeric_binop(other, op)
# handle time-based others
if isinstance(other, (ABCDateOffset, np.timedelta64, timedelta)):
return self._evaluate_with_timedelta_like(other, op)
elif isinstance(other, (datetime, np.datetime64)):
return self._evaluate_with_datetime_like(other, op)
values = self.values
with np.errstate(all='ignore'):
result = op(values, other)
result = missing.dispatch_missing(op, values, other, result)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
if op is divmod:
result = (Index(result[0], **attrs), Index(result[1], **attrs))
else:
result = Index(result, **attrs)
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(index_arithmetic_method, name, cls)
class InvalidIndexError(Exception):
pass
_o_dtype = np.dtype(object)
_Identity = object
def _new_Index(cls, d):
""" This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__
"""
# required for backward compat, because PI can't be instantiated with
# ordinals through __new__ GH #13277
if issubclass(cls, ABCPeriodIndex):
from pandas.core.indexes.period import _new_PeriodIndex
return _new_PeriodIndex(cls, **d)
return cls.__new__(cls, **d)
class Index(IndexOpsMixin, PandasObject):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: object)
If dtype is None, we find the dtype that best fits the data.
If an actual dtype is provided, we coerce to that dtype if it's safe.
Otherwise, an error will be raised.
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible
Notes
-----
An Index instance can **only** contain hashable objects
Examples
--------
>>> pd.Index([1, 2, 3])
Int64Index([1, 2, 3], dtype='int64')
>>> pd.Index(list('abc'))
Index(['a', 'b', 'c'], dtype='object')
See Also
---------
RangeIndex : Index implementing a monotonic integer range
CategoricalIndex : Index of :class:`Categorical` s.
MultiIndex : A multi-level, or hierarchical, Index
IntervalIndex : an Index of :class:`Interval` s.
DatetimeIndex, TimedeltaIndex, PeriodIndex
Int64Index, UInt64Index, Float64Index
"""
# To hand over control to subclasses
_join_precedence = 1
# Cython methods
_left_indexer_unique = libjoin.left_join_indexer_unique_object
_left_indexer = libjoin.left_join_indexer_object
_inner_indexer = libjoin.inner_join_indexer_object
_outer_indexer = libjoin.outer_join_indexer_object
_typ = 'index'
_data = None
_id = None
name = None
asi8 = None
_comparables = ['name']
_attributes = ['name']
_is_numeric_dtype = False
_can_hold_na = True
# would we like our indexing holder to defer to us
_defer_to_indexing = False
# prioritize current class for _shallow_copy_with_infer,
# used to infer integers as datetime-likes
_infer_as_myclass = False
_engine_type = libindex.ObjectEngine
_accessors = set(['str'])
str = CachedAccessor("str", StringMethods)
def __new__(cls, data=None, dtype=None, copy=False, name=None,
fastpath=False, tupleize_cols=True, **kwargs):
if name is None and hasattr(data, 'name'):
name = data.name
if fastpath:
return cls._simple_new(data, name)
from .range import RangeIndex
# range
if isinstance(data, RangeIndex):
return RangeIndex(start=data, copy=copy, dtype=dtype, name=name)
elif isinstance(data, range):
return RangeIndex.from_range(data, copy=copy, dtype=dtype,
name=name)
# categorical
if is_categorical_dtype(data) or is_categorical_dtype(dtype):
from .category import CategoricalIndex
return CategoricalIndex(data, dtype=dtype, copy=copy, name=name,
**kwargs)
# interval
if is_interval_dtype(data) or is_interval_dtype(dtype):
from .interval import IntervalIndex
closed = kwargs.get('closed', None)
return IntervalIndex(data, dtype=dtype, name=name, copy=copy,
closed=closed)
# index-like
elif isinstance(data, (np.ndarray, Index, ABCSeries)):
if (is_datetime64_any_dtype(data) or
(dtype is not None and is_datetime64_any_dtype(dtype)) or
'tz' in kwargs):
from pandas.core.indexes.datetimes import DatetimeIndex
result = DatetimeIndex(data, copy=copy, name=name,
dtype=dtype, **kwargs)
if dtype is not None and is_dtype_equal(_o_dtype, dtype):
return Index(result.to_pydatetime(), dtype=_o_dtype)
else:
return result
elif (is_timedelta64_dtype(data) or
(dtype is not None and is_timedelta64_dtype(dtype))):
from pandas.core.indexes.timedeltas import TimedeltaIndex
result = TimedeltaIndex(data, copy=copy, name=name, **kwargs)
if dtype is not None and _o_dtype == dtype:
return Index(result.to_pytimedelta(), dtype=_o_dtype)
else:
return result
if dtype is not None:
try:
# we need to avoid having numpy coerce
# things that look like ints/floats to ints unless
# they are actually ints, e.g. '0' and 0.0
# should not be coerced
# GH 11836
if is_integer_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'integer':
try:
data = np.array(data, copy=copy, dtype=dtype)
except OverflowError:
# gh-15823: a more user-friendly error message
raise OverflowError(
"the elements provided in the data cannot "
"all be casted to the dtype {dtype}"
.format(dtype=dtype))
elif inferred in ['floating', 'mixed-integer-float']:
if isna(data).any():
raise ValueError('cannot convert float '
'NaN to integer')
# If we are actually all equal to integers,
# then coerce to integer.
try:
return cls._try_convert_to_int_index(
data, copy, name, dtype)
except ValueError:
pass
# Return an actual float index.
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype,
name=name)
elif inferred == 'string':
pass
else:
data = data.astype(dtype)
elif is_float_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'string':
pass
else:
data = data.astype(dtype)
else:
data = np.array(data, dtype=dtype, copy=copy)
except (TypeError, ValueError) as e:
msg = str(e)
if 'cannot convert float' in msg:
raise
# maybe coerce to a sub-class
from pandas.core.indexes.period import (
PeriodIndex, IncompatibleFrequency)
if isinstance(data, PeriodIndex):
return PeriodIndex(data, copy=copy, name=name, **kwargs)
if is_signed_integer_dtype(data.dtype):
from .numeric import Int64Index
return Int64Index(data, copy=copy, dtype=dtype, name=name)
elif is_unsigned_integer_dtype(data.dtype):
from .numeric import UInt64Index
return UInt64Index(data, copy=copy, dtype=dtype, name=name)
elif is_float_dtype(data.dtype):
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype, name=name)
elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data):
subarr = data.astype('object')
else:
subarr = com._asarray_tuplesafe(data, dtype=object)
# _asarray_tuplesafe does not always copy underlying data,
# so need to make sure that this happens
if copy:
subarr = subarr.copy()
if dtype is None:
inferred = lib.infer_dtype(subarr)
if inferred == 'integer':
try:
return cls._try_convert_to_int_index(
subarr, copy, name, dtype)
except ValueError:
pass
return Index(subarr, copy=copy,
dtype=object, name=name)
elif inferred in ['floating', 'mixed-integer-float']:
from .numeric import Float64Index
return Float64Index(subarr, copy=copy, name=name)
elif inferred == 'interval':
from .interval import IntervalIndex
return IntervalIndex(subarr, name=name, copy=copy)
elif inferred == 'boolean':
# don't support boolean explicitly ATM
pass
elif inferred != 'string':
if inferred.startswith('datetime'):
if (lib.is_datetime_with_singletz_array(subarr) or
'tz' in kwargs):
# only when subarr has the same tz
from pandas.core.indexes.datetimes import (
DatetimeIndex)
try:
return DatetimeIndex(subarr, copy=copy,
name=name, **kwargs)
except libts.OutOfBoundsDatetime:
pass
elif inferred.startswith('timedelta'):
from pandas.core.indexes.timedeltas import (
TimedeltaIndex)
return TimedeltaIndex(subarr, copy=copy, name=name,
**kwargs)
elif inferred == 'period':
try:
return PeriodIndex(subarr, name=name, **kwargs)
except IncompatibleFrequency:
pass
return cls._simple_new(subarr, name)
elif hasattr(data, '__array__'):
return Index(np.asarray(data), dtype=dtype, copy=copy, name=name,
**kwargs)
elif data is None or is_scalar(data):
cls._scalar_data_error(data)
else:
if tupleize_cols and is_list_like(data) and data:
if is_iterator(data):
data = list(data)
# we must be all tuples, otherwise don't construct
# 10697
if all(isinstance(e, tuple) for e in data):
from .multi import MultiIndex
return MultiIndex.from_tuples(
data, names=name or kwargs.get('names'))
# other iterable of some kind
subarr = com._asarray_tuplesafe(data, dtype=object)
return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs)
"""
NOTE for new Index creation:
- _simple_new: It returns new Index with the same type as the caller.
All metadata (such as name) must be provided by caller's responsibility.
Using _shallow_copy is recommended because it fills these metadata
otherwise specified.
- _shallow_copy: It returns new Index with the same type (using
_simple_new), but fills caller's metadata otherwise specified. Passed
kwargs will overwrite corresponding metadata.
- _shallow_copy_with_infer: It returns new Index inferring its type
from passed values. It fills caller's metadata otherwise specified as the
same as _shallow_copy.
See each method's docstring.
"""
@classmethod
def _simple_new(cls, values, name=None, dtype=None, **kwargs):
"""
        we require that we have a dtype compat for the values
if we are passed a non-dtype compat, then coerce using the constructor
Must be careful not to recurse.
"""
if not hasattr(values, 'dtype'):
if (values is None or not len(values)) and dtype is not None:
values = np.empty(0, dtype=dtype)
else:
values = np.array(values, copy=False)
if is_object_dtype(values):
values = cls(values, name=name, dtype=dtype,
**kwargs)._ndarray_values
result = object.__new__(cls)
result._data = values
result.name = name
for k, v in compat.iteritems(kwargs):
setattr(result, k, v)
return result._reset_identity()
_index_shared_docs['_shallow_copy'] = """
create a new Index with the same class as the caller, don't copy the
data, use the same object attributes with passed in attributes taking
precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, **kwargs):
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
if not len(values) and 'dtype' not in kwargs:
attributes['dtype'] = self.dtype
return self._simple_new(values, **attributes)
def _shallow_copy_with_infer(self, values=None, **kwargs):
"""
create a new Index inferring the class with passed value, don't copy
the data, use the same object attributes with passed in attributes
taking precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
attributes['copy'] = False
if not len(values) and 'dtype' not in kwargs:
attributes['dtype'] = self.dtype
if self._infer_as_myclass:
try:
return self._constructor(values, **attributes)
except (TypeError, ValueError):
pass
return Index(values, **attributes)
def _deepcopy_if_needed(self, orig, copy=False):
"""
.. versionadded:: 0.19.0
Make a copy of self if data coincides (in memory) with orig.
Subclasses should override this if self._base is not an ndarray.
Parameters
----------
orig : ndarray
other ndarray to compare self._data against
copy : boolean, default False
when False, do not run any check, just return self
Returns
-------
A copy of self if needed, otherwise self : Index
"""
if copy:
# Retrieve the "base objects", i.e. the original memory allocations
if not isinstance(orig, np.ndarray):
# orig is a DatetimeIndex
orig = orig.values
orig = orig if orig.base is None else orig.base
new = self._data if self._data.base is None else self._data.base
if orig is new:
return self.copy(deep=True)
return self
def _update_inplace(self, result, **kwargs):
# guard when called from IndexOpsMixin
raise TypeError("Index can't be updated inplace")
def _sort_levels_monotonic(self):
""" compat with MultiIndex """
return self
_index_shared_docs['_get_grouper_for_level'] = """
Get index grouper corresponding to an index level
Parameters
----------
mapper: Group mapping function or None
Function mapping index values to groups
level : int or None
Index level
Returns
-------
grouper : Index
Index of values to group on
labels : ndarray of int or None
Array of locations in level_index
uniques : Index or None
Index of unique values for level
"""
@Appender(_index_shared_docs['_get_grouper_for_level'])
def _get_grouper_for_level(self, mapper, level=None):
assert level is None or level == 0
if mapper is None:
grouper = self
else:
grouper = self.map(mapper)
return grouper, None, None
def is_(self, other):
"""
More flexible, faster check like ``is`` but that works through views
Note: this is *not* the same as ``Index.identical()``, which checks
that metadata is also the same.
Parameters
----------
other : object
other object to compare against.
Returns
-------
True if both have same underlying data, False otherwise : bool
"""
# use something other than None to be clearer
return self._id is getattr(
other, '_id', Ellipsis) and self._id is not None
def _reset_identity(self):
"""Initializes or resets ``_id`` attribute with new object"""
self._id = _Identity()
return self
# ndarray compat
def __len__(self):
"""
return the length of the Index
"""
return len(self._data)
def __array__(self, dtype=None):
""" the array interface, return my values """
return self._data.view(np.ndarray)
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc
"""
if is_bool_dtype(result):
return result
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
return Index(result, **attrs)
@cache_readonly
def dtype(self):
""" return the dtype object of the underlying data """
return self._data.dtype
@cache_readonly
def dtype_str(self):
""" return the dtype str of the underlying data """
return str(self.dtype)
@property
def values(self):
""" return the underlying data as an ndarray """
return self._data.view(np.ndarray)
@property
def _values(self):
# type: () -> Union[ExtensionArray, Index]
# TODO(EA): remove index types as they become extension arrays
"""The best array representation.
This is an ndarray, ExtensionArray, or Index subclass. This differs
from ``_ndarray_values``, which always returns an ndarray.
Both ``_values`` and ``_ndarray_values`` are consistent between
``Series`` and ``Index``.
It may differ from the public '.values' method.
index | values | _values | _ndarray_values |
        ----------------- | --------------- | ------------ | --------------- |
CategoricalIndex | Categorical | Categorical | codes |
DatetimeIndex[tz] | ndarray[M8ns] | DTI[tz] | ndarray[M8ns] |
For the following, the ``._values`` is currently ``ndarray[object]``,
but will soon be an ``ExtensionArray``
index | values | _values | _ndarray_values |
----------------- | --------------- | ------------ | --------------- |
PeriodIndex | ndarray[object] | ndarray[obj] | ndarray[int] |
IntervalIndex | ndarray[object] | ndarray[obj] | ndarray[object] |
See Also
--------
values
_ndarray_values
"""
return self.values
def get_values(self):
"""
Return `Index` data as an `numpy.ndarray`.
Returns
-------
numpy.ndarray
A one-dimensional numpy array of the `Index` values.
See Also
--------
Index.values : The attribute that get_values wraps.
Examples
--------
Getting the `Index` values of a `DataFrame`:
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
... index=['a', 'b', 'c'], columns=['A', 'B', 'C'])
>>> df
A B C
a 1 2 3
b 4 5 6
c 7 8 9
>>> df.index.get_values()
array(['a', 'b', 'c'], dtype=object)
Standalone `Index` values:
>>> idx = pd.Index(['1', '2', '3'])
>>> idx.get_values()
array(['1', '2', '3'], dtype=object)
`MultiIndex` arrays also have only one dimension:
>>> midx = pd.MultiIndex.from_arrays([[1, 2, 3], ['a', 'b', 'c']],
... names=('number', 'letter'))
>>> midx.get_values()
array([(1, 'a'), (2, 'b'), (3, 'c')], dtype=object)
>>> midx.get_values().ndim
1
"""
return self.values
@Appender(IndexOpsMixin.memory_usage.__doc__)
def memory_usage(self, deep=False):
result = super(Index, self).memory_usage(deep=deep)
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
# ops compat
@deprecate_kwarg(old_arg_name='n', new_arg_name='repeats')
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of an Index.
Returns a new index where each element of the current index
is repeated consecutively a given number of times.
Parameters
----------
repeats : int
The number of repetitions for each element.
**kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
pandas.Index
Newly created Index with repeated elements.
See Also
--------
Series.repeat : Equivalent function for Series
numpy.repeat : Underlying implementation
Examples
--------
>>> idx = pd.Index([1, 2, 3])
>>> idx
Int64Index([1, 2, 3], dtype='int64')
>>> idx.repeat(2)
Int64Index([1, 1, 2, 2, 3, 3], dtype='int64')
>>> idx.repeat(3)
Int64Index([1, 1, 1, 2, 2, 2, 3, 3, 3], dtype='int64')
"""
nv.validate_repeat(args, kwargs)
return self._shallow_copy(self._values.repeat(repeats))
_index_shared_docs['where'] = """
.. versionadded:: 0.19.0
Return an Index of same shape as self and whose corresponding
entries are from self where cond is True and otherwise are from
other.
Parameters
----------
cond : boolean array-like with the same length as self
other : scalar, or array-like
"""
@Appender(_index_shared_docs['where'])
def where(self, cond, other=None):
if other is None:
other = self._na_value
dtype = self.dtype
values = self.values
if is_bool(other) or is_bool_dtype(other):
# bools force casting
values = values.astype(object)
dtype = None
values = np.where(cond, values, other)
if self._is_numeric_dtype and np.any(isna(values)):
# We can't coerce to the numeric dtype of "self" (unless
# it's float) if there are NaN values in our output.
dtype = None
return self._shallow_copy_with_infer(values, dtype=dtype)
def ravel(self, order='C'):
"""
return an ndarray of the flattened values of the underlying data
See also
--------
numpy.ndarray.ravel
"""
return self._ndarray_values.ravel(order=order)
# construction helpers
@classmethod
def _try_convert_to_int_index(cls, data, copy, name, dtype):
"""
Attempt to convert an array of data into an integer index.
Parameters
----------
data : The data to convert.
copy : Whether to copy the data or not.
name : The name of the index returned.
Returns
-------
int_index : data converted to either an Int64Index or a
UInt64Index
Raises
------
ValueError if the conversion was not successful.
"""
from .numeric import Int64Index, UInt64Index
if not is_unsigned_integer_dtype(dtype):
# skip int64 conversion attempt if uint-like dtype is passed, as
            # this could return Int64Index when UInt64Index is what's desired
try:
res = data.astype('i8', copy=False)
if (res == data).all():
return Int64Index(res, copy=copy, name=name)
except (OverflowError, TypeError, ValueError):
pass
# Conversion to int64 failed (possibly due to overflow) or was skipped,
# so let's try now with uint64.
try:
res = data.astype('u8', copy=False)
if (res == data).all():
return UInt64Index(res, copy=copy, name=name)
except (OverflowError, TypeError, ValueError):
pass
raise ValueError
@classmethod
def _scalar_data_error(cls, data):
raise TypeError('{0}(...) must be called with a collection of some '
'kind, {1} was passed'.format(cls.__name__,
repr(data)))
@classmethod
def _string_data_error(cls, data):
raise TypeError('String dtype not supported, you may need '
'to explicitly cast to a numeric type')
@classmethod
def _coerce_to_ndarray(cls, data):
"""coerces data to ndarray, raises on scalar data. Converts other
iterables to list first and then to array. Does not touch ndarrays.
"""
if not isinstance(data, (np.ndarray, Index)):
if data is None or
|
is_scalar(data)
|
pandas.core.dtypes.common.is_scalar
|
import datetime
from collections import OrderedDict
from behave import *
from behave_pandas import table_to_dataframe
import pandas as pd
import pandas.testing as pdt
import numpy as np
use_step_matcher("parse")
@then("it matches a manually created data frame with all valid nullable boolean dtypes")
def step_impl(context):
all_dtypes_df = pd.concat(
[pd.Series([True, False, pd.NA], dtype="boolean")], axis=1
)
pdt.assert_frame_equal(all_dtypes_df, context.parsed)
@then("it matches a manually created data frame with all valid nullable integer dtypes")
def step_impl(context):
all_dtypes_df = pd.concat([pd.Series([0, 10, pd.NA], dtype="Int64"),], axis=1,)
pdt.assert_frame_equal(all_dtypes_df, context.parsed)
@then("it matches a manually created data frame with all valid string dtypes")
def step_impl(context):
all_dtypes_df = pd.concat(
[
pd.Series(["egg", "spam", pd.NA], dtype="string"),
|
pd.Series(["silly walks", "", "dead parrot"], dtype="string")
|
pandas.Series
|
import os
import re
import sys
import time
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.gridspec as gridspec
from itertools import permutations, product
import numpy as np
import pandas as pd
from palettable.colorbrewer.qualitative import Paired_12
import seaborn as sns
from scipy.optimize import curve_fit
import utils
mpl.rcParams['text.usetex'] = True
mpl.rcParams['text.latex.preamble'] = [r'\usepackage{amsmath}']
DATA_DIR = '/home/johnmcbride/projects/Scales/Data_compare/Data_for_figs/'
FIGS_DIR = '/home/johnmcbride/projects/Scales/Data_compare/Figs/'
REAL_DIR = '/home/johnmcbride/projects/Scales/Data_compare/Processed/Real'
BIASES = ['none', 'S#1_n1', 'S#1_n2',#'',
'distI_n1', 'distI_n2', 'distI_n3', 'distW',#'',
'distI_n1_S#1_n1', 'distI_n1_S#1_n2', 'distI_n2_S#1_n1', 'distI_n2_S#1_n2',
'distW_S#1_n1', 'distW_S#1_n2', 'distW_S#2_n2', 'distW_S#2_n3',
'hs_n1_w05', 'hs_n1_w10', 'hs_n1_w15', 'hs_n1_w20',
'hs_n2_w05', 'hs_n2_w10', 'hs_n2_w15', 'hs_n2_w20',
'hs_n3_w05', 'hs_n3_w10', 'hs_n3_w15', 'hs_n3_w20',
'hs_r3_w05', 'hs_r3_w10', 'hs_r3_w15', 'hs_r3_w20'] + \
[f"im5_r{r:3.1f}_w{w:02d}" for r in [0, 0.5, 1, 2] for w in [5,10,15,20]] + \
[f"Nim5_r0.0_w{w:02d}" for w in [5,10,15,20]] + \
[f"Nhs_n1_w{w:02d}" for w in [5,10,15,20]] + \
[f"Nhs_n2_w{w:02d}" for w in [10,20]] + \
[f"Nhs_n3_w{w:02d}" for w in [10,20]]
BIAS_GROUPS = ['none', 'S#1', 'HS',
'distW', 'distW_S#1', 'distW_S#2',
'distI', 'distI_S#1']
BIAS_GROUPS = ['none', 'HS',
'S#1', 'distW',
'distW_S#1', 'distW_S#2',
'distI', 'distI_S#1', 'im5', 'AHS']
groups = ['none'] + ['S#1']*2 + ['distI']*3 + ['distW'] + ['distI_S#1']*4 + \
['distW_S#1']*2 + ['distW_S#2']*2 + ['HS']*12 + ['im5']*24 + ['HS']*8
BIAS_KEY = {BIASES[i]:groups[i] for i in range(len(BIASES))}
def plot_MC_dist(fName, X='pair_ints', out=False, f=False, cum=False):
df = pd.read_feather(fName)
if f:
sns.distplot(df[X], bins=100)
else:
if cum:
sns.distplot(utils.extract_floats_from_string(df[X]), bins=100, hist_kws=dict(cumulative=True), kde_kws=dict(cumulative=True))
else:
sns.distplot(utils.extract_floats_from_string(df[X]), bins=100)
if out:
return df
def plot_MC_kde(fName, X='pair_ints', out=False, f=False, ax='None'):
df =
|
pd.read_feather(fName)
|
pandas.read_feather
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# Necessary Imports
import re
import pandas as pd
# Reference Number Lists
numbersList = ['zeroth','first','second','third','fourth','fifth','sixth','seventh','eighth','ninth','tenth',
               'eleventh','twelfth','thirteenth','fourteenth','fifteenth','sixteenth','seventeenth','eighteenth','nineteenth',
               'twentieth','thirtieth','fortieth','fiftieth','sixtieth','seventieth','eightieth','ninetieth',
               'hundredth','thousandth','millionth','billionth','trillionth']
uniqueNumbersList = ['zero','one','two','three','four','five','six','seven','eight','nine','ten',
'eleven','twelve','thirteen','fourteen','fifteen','sixteen','seventeen','eighteen','nineteen',
                     'twenty','thirty','forty','fifty','sixty','seventy','eighty','ninety',
                     'hundred','thousand','million','billion','trillion']
numbSuffixList = ['st','nd','rd','th']
numbWordList = ['1st','2nd','3rd','13th','23rd','31st','222nd','65th','99th','4,256th',
'0.1','0.93','0.21','0.00','-1.10','0.001','0.1234','3.14159',
'3,265','5,384','26,221','469,365','1,000,000','2,000,000,000',
'644.355.222,01','3,000,000,000,000','965,000,332,000,001']
# In[56]:
# Functions List
# Returns a list of all phone numbers (handles international) in a given sentence/string
def extract_phone_numb(inputString):
extractedNumbList = re.findall('((?:\+\d{2}[-\.\s]??|\d{4}[-\.\s]??)?(?:\d{3}[-\.\s]??\d{3}[-\.\s]??\d{4}|\(\d{3}\)\s*\d{3}[-\.\s]??\d{4}|\d{3}[-\.\s]??\d{4}))',inputString)
df = pd.DataFrame(extractedNumbList,columns =['Phone_Numbers'])
display(df)
return extractedNumbList
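# Usage sketch (hypothetical input, not from the notebook):
# extract_phone_numb("Call (555) 123-4567 or 555-987-6543")
# displays the matches in a one-column DataFrame and returns them as a list.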
# Returns a list of all emails (handles non-'.com' domains) in a given sentence/string
def extract_emails(inputString):
extractedEmailList = re.findall('\S+@\S+', inputString)
for e,email in enumerate(extractedEmailList):
emailSplit = email.split('.')
domainSub = re.sub(r'[^\w\s]', '', emailSplit[1])
extractedEmailList[e] = f'{emailSplit[0]}.{domainSub}'
df =
|
pd.DataFrame(extractedEmailList,columns =['Email_Addresses'])
|
pandas.DataFrame
|
import pandas as pd
import pytest
from feature_engine.encoding import CountFrequencyEncoder
def test_encode_1_variable_with_counts(df_enc):
# test case 1: 1 variable, counts
encoder = CountFrequencyEncoder(encoding_method="count", variables=["var_A"])
X = encoder.fit_transform(df_enc)
# expected result
transf_df = df_enc.copy()
transf_df["var_A"] = [
6,
6,
6,
6,
6,
6,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
4,
4,
4,
4,
]
# init params
assert encoder.encoding_method == "count"
assert encoder.variables == ["var_A"]
# fit params
assert encoder.variables_ == ["var_A"]
assert encoder.encoder_dict_ == {"var_A": {"A": 6, "B": 10, "C": 4}}
assert encoder.n_features_in_ == 3
# transform params
|
pd.testing.assert_frame_equal(X, transf_df)
|
pandas.testing.assert_frame_equal
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from numpy import genfromtxt
import glob
path = r'/Users/jamesledoux/Documents/bandits/results' # use your path
all_files = glob.glob(path + "/*.csv")
all_dataframes = []
for filename in all_files:
if '_raw' not in filename:
df = pd.read_csv(filename, index_col=None, header=0)
if 'epsilon_greedy' in filename:
df['algorithm'] = 'epsilon_greedy'
if 'ucb1' in filename:
df['algorithm'] = 'ucb1'
if 'bayesian' in filename:
df['algorithm'] = 'bayesian_ucb'
if 'exp3' in filename:
df['algorithm'] = 'exp3'
all_dataframes.append(df)
df = pd.concat(all_dataframes, axis=0, ignore_index=True)
epsilon = df.loc[(df['algorithm']=='epsilon_greedy') & (df['batch_size']==10000) & (df[' slate_size']==5)]
epsilon = epsilon.sort_values(' epsilon')
epsilon.plot(x=' epsilon', y=' mean_reward', kind='bar', title='Mean Reward by Epsilon Value',
legend=False)
plt.tight_layout()
plt.savefig(path + '/epsilon_plot.png', dpi = 300)
exp = df.loc[(df['algorithm']=='exp3') & (df['batch_size']==10000) & (df[' slate_size']==5)]
exp = exp.sort_values(' gamma')
exp.plot(x=' gamma', y=' mean_reward', kind='bar', title='Mean Reward by Gamma Value',
legend=False)
plt.tight_layout()
plt.savefig(path + '/exp_plot.png', dpi = 300)
ucb = df.loc[(df['algorithm']=='ucb1') & (df['batch_size']==10000) & (df[' slate_size']==5)]
ucb = ucb.sort_values(' ucb_multiplier')
ucb[' ucb_multiplier'] = 'UCB1'
ucb_bayes = df.loc[(df['algorithm']=='bayesian_ucb') & (df['batch_size']==10000) & (df[' slate_size']==5)]
ucb_bayes = ucb_bayes.sort_values(' ucb_multiplier')
ucb_bayes[' ucb_multiplier'] = ucb_bayes[' ucb_multiplier'].astype(str)
ucb = pd.concat([ucb, ucb_bayes], axis=0)
ucb.plot(x=' ucb_multiplier', y=' mean_reward', kind='bar', title='Mean Reward by UCB Scale Parameter',
legend=False)
plt.tight_layout()
plt.savefig(path + '/ucb_plot.png', dpi = 300)
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(20,7), sharey=True, constrained_layout=True)
fig.suptitle('Parameter Tuning for Epsilon Greedy, EXP3, and UCB Bandits')
exp.plot(x=' gamma', y=' mean_reward', kind='bar', title='EXP3',
legend=False, ax=axes[0], ylim=(0,.6))
epsilon.plot(x=' epsilon', y=' mean_reward', kind='bar', title='Epsilon Greedy',
legend=False, ax=axes[1], ylim=(0,.6))
ucb.plot(x=' ucb_multiplier', y=' mean_reward', kind='bar', title='UCB (Bayesian and UCB1)',
legend=False, ax=axes[2], ylim=(0,.6))
axes[0].set(ylabel='Mean Reward')
plt.rc('font', size=12)
plt.savefig(path + '/all_plots.png')
best_epsilon = '/Users/jamesledoux/Documents/bandits/results/epsilon_greedy_100_5_0.1_1500_raw.csv'
best_ucb = '/Users/jamesledoux/Documents/bandits/results/bayesian_100_5_1.5_1500_raw.csv'
best_exp = '/Users/jamesledoux/Documents/bandits/results/exp3_100_5_0.1_1500_raw.csv'
epsilon = genfromtxt(best_epsilon, delimiter=',')
ucb = genfromtxt(best_ucb, delimiter=',')
exp = genfromtxt(best_exp, delimiter=',')
epsilon = epsilon[~np.isnan(epsilon)]
ucb = ucb[~np.isnan(ucb)]
exp = exp[~np.isnan(exp)]
cumulative_epsilon = np.cumsum(epsilon) / np.linspace(1, len(epsilon), len(epsilon))
cumulative_ucb = np.cumsum(ucb) / np.linspace(1, len(ucb), len(ucb))
cumulative_exp = np.cumsum(exp) / np.linspace(1, len(exp), len(exp))
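# Note on the running averages above: np.linspace(1, len(r), len(r)) is simply
# [1, 2, ..., len(r)], so each element equals the cumulative reward divided by the
# number of steps so far; an equivalent form is np.cumsum(r) / np.arange(1, len(r) + 1).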
plt.plot(pd.Series(cumulative_epsilon).rolling(200).mean(), label='Epsilon Greedy')
plt.plot(pd.Series(cumulative_exp).rolling(200).mean(), label='EXP3')
plt.plot(pd.Series(cumulative_ucb).rolling(200).mean(), label='Bayesian UCB')
plt.title('200-Round Rolling Mean Reward')
plt.xlabel('Time Step')
plt.ylabel('Reward')
plt.legend()
plt.savefig(path + '/trailing_average_rewards.png')
plt.clf()
cumulative_epsilon = np.cumsum(epsilon)
cumulative_ucb = np.cumsum(ucb)
cumulative_exp = np.cumsum(exp)
plt.plot(pd.Series(cumulative_epsilon), label='Epsilon Greedy')
plt.plot(pd.Series(cumulative_exp), label='EXP3')
plt.plot(pd.Series(cumulative_ucb), label='Bayesian UCB')
plt.title('Cumulative Reward Over Time')
plt.xlabel('Time Step')
plt.ylabel('Reward ("Liked" Movies)')
plt.legend()
plt.savefig(path + '/cumulative_rewards.png')
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(20,9))
lw=2
cumulative_epsilon = np.cumsum(epsilon)
cumulative_ucb = np.cumsum(ucb)
cumulative_exp = np.cumsum(exp)
axes[1].plot(pd.Series(cumulative_epsilon), lw=lw, label='Epsilon Greedy')
axes[1].plot(pd.Series(cumulative_exp), lw=lw, label='EXP3')
axes[1].plot(
|
pd.Series(cumulative_ucb)
|
pandas.Series
|
"""
(c) 2013 <NAME>
This source code is released under the Apache license.
<EMAIL>
Created on April 1, 2013
"""
import datetime as dt
import pandas as pd
import numpy as np
import random
import csv
from .order import Order
from .fincommon import FinCommon
import finpy.utils.fpdateutil as du
from finpy.utils import utils as ut
from finpy.financial.equity import get_tickdata
class Portfolio():
"""
Portfolio has three items.
equities is a panda Panel of equity data.
Reference by ticker. self.equities['AAPL']
cash is a pandas series with daily cash balance.
total is the daily balance.
order_list is a list of Order
"""
def __init__(self, equities, cash, dates, order_list=None):
self.equities = pd.concat(equities, names=["tick", "date"])
self.equities.sort_index(inplace=True)
# self.equities = self.equities.reorder_levels(order=["date", "tick"])
"""
:var equities: is a Panel of equities.
"""
if order_list == None:
self.order = pd.DataFrame(columns=['tick', 'date', 'action', 'shares', 'price'])
self.order = self.order.set_index(["tick","date"])
else:
ol = order_list
ol.sort(key=lambda x: x.date)
self.order = pd.DataFrame.from_records([s.to_dict() for s in ol])
self.order = self.order.set_index(["tick","date"])
xi = self.order[self.order["price"].isnull()].index
self.order.loc[xi, "price"] = self.equities.loc[xi, "close"]
self.cash = pd.Series(index=dates)
self.cash[0] = cash
self.total = pd.Series(index=dates)
self.total[0] = self.dailysum(dates[0])
self.dates = dates
def dailysum(self, date):
" Calculate the total balance of the date."
equities_total = np.nansum(self.equities.xs(key=date, level=1)['shares'] * self.equities.xs(key=date, level=1)['close'])
total = equities_total + self.cash[date]
return total
def buy(self, shares, tick, price, date, update_ol=False):
"""
Portfolio Buy
Calculate total, shares and cash upto the date.
Before we buy, we need to update share numbers. "
"""
self.cal_total(date)
last_valid = self.equities.loc[(tick,slice(None)),'shares'].last_valid_index()[1]
self.equities.loc[(tick, slice(last_valid, date)), 'shares'] = self.equities.loc[(tick, last_valid), 'shares']
self.equities.loc[(tick, date), 'shares'] += shares
self.cash[date] -= price*shares
self.total[date] = self.dailysum(date)
if update_ol:
self.order = self.order.append(pd.DataFrame({"action": "buy", "shares" : shares, "price": self.equities.loc[(tick, date), 'close']}, [(tick, date)]))
def sell(self, shares, tick, price, date, update_ol=False):
"""
Portfolio sell
Calculate shares and cash upto the date.
"""
self.cal_total(date)
last_valid = self.equities.loc[(tick,slice(None)),'shares'].last_valid_index()[1]
self.equities.loc[(tick, slice(last_valid, date)), 'shares'] = self.equities.loc[(tick, last_valid), 'shares']
self.equities.loc[(tick, date), 'shares'] -= shares
self.cash[date] += price*shares
self.total[date] = self.dailysum(date)
if update_ol:
self.order = self.order.append(pd.DataFrame({"action": "sell", "shares" : shares, "price": self.equities.loc[(tick, date), 'close']}, [(tick, date)]))
def fillna_cash(self, date):
" fillna on cash up to date "
update_start = self.cash.last_valid_index()
update_end = date
self.cash[update_start:update_end] = self.cash[update_start]
return update_start, update_end
def fillna(self, date):
"""
fillna cash and all equities.
return update_start and update_end.
"""
update_start, update_end = self.fillna_cash(date)
for tick in self.equities.index.unique(0).tolist():
self.equities.loc[(tick, slice(update_start, update_end)),'shares'] = self.equities.loc[(tick, update_start), 'shares']
return update_start, update_end
def cal_total(self, date=None):
"""
Calculate total up to "date".
"""
if date == None:
equities_sum = pd.Series(index=self.ldt_timestamps())
each_total = self.equities.loc[(slice(None),slice(None)),'close'] * self.equities.loc[(slice(None),slice(None)),'shares']
equities_sum = each_total.groupby(level=1).sum()
self.total = self.cash + equities_sum
else:
start, end = self.fillna(date)
equities_total_df = self.equities.loc[(slice(None),slice(start,end)),'shares'] * self.equities.loc[(slice(None),slice(start,end)),'close']
equities_total = equities_total_df.groupby(level=1).sum()
self.total[start:end ] = equities_total + self.cash[start:end]
def put_orders(self):
"""
Put the order list to the DataFrame.
Update shares, cash columns of each Equity
"""
for o in self.order:
if o.action.lower() == "buy":
self.buy(date=o.date, shares=np.float(o.shares), price=np.float(o.price), tick=o.tick)
elif o.action.lower() == "sell":
self.sell(shares=np.float(o.shares), tick=o.tick, price=np.float(o.price), date=o.date)
def sim(self, ldt_timestamps=None):
"""
Go through each day and calculate total and cash.
"""
self.put_orders()
if ldt_timestamps == None:
ldt_timestamps = self.ldt_timestamps()
dt_end = ldt_timestamps[-1]
self.cal_total()
def csvwriter(self, equity_col=None, csv_file="pf.csv", total=True, cash=True, d=','):
"""
Write the content of the Portfolio to a csv file.
If total is True, the total is printed to the csv file.
If cash is True, the cash is printed to the csv file.
equity_col specify which columns to print for an equity.
The specified columns of each equity will be printed.
"""
lines = []
l = []
l.append("Date")
if total:
l.append("Total")
if cash:
l.append("Cash")
if equity_col != None:
for e in self.equities.index.droplevel(1).drop_duplicates():
for col in equity_col:
label = e + col
l.append(label)
lines.append(l)
for i in self.ldt_timestamps():
l = []
l.append(i.strftime("%Y-%m-%d"))
if total:
l.append(round(self.total[i], 2))
if cash:
l.append(round(self.cash[i], 2))
if equity_col != None:
for e in self.equities.index.droplevel(1).drop_duplicates():
for col in equity_col:
l.append(round(self.equities.loc[(e, i), col], 2))
lines.append(l)
with open(csv_file, 'w') as fp:
cw = csv.writer(fp, lineterminator='\n', delimiter=d)
for line in lines:
cw.writerow(line)
def write_order_csv(self, csv_file="pf_order.csv", d=','):
self.order.reorder_levels(["date", "tick"]).to_csv(path_or_buf = csv_file, sep = d, header = False, columns = ["action", "shares"])
def daily_return(self,tick=None):
"""
Return the return rate of each day, a list.
:param tick: The ticker of the equity.
:type string:
"""
if tick == None:
total = self.total
else:
total = self.equities.loc[(tick,slice(None)),'close'].droplevel(0)
daily_rtn = total/total.shift(1)-1
daily_rtn[0] = 0
return np.array(daily_rtn)
def avg_daily_return(self, tick=None):
" Average of the daily_return list "
return np.average(self.daily_return(tick))
def std(self, tick=None):
" Standard Deviation of the daily_return "
return np.std(self.daily_return(tick))
def normalized(self, tick=None):
start = self.ldt_timestamps()[0]
if tick == None:
return self.total/self.total[0]
else:
return (self.equities.loc[(tick, slice(None)), 'close']/self.equities.loc[(tick, start), 'close']).droplevel(0)
def normalized_price(self, tick):
self.equities.loc[(tick, slice(None)),'open'] = self.equities.loc[(tick, slice(None)),'open'] * self.equities.loc[(tick, slice(None)),'close']/self.equities.loc[(tick, slice(None)),'actual_close']
self.equities.loc[(tick, slice(None)),'high'] = self.equities.loc[(tick, slice(None)),'high'] * self.equities.loc[(tick, slice(None)),'close']/self.equities.loc[(tick, slice(None)),'actual_close']
self.equities.loc[(tick, slice(None)),'low'] = self.equities.loc[(tick, slice(None)),'low'] * self.equities.loc[(tick, slice(None)),'close']/self.equities.loc[(tick, slice(None)),'actual_close']
def sortino(self, k=252, tick=None):
"""
Return Sortino Ratio.
You can overwrite the coefficient with k.
The default is 252.
"""
daily_rtn = self.daily_return(tick)
negative_daily_rtn = daily_rtn[daily_rtn < 0]
sortino_dev = np.std( negative_daily_rtn)
sortino = (self.avg_daily_return(tick) / sortino_dev) * np.sqrt(k)
return sortino
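# Illustrative note (added comment, not original code): with an average daily return
# of 0.0005, a downside deviation of 0.01 and k=252, the Sortino ratio is
# (0.0005 / 0.01) * sqrt(252) ~= 0.79.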
def return_ratio(self, tick=None):
" Return the return ratio of the period "
if tick == None:
return self.total[-1]/self.total[0]
else:
return self.equities.loc[(tick, self.ldt_timestamps()[-1]), 'close']/self.equities.loc[(tick, self.ldt_timestamps()[0]), 'close']
def moving_average(self, window=20, tick=None):
"""
Return an array of the moving average. Window specifies how many days are in
a window.
"""
if tick == None:
ma = self.total.rolling(window=window).mean()
else:
ma = self.equities.loc[(tick, slice(None)), 'close'].droplevel(0).rolling(window=window).mean()
ma.iloc[0:window] = ma.iloc[window]
return ma
def drawdown(self, window=10):
"""
Find the peak within the retrospective window.
Drawdown is the difference between the peak and the current value.
"""
ldt_timestamps = self.ldt_timestamps()
pre_timestamps = ut.pre_timestamps(ldt_timestamps, window)
# ldf_data has the data prior to our current interest.
# This is used to calculate moving average for the first window.
merged_data = self.total[pre_timestamps[0]:ldt_timestamps[-1]]
total_timestamps = merged_data.index
dd = pd.Series(index=ldt_timestamps)
j = 0
for i in range(len(pre_timestamps), len(total_timestamps)):
win_start = total_timestamps[i - window]
win_end = total_timestamps[i]
ts_value = merged_data[win_start:win_end]
current = merged_data[win_end]
peak = np.amax(ts_value)
dd[j] = (peak-current)/peak
j += 1
return dd
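# Illustrative note (added comment, not original code): if the peak of the trailing
# window is 110 and the current total is 99, the drawdown for that day is
# (110 - 99) / 110 = 0.1, i.e. a 10% drop from the window's peak.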
def random_choose_tick(self, exclude=[]):
"""
Randomly return a ticker in the portfolio.
The items in exclude list are not in the select pool.
"""
ex_set = set(exclude)
pf_set = set(self.equities.index.unique(0))
sel_ls = [s for s in pf_set - ex_set]
return random.choice(sel_ls)
def equities_long(self, date):
"""
Return the list of long equities on the date.
"Long equities" means the number of shares of the equity is greater than 0.
"""
return [x for x in self.equities.index.unique(0) if self.equities.loc[(x, date), 'shares'] > 0]
def ldt_timestamps(self):
"""
Return an array of datetime objects.
"""
ldt_index = self.total.index
dt_start = ldt_index[0]
dt_end = ldt_index[-1]
dt_timeofday = dt.timedelta(hours=16)
ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt_timeofday)
return ldt_timestamps
def excess_return(self, rf_tick="$TNX", tick=None):
"""
An excess return is the difference between an asset's return and the riskless rate.
"""
return self.daily_return(tick=tick) - ut.riskfree_return(self.ldt_timestamps(), rf_tick=rf_tick)
def mean_excess_return(self, rf_tick="$TNX", tick=None):
return np.mean(self.excess_return(rf_tick=rf_tick, tick=tick))
def residual_return(self, benchmark, rf_tick="$TNX", tick=None):
"""
A residual return is the excess return minus beta times the benchmark excess return.
"""
beta = self.beta(benchmark, tick)
return self.excess_return(rf_tick=rf_tick, tick=tick) - beta * self.excess_return(rf_tick=rf_tick, tick=benchmark)
def mean_residual_return(self, benchmark, rf_tick="$TNX", tick=None):
return np.mean(self.residual_return(benchmark=benchmark, rf_tick=rf_tick, tick=tick))
def residual_risk(self, benchmark, rf_tick="$TNX", tick=None):
"""
Residual Risk is the standard deviation of the residual return.
"""
return np.std(self.residual_return(benchmark=benchmark, rf_tick=rf_tick, tick=tick))
def active_return(self, benchmark, tick=None):
"""
An active return is the difference between the benchmark and the actual return.
"""
return self.daily_return(tick=tick) - self.daily_return(tick=benchmark)
def mean_active_return(self, benchmark, tick=None):
return np.mean(self.active_return(benchmark, tick))
def beta_alpha(self, benchmark):
"""
benchmark is an Equity representing the market.
It can be S&P 500, Russel 2000, or your choice of market indicator.
This function uses polyfit in numpy to find the closest linear equation.
"""
beta, alpha = np.polyfit(self.daily_return(tick=benchmark), self.daily_return(), 1)
return beta, alpha
def beta(self, benchmark, tick=None):
"""
benchmark is an Equity representing the market.
This function uses cov in numpy to calculate beta.
"""
benchmark_return = self.daily_return(tick=benchmark)
C = np.cov(benchmark_return, self.daily_return(tick=tick))/np.var(benchmark_return)
beta = C[0][1]/C[0][0]
return beta
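# Note (added comment): np.cov returns the 2x2 covariance matrix, so C[0][1] / C[0][0]
# is Cov(benchmark, portfolio) / Var(benchmark), the usual CAPM beta; the earlier
# division by np.var(benchmark_return) cancels out in this ratio.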
def excess_risk(self, rf_tick="$TNX", tick=None):
"""
$FVX is another option. Five-Year treasury rate.
An excess risk is the standard deviation of the excess return.
"""
return np.std(self.excess_return(rf_tick=rf_tick, tick=tick))
def active_risk(self, benchmark, tick=None):
"""
An active risk is the standard deviation of the active return.
"""
return np.std(self.active_return(benchmark, tick))
def info_ratio(self, benchmark, rf_tick="$TNX", tick=None):
"""
Information Ratio
https://en.wikipedia.org/wiki/Information_ratio
Information Ratio is defined as active return divided by active risk,
where active return is the difference between the return of the security
and the return of a selected benchmark index, and active risk is the
standard deviation of the active return.
"""
return self.mean_active_return(benchmark=benchmark, tick=tick)/self.active_risk(benchmark=benchmark, tick=tick)
def appraisal_ratio(self, benchmark, rf_tick="$TNX", tick=None):
"""
Appraisal Ratio
https://en.wikipedia.org/wiki/Appraisal_ratio
Appraisal Ratio is defined as residual return divided by residual risk,
where residual return is the excess return minus beta times the benchmark
excess return, and residual risk is the standard deviation of the residual
return.
"""
return self.mean_residual_return(benchmark, rf_tick, tick)/self.residual_risk(benchmark, rf_tick, tick)
def sharpe_ratio(self, rf_tick="$TNX", tick=None):
"""
Return the Original Sharpe Ratio.
https://en.wikipedia.org/wiki/Sharpe_ratio
rf_tick is Ten-Year treasury rate ticker at Yahoo.
"""
return self.mean_excess_return(rf_tick=rf_tick, tick=tick)/self.excess_risk(rf_tick=rf_tick, tick=tick)
def up_ratio(self, date, tick, days=10):
"""
Return the ratio of the past up days.
This function only applies to equities.
"""
ldt_index = self.ldt_timestamps()
last = date
first = date-days
up = 0.0
dn = 0.0
for i in range(first, last+1):
if self.equities.loc[(tick, ldt_index[i]), 'close'] < self.equities.loc[(tick, ldt_index[i-1]), 'close']:
dn += 1
else:
up += 1
ratio = up / (dn + up)
return ratio
def dn_ratio(self, date,tick , days=10):
"""
Return the ratio of the past down days.
This function only applies to equities.
"""
ratio = 1.0 - self.up_ratio(date=date, tick=tick, days=days)
return ratio
def rolling_normalized_stdev(self, tick, window=50):
"""
Return the rolling standard deviation of normalized price.
This function only applies to equities.
"""
ldt_timestamps = self.ldt_timestamps()
pre_timestamps = ut.pre_timestamps(ldt_timestamps, window)
# ldf_data has the data prior to our current interest.
# This is used to calculate moving average for the first window.
ldf_data = get_tickdata([tick], pre_timestamps)
pre_data = pd.concat(ldf_data, names=["tick", "date"])
merged_data = pd.concat([pre_data.loc[(tick, slice(None)), 'close'], self.equities.loc[(tick,slice(None)),'close']])
all_timestamps = pre_timestamps.append(ldt_timestamps)
merged_daily_rtn = (self.equities.loc[(tick,slice(None)),'close']/self.equities.loc[(tick,slice(None)),'close'].shift(1)-1)
merged_daily_rtn[0] = 0
sigma = merged_daily_rtn.rolling(window).std()
return sigma.droplevel(0)[self.ldt_timestamps()]
def max_rise(self, tick, date, window=20):
"""
Find the maximum change percentage between the current date and the bottom of the retrospective window.
:param tick: ticker
:type tick: string
:param date: date to calculate max_rise
:type date: datetime
:param window: The days of window to calculate max_rise.
:type window: int
"""
ldt_timestamps = self.ldt_timestamps()
pre_timestamps = ut.pre_timestamps(ldt_timestamps, window)
first = pre_timestamps[0]
# ldf_data has the data prior to our current interest.
# This is used to calculate moving average for the first window.
try:
self.equities.loc[(tick, first), 'close']
merged_data = self.equities.loc[(tick, slice(None)), 'close']
except:
ldf_data = get_tickdata([tick], pre_timestamps)
pre_data = pd.concat(ldf_data, names=["tick", "date"])
merged_data = pd.concat([pre_data.loc[(tick, slice(None)), 'close'], self.equities.loc[(tick,slice(None)),'close']])
if(isinstance(date , int)):
int_date = ldt_timestamps[date]
else:
int_date = date
merged_data = merged_data.droplevel(0)
c = merged_data.index.get_loc(int_date)
m = merged_data[c-window:c].min()
r = (merged_data[c]-m)/merged_data[c]
return r
def max_fall(self, tick, date, window=20):
"""
Find the change percentage between the top and the bottom of the retrospective window.
:param tick: ticker
:type tick: string
:param date: date to calculate max_rise
:type date: datetime
:param window: The days of window to calculate max_rise.
:type window: int
"""
ldt_timestamps = self.ldt_timestamps()
pre_timestamps = ut.pre_timestamps(ldt_timestamps, window)
first = pre_timestamps[0]
# ldf_data has the data prior to our current interest.
# This is used to calculate moving average for the first window.
try:
self.equities.loc[(tick, first), 'close']
merged_data = self.equities.loc[(tick, slice(None)), 'close']
except:
ldf_data = get_tickdata([tick], pre_timestamps)
pre_data = pd.concat(ldf_data, names=["tick", "date"])
merged_data = pd.concat([pre_data.loc[(tick, slice(None)), 'close'], self.equities.loc[(tick,slice(None)),'close']])
if(isinstance(date , int)):
int_date = ldt_timestamps[date]
else:
int_date = date
merged_data = merged_data.droplevel(0)
c = merged_data.index.get_loc(int_date)
mx = merged_data[c-window:c].max()
mn = merged_data[c-window:c].min()
r = (mx-mn)/merged_data[c]
return r
def moving_average(self, tick, window=20):
"""
Return an array of the moving average. Window specifies how many days are in
a window.
:param tick: ticker
:type tick: string
:param window: The days of window to calculate moving average.
:type window: int
"""
mi = self.bollinger_band(tick=tick, window=window, mi_only=True)
return mi
def bollinger_band(self, tick, window=20, k=2, mi_only=False):
"""
Return four series for the Bollinger Band. The upper band is k times an N-period
standard deviation above the moving average. The lower band is k times an N-period
standard deviation below the moving average.
:param tick: ticker
:type tick: string
:param window: The days of window to calculate Bollinger Band.
:type window: int
:param k: the band width as a multiple of the N-period standard deviation.
:return bo: bo['mi'] is the moving average. bo['lo'] is the lower band.
bo['hi'] is the upper band. bo['ba'] is a series of the position of the current
price relative to the Bollinger Band.
:type bo: A dictionary of series.
"""
ldt_timestamps = self.ldt_timestamps()
pre_timestamps = ut.pre_timestamps(ldt_timestamps, window)
# ldf_data has the data prior to our current interest.
# This is used to calculate moving average for the first window.
ldf_data = get_tickdata([tick], pre_timestamps)
pre_data =
|
pd.concat(ldf_data, names=["tick", "date"])
|
pandas.concat
|
import datetime
import logging
from dataclasses import dataclass
from pathlib import Path
from typing import ClassVar, Optional, Union
import fsspec
import numpy as np
import pandas as pd
import xarray as xr
from power_perceiver.consts import BatchKey, Location
from power_perceiver.exceptions import NoPVSystemsInSlice
from power_perceiver.geospatial import lat_lon_to_osgb
from power_perceiver.load_prepared_batches.data_sources.prepared_data_source import NumpyBatch
from power_perceiver.load_raw.data_sources.raw_data_source import (
RawDataSource,
TimeseriesDataSource,
)
from power_perceiver.utils import check_path_exists, datetime64_to_float
_log = logging.getLogger(__name__)
@dataclass(kw_only=True)
class RawPVDataSource(
# Surprisingly, Python's class hierarchy is defined right-to-left.
# So the base class must go on the right.
TimeseriesDataSource,
RawDataSource,
):
"""Load PV data directly from the intermediate PV Zarr store.
Args:
pv_power_filename:
pv_metadata_filename:
roi_height_meters: The height of the region of interest (ROI) when creating examples.
For PV, we use meters (not pixels) because PV isn't an image.
Must be at least 1,000 meters.
roi_width_meters:
n_pv_systems_per_example: Each example will have exactly this number of PV systems,
or, if there are zero PV systems in the region (e.g. in northern Scotland),
then raise `NoPVSystemsInSlice` exception. If there is at least 1 PV system, then
randomly select PV systems for each example. If there are fewer PV systems available
than requested, then randomly sample with duplicates allowed, whilst ensuring all
available PV systems are used.
Attributes:
empty_example: xr.DataArray: An example of the correct shape, but where data and coords
are all NaNs!
_data_in_ram: xr.DataArray
The data is the 5-minutely PV power in Watts.
Dimension coordinates: time_utc, pv_system_id
Additional coordinates: x_osgb, y_osgb, capacity_wp
"""
pv_power_filename: str
pv_metadata_filename: str
roi_height_meters: int
roi_width_meters: int
n_pv_systems_per_example: int
# For now, let's assume the PV data is always 5-minutely, even though some PVOutput.org
# PV systems report data at 15-minutely intervals. For now, let's just interpolate
# 15-minutely data to 5-minutely. Later (WP3?) we could experiment with giving the model
# the "raw" (un-interpolated) 15-minutely PV data. See issue #74.
sample_period_duration: ClassVar[datetime.timedelta] = datetime.timedelta(minutes=5)
def __post_init__(self): # noqa: D105
self._sanity_check_args()
RawDataSource.__post_init__(self)
TimeseriesDataSource.__post_init__(self)
# Load everything into RAM once (at init) rather than in each worker process.
# This should be faster than loading from disk in every worker!
self.load_everything_into_ram()
self.empty_example = self._get_empty_example()
def _sanity_check_args(self) -> None:
check_path_exists(self.pv_power_filename)
check_path_exists(self.pv_metadata_filename)
assert self.roi_height_meters > 1_000
assert self.roi_width_meters > 1_000
assert self.n_pv_systems_per_example > 0
def load_everything_into_ram(self) -> None:
"""Open AND load PV data into RAM."""
# Load pd.DataFrame of power and pd.Series of capacities:
pv_power_watts, pv_capacity_wp, pv_system_row_number = _load_pv_power_watts_and_capacity_wp(
self.pv_power_filename, start_date=self.start_date, end_date=self.end_date
)
pv_metadata = _load_pv_metadata(self.pv_metadata_filename)
# Ensure pv_metadata, pv_power_watts, and pv_capacity_wp all have the same set of
# PV system IDs, in the same order:
pv_metadata, pv_power_watts = _intersection_of_pv_system_ids(pv_metadata, pv_power_watts)
pv_capacity_wp = pv_capacity_wp.loc[pv_power_watts.columns]
pv_system_row_number = pv_system_row_number.loc[pv_power_watts.columns]
self._data_in_ram = _put_pv_data_into_an_xr_dataarray(
pv_power_watts=pv_power_watts,
y_osgb=pv_metadata.y_osgb.astype(np.float32),
x_osgb=pv_metadata.x_osgb.astype(np.float32),
capacity_wp=pv_capacity_wp,
pv_system_row_number=pv_system_row_number,
t0_idx=self.t0_idx,
sample_period_duration=self.sample_period_duration,
)
# Sanity checks:
time_utc = pd.DatetimeIndex(self._data_in_ram.time_utc)
assert time_utc.is_monotonic_increasing
assert time_utc.is_unique
def get_osgb_location_for_example(self) -> Location:
"""Get a single random geographical location."""
raise NotImplementedError(
"Not planning to implement this just yet. To start with, let's use"
" `RawSatelliteDataSource` and/or `RawGSPDataSource` to generate locations."
)
def _get_spatial_slice(self, xr_data: xr.DataArray, center_osgb: Location) -> xr.DataArray:
half_roi_width_meters = self.roi_width_meters // 2
half_roi_height_meters = self.roi_height_meters // 2
left = center_osgb.x - half_roi_width_meters
right = center_osgb.x + half_roi_width_meters
top = center_osgb.y + half_roi_height_meters
bottom = center_osgb.y - half_roi_height_meters
# Select data in the region of interest:
pv_system_id_mask = (
(left <= xr_data.x_osgb)
& (xr_data.x_osgb <= right)
& (xr_data.y_osgb <= top)
& (bottom <= xr_data.y_osgb)
)
selected_data = xr_data.isel(pv_system_id=pv_system_id_mask)
# Drop any PV systems which have NaN readings at every timestep in the example:
selected_data = selected_data.dropna(dim="pv_system_id", how="all")
if len(selected_data.pv_system_id) == 0:
raise NoPVSystemsInSlice()
# Interpolate forwards to fill NaNs which follow finite data:
selected_data = selected_data.interpolate_na(dim="time_utc")
# Finally, fill any remaining NaNs with zeros. This assumes - probably incorrectly -
# that any NaNs remaining at this point are at nighttime, and so *should* be zero.
# TODO: Give the model the "raw" data: Don't `interpolate_na` or `fillna(0)`. See issue #74
selected_data = selected_data.fillna(0)
return self._ensure_n_pv_systems_per_example(selected_data)
def _ensure_n_pv_systems_per_example(self, selected_data: xr.DataArray) -> xr.DataArray:
"""Ensure there are always `self.n_pv_systems_per_example` PV systems."""
if len(selected_data.pv_system_id) > self.n_pv_systems_per_example:
# More PV systems are available than we need. Reduce by randomly sampling:
subset_of_pv_system_ids = self.rng.choice(
selected_data.pv_system_id,
size=self.n_pv_systems_per_example,
replace=False,
)
selected_data = selected_data.sel(pv_system_id=subset_of_pv_system_ids)
elif len(selected_data.pv_system_id) < self.n_pv_systems_per_example:
# If we just used `choice(replace=True)` then there's a high chance
# that the output won't include every available PV system but instead
# will repeat some PV systems at the expense of leaving some on the table.
# TODO: Don't repeat PV systems. Instead, pad with NaNs and mask the loss. Issue #73.
n_random_pv_systems = self.n_pv_systems_per_example - len(selected_data.pv_system_id)
allow_replacement = n_random_pv_systems > len(selected_data.pv_system_id)
random_pv_system_ids = self.rng.choice(
selected_data.pv_system_id,
size=n_random_pv_systems,
replace=allow_replacement,
)
selected_data = xr.concat(
(selected_data, selected_data.sel(pv_system_id=random_pv_system_ids)),
dim="pv_system_id",
)
return selected_data
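# Illustrative note (added comment): if 3 PV systems are available and
# n_pv_systems_per_example is 5, then 2 extra IDs are drawn from those 3 without
# replacement and concatenated, so every available system appears at least once;
# replacement is only allowed when more padding is needed than systems exist.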
def _post_process(self, xr_data: xr.DataArray) -> xr.DataArray:
return xr_data / xr_data.capacity_wp
def _get_empty_example(self) -> xr.DataArray:
"""Return a single example of the correct shape but where data & coords are all NaN."""
empty_pv_system_ids = np.full(
shape=self.n_pv_systems_per_example, fill_value=np.NaN, dtype=np.float32
)
empty_dt_index = np.full(shape=self.total_seq_length, fill_value=np.NaN)
empty_dt_index = pd.DatetimeIndex(empty_dt_index)
empty_pv_power = pd.DataFrame(
np.NaN, index=empty_dt_index, columns=empty_pv_system_ids
).astype(np.float32)
empty_metadata = pd.Series(np.NaN, index=empty_pv_system_ids).astype(np.float32)
pv_system_row_number = np.full(
shape=self.n_pv_systems_per_example, fill_value=np.NaN, dtype=np.float32
)
pv_system_row_number =
|
pd.Series(pv_system_row_number, index=empty_pv_system_ids)
|
pandas.Series
|
import pandas as pd
import numpy as np
def is_integer(value):
try:
int(value)
return True
except Exception:
return False
def load_df_csv(filepath, delimiter=','):
return pd.read_csv(filepath, delimiter=delimiter)
def load_xls_df(filepath, sheetnumber):
xls_obj = pd.ExcelFile(filepath)
try:
sheet_name = xls_obj.sheet_names[sheetnumber]
return xls_obj.parse(sheet_name)
except Exception as e:
print(e)
return
|
pd.DataFrame()
|
pandas.DataFrame
|
"""
Profile prediction speed
"""
import numpy
numpy.random.seed(0)
import time
import cProfile
import pstats
import collections
import argparse
import sys
import pandas
from mhcflurry import Class1AffinityPredictor
from mhcflurry.encodable_sequences import EncodableSequences
from mhcflurry.common import random_peptides
from mhcflurry.downloads import get_path
from mhcflurry.testing_utils import cleanup, startup
ALLELE_SPECIFIC_PREDICTOR = None
PAN_ALLELE_PREDICTOR = None
def setup():
global ALLELE_SPECIFIC_PREDICTOR, PAN_ALLELE_PREDICTOR
startup()
ALLELE_SPECIFIC_PREDICTOR = Class1AffinityPredictor.load(
get_path("models_class1", "models"))
PAN_ALLELE_PREDICTOR = Class1AffinityPredictor.load(
get_path("models_class1_pan", "models.with_mass_spec"))
def teardown():
global ALLELE_SPECIFIC_PREDICTOR, PAN_ALLELE_PREDICTOR
ALLELE_SPECIFIC_PREDICTOR = None
PAN_ALLELE_PREDICTOR = None
cleanup()
DEFAULT_NUM_PREDICTIONS = 10000
def test_speed_allele_specific(profile=False, num=DEFAULT_NUM_PREDICTIONS):
global ALLELE_SPECIFIC_PREDICTOR
starts = collections.OrderedDict()
timings = collections.OrderedDict()
profilers = collections.OrderedDict()
predictor = ALLELE_SPECIFIC_PREDICTOR
def start(name):
starts[name] = time.time()
if profile:
profilers[name] = cProfile.Profile()
profilers[name].enable()
def end(name):
timings[name] = time.time() - starts[name]
if profile:
profilers[name].disable()
start("first")
predictor.predict(["SIINFEKL"], allele="HLA-A*02:01")
end("first")
peptides = random_peptides(num)
start("pred_%d" % num)
predictor.predict(peptides, allele="HLA-A*02:01")
end("pred_%d" % num)
NUM2 = 10000
peptides = EncodableSequences.create(random_peptides(NUM2, length=13))
start("encode_blosum_%d" % NUM2)
peptides.variable_length_to_fixed_length_vector_encoding("BLOSUM62")
end("encode_blosum_%d" % NUM2)
start("pred_already_encoded_%d" % NUM2)
predictor.predict(peptides, allele="HLA-A*02:01")
end("pred_already_encoded_%d" % NUM2)
NUM_REPEATS = 100
start("pred_already_encoded_%d_%d_times" % (NUM2, NUM_REPEATS))
for _ in range(NUM_REPEATS):
predictor.predict(peptides, allele="HLA-A*02:01")
end("pred_already_encoded_%d_%d_times" % (NUM2, NUM_REPEATS))
print("SPEED BENCHMARK")
print("Results:\n%s" % str(pandas.Series(timings)))
return dict(
(key, pstats.Stats(value)) for (key, value) in profilers.items())
def test_speed_pan_allele(profile=False, num=DEFAULT_NUM_PREDICTIONS):
global PAN_ALLELE_PREDICTOR
starts = collections.OrderedDict()
timings = collections.OrderedDict()
profilers = collections.OrderedDict()
predictor = PAN_ALLELE_PREDICTOR
def start(name):
starts[name] = time.time()
if profile:
profilers[name] = cProfile.Profile()
profilers[name].enable()
def end(name):
timings[name] = time.time() - starts[name]
if profile:
profilers[name].disable()
start("first")
predictor.predict(["SIINFEKL"], allele="HLA-A*02:01")
end("first")
peptides = random_peptides(num)
start("pred_%d" % num)
predictor.predict(peptides, allele="HLA-A*02:01")
end("pred_%d" % num)
print("SPEED BENCHMARK")
print("Results:\n%s" % str(
|
pandas.Series(timings)
|
pandas.Series
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 17 16:27:09 2017
@author: MichaelEK
"""
import geopandas as gpd
import pandas as pd
import os
from util import grp_ts_agg, tsreg, getPolyCoords
import shutil
from gistools.vector import multipoly_to_poly, xy_to_gpd
from datetime import date
from scipy.stats import rankdata
from numpy import nan
from warnings import filterwarnings
from pdsql import mssql
from pyhydrotel import get_ts_data
from bokeh.plotting import figure, show, output_file
from bokeh.models import ColumnDataSource, HoverTool, CategoricalColorMapper, CustomJS, renderers, annotations
from bokeh.palettes import brewer
from bokeh.models.widgets import Select
from bokeh.layouts import column
from bokeh.io import save
import parameters as param
pd.options.display.max_columns = 10
##################################################
#### Read in data
print('Reading in the data')
### gw
#gw_sites = read_file(join(base_dir, gw_sites_shp))
gw_zones = gpd.read_file(os.path.join(param.base_dir, param.input_dir, param.gw_poly_shp))[['ZONE_NAME', 'geometry']]
gw_zones = gw_zones.rename(columns={'ZONE_NAME': 'zone'})
#gw_zones['mtype'] = 'gw'
# well_depths = pd.read_csv(os.path.join(param.base_dir, param.input_dir, param.well_depth_csv)).set_index('site')
well_depths = mssql.rd_sql(param.wells_server, param.wells_database, param.well_depth_table, ['well_no', 'depth']).drop_duplicates('well_no')
well_depths = well_depths[well_depths['depth'].notnull()]
well_depths.rename(columns={'depth': 'well_depth'}, inplace=True)
well_screens = mssql.rd_sql(param.wells_server, param.wells_database, param.well_screen_table, ['well_no', 'top_screen'], where_in={'screen_no': [1]}).drop_duplicates('well_no')
##################################################
#### Process well depth categories
well_info = pd.merge(well_depths, well_screens, on='well_no', how='left')
well_info['depth'] = 'Shallow'
well_info.loc[well_info['top_screen'] >= 30, 'depth'] = 'Deep'
well_info.loc[well_info['top_screen'].isnull() & (well_info['well_depth'] >= 30), 'depth'] = 'Deep'
well_depths = well_info[['well_no', 'depth']].rename(columns={'well_no': 'site'}).set_index('site')
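# For example (hypothetical rows): top_screen = 45 -> 'Deep'; top_screen missing with
# well_depth = 60 -> 'Deep'; top_screen = 12 -> 'Shallow' (the default assigned above).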
#################################################
#### Select sites
### GW
sites1 = mssql.rd_sql(param.usm_server, param.usm_database, param.site_table, ['ID', 'UpstreamSiteID', 'NZTMX', 'NZTMY'])
sites_attr1 = mssql.rd_sql(param.usm_server, param.usm_database, param.site_attr_table, ['SiteID', 'CwmsName'])
sites_attr1.rename(columns={'SiteID': 'ID'}, inplace=True)
sites = pd.merge(sites1, sites_attr1, on='ID').drop('ID', axis=1)
sites.rename(columns={'UpstreamSiteID': 'site'}, inplace=True)
sites = sites[sites.site.isin(well_depths.index)]
## Manual data
mgw1 = mssql.rd_sql(param.wells_server, param.wells_database, 'DTW_READINGS', ['well_no', 'date_read', 'depth_to_water'], where_in={'TIDEDA_FLAG': ['N']}, rename_cols=['site', 'time', 'data'])
mgw1['time'] = pd.to_datetime(mgw1['time'])
mgw1 = mgw1.groupby(['site', pd.Grouper(key='time', freq='D')]).mean().reset_index()
mgw1 = mgw1[mgw1.site.isin(sites.site)]
## Recorder data
# hy1 = get_ts_data(param.hydrotel_server, param.hydrotel_database, ['water level', 'adjusted water level'], sites.site.tolist(), resample_code='D').reset_index()
# rgw1 = hy1.sort_values('MType').drop_duplicates(['ExtSiteID', 'DateTime']).drop('MType', axis=1)
# rgw1.rename(columns={'ExtSiteID': 'site', 'DateTime': 'time', 'Value': 'data'}, inplace=True)
# rgw1 = mssql.rd_sql_ts(param.hydro_server, param.hydro_database, param.ts_table, 'ExtSiteID', 'DateTime', 'Value', where_in={'DatasetTypeID': [10]}).reset_index()
# rgw1.rename(columns={'ExtSiteID': 'site', 'DateTime': 'time', 'Value': 'data'}, inplace=True)
#
# rgw1 = rgw1[rgw1.site.isin(sites.site)]
## Prioritise recorder data
# mgw1 = mgw1[~mgw1.site.isin(rgw1.site.unique())].copy()
## Combine
# gw1 = pd.concat([rgw1, mgw1]).drop_duplicates(['site', 'time'])
gw1 = mgw1.copy()
#################################################
#### Run monthly summary stats
print('Processing past data')
### Filter sites
count0 = gw1.copy()
count0['month'] = gw1.time.dt.month
count0['year'] = gw1.time.dt.year
count1 = count0.drop_duplicates(['site', 'year', 'month']).groupby('site').data.count()
start_date0 = gw1.groupby('site').time.first()
end_date1 = gw1.groupby('site').time.last()
now1 = pd.to_datetime(param.date_now) + pd.DateOffset(days=param.add_days)
start_date1 = now1 - pd.DateOffset(months=121) - pd.DateOffset(days=now1.day - 1)
start_date2 = now1 - pd.DateOffset(months=1) - pd.DateOffset(days=now1.day - 1)
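# Added note: subtracting DateOffset(days=now1.day - 1) snaps a date to the first of its
# month, so if now1 were 2017-07-17, start_date1 would be 2007-06-01 (121 months back)
# and start_date2 would be 2017-06-01 (one month back).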
sites1 = sites[sites.site.isin(count1[(count1 >= 120) & (end_date1 >= start_date2) & (start_date0 <= start_date1)].index)]
uw1 = sites[sites.CwmsName.isin(['Upper Waitaki']) & sites.site.isin(count1[(count1 >= 80) & (end_date1 >= start_date2) & (start_date0 <= start_date1)].index)]
sites2 = pd.concat([sites1, uw1]).drop_duplicates()
gw_sites = xy_to_gpd(['site', 'CwmsName'], 'NZTMX', 'NZTMY', sites2)
gw2 = gw1[gw1.site.isin(gw_sites.site)].copy()
### Extract Site locations
gw_sites.to_file(os.path.join(param.base_dir, param.output_dir, param.gw_sites_shp))
### Combine the sites with the polygons
gw_site_zone = gw_sites.drop(['geometry'], axis=1)
gw_site_zone.rename(columns={'CwmsName': 'zone'}, inplace=True)
### Monthly interpolations
if param.interp:
## Estimate monthly means through interpolation
day1 = grp_ts_agg(gw2, 'site', 'time', 'D').mean().unstack('site')
day2 = tsreg(day1, 'D', False)
day3 = day2.interpolate(method='time', limit=40)
mon_gw1 = day3.resample('M').median().stack().reset_index()
else:
mon_gw1 = grp_ts_agg(gw2, 'site', 'time', 'M').median().reset_index()
## End the dataset at the latest month
end_date = now1 - pd.DateOffset(days=now1.day - 1)
mon_gw1 = mon_gw1[mon_gw1.time < end_date].copy()
## Assign month
mon_gw1['mon'] = mon_gw1.time.dt.month
##############################################
#### Run the monthly stats comparisons
print('Calculating the percentiles')
hy_gw0 = mon_gw1.copy()
hy_gw0['perc'] = (hy_gw0.groupby(['site', 'mon'])['data'].transform(lambda x: (rankdata(x)-1)/(len(x)-1)) * 100).round(2)
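# A minimal sketch (hypothetical toy values, not part of the analysis) of the
# percentile transform above: (rankdata(x) - 1) / (len(x) - 1) maps the smallest
# value in a site/month group to 0 and the largest to 1.
_demo_vals = [12.0, 15.0, 11.0, 18.0, 14.0]
_demo_perc = ((rankdata(_demo_vals) - 1) / (len(_demo_vals) - 1) * 100).round(2)
# _demo_perc -> array([ 25.,  75.,   0., 100.,  50.])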
###############################################
#### Pull out recent monthly data
start_date = now1 - pd.DateOffset(months=param.n_previous_months) - pd.DateOffset(days=now1.day - 1)
print('start date: ' + str(start_date), 'end date: ' + str(end_date))
### selection
hy_gw = hy_gw0[(hy_gw0.time >= start_date)].copy()
### Convert datetime to year-month str
hy_gw['time'] = hy_gw.time.dt.strftime('%Y-%m')
##############################################
#### Calc zone stats and apply categories
perc_site_zone = pd.merge(hy_gw, gw_site_zone, on='site')
perc_zone = perc_site_zone.groupby(['zone', 'time'])['perc'].mean()
prod1 = [gw_zones.zone.unique(), perc_zone.reset_index().time.unique()]
mindex = pd.MultiIndex.from_product(prod1, names=['zone', 'time'])
blank1 = pd.Series(nan, index=mindex, name='temp')
zone_stats2 =
|
pd.concat([perc_zone, blank1], axis=1)
|
pandas.concat
|
from typing import List
import pandas as pd
from fhirpipe.analyze.attribute import Attribute
from fhirpipe.analyze.sql_column import SqlColumn
def clean_dataframe(
df, attributes: List[Attribute], primary_key_column,
):
""" Apply cleaning scripts and concept maps.
This function takes the dataframe produced by the sql query and returns another
dataframe which looks like:
| Attribute | Attribute
| ({table_col}, table) | ({table_col}, table) | ({table_col}, table)
|---------------------------|---------------------------|------------------------
row 1 | val | val | val
row 2 | val | val | val
... | ... | ... | ...
and where all values are cleaned (with cleaning scripts and concept maps).
"""
cleaned_df = pd.DataFrame()
df_pk_col = df[primary_key_column.dataframe_column_name()]
for attribute in attributes:
attr_df = pd.DataFrame()
for col in attribute.columns:
df_col_name = col.dataframe_column_name()
# The column name in the new intermediary dataframe
# We put also col.table because it's needed in squash_rows
attr_col_name = (df_col_name, col.table)
# Get the original column
attr_df[attr_col_name] = df[df_col_name]
# Apply cleaning script
if col.cleaning_script:
attr_df[attr_col_name] = col.cleaning_script.apply(
attr_df[attr_col_name], df_pk_col
)
# Apply concept map
if col.concept_map:
attr_df[attr_col_name] = col.concept_map.apply(attr_df[attr_col_name], df_pk_col)
if not attr_df.empty:
# Change col names to have hierarchical names in the dataframe with all the attributes
attr_df.columns = pd.MultiIndex.from_product(([attribute], attr_df.columns))
# Build the dataframe containing all the attributes
cleaned_df = pd.concat([cleaned_df, attr_df], axis=1)
cleaned_df[pk_col_name(primary_key_column)] = df_pk_col
return cleaned_df
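# Added note: the attribute columns of cleaned_df are labelled (attribute, (column_name, table)),
# so squash_rows below can read the table name from col[1][1] when grouping rows that
# belong to the same parent instance.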
def squash_rows(df, squash_rules, parent_cols=[]):
"""
Apply the squash rules to have a single row for each instance. This is needed
because joins will create several rows with the same primary key.
args:
df (dataframe): the original dataframe with possibly several rows for the same
primary key.
squash_rules (nested list): squash rules built by the Analyzer
parent_cols (list): param used for recursive call
Example:
if you join people with bank accounts on guy.id = account.owner_id,
you want, at the end, a single instance per guy,
with an attribute holding all of their accounts.
ROWS:
GUY.NAME ... GUY.AGE ACCOUNT.NAME ACCOUNT.AMOUNT
Robert 21 Compte courant 17654
Robert 21 Compte d'epargne 123456789
David 51 Ibiza summer 100
Squash rule: ['GUY', [['ACCOUNT', []]]]
Output:
GUY.NAME ... GUY.AGE ACCOUNT.NAME ACCOUNT.AMOUNT
Robert 21 (Compte courant, Compte d'epargne) (17654, 123456789)
David 51 Ibiza summer 100
"""
table, child_rules = squash_rules
new_cols = [col for col in df.columns if col[1][1] == table]
pivot_cols = parent_cols + new_cols
to_squash = [col for col in df.columns if any([col[1][1] == rule[0] for rule in child_rules])]
if not to_squash:
return df
for child_rule in child_rules:
df = squash_rows(df, child_rule, pivot_cols)
df = (
df.groupby(pivot_cols, as_index=False)
.apply(lambda x: x.drop_duplicates())
.groupby(pivot_cols, as_index=False)
.agg(flat_tuple_agg)
)
return df
def merge_dataframe(
df, attributes: List[Attribute], primary_key_column,
):
""" Apply merging scripts.
Takes as input a dataframe of the form
| Attribute | Attribute
| ({table_col}, table) | ({table_col}, table) | ({table_col}, table)
|---------------------------|---------------------------|------------------------
row 1 | val | val | val
row 2 | val | val | val
... | ... | ... | ...
and outputs
| Attribute | Attribute
|---------------------------|------------------------
row 1 | val | val
row 2 | val | val
... | ... | ...
where values are merged thanks to the merging scripts.
"""
merged_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
import connectome_tools.process_matrix as promat
import pandas as pd
import glob as gl
import numpy as np
# load whole brain connectivity matrix
matrix = pd.read_csv('data/Gadn-pair-sorted.csv', header=0, index_col=0)
# import pair list CSV, manually generated
pairs = pd.read_csv('data/bp-pairs-2020-01-28.csv', header = 0)
# identify list of neuron-groups to import
neuron_groups = gl.glob('neuron_groups_data/*.json')
print(neuron_groups)
# load skids of each neuron class
MBONs = pd.read_json(neuron_groups[0])['skeleton_id'].values
mPNs =
|
pd.read_json(neuron_groups[1])
|
pandas.read_json
|
import openml
import numpy as np
import pandas as pd
from typing import Union
from pathlib import Path
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
from sklearn.utils import check_random_state
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from oslo_concurrency import lockutils
from hpobench.util.data_manager import DataManager
from hpobench import config_file
class OpenMLDataManager(DataManager):
def __init__(self, task_id: int,
valid_size: Union[float, None] = 0.33,
data_path: Union[str, Path, None] = None,
global_seed: Union[int, None] = 1):
self.task_id = task_id
self.global_seed = global_seed
self.valid_size = valid_size
self.train_X = None
self.valid_X = None
self.test_X = None
self.train_y = None
self.valid_y = None
self.test_y = None
self.train_idx = None
self.test_idx = None
self.task = None
self.dataset = None
self.preprocessor = None
self.lower_bound_train_size = None
self.n_classes = None
if data_path is None:
data_path = config_file.data_dir / "OpenML"
self.data_path = Path(data_path)
openml.config.set_cache_directory(str(self.data_path))
super(OpenMLDataManager, self).__init__()
# pylint: disable=arguments-differ
@lockutils.synchronized('not_thread_process_safe', external=True,
lock_path=f'{config_file.cache_dir}/openml_dm_lock', delay=0.2)
def load(self, valid_size=None, verbose=False):
"""Fetches data from OpenML and initializes the train-validation-test data splits
The validation set is fixed till this function is called again or explicitly altered
"""
# fetches task
self.task = openml.tasks.get_task(self.task_id, download_data=False)
self.n_classes = len(self.task.class_labels)
# fetches dataset
self.dataset = openml.datasets.get_dataset(self.task.dataset_id, download_data=False)
if verbose:
self.logger.debug(self.task)
self.logger.debug(self.dataset)
data_set_path = self.data_path / "org/openml/www/datasets" / str(self.task.dataset_id)
successfully_loaded = self.try_to_load_data(data_set_path)
if successfully_loaded:
self.logger.info(f'Successfully loaded the preprocessed splits from '
f'{data_set_path}')
return
# If the data is not available, download it.
self.__download_data(verbose=verbose, valid_size=valid_size)
# Save the preprocessed splits to file for later usage.
self.generate_openml_splits(data_set_path)
return
def try_to_load_data(self, data_path: Path) -> bool:
path_str = "{}_{}.parquet.gzip"
try:
self.train_X = pd.read_parquet(data_path / path_str.format("train", "x")).to_numpy()
self.train_y = pd.read_parquet(data_path / path_str.format("train", "y")).squeeze(axis=1)
self.valid_X = pd.read_parquet(data_path / path_str.format("valid", "x")).to_numpy()
self.valid_y = pd.read_parquet(data_path / path_str.format("valid", "y")).squeeze(axis=1)
self.test_X = pd.read_parquet(data_path / path_str.format("test", "x")).to_numpy()
self.test_y = pd.read_parquet(data_path / path_str.format("test", "y")).squeeze(axis=1)
except FileNotFoundError:
return False
return True
def __download_data(self, valid_size: Union[int, float, None], verbose: bool):
self.logger.info('Start to download the OpenML dataset')
# loads full data
X, y, categorical_ind, feature_names = self.dataset.get_data(target=self.task.target_name,
dataset_format="dataframe")
assert Path(self.dataset.data_file).exists(), f'The datafile {self.dataset.data_file} does not exists.'
categorical_ind = np.array(categorical_ind)
(cat_idx,) = np.where(categorical_ind)
(cont_idx,) = np.where(~categorical_ind)
# splitting dataset into train and test (10% test)
# train-test split is fixed for a task and its associated dataset (from OpenML)
self.train_idx, self.test_idx = self.task.get_train_test_split_indices()
train_x = X.iloc[self.train_idx]
train_y = y.iloc[self.train_idx]
self.test_X = X.iloc[self.test_idx]
self.test_y = y.iloc[self.test_idx]
# splitting training into training and validation
# validation set is fixed as per the global seed independent of the benchmark seed
valid_size = self.valid_size if valid_size is None else valid_size
self.train_X, self.valid_X, self.train_y, self.valid_y = train_test_split(
train_x, train_y, test_size=valid_size, shuffle=True, stratify=train_y,
random_state=check_random_state(self.global_seed)
)
# preprocessor to handle missing values, categorical columns encodings,
# and scaling numeric columns
self.preprocessor = make_pipeline(
ColumnTransformer([
(
"cat",
make_pipeline(SimpleImputer(strategy="most_frequent"),
OneHotEncoder(sparse=False, handle_unknown="ignore")),
cat_idx.tolist(),
),
(
"cont",
make_pipeline(SimpleImputer(strategy="median"),
StandardScaler()),
cont_idx.tolist(),
)
])
)
if verbose:
self.logger.debug("Shape of data pre-preprocessing: {}".format(self.train_X.shape))
# preprocessor fit only on the training set
self.train_X = self.preprocessor.fit_transform(self.train_X)
# applying preprocessor built on the training set, across validation and test splits
self.valid_X = self.preprocessor.transform(self.valid_X)
self.test_X = self.preprocessor.transform(self.test_X)
# converting boolean labels to strings
self.train_y = self._convert_labels(self.train_y)
self.valid_y = self._convert_labels(self.valid_y)
self.test_y = self._convert_labels(self.test_y)
# Similar to (https://arxiv.org/pdf/1605.07079.pdf)
# use 10 times the number of classes as lower bound for the dataset fraction
self.lower_bound_train_size = (10 * self.n_classes) / self.train_X.shape[0]
self.lower_bound_train_size = np.max((1 / 512, self.lower_bound_train_size))
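# Illustrative note (added comment): with 10 classes and 4,000 training rows this gives
# max(1/512, (10 * 10) / 4000) = 0.025, i.e. the smallest allowed training fraction
# still contains roughly 10 samples per class.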
if verbose:
self.logger.debug("Shape of data post-preprocessing: {}".format(self.train_X.shape), "\n")
self.logger.debug("\nTraining data (X, y): ({}, {})".format(self.train_X.shape, self.train_y.shape))
self.logger.debug("Validation data (X, y): ({}, {})".format(self.valid_X.shape, self.valid_y.shape))
self.logger.debug("Test data (X, y): ({}, {})".format(self.test_X.shape, self.test_y.shape))
self.logger.debug("\nData loading complete!\n")
def generate_openml_splits(self, data_path):
""" Store the created splits to file for later use… """
self.logger.info(f'Save the splits to {data_path}')
path_str = "{}_{}.parquet.gzip"
colnames = np.arange(self.train_X.shape[1]).astype(str)
label_name = str(self.task.target_name)
pd.DataFrame(self.train_X, columns=colnames).to_parquet(data_path / path_str.format("train", "x"))
self.train_y.to_frame(label_name).to_parquet(data_path / path_str.format("train", "y"))
|
pd.DataFrame(self.valid_X, columns=colnames)
|
pandas.DataFrame
|
# Copyright (c) 2019 Microsoft Corporation
# Distributed under the MIT software license
import pytest
import numpy as np
import numpy.ma as ma
import pandas as pd
import scipy as sp
import math
from itertools import repeat, chain
from ..bin import *
from ..bin import _process_column_initial, _encode_categorical_existing, _process_continuous
class StringHolder:
def __init__(self, internal_str):
self.internal_str = internal_str
def __str__(self):
return self.internal_str
def __lt__(self, other):
return True # make all objects of this type identical to detect sorting failures
def __hash__(self):
return 0 # make all objects of this type identical to detect hashing failures
def __eq__(self,other):
return True # make all objects of this type identical to detect hashing failures
class DerivedStringHolder(StringHolder):
def __init__(self, internal_str):
StringHolder.__init__(self, internal_str)
class FloatHolder:
def __init__(self, internal_float):
self.internal_float = internal_float
def __float__(self):
return self.internal_float
def __lt__(self, other):
return True # make all objects of this type identical to detect sorting failures
def __hash__(self):
return 0 # make all objects of this type identical to detect hashing failures
def __eq__(self,other):
return True # make all objects of this type identical to detect hashing failures
class DerivedFloatHolder(FloatHolder):
def __init__(self, internal_float):
FloatHolder.__init__(self, internal_float)
class FloatAndStringHolder:
def __init__(self, internal_float, internal_str):
self.internal_float = internal_float
self.internal_str = internal_str
def __float__(self):
return self.internal_float
def __str__(self):
return self.internal_str
def __lt__(self, other):
return True # make all objects of this type identical to detect sorting failures
def __hash__(self):
return 0 # make all objects of this type identical to detect hashing failures
def __eq__(self,other):
return True # make all objects of this type identical to detect hashing failures
class DerivedFloatAndStringHolder(FloatAndStringHolder):
def __init__(self, internal_float, internal_str):
FloatAndStringHolder.__init__(self, internal_float, internal_str)
class NothingHolder:
# the result of calling str(..) includes the memory address, so they won't be dependable categories
def __init__(self, internal_str):
self.internal_str = internal_str
def check_pandas_normal(dtype, val1, val2):
X = pd.DataFrame()
X["feature1"] = pd.Series(np.array([val1, val2], dtype=np.object_), dtype=dtype)
feature_types_given = ['nominal']
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
X_cols = list(unify_columns(X, [(0, None)], feature_names_in, None))
assert(len(X_cols) == 1)
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(len(X_cols[0][2]) == 2)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 2)
assert(X_cols[0][1][0] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val2)])
c1 = {str(val1) : 1, str(val2) : 2}
X_cols = list(unify_columns(X, [(0, c1)], feature_names_in, feature_types_given))
assert(len(X_cols) == 1)
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c1)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 2)
assert(X_cols[0][1][0] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val2)])
c2 = {str(val2) : 1, str(val1) : 2}
X_cols = list(unify_columns(X, [(0, c2)], feature_names_in, feature_types_given))
assert(len(X_cols) == 1)
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c2)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 2)
assert(X_cols[0][1][0] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val2)])
def check_pandas_missings(dtype, val1, val2):
X = pd.DataFrame()
X["feature1"] = pd.Series(np.array([val2, val1, val1], dtype=np.object_), dtype=dtype)
X["feature2"] = pd.Series(np.array([None, val2, val1], dtype=np.object_), dtype=dtype)
X["feature3"] = pd.Series(np.array([val1, None, val2], dtype=np.object_), dtype=dtype)
X["feature4"] = pd.Series(np.array([val2, val1, None], dtype=np.object_), dtype=dtype)
c1 = {str(val1) : 1, str(val2) : 2}
c2 = {str(val2) : 1, str(val1) : 2}
feature_types_given = ['nominal', 'nominal', 'nominal', 'nominal']
X, n_samples = clean_X(X)
assert(n_samples == 3)
feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None), (3, None)], feature_names_in, None))
assert(4 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(len(X_cols[0][2]) == 2)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 3)
assert(X_cols[0][1][0] == X_cols[0][2][str(val2)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][2] == X_cols[0][2][str(val1)])
assert(X_cols[1][0] == 'nominal')
assert(X_cols[1][3] is None)
assert(len(X_cols[1][2]) == 2)
assert(X_cols[1][1].dtype == np.int64)
assert(len(X_cols[1][1]) == 3)
assert(X_cols[1][1][0] == 0)
assert(X_cols[1][1][1] == X_cols[1][2][str(val2)])
assert(X_cols[1][1][2] == X_cols[1][2][str(val1)])
assert(X_cols[2][0] == 'nominal')
assert(X_cols[2][3] is None)
assert(len(X_cols[2][2]) == 2)
assert(X_cols[2][1].dtype == np.int64)
assert(len(X_cols[2][1]) == 3)
assert(X_cols[2][1][0] == X_cols[2][2][str(val1)])
assert(X_cols[2][1][1] == 0)
assert(X_cols[2][1][2] == X_cols[2][2][str(val2)])
assert(X_cols[3][0] == 'nominal')
assert(X_cols[3][3] is None)
assert(len(X_cols[3][2]) == 2)
assert(X_cols[3][1].dtype == np.int64)
assert(len(X_cols[3][1]) == 3)
assert(X_cols[3][1][0] == X_cols[3][2][str(val2)])
assert(X_cols[3][1][1] == X_cols[3][2][str(val1)])
assert(X_cols[3][1][2] == 0)
assert(np.array_equal(X_cols[1][1] == 0, X.iloc[:, 1].isna()))
assert(np.array_equal(X_cols[2][1] == 0, X.iloc[:, 2].isna()))
assert(np.array_equal(X_cols[3][1] == 0, X.iloc[:, 3].isna()))
X_cols = list(unify_columns(X, [(0, c1), (1, c1), (2, c1), (3, c1)], feature_names_in, feature_types_given))
assert(4 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c1)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 3)
assert(X_cols[0][1][0] == X_cols[0][2][str(val2)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][2] == X_cols[0][2][str(val1)])
assert(X_cols[1][0] == 'nominal')
assert(X_cols[1][3] is None)
assert(X_cols[1][2] is c1)
assert(X_cols[1][1].dtype == np.int64)
assert(len(X_cols[1][1]) == 3)
assert(X_cols[1][1][0] == 0)
assert(X_cols[1][1][1] == X_cols[1][2][str(val2)])
assert(X_cols[1][1][2] == X_cols[1][2][str(val1)])
assert(X_cols[2][0] == 'nominal')
assert(X_cols[2][3] is None)
assert(X_cols[2][2] is c1)
assert(X_cols[2][1].dtype == np.int64)
assert(len(X_cols[2][1]) == 3)
assert(X_cols[2][1][0] == X_cols[2][2][str(val1)])
assert(X_cols[2][1][1] == 0)
assert(X_cols[2][1][2] == X_cols[2][2][str(val2)])
assert(X_cols[3][0] == 'nominal')
assert(X_cols[3][3] is None)
assert(X_cols[3][2] is c1)
assert(X_cols[3][1].dtype == np.int64)
assert(len(X_cols[3][1]) == 3)
assert(X_cols[3][1][0] == X_cols[3][2][str(val2)])
assert(X_cols[3][1][1] == X_cols[3][2][str(val1)])
assert(X_cols[3][1][2] == 0)
assert(np.array_equal(X_cols[1][1] == 0, X.iloc[:, 1].isna()))
assert(np.array_equal(X_cols[2][1] == 0, X.iloc[:, 2].isna()))
assert(np.array_equal(X_cols[3][1] == 0, X.iloc[:, 3].isna()))
X_cols = list(unify_columns(X, [(0, c2), (1, c2), (2, c2), (3, c2)], feature_names_in, feature_types_given))
assert(4 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c2)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 3)
assert(X_cols[0][1][0] == X_cols[0][2][str(val2)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][2] == X_cols[0][2][str(val1)])
assert(X_cols[1][0] == 'nominal')
assert(X_cols[1][3] is None)
assert(X_cols[1][2] is c2)
assert(X_cols[1][1].dtype == np.int64)
assert(len(X_cols[1][1]) == 3)
assert(X_cols[1][1][0] == 0)
assert(X_cols[1][1][1] == X_cols[1][2][str(val2)])
assert(X_cols[1][1][2] == X_cols[1][2][str(val1)])
assert(X_cols[2][0] == 'nominal')
assert(X_cols[2][3] is None)
assert(X_cols[2][2] is c2)
assert(X_cols[2][1].dtype == np.int64)
assert(len(X_cols[2][1]) == 3)
assert(X_cols[2][1][0] == X_cols[2][2][str(val1)])
assert(X_cols[2][1][1] == 0)
assert(X_cols[2][1][2] == X_cols[2][2][str(val2)])
assert(X_cols[3][0] == 'nominal')
assert(X_cols[3][3] is None)
assert(X_cols[3][2] is c2)
assert(X_cols[3][1].dtype == np.int64)
assert(len(X_cols[3][1]) == 3)
assert(X_cols[3][1][0] == X_cols[3][2][str(val2)])
assert(X_cols[3][1][1] == X_cols[3][2][str(val1)])
assert(X_cols[3][1][2] == 0)
assert(np.array_equal(X_cols[1][1] == 0, X.iloc[:, 1].isna()))
assert(np.array_equal(X_cols[2][1] == 0, X.iloc[:, 2].isna()))
assert(np.array_equal(X_cols[3][1] == 0, X.iloc[:, 3].isna()))
X_cols = list(unify_columns(X, [(0, c1), (1, c2), (2, c1), (3, c2)], feature_names_in, feature_types_given))
assert(4 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c1)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 3)
assert(X_cols[0][1][0] == X_cols[0][2][str(val2)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][2] == X_cols[0][2][str(val1)])
assert(X_cols[1][0] == 'nominal')
assert(X_cols[1][3] is None)
assert(X_cols[1][2] is c2)
assert(X_cols[1][1].dtype == np.int64)
assert(len(X_cols[1][1]) == 3)
assert(X_cols[1][1][0] == 0)
assert(X_cols[1][1][1] == X_cols[1][2][str(val2)])
assert(X_cols[1][1][2] == X_cols[1][2][str(val1)])
assert(X_cols[2][0] == 'nominal')
assert(X_cols[2][3] is None)
assert(X_cols[2][2] is c1)
assert(X_cols[2][1].dtype == np.int64)
assert(len(X_cols[2][1]) == 3)
assert(X_cols[2][1][0] == X_cols[2][2][str(val1)])
assert(X_cols[2][1][1] == 0)
assert(X_cols[2][1][2] == X_cols[2][2][str(val2)])
assert(X_cols[3][0] == 'nominal')
assert(X_cols[3][3] is None)
assert(X_cols[3][2] is c2)
assert(X_cols[3][1].dtype == np.int64)
assert(len(X_cols[3][1]) == 3)
assert(X_cols[3][1][0] == X_cols[3][2][str(val2)])
assert(X_cols[3][1][1] == X_cols[3][2][str(val1)])
assert(X_cols[3][1][2] == 0)
assert(np.array_equal(X_cols[1][1] == 0, X.iloc[:, 1].isna()))
assert(np.array_equal(X_cols[2][1] == 0, X.iloc[:, 2].isna()))
assert(np.array_equal(X_cols[3][1] == 0, X.iloc[:, 3].isna()))
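# A hedged reading of the tuples yielded by unify_columns, inferred from the assertions in these
# tests (an illustrative sketch, not an official API description): each X_col appears to be
#   (feature_type, encoded_values, categories, bad_values)
# where feature_type is 'continuous', 'nominal', 'ordinal', or 'ignore'; encoded_values is an
# int64 array of category indices (0 == missing) or a float64 array for continuous columns;
# categories maps category strings to indices (None for continuous); and bad_values is None
# unless some entries could not be encoded ('ignore' columns instead carry their raw values there).
def describe_column(col):
    feature_type, encoded, categories, bad = col
    return {
        "type": feature_type,
        "encoded_dtype": None if encoded is None else encoded.dtype,
        "n_categories": None if categories is None else len(categories),
        "bad_or_raw_values": bad,  # None in the common fully-encoded case
    }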
def check_pandas_float(dtype, val1, val2):
X = pd.DataFrame()
X["feature1"] = pd.Series(np.array([val2, val1, val1], dtype=np.object_), dtype=dtype)
X["feature2"] = pd.Series(np.array([None, val2, val1], dtype=np.object_), dtype=dtype)
X["feature3"] = pd.Series(np.array([val1, None, val2], dtype=np.object_), dtype=dtype)
X["feature4"] = pd.Series(np.array([val2, val1, None], dtype=np.object_), dtype=dtype)
X, n_samples = clean_X(X)
assert(n_samples == 3)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in, min_unique_continuous=0))
assert(4 == len(X_cols))
assert(X_cols[0][0] == 'continuous')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is None)
assert(X_cols[0][1].dtype == np.float64)
assert(X_cols[0][1][0] == np.float64(dtype(val2)))
assert(X_cols[0][1][1] == np.float64(dtype(val1)))
assert(X_cols[0][1][2] == np.float64(dtype(val1)))
assert(X_cols[1][0] == 'continuous')
assert(X_cols[1][3] is None)
assert(X_cols[1][2] is None)
assert(X_cols[1][1].dtype == np.float64)
assert(np.isnan(X_cols[1][1][0]))
assert(X_cols[1][1][1] == np.float64(dtype(val2)))
assert(X_cols[1][1][2] == np.float64(dtype(val1)))
assert(X_cols[2][0] == 'continuous')
assert(X_cols[2][3] is None)
assert(X_cols[2][2] is None)
assert(X_cols[2][1].dtype == np.float64)
assert(X_cols[2][1][0] == np.float64(dtype(val1)))
assert(np.isnan(X_cols[2][1][1]))
assert(X_cols[2][1][2] == np.float64(dtype(val2)))
assert(X_cols[3][0] == 'continuous')
assert(X_cols[3][3] is None)
assert(X_cols[3][2] is None)
assert(X_cols[3][1].dtype == np.float64)
assert(X_cols[3][1][0] == np.float64(dtype(val2)))
assert(X_cols[3][1][1] == np.float64(dtype(val1)))
assert(np.isnan(X_cols[3][1][2]))
def check_numpy_throws(dtype_src, val1, val2):
X = np.array([[val1, val2], [val1, val2]], dtype=dtype_src)
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
    try:
        X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
    except Exception:
        pass
    else:
        raise AssertionError("unify_columns should have raised an exception")
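# An equivalent sketch of the expected-exception check above using pytest (an assumption --
# pytest is not referenced in this section), which fails the test by itself when nothing is raised:
#
#     with pytest.raises(Exception):
#         list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))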
def test_process_continuous_float64():
vals, bad = _process_continuous(np.array([3.5, 4.5], dtype=np.float64), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([3.5, 4.5], dtype=np.float64)))
def test_process_continuous_float32():
vals, bad = _process_continuous(np.array([3.1, np.nan], dtype=np.float32), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 2)
assert(vals[0] == 3.0999999046325684)
assert(np.isnan(vals[1]))
def test_process_continuous_int8():
vals, bad = _process_continuous(np.array([7, -9], dtype=np.int8), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([7, -9], dtype=np.float64)))
def test_process_continuous_uint16_missing():
vals, bad = _process_continuous(np.array([7], dtype=np.uint16), np.array([True, False], dtype=np.bool_))
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 2)
assert(vals[0] == 7)
assert(np.isnan(vals[1]))
def test_process_continuous_bool():
vals, bad = _process_continuous(np.array([False, True], dtype=np.bool_), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([0, 1], dtype=np.float64)))
def test_process_continuous_bool_missing():
vals, bad = _process_continuous(np.array([False, True], dtype=np.bool_), np.array([True, False, True], dtype=np.bool_))
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 3)
assert(vals[0] == 0)
assert(np.isnan(vals[1]))
assert(vals[2] == 1)
def test_process_continuous_obj_simple():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5")], dtype=np.object_), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([1, 2.5, 3, 4.5, 5.5], dtype=np.float64)))
def test_process_continuous_obj_simple_missing():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5")], dtype=np.object_), np.array([True, True, True, True, True, False], dtype=np.bool_))
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 6)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(vals[2] == 3)
assert(vals[3] == 4.5)
assert(vals[4] == 5.5)
assert(np.isnan(vals[5]))
def test_process_continuous_obj_hard():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5"), StringHolder("6.5"), DerivedStringHolder("7.5"), FloatHolder(8.5), DerivedFloatHolder(9.5), FloatAndStringHolder(10.5, "88"), DerivedFloatAndStringHolder(11.5, "99")], dtype=np.object_), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([1, 2.5, 3, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5, 11.5], dtype=np.float64)))
def test_process_continuous_obj_hard_missing():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5"), StringHolder("6.5")], dtype=np.object_), np.array([True, True, True, True, True, True, False], dtype=np.bool_))
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 7)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(vals[2] == 3)
assert(vals[3] == 4.5)
assert(vals[4] == 5.5)
assert(vals[5] == 6.5)
assert(np.isnan(vals[6]))
def test_process_continuous_obj_hard_bad():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5"), StringHolder("6.5"), "bad", StringHolder("bad2"), NothingHolder("bad3")], dtype=np.object_), np.array([True, True, True, True, True, True, True, False, True, True], dtype=np.bool_))
assert(len(bad) == 10)
assert(bad[0] is None)
assert(bad[1] is None)
assert(bad[2] is None)
assert(bad[3] is None)
assert(bad[4] is None)
assert(bad[5] is None)
assert(bad[6] == "bad")
assert(bad[7] is None)
assert(bad[8] == "bad2")
assert(isinstance(bad[9], str))
assert(vals.dtype == np.float64)
assert(len(vals) == 10)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(vals[2] == 3)
assert(vals[3] == 4.5)
assert(vals[4] == 5.5)
assert(vals[5] == 6.5)
assert(np.isnan(vals[7]))
def test_process_continuous_str_simple():
vals, bad = _process_continuous(np.array(["1", "2.5"], dtype=np.unicode_), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([1, 2.5], dtype=np.float64)))
def test_process_continuous_str_simple_missing():
vals, bad = _process_continuous(np.array(["1", "2.5"], dtype=np.unicode_), np.array([True, True, False], dtype=np.bool_))
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 3)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(np.isnan(vals[2]))
def test_process_continuous_str_hard_bad():
vals, bad = _process_continuous(np.array(["1", "2.5", "bad"], dtype=np.unicode_), np.array([True, True, True, False], dtype=np.bool_))
assert(len(bad) == 4)
assert(bad[0] is None)
assert(bad[1] is None)
assert(bad[2] == "bad")
assert(bad[3] is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 4)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(np.isnan(vals[3]))
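# Pattern visible in the "bad" assertions above (an inference from these tests, not a stated
# contract): when any entry cannot be parsed as a number, _process_continuous returns a parallel
# object array holding None for rows that converted cleanly and the offending value's string form
# for rows that did not, while rows flagged as missing stay None in "bad" and become NaN in the values.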
def test_process_column_initial_int_float():
    # this test is tricky since np.unique treats int(4) and float(4.0) as equal, so a naive implementation would return just "4"
encoded, c = _process_column_initial(np.array([4, 4.0], dtype=np.object_), None, None, None)
assert(len(c) == 2)
assert(c["4"] == 1)
assert(c["4.0"] == 2)
assert(np.array_equal(encoded, np.array([c["4"], c["4.0"]], dtype=np.int64)))
def test_process_column_initial_float32_float64():
    # np.float64(np.float32(0.1)) != np.float64(0.1) since the float32-to-float64 conversion leaves the lower mantissa
    # bits all set to zero, and there is another float64 that is closer to "0.1" in float64 representation, so
    # they aren't the same, but if we convert them to strings first then they are identical. Strings are the
    # ultimate arbiter of categorical membership since strings are cross-platform and JSON encodable. np.unique
    # will tend to separate the float32 and the float64 values since they aren't the same, but then serialize
    # them to the same string, so our model would have ["0.1", "0.1"] as the categories if we didn't convert to float64!
encoded, c = _process_column_initial(np.array([np.float32(0.1), np.float64(0.1)], dtype=np.object_), None, None, None)
assert(len(c) == 2)
assert(c["0.1"] == 1)
assert(c["0.10000000149011612"] == 2)
assert(np.array_equal(encoded, np.array([c["0.10000000149011612"], c["0.1"]], dtype=np.int64)))
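# Standalone illustration of the comment above (a sketch, not part of the original tests):
# widening np.float32(0.1) to a float64 preserves the float32 rounding, so its shortest
# decimal string differs from that of np.float64(0.1).
def _demo_float32_vs_float64_strings():
    assert str(np.float64(0.1)) == "0.1"
    assert float(np.float32(0.1)) == 0.10000000149011612
    assert str(float(np.float32(0.1))) == "0.10000000149011612"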
def test_process_column_initial_obj_obj():
encoded, c = _process_column_initial(np.array([StringHolder("abc"), StringHolder("def")], dtype=np.object_), None, None, None)
assert(len(c) == 2)
assert(c["abc"] == 1)
assert(c["def"] == 2)
assert(np.array_equal(encoded, np.array([c["abc"], c["def"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_nomissing():
encoded, c = _process_column_initial(np.array(["xyz", "abc", "xyz"], dtype=np.unicode_), None, 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["abc"] == 1)
assert(c["xyz"] == 2)
assert(np.array_equal(encoded, np.array([c["xyz"], c["abc"], c["xyz"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_missing():
encoded, c = _process_column_initial(np.array(["xyz", "abc", "xyz"], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["abc"] == 1)
assert(c["xyz"] == 2)
assert(np.array_equal(encoded, np.array([c["xyz"], c["abc"], 0, c["xyz"]], dtype=np.int64)))
def test_process_column_initial_prevalence_nomissing():
encoded, c = _process_column_initial(np.array(["xyz", "abc", "xyz"], dtype=np.unicode_), None, 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["xyz"] == 1)
assert(c["abc"] == 2)
assert(np.array_equal(encoded, np.array([c["xyz"], c["abc"], c["xyz"]], dtype=np.int64)))
def test_process_column_initial_prevalence_missing():
encoded, c = _process_column_initial(np.array(["xyz", "abc", "xyz"], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["xyz"] == 1)
assert(c["abc"] == 2)
assert(np.array_equal(encoded, np.array([c["xyz"], c["abc"], 0, c["xyz"]], dtype=np.int64)))
def test_process_column_initial_float64_nomissing():
encoded, c = _process_column_initial(np.array(["11.1", "2.2", "11.1"], dtype=np.unicode_), None, 'ANYTHING_ELSE', None)
assert(len(c) == 2)
assert(c["2.2"] == 1)
assert(c["11.1"] == 2)
assert(np.array_equal(encoded, np.array([c["11.1"], c["2.2"], c["11.1"]], dtype=np.int64)))
def test_process_column_initial_float64_missing():
encoded, c = _process_column_initial(np.array(["11.1", "2.2", "11.1"], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), 'ANYTHING_ELSE', None)
assert(len(c) == 2)
assert(c["2.2"] == 1)
assert(c["11.1"] == 2)
assert(np.array_equal(encoded, np.array([c["11.1"], c["2.2"], 0, c["11.1"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_nomissing_int8():
encoded, c = _process_column_initial(np.array([1, -1, 1], dtype=np.int8), None, 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["-1"] == 1)
assert(c["1"] == 2)
assert(np.array_equal(encoded, np.array([c["1"], c["-1"], c["1"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_missing_int8():
encoded, c = _process_column_initial(np.array([1, -1, 1], dtype=np.int8), np.array([True, True, False, True], dtype=np.bool_), 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["-1"] == 1)
assert(c["1"] == 2)
assert(np.array_equal(encoded, np.array([c["1"], c["-1"], 0, c["1"]], dtype=np.int64)))
def test_process_column_initial_prevalence_nomissing_int8():
encoded, c = _process_column_initial(np.array([1, -1, 1], dtype=np.int8), None, 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["1"] == 1)
assert(c["-1"] == 2)
assert(np.array_equal(encoded, np.array([c["1"], c["-1"], c["1"]], dtype=np.int64)))
def test_process_column_initial_prevalence_missing_int8():
encoded, c = _process_column_initial(np.array([1, -1, 1], dtype=np.int8), np.array([True, True, False, True], dtype=np.bool_), 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["1"] == 1)
assert(c["-1"] == 2)
assert(np.array_equal(encoded, np.array([c["1"], c["-1"], 0, c["1"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_nomissing_one_bool():
encoded, c = _process_column_initial(np.array([True, True, True], dtype=np.bool_), None, 'nominal_alphabetical', None)
assert(len(c) == 1)
assert(c["True"] == 1)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], c["True"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_nomissing_two_bool():
encoded, c = _process_column_initial(np.array([True, True, False, True], dtype=np.bool_), None, 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["False"] == 1)
assert(c["True"] == 2)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], c["False"], c["True"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_missing_one_bool():
encoded, c = _process_column_initial(np.array([True, True, True], dtype=np.bool_), np.array([True, True, False, True], dtype=np.bool_), 'nominal_alphabetical', None)
assert(len(c) == 1)
assert(c["True"] == 1)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], 0, c["True"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_missing_two_bool():
encoded, c = _process_column_initial(np.array([True, True, False, True], dtype=np.bool_), np.array([True, True, False, True, True], dtype=np.bool_), 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["False"] == 1)
assert(c["True"] == 2)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], 0, c["False"], c["True"]], dtype=np.int64)))
def test_process_column_initial_prevalence_nomissing_one_bool():
encoded, c = _process_column_initial(np.array([True, True, True], dtype=np.bool_), None, 'nominal_prevalence', None)
assert(len(c) == 1)
assert(c["True"] == 1)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], c["True"]], dtype=np.int64)))
def test_process_column_initial_prevalence_nomissing_two_bool():
encoded, c = _process_column_initial(np.array([True, True, False, True], dtype=np.bool_), None, 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["True"] == 1)
assert(c["False"] == 2)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], c["False"], c["True"]], dtype=np.int64)))
def test_process_column_initial_prevalence_missing_one_bool():
encoded, c = _process_column_initial(np.array([True, True, True], dtype=np.bool_), np.array([True, True, False, True], dtype=np.bool_), 'nominal_prevalence', None)
assert(len(c) == 1)
assert(c["True"] == 1)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], 0, c["True"]], dtype=np.int64)))
def test_process_column_initial_prevalence_missing_two_bool():
encoded, c = _process_column_initial(np.array([True, True, False, True], dtype=np.bool_), np.array([True, True, False, True, True], dtype=np.bool_), 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["True"] == 1)
assert(c["False"] == 2)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], 0, c["False"], c["True"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_str():
c = {"cd": 1, "ab": 2}
encoded, bad = _encode_categorical_existing(np.array(["ab", "cd"], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["ab"], c["cd"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_bool():
c = {"True": 1, "False": 2}
encoded, bad = _encode_categorical_existing(np.array([True, False], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["True"], c["False"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_int_small():
c = {"-2": 1, "3": 2, "1": 3}
encoded, bad = _encode_categorical_existing(np.array([int(1), np.int8(-2), np.uint64(3)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["1"], c["-2"], c["3"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_int_big():
c = {"-2": 1, "18446744073709551615": 2, "1": 3}
encoded, bad = _encode_categorical_existing(np.array([int(1), np.int8(-2), np.uint64("18446744073709551615")], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["1"], c["-2"], c["18446744073709551615"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_floats():
    # np.float64(np.float32(0.1)) != np.float64(0.1) since the float32-to-float64 conversion leaves the lower mantissa
    # bits all set to zero, and there is another float64 that is closer to "0.1" in float64 representation, so
    # they aren't the same, but if we convert them to strings first then they are identical. Strings are the
    # ultimate arbiter of categorical membership since strings are cross-platform and JSON encodable. np.unique
    # will tend to separate the float32 and the float64 values since they aren't the same, but then serialize
    # them to the same string, so our model would have ["0.1", "0.1"] as the categories if we didn't convert to float64!
c = {"1.1": 1, "2.19921875": 2, "3.299999952316284": 3, "4.4": 4, "5.5": 5}
encoded, bad = _encode_categorical_existing(np.array([float(1.1), np.float16(2.2), np.float32(3.3), np.float64(4.4), np.longfloat(5.5)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["1.1"], c["2.19921875"], c["3.299999952316284"], c["4.4"], c["5.5"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_str_int():
c = {"abc": 1, "1": 2}
encoded, bad = _encode_categorical_existing(np.array(["abc", int(1)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["abc"], c["1"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_str_float():
c = {"abc": 1, "1.1": 2}
encoded, bad = _encode_categorical_existing(np.array(["abc", float(1.1)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["abc"], c["1.1"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_str_float64():
c = {"abc": 1, "1.1": 2}
encoded, bad = _encode_categorical_existing(np.array(["abc", np.float64(1.1)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["abc"], c["1.1"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_str_float32():
c = {"abc": 1, "1.100000023841858": 2}
encoded, bad = _encode_categorical_existing(np.array(["abc", np.float32(1.1)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["abc"], c["1.100000023841858"]], dtype=np.int64)))
def test_encode_categorical_existing_int_float():
    # this test is tricky since np.unique treats int(4) and float(4) as equal, so a naive implementation would return just "4"
c = {"4": 1, "4.0": 2}
encoded, bad = _encode_categorical_existing(np.array([int(4), 4.0], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["4"], c["4.0"]], dtype=np.int64)))
def test_encode_categorical_existing_int_float32():
    # np.float64(np.float32(0.1)) != np.float64(0.1) since the float32 version has the lower mantissa
    # bits all set to zero, and there is another float64 that is closer to "0.1" for float64s, so
    # they aren't the same, but if we convert them to strings first then they are identical. I tend to think
    # of strings as the ultimate arbiter of categorical membership since strings are cross-platform.
    # np.unique will tend to separate the float32 and the float64 values since they aren't the same, but then
    # serialize them to the same string, so our model would end up with ["0.1", "0.1"] as the categories!
c = {"4": 1, "0.10000000149011612": 2}
encoded, bad = _encode_categorical_existing(np.array([int(4), np.float32(0.1)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["4"], c["0.10000000149011612"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_obj():
c = {"abc": 1, "def": 2}
encoded, bad = _encode_categorical_existing(np.array([StringHolder("abc"), StringHolder("def")], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["abc"], c["def"]], dtype=np.int64)))
def test_encode_categorical_existing_str():
c = {"abc": 1, "def": 2, "ghi": 3}
encoded, bad = _encode_categorical_existing(np.array(["abc", "ghi", "def", "something"], dtype=np.unicode_), np.array([True, True, False, True, True], dtype=np.bool_), c)
assert(np.array_equal(bad, np.array([None, None, None, None, "something"], dtype=np.object_)))
assert(np.array_equal(encoded, np.array([c["abc"], c["ghi"], 0, c["def"], -1], dtype=np.int64)))
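# Encoding convention visible in the assertions above and below (an inference, not a documented
# contract): encoded value 0 marks a missing entry, -1 marks a value absent from the supplied
# category mapping, and any such unmapped value is echoed back in the parallel "bad" array, which
# holds None everywhere else.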
def test_encode_categorical_existing_int8():
c = {"5": 1, "0": 2, "-9": 3}
encoded, bad = _encode_categorical_existing(np.array([5, -9, 0, 0, -9, 5, 99], dtype=np.int8), np.array([True, True, True, False, True, True, True, True], dtype=np.bool_), c)
assert(np.array_equal(bad, np.array([None, None, None, None, None, None, None, "99"], dtype=np.object_)))
assert(np.array_equal(encoded, np.array([c["5"], c["-9"], c["0"], 0, c["0"], c["-9"], c["5"], -1], dtype=np.int64)))
def test_encode_categorical_existing_bool():
c = {"False": 1, "True": 2}
encoded, bad = _encode_categorical_existing(np.array([False, True, False], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["False"], c["True"], 0, c["False"]], dtype=np.int64)))
def test_encode_categorical_existing_bool_true():
c = {"True": 1}
encoded, bad = _encode_categorical_existing(np.array([False, True, False], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), c)
assert(np.array_equal(bad, np.array(["False", None, None, "False"], dtype=np.object_)))
assert(np.array_equal(encoded, np.array([-1, c["True"], 0, -1], dtype=np.int64)))
def test_encode_categorical_existing_bool_false():
c = {"False": 1}
encoded, bad = _encode_categorical_existing(np.array([False, True, False], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), c)
assert(np.array_equal(bad, np.array([None, "True", None, None], dtype=np.object_)))
assert(np.array_equal(encoded, np.array([c["False"], -1, 0, c["False"]], dtype=np.int64)))
def test_process_column_initial_choose_floatcategories():
encoded, c = _process_column_initial(np.array([11.11, 2.2, np.float32(2.2), "2.2", StringHolder("2.2")], dtype=np.object_), None, None, 4)
assert(c["2.2"] == 1)
assert(c["2.200000047683716"] == 2)
assert(c["11.11"] == 3)
assert(np.array_equal(encoded, np.array([c["11.11"], c["2.2"], c["2.200000047683716"], c["2.2"], c["2.2"]], dtype=np.int64)))
def test_process_column_initial_choose_floats():
encoded, c = _process_column_initial(np.array([11.11, 2.2, np.float32(2.2), "2.2", StringHolder("2.2"), 3.3, 3.3], dtype=np.object_), None, None, 3)
assert(c is None)
assert(np.array_equal(encoded, np.array([11.11, 2.2, 2.200000047683716, 2.2, 2.2, 3.3, 3.3], dtype=np.float64)))
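# Inference from the two tests above (not documented behavior): the final argument to
# _process_column_initial appears to be a threshold on the number of distinct float-convertible
# values needed to treat the column as continuous -- with 3 distinct values and a threshold of 4
# the column stays categorical, while with 4 distinct values and a threshold of 3 it is returned
# as continuous float64 with categories=None. This is consistent with the min_unique_continuous
# keyword passed to unify_columns elsewhere in these tests.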
def test_unify_columns_numpy1():
X = np.array([1, 2, 3])
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(np.array_equal(X_cols[0][1], np.array([X_cols[0][2]["1"]], dtype=np.int64)))
assert(np.array_equal(X_cols[1][1], np.array([X_cols[1][2]["2"]], dtype=np.int64)))
assert(np.array_equal(X_cols[2][1], np.array([X_cols[2][2]["3"]], dtype=np.int64)))
def test_unify_columns_numpy2():
X = np.array([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(np.array_equal(X_cols[0][1], np.array([X_cols[0][2]["1"], X_cols[0][2]["4"]], dtype=np.int64)))
assert(np.array_equal(X_cols[1][1], np.array([X_cols[1][2]["2"], X_cols[1][2]["5"]], dtype=np.int64)))
assert(np.array_equal(X_cols[2][1], np.array([X_cols[2][2]["3"], X_cols[2][2]["6"]], dtype=np.int64)))
def test_unify_columns_numpy_ignore():
X = np.array([["abc", None, "def"], ["ghi", "jkl", None]])
feature_types_given=['ignore', 'ignore', 'ignore']
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in, feature_types_given))
assert(3 == len(X_cols))
assert(X_cols[0][0] == 'ignore')
assert(X_cols[0][2] is None)
assert(X_cols[0][1] is None)
assert(np.array_equal(X_cols[0][3], np.array(["abc", "ghi"], dtype=np.object_)))
assert(X_cols[1][0] == 'ignore')
assert(X_cols[1][2] is None)
assert(X_cols[1][1] is None)
assert(np.array_equal(X_cols[1][3], np.array([None, "jkl"], dtype=np.object_)))
assert(X_cols[2][0] == 'ignore')
assert(X_cols[2][2] is None)
assert(X_cols[2][1] is None)
assert(np.array_equal(X_cols[2][3], np.array(["def", None], dtype=np.object_)))
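# Inference from the assertions above (not a documented contract): columns typed 'ignore' are not
# encoded at all -- the encoded array and the categories dict are both None, and the raw per-row
# values are passed through unchanged in slot 3 of the tuple.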
def test_unify_columns_scipy():
X = sp.sparse.csc_matrix([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([X_cols[0][2]["1"], X_cols[0][2]["4"]], dtype=np.int64)))
assert(X_cols[1][1].dtype == np.int64)
assert(np.array_equal(X_cols[1][1], np.array([X_cols[1][2]["2"], X_cols[1][2]["5"]], dtype=np.int64)))
assert(X_cols[2][1].dtype == np.int64)
assert(np.array_equal(X_cols[2][1], np.array([X_cols[2][2]["3"], X_cols[2][2]["6"]], dtype=np.int64)))
def test_unify_columns_dict1():
X = {"feature1" : [1], "feature2" : "hi", "feature3" : None}
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X, feature_names_given=["feature3", "feature2", "feature1"])
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
assert(X_cols[0][1][0] == 0)
assert(X_cols[1][1].dtype == np.int64)
assert(X_cols[1][1][0] == X_cols[1][2]["hi"])
assert(X_cols[2][1].dtype == np.int64)
assert(X_cols[2][1][0] == X_cols[2][2]["1"])
def test_unify_columns_dict2():
X = {"feature1" : [1, 4], "feature2" : [2, 5], "feature3" : [3, 6]}
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X, feature_names_given=["feature3", "feature2", "feature1"])
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([X_cols[0][2]["3"], X_cols[0][2]["6"]], dtype=np.int64)))
assert(X_cols[1][1].dtype == np.int64)
assert(np.array_equal(X_cols[1][1], np.array([X_cols[1][2]["2"], X_cols[1][2]["5"]], dtype=np.int64)))
assert(X_cols[2][1].dtype == np.int64)
assert(np.array_equal(X_cols[2][1], np.array([X_cols[2][2]["1"], X_cols[2][2]["4"]], dtype=np.int64)))
def test_unify_columns_list1():
X = [1, 2.0, "hi", None]
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(4 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
assert(X_cols[0][1][0] == X_cols[0][2]["1"])
assert(X_cols[1][1].dtype == np.int64)
assert(X_cols[1][1][0] == X_cols[1][2]["2.0"])
assert(X_cols[2][1].dtype == np.int64)
assert(X_cols[2][1][0] == X_cols[2][2]["hi"])
assert(X_cols[3][1].dtype == np.int64)
assert(X_cols[3][1][0] == 0)
def test_unify_columns_list2():
P1 = pd.DataFrame()
P1["feature1"] = pd.Series(np.array([1, None, np.nan], dtype=np.object_))
P2 = pd.DataFrame()
P2["feature1"] = pd.Series(np.array([1], dtype=np.float32))
P2["feature2"] = pd.Series(np.array([None], dtype=np.object_))
P2["feature3"] = pd.Series(np.array([np.nan], dtype=np.object_))
S1 = sp.sparse.csc_matrix([[1, 2, 3]])
S2 = sp.sparse.csc_matrix([[1], [2], [3]])
X = [np.array([1, 2, 3], dtype=np.int8), pd.Series([4.0, None, np.nan]), [1, 2.0, "hi"], (np.double(4.0), "bye", None), {1, 2, 3}, {"abc": 1, "def": 2, "ghi":3}.keys(), {"abc": 1, "def": 2, "ghi":3}.values(), range(1, 4), (x for x in [1, 2, 3]), np.array([1, 2, 3], dtype=np.object_), np.array([[1, 2, 3]], dtype=np.int8), np.array([[1], [2], [3]], dtype=np.int8), P1, P2, S1, S2]
X, n_samples = clean_X(X)
assert(n_samples == 16)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
c = X_cols[0][2]
assert(np.array_equal(X_cols[0][1], np.array([c["1"], c["4.0"], c["1"], c["4.0"], c["1"], c["abc"], c["1"], c["1"], c["1"], c["1"], c["1"], c["1"], c["1"], c["1.0"], c["1"], c["1"]], dtype=np.int64)))
assert(X_cols[1][1].dtype == np.int64)
c = X_cols[1][2]
assert(np.array_equal(X_cols[1][1], np.array([c["2"], 0, c["2.0"], c["bye"], c["2"], c["def"], c["2"], c["2"], c["2"], c["2"], c["2"], c["2"], 0, 0, c["2"], c["2"]], dtype=np.int64)))
assert(X_cols[2][1].dtype == np.int64)
c = X_cols[2][2]
assert(np.array_equal(X_cols[2][1], np.array([c["3"], 0, c["hi"], 0, c["3"], c["ghi"], c["3"], c["3"], c["3"], c["3"], c["3"], c["3"], 0, 0, c["3"], c["3"]], dtype=np.int64)))
def test_unify_columns_tuple1():
X = (1, 2.0, "hi", None)
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(4 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
assert(X_cols[0][1][0] == X_cols[0][2]["1"])
assert(X_cols[1][1].dtype == np.int64)
assert(X_cols[1][1][0] == X_cols[1][2]["2.0"])
assert(X_cols[2][1].dtype == np.int64)
assert(X_cols[2][1][0] == X_cols[2][2]["hi"])
assert(X_cols[3][1].dtype == np.int64)
assert(X_cols[3][1][0] == 0)
def test_unify_columns_tuple2():
X = (np.array([1, 2, 3], dtype=np.int8), pd.Series([4, 5, 6]), [1, 2.0, "hi"], (np.double(4.0), "bye", None), {1, 2, 3}, {"abc": 1, "def": 2, "ghi":3}.keys(), {"abc": 1, "def": 2, "ghi":3}.values(), range(1, 4), (x for x in [1, 2, 3]), np.array([1, 2, 3], dtype=np.object_))
X, n_samples = clean_X(X)
assert(n_samples == 10)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
c = X_cols[0][2]
assert(np.array_equal(X_cols[0][1], np.array([c["1"], c["4"], c["1"], c["4.0"], c["1"], c["abc"], c["1"], c["1"], c["1"], c["1"]], dtype=np.int64)))
assert(X_cols[1][1].dtype == np.int64)
c = X_cols[1][2]
assert(np.array_equal(X_cols[1][1], np.array([c["2"], c["5"], c["2.0"], c["bye"], c["2"], c["def"], c["2"], c["2"], c["2"], c["2"]], dtype=np.int64)))
assert(X_cols[2][1].dtype == np.int64)
c = X_cols[2][2]
assert(np.array_equal(X_cols[2][1], np.array([c["3"], c["6"], c["hi"], 0, c["3"], c["ghi"], c["3"], c["3"], c["3"], c["3"]], dtype=np.int64)))
def test_unify_columns_generator1():
X = (x for x in [1, 2.0, "hi", None])
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(4 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
assert(X_cols[0][1][0] == X_cols[0][2]["1"])
assert(X_cols[1][1].dtype == np.int64)
assert(X_cols[1][1][0] == X_cols[1][2]["2.0"])
assert(X_cols[2][1].dtype == np.int64)
assert(X_cols[2][1][0] == X_cols[2][2]["hi"])
assert(X_cols[3][1].dtype == np.int64)
assert(X_cols[3][1][0] == 0)
def test_unify_columns_generator2():
X = (x for x in [np.array([1, 2, 3], dtype=np.int8), pd.Series([4, 5, 6]), [1, 2.0, "hi"], (np.double(4.0), "bye", None), {1, 2, 3}, {"abc": 1, "def": 2, "ghi":3}.keys(), {"abc": 1, "def": 2, "ghi":3}.values(), range(1, 4), (x for x in [1, 2, 3]), np.array([1, 2, 3], dtype=np.object_)])
X, n_samples = clean_X(X)
assert(n_samples == 10)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
c = X_cols[0][2]
assert(np.array_equal(X_cols[0][1], np.array([c["1"], c["4"], c["1"], c["4.0"], c["1"], c["abc"], c["1"], c["1"], c["1"], c["1"]], dtype=np.int64)))
assert(X_cols[1][1].dtype == np.int64)
c = X_cols[1][2]
assert(np.array_equal(X_cols[1][1], np.array([c["2"], c["5"], c["2.0"], c["bye"], c["2"], c["def"], c["2"], c["2"], c["2"], c["2"]], dtype=np.int64)))
assert(X_cols[2][1].dtype == np.int64)
c = X_cols[2][2]
assert(np.array_equal(X_cols[2][1], np.array([c["3"], c["6"], c["hi"], 0, c["3"], c["ghi"], c["3"], c["3"], c["3"], c["3"]], dtype=np.int64)))
def test_unify_columns_pandas_normal_int8():
check_pandas_normal(np.int8, -128, 127)
def test_unify_columns_pandas_normal_uint8():
check_pandas_normal(np.uint8, 0, 255)
def test_unify_columns_pandas_normal_int16():
check_pandas_normal(np.int16, -32768, 32767)
def test_unify_columns_pandas_normal_uint16():
check_pandas_normal(np.uint16, 0, 65535)
def test_unify_columns_pandas_normal_int32():
check_pandas_normal(np.int32, -2147483648, 2147483647)
def test_unify_columns_pandas_normal_uint32():
check_pandas_normal(np.uint32, 0, 4294967295)
def test_unify_columns_pandas_normal_int64():
check_pandas_normal(np.int64, -9223372036854775808, 9223372036854775807)
def test_unify_columns_pandas_normal_uint64():
check_pandas_normal(np.uint64, np.uint64("0"), np.uint64("18446744073709551615"))
def test_unify_columns_pandas_normal_bool():
check_pandas_normal(np.bool_, False, True)
def test_unify_columns_pandas_missings_float64():
check_pandas_float(np.float64, -1.1, 2.2)
def test_unify_columns_pandas_missings_longfloat():
check_pandas_float(np.longfloat, -1.1, 2.2)
def test_unify_columns_pandas_missings_float32():
check_pandas_float(np.float32, -1.1, 2.2)
def test_unify_columns_pandas_missings_float16():
check_pandas_float(np.float16, -1.1, 2.2)
def test_unify_columns_pandas_missings_Int8Dtype():
check_pandas_missings(pd.Int8Dtype(), -128, 127)
def test_unify_columns_pandas_missings_UInt8Dtype():
check_pandas_missings(pd.UInt8Dtype(), 0, 255)
def test_unify_columns_pandas_missings_Int16Dtype():
check_pandas_missings(pd.Int16Dtype(), -32768, 32767)
def test_unify_columns_pandas_missings_UInt16Dtype():
check_pandas_missings(pd.UInt16Dtype(), 0, 65535)
def test_unify_columns_pandas_missings_Int32Dtype():
check_pandas_missings(pd.Int32Dtype(), -2147483648, 2147483647)
def test_unify_columns_pandas_missings_UInt32Dtype():
check_pandas_missings(pd.UInt32Dtype(), 0, 4294967295)
def test_unify_columns_pandas_missings_Int64Dtype():
check_pandas_missings(pd.Int64Dtype(), -9223372036854775808, 9223372036854775807)
def test_unify_columns_pandas_missings_UInt64Dtype():
check_pandas_missings(pd.UInt64Dtype(), np.uint64("0"), np.uint64("18446744073709551615"))
def test_unify_columns_pandas_missings_BooleanDtype():
check_pandas_missings(pd.BooleanDtype(), False, True)
def test_unify_columns_pandas_missings_str():
check_pandas_missings(np.object_, "abc", "def")
def test_unify_columns_pandas_missings_nice_str():
check_pandas_missings(np.object_, StringHolder("abc"), "def")
def test_unify_columns_pandas_missings_pure_ints():
check_pandas_missings(np.object_, 1, 2)
def test_unify_columns_pandas_missings_pure_floats():
check_pandas_missings(np.object_, 1.1, 2.2)
def test_unify_columns_pandas_missings_mixed_floats():
check_pandas_missings(np.object_, 1.1, "2.2")
def test_unify_columns_pandas_missings_mixed_floats2():
check_pandas_missings(np.object_, StringHolder("1.1"), "2.2")
def test_unify_columns_str_throw():
X = "abc"
    try:
        X, n_samples = clean_X(X)
    except Exception:
        pass
    else:
        raise AssertionError("clean_X should have raised an exception")
    try:
        feature_names_in = unify_feature_names(X)
    except Exception:
        pass
    else:
        raise AssertionError("unify_feature_names should have raised an exception")
    try:
        feature_names_in = ["ANYTHING"]
        X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
    except Exception:
        pass
    else:
        raise AssertionError("unify_columns should have raised an exception")
def test_unify_columns_int_throw():
X = 1
    try:
        X, n_samples = clean_X(X)
    except Exception:
        pass
    else:
        raise AssertionError("clean_X should have raised an exception")
    try:
        feature_names_in = unify_feature_names(X)
    except Exception:
        pass
    else:
        raise AssertionError("unify_feature_names should have raised an exception")
    try:
        feature_names_in = ["ANYTHING"]
        X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
    except Exception:
        pass
    else:
        raise AssertionError("unify_columns should have raised an exception")
def test_unify_columns_duplicate_colnames_throw():
X = pd.DataFrame()
X["0"] = [1, 2]
X[0] = [3, 4]
    try:
        feature_names_in = unify_feature_names(X)
    except Exception:
        pass
    else:
        raise AssertionError("unify_feature_names should have raised an exception")
    try:
        feature_names_in = ["ANYTHING"]
        X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
    except Exception:
        pass
    else:
        raise AssertionError("unify_columns should have raised an exception")
def test_unify_columns_opaque_str_throw():
    # this should fail since the default str() conversion produces a string that is useless as a category, like:
# <interpret.glassbox.ebm.test.test_bin.NothingHolder object at 0x0000019525E9FE48>
check_numpy_throws(np.object_, NothingHolder("abc"), "def")
def test_unify_columns_list_throw():
check_numpy_throws(np.object_, ["abc", "bcd"], "def")
def test_unify_columns_tuple_throw():
check_numpy_throws(np.object_, ("abc", "bcd"), "def")
def test_unify_columns_set_throw():
check_numpy_throws(np.object_, {"abc", "bcd"}, "def")
def test_unify_columns_dict_throw():
check_numpy_throws(np.object_, {"abc": 1, "bcd": 2}, "def")
def test_unify_columns_keys_throw():
check_numpy_throws(np.object_, {"abc": 1, "bcd": 2}.keys(), "def")
def test_unify_columns_values_throw():
check_numpy_throws(np.object_, {"abc": 1, "bcd": 2}.values(), "def")
def test_unify_columns_range_throw():
check_numpy_throws(np.object_, range(1, 2), "def")
def test_unify_columns_generator_throw():
check_numpy_throws(np.object_, (x for x in [1, 2]), "def")
def test_unify_columns_ndarray_throw():
check_numpy_throws(np.object_, np.array([1, "abc"], dtype=np.object_), "def")
def test_unify_columns_pandas_obj_to_float():
X = pd.DataFrame()
X["feature1"] = pd.Series(np.array([None, np.nan, np.float16(np.nan), 0, -1, 2.2, "-3.3", np.float16("4.4"), StringHolder("-5.5"), np.float32("6.6").item()], dtype=np.object_), dtype=np.object_)
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 10)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(X_cols[0][0] == 'continuous')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is None)
assert(X_cols[0][1].dtype == np.float64)
assert(np.isnan(X_cols[0][1][0]))
assert(np.isnan(X_cols[0][1][1]))
assert(np.isnan(X_cols[0][1][2]))
assert(X_cols[0][1][3] == 0)
assert(X_cols[0][1][4] == -1)
assert(X_cols[0][1][5] == 2.2)
assert(X_cols[0][1][6] == -3.3)
assert(X_cols[0][1][7] == 4.3984375)
assert(X_cols[0][1][8] == -5.5)
    assert(X_cols[0][1][9] == 6.5999999046325684) # .item() yields a Python float (float64) that preserves the float32 rounding of 6.6
def test_unify_columns_pandas_obj_to_str():
X = pd.DataFrame()
X["feature1"] = pd.Series(np.array([None, np.nan, np.float16(np.nan), 0, -1, 2.2, "-3.3", np.float16("4.4"), StringHolder("-5.5"), 5.6843418860808014e-14, "None", "nan"], dtype=np.object_), dtype=np.object_)
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 12)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
# For "5.684341886080802e-14", we need to round the 16th digit up for this to be the shortest string since
# "5.684341886080801e-14" doesn't work
# https://www.exploringbinary.com/the-shortest-decimal-string-that-round-trips-may-not-be-the-nearest/
c = X_cols[0][2]
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["0"], c["-1"], c["2.2"], c["-3.3"], c["4.3984375"], c["-5.5"], c["5.684341886080802e-14"], c["None"], c["nan"]], dtype=np.int64)))
assert(np.array_equal(na, X_cols[0][1] == 0))
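# Standalone check of the round-trip claim above (assumes CPython 3's shortest-repr float
# formatting): the 16-digit string with its last digit rounded up parses back to the same
# float64, while the decimal-nearer 16-digit string does not.
def _demo_shortest_round_trip():
    assert str(5.6843418860808014e-14) == "5.684341886080802e-14"
    assert float("5.684341886080802e-14") == 5.6843418860808014e-14
    assert float("5.684341886080801e-14") != 5.6843418860808014e-14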
def test_unify_columns_pandas_categorical():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "not_in_categories", "a", "bcd", "0"], dtype=pd.CategoricalDtype(categories=["a", "0", "bcd"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 6)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(len(X_cols[0][2]) == 3)
assert(X_cols[0][2]["a"] == 1)
assert(X_cols[0][2]["0"] == 2)
assert(X_cols[0][2]["bcd"] == 3)
assert(X_cols[0][1].dtype == np.int64)
c = X_cols[0][2]
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["a"], c["bcd"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_ordinal():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "not_in_categories", "a", "bcd", "0"], dtype=pd.CategoricalDtype(categories=["a", "0", "bcd"], ordered=True))
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 6)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'ordinal')
assert(X_cols[0][3] is None)
assert(len(X_cols[0][2]) == 3)
assert(X_cols[0][2]["a"] == 1)
assert(X_cols[0][2]["0"] == 2)
assert(X_cols[0][2]["bcd"] == 3)
assert(X_cols[0][1].dtype == np.int64)
c = X_cols[0][2]
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["a"], c["bcd"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_categorical_shorter():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "not_in_categories", "a", "0"], dtype=pd.CategoricalDtype(categories=["a", "0"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 5)
feature_names_in = unify_feature_names(X)
c = {"a": 1, "0": 2, "bcd": 3}
X_cols = list(unify_columns(X, [(0, c)], feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c)
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["a"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_categorical_equals():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "not_in_categories", "a", "bcd", "0"], dtype=pd.CategoricalDtype(categories=["a", "0", "bcd"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 6)
feature_names_in = unify_feature_names(X)
c = {"a": 1, "0": 2, "bcd": 3}
X_cols = list(unify_columns(X, [(0, c)], feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c)
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["a"], c["bcd"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_categorical_longer():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "in_categories", "a", "bcd", "0"], dtype=pd.CategoricalDtype(categories=["a", "0", "bcd", "in_categories"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:2]))
assert(all(~na[2:]))
X, n_samples = clean_X(X)
assert(n_samples == 6)
feature_names_in = unify_feature_names(X)
c = {"a": 1, "0": 2, "bcd": 3}
X_cols = list(unify_columns(X, [(0, c)], feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(np.array_equal(X_cols[0][3], np.array([None, None, "in_categories", None, None, None], dtype=np.object_)))
assert(X_cols[0][2] is c)
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([0, 0, -1, c["a"], c["bcd"], c["0"]], dtype=np.int64)))
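# Note on the assertions above (inferred from this test): category values present in the pandas
# CategoricalDtype but absent from the supplied mapping `c` encode as -1 and are echoed back in
# slot 3 (the "bad" array), while genuinely missing entries still encode as 0 with no "bad" entry.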
def test_unify_columns_pandas_categorical_reordered_shorter():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "not_in_categories", "a", "0"], dtype=pd.CategoricalDtype(categories=["0", "a"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 5)
feature_names_in = unify_feature_names(X)
c = {"a": 1, "0": 2, "bcd": 3}
X_cols = list(unify_columns(X, [(0, c)], feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c)
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["a"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_categorical_reordered_equals():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "not_in_categories", "a", "bcd", "0"], dtype=pd.CategoricalDtype(categories=["a", "bcd", "0"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 6)
feature_names_in = unify_feature_names(X)
c = {"a": 1, "0": 2, "bcd": 3}
X_cols = list(unify_columns(X, [(0, c)], feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c)
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["a"], c["bcd"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_categorical_reordered_longer1():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "in_categories", "a", "bcd", "0"], dtype=pd.CategoricalDtype(categories=["a", "0", "in_categories", "bcd"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:2]))
assert(all(~na[2:]))
X, n_samples = clean_X(X)
assert(n_samples == 6)
feature_names_in = unify_feature_names(X)
c = {"a": 1, "0": 2, "bcd": 3}
X_cols = list(unify_columns(X, [(0, c)], feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(np.array_equal(X_cols[0][3], np.array([None, None, "in_categories", None, None, None], dtype=np.object_)))
assert(X_cols[0][2] is c)
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([0, 0, -1, c["a"], c["bcd"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_categorical_reordered_longer2():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "in_categories", "a", "bcd", "0"], dtype=pd.CategoricalDtype(categories=["0", "a", "bcd", "in_categories"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:2]))
assert(all(~na[2:]))
X, n_samples = clean_X(X)
assert(n_samples == 6)
feature_names_in = unify_feature_names(X)
c = {"a": 1, "0": 2, "bcd": 3}
X_cols = list(unify_columns(X, [(0, c)], feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(np.array_equal(X_cols[0][3], np.array([None, None, "in_categories", None, None, None], dtype=np.object_)))
assert(X_cols[0][2] is c)
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([0, 0, -1, c["a"], c["bcd"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_categorical_compressed_categories():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "not_in_categories", "a", "bcd", "0"], dtype=pd.CategoricalDtype(categories=["a", "bcd", "0"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 6)
feature_names_in = unify_feature_names(X)
# here we're combining the "a" category and the "0" category into a single one that tracks both.
# in JSON this can be expressed as the equivalent of [["a", "0"], "bcd"]
c = {"a": 1, "0": 1, "bcd": 2}
X_cols = list(unify_columns(X, [(0, c)], feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c)
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["a"], c["bcd"], c["0"]], dtype=np.int64)))
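# A minimal sketch (hypothetical helper, not part of the library) of how a grouped-category
# description like the JSON-equivalent [["a", "0"], "bcd"] mentioned above could be turned into
# the dict form used by unify_columns, keeping index 0 reserved for missing values.
def make_compressed_categories(groups):
    mapping = {}
    for idx, group in enumerate(groups, start=1):
        for name in ([group] if isinstance(group, str) else group):
            mapping[name] = idx
    return mapping
assert make_compressed_categories([["a", "0"], "bcd"]) == {"a": 1, "0": 1, "bcd": 2}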
def test_unify_feature_names_numpy1():
X = np.array([1, 2, 3])
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X)
assert(feature_names_in == ["feature_0001", "feature_0002", "feature_0003"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[2][1][0] == 3.0)
def test_unify_feature_names_numpy2():
X = np.array([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
assert(feature_names_in == ["feature_0001", "feature_0002", "feature_0003"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_data_frame1():
X = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
assert(feature_names_in == ["0", "1", "2"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_data_frame2():
X = pd.DataFrame()
X["feature1"] = [1, 4]
X["feature2"] = [2, 5]
X["feature3"] = [3, 6]
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
assert(feature_names_in == ["feature1", "feature2", "feature3"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_scipy():
X = sp.sparse.csc_matrix([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
assert(feature_names_in == ["feature_0001", "feature_0002", "feature_0003"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_dict1():
X = {"feature1" : [1], "feature2" : [2], "feature3" : [3]}
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X)
assert(feature_names_in == ["feature1", "feature2", "feature3"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[2][1][0] == 3.0)
def test_unify_feature_names_dict2():
X = {"feature2" : [1, 4], "feature1" : [2, 5], "feature3" : [3, 6]}
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
assert(feature_names_in == ["feature1", "feature2", "feature3"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 2.0)
assert(X_cols[0][1][1] == 5.0)
assert(X_cols[1][1][0] == 1.0)
assert(X_cols[1][1][1] == 4.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_list1():
X = [1, 2, 3]
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X)
assert(feature_names_in == ["feature_0001", "feature_0002", "feature_0003"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[2][1][0] == 3.0)
def test_unify_feature_names_list2():
X = [pd.Series([1, 2, 3]), (4, 5, 6)]
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
assert(feature_names_in == ["feature_0001", "feature_0002", "feature_0003"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_tuple1():
X = (1, 2, 3)
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X)
assert(feature_names_in == ["feature_0001", "feature_0002", "feature_0003"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[2][1][0] == 3.0)
def test_unify_feature_names_tuple2():
X = (np.array([1, 2, 3]), [4, 5, 6])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
assert(feature_names_in == ["feature_0001", "feature_0002", "feature_0003"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_feature_types1():
X = np.array([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_types_given=['continuous', 'continuous', 'continuous']
feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
assert(feature_names_in == ["feature_0001", "feature_0002", "feature_0003"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_feature_types2():
X = np.array([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_types_given=['continuous', 'ignore', 'continuous']
feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
assert(feature_names_in == ["feature_0001", "feature_0002", "feature_0003"])
X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(2 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 3.0)
assert(X_cols[1][1][1] == 6.0)
def test_unify_feature_names_feature_types3():
X = np.array([[1, 3], [4, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_types_given = ['continuous', 'ignore', 'continuous']
feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
assert(feature_names_in == ["feature_0001", "feature_0002", "feature_0003"])
X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(2 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 3.0)
assert(X_cols[1][1][1] == 6.0)
def test_unify_feature_names_pandas_feature_types1():
X = pd.DataFrame()
X["feature1"] = [1, 4]
X["feature2"] = [2, 5]
X["feature3"] = [3, 6]
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_types_given=['continuous', 'continuous', 'continuous']
feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
assert(feature_names_in == ["feature1", "feature2", "feature3"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_pandas_ignored_existing():
X = pd.DataFrame()
X["feature1"] = [1, 4]
X["feature2"] = [2, 5]
X["feature3"] = [3, 6]
feature_types_given=['continuous', 'ignore', 'continuous']
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
assert(feature_names_in == ["feature1", "feature2", "feature3"])
X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(2 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 3.0)
assert(X_cols[1][1][1] == 6.0)
def test_unify_feature_names_pandas_feature_types3():
X = pd.DataFrame()
X["feature1"] = [1, 4]
X["feature3"] = [3, 6]
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_types_given=['continuous', 'ignore', 'continuous']
feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
assert(feature_names_in == ["feature1", "feature_0001", "feature3"])
X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(2 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 3.0)
assert(X_cols[1][1][1] == 6.0)
def test_unify_feature_names_names1():
X = np.array([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X, feature_names_given=pd.Series([0, 1, 2]))
assert(feature_names_in == ["0", "1", "2"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_names2():
X = np.array([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X, feature_names_given=[0, "SOMETHING", 2])
assert(isinstance(feature_names_in, list))
assert(feature_names_in == ["0", "SOMETHING", "2"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_pandas_names1():
X = pd.DataFrame()
X["feature1"] = [1, 4]
X["feature2"] = [2, 5]
X["feature3"] = [3, 6]
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X, feature_names_given=pd.Series([0, 1, 2]))
assert(feature_names_in == ["0", "1", "2"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_pandas_names2():
X = pd.DataFrame()
X["feature1"] = [1, 4]
X["feature2"] = [2, 5]
X["feature3"] = [3, 6]
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X, feature_names_given=[0, "SOMETHING", 2])
assert(feature_names_in == ["0", "SOMETHING", "2"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_types_names1():
X = np.array([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_types_given=['continuous', 'continuous', 'continuous']
feature_names_in = unify_feature_names(X, feature_names_given=pd.Series([0, 1, 2]), feature_types_given=feature_types_given)
assert(feature_names_in == ["0", "1", "2"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_types_names2():
X = np.array([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_types_given=['continuous', 'continuous', 'continuous']
feature_names_in = unify_feature_names(X, feature_names_given=[0, "SOMETHING", 2], feature_types_given=feature_types_given)
assert(feature_names_in == ["0", "SOMETHING", "2"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_types_pandas_names1():
X = pd.DataFrame()
X["feature1"] = [1, 4]
X["feature2"] = [2, 5]
X["feature3"] = [3, 6]
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_types_given=['continuous', 'continuous', 'continuous']
feature_names_in = unify_feature_names(X, feature_names_given=pd.Series([0, 1, 2]), feature_types_given=feature_types_given)
assert(feature_names_in == ["0", "1", "2"])
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
assert(3 == len(X_cols))
assert(X_cols[0][1][0] == 1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_types_pandas_names2():
X =
| pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
from random import randrange
from sklearn.model_selection import KFold, StratifiedKFold, GroupKFold
from sklearn.metrics import average_precision_score, f1_score, recall_score, accuracy_score, roc_auc_score
from lightgbm import LGBMClassifier
class LGBM_Model:
def __init__(self, features):
self.features = features
self.clf = None
def build_clf(self, n_estimators = 1000, learning_rate = 0.1, num_leaves = 16, reg_alpha = 10, reg_lambda = 7):
self.clf = LGBMClassifier( boosting_type= 'gbdt',
silent = False,
metric = 'None',
n_jobs = -1,
random_state = 10,
n_estimators = n_estimators,
max_depth = -1,
learning_rate= learning_rate,
num_leaves = num_leaves,
reg_alpha = reg_alpha,
reg_lambda = reg_lambda,
min_child_samples = 200,
# is_unbalance= 'True',
# subsample = 1,
# colsample_bytree = 1,
# min_child_weight = 1,
# min_split_gain= 0.0,
# objective= 'regression_l1',
# subsample_for_bin= 240000,
# subsample_freq= 1,
# class_weight= 'balanced',
# scale_pos_weight = 2,
)
def run(self, data, y, groups, test , eval_metric, n_splits = 10, early_stopping_rounds= 100):
oof_preds_LGBM = np.zeros((data.shape[0]))
sub_preds_LGBM = np.zeros((test.shape[0]))
df_sub_preds_LGBM = pd.DataFrame()
self.df_feature_importance = pd.DataFrame()
if not self.clf:
self.build_clf()
folds = GroupKFold(n_splits = n_splits)
for n_fold, (train_idx, valid_idx) in enumerate(folds.split(data, y, groups)):
train_x, train_y = data.iloc[train_idx], y.iloc[train_idx]
valid_x, valid_y = data.iloc[valid_idx], y.iloc[valid_idx]
print("Starting LightGBM. Fold {}, Train shape: {}, test shape: {}".format(n_fold+1, data.shape, test.shape))
self.clf.fit(train_x, train_y,
eval_set = [(train_x, train_y), (valid_x, valid_y)],
eval_metric = eval_metric,
verbose= 100,
early_stopping_rounds = early_stopping_rounds,
categorical_feature = 'auto',
)
oof_preds_LGBM[valid_idx] += self.clf.predict_proba(valid_x)[:, 1]
# sub_preds_LGBM += self.clf.predict_proba(test)[:, 1]/ (folds.n_splits)
df_sub_preds_LGBM['fold_{}'.format(n_fold)] = self.clf.predict_proba(test)[:, 1]
df_fold_importance = pd.DataFrame()
df_fold_importance["feature"] = self.features
df_fold_importance["importance"] = self.clf.feature_importances_
df_fold_importance["fold"] = n_fold + 1
self.df_feature_importance =
| pd.concat([self.df_feature_importance, df_fold_importance], axis=0) | pandas.concat |
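# Editor's note: the helper below is a hedged usage sketch of the LGBM_Model
# wrapper shown above; it is not part of the original source. The synthetic
# frame, target and group labels are illustrative assumptions, and it assumes
# a lightgbm version whose fit() still accepts verbose/early_stopping_rounds
# (pre-4.0), as the wrapper itself does.
def _example_lgbm_run():
    rng = np.random.default_rng(0)
    n = 1000
    feature_cols = ["f1", "f2", "f3"]
    train_df = pd.DataFrame(rng.normal(size=(n, 3)), columns=feature_cols)
    target = (train_df["f1"] + rng.normal(size=n) > 0).astype(int)
    groups = pd.Series(rng.integers(0, 20, size=n))  # fold groups for GroupKFold
    test_df = pd.DataFrame(rng.normal(size=(200, 3)), columns=feature_cols)
    model = LGBM_Model(features=feature_cols)
    model.build_clf(n_estimators=200, learning_rate=0.05, num_leaves=31)
    model.run(train_df, target, groups, test_df, eval_metric="auc",
              n_splits=5, early_stopping_rounds=50)
    # Per-fold feature importances accumulate in model.df_feature_importance.
    return model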
from context import dero
import pandas as pd
from pandas.util.testing import assert_frame_equal
from pandas import Timestamp
from numpy import nan
import numpy
class DataFrameTest:
df = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_duplicate_row = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/3/2000', 1.03), #this is a duplicated row
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_weight = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0),
(10516, 'a', '1/2/2000', 1.02, 1),
(10516, 'a', '1/3/2000', 1.03, 1),
(10516, 'a', '1/4/2000', 1.04, 0),
(10516, 'b', '1/1/2000', 1.05, 1),
(10516, 'b', '1/2/2000', 1.06, 1),
(10516, 'b', '1/3/2000', 1.07, 1),
(10516, 'b', '1/4/2000', 1.08, 1),
(10517, 'a', '1/1/2000', 1.09, 0),
(10517, 'a', '1/2/2000', 1.1, 0),
(10517, 'a', '1/3/2000', 1.11, 0),
(10517, 'a', '1/4/2000', 1.12, 1),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight'])
df_nan_byvar = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', 3),
('b', 4),
], columns = ['byvar', 'val'])
df_nan_byvar_and_val = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', nan),
('b', 4),
], columns = ['byvar', 'val'])
single_ticker_df = pd.DataFrame(data = [
('a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['byvar', 'Date', 'TICKER'])
df_datetime = df.copy()
df_datetime['Date'] = pd.to_datetime(df_datetime['Date'])
df_datetime_no_ret = df_datetime.copy()
df_datetime_no_ret.drop('RET', axis=1, inplace=True)
df_gvkey_str = pd.DataFrame([
('001076','3/1/1995'),
('001076','4/1/1995'),
('001722','1/1/2012'),
('001722','7/1/2012'),
('001722', nan),
(nan ,'1/1/2012')
], columns=['GVKEY','Date'])
df_gvkey_str['Date'] = pd.to_datetime(df_gvkey_str['Date'])
df_gvkey_num = df_gvkey_str.copy()
df_gvkey_num['GVKEY'] = df_gvkey_num['GVKEY'].astype('float64')
df_gvkey_str2 = pd.DataFrame([
('001076','2/1/1995'),
('001076','3/2/1995'),
('001722','11/1/2011'),
('001722','10/1/2011'),
('001722', nan),
(nan ,'1/1/2012')
], columns=['GVKEY','Date'])
df_gvkey_str2['Date'] = pd.to_datetime(df_gvkey_str2['Date'])
df_fill_data = pd.DataFrame(
data=[
(4, 'c', nan, 'a'),
(1, 'd', 3, 'a'),
(10, 'e', 100, 'a'),
(2, nan, 6, 'b'),
(5, 'f', 8, 'b'),
(11, 'g', 150, 'b'),
],
columns=['y', 'x1', 'x2', 'group']
)
class TestCumulate(DataFrameTest):
expect_between_1_3 = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1.01),
(10516, 'a', '1/2/2000', 1.02, 1.02),
(10516, 'a', '1/3/2000', 1.03, 1.0506),
(10516, 'a', '1/4/2000', 1.04, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.05),
(10516, 'b', '1/2/2000', 1.06, 1.06),
(10516, 'b', '1/3/2000', 1.07, 1.1342),
(10516, 'b', '1/4/2000', 1.08, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.09),
(10517, 'a', '1/2/2000', 1.1, 1.1),
(10517, 'a', '1/3/2000', 1.11, 1.2210000000000003),
(10517, 'a', '1/4/2000', 1.12, 1.12),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'cum_RET'])
expect_first = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01, 1.01),
(10516, 'a', '1/2/2000', 1.02, 1.02),
(10516, 'a', '1/3/2000', 1.03, 1.0506),
(10516, 'a', '1/4/2000', 1.04, 1.092624),
(10516, 'b', '1/1/2000', 1.05, 1.05),
(10516, 'b', '1/2/2000', 1.06, 1.06),
(10516, 'b', '1/3/2000', 1.07, 1.1342),
(10516, 'b', '1/4/2000', 1.08, 1.224936),
(10517, 'a', '1/1/2000', 1.09, 1.09),
(10517, 'a', '1/2/2000', 1.10, 1.10),
(10517, 'a', '1/3/2000', 1.11, 1.221),
(10517, 'a', '1/4/2000', 1.12, 1.36752),
], columns = ['PERMNO','byvar','Date', 'RET', 'cum_RET'])
def test_method_between_1_3(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[1,3])
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_method_between_m2_0(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[-2,0])
#Actually same result as [1,3]
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_shifted_index(self):
df = self.df.copy()
df.index = df.index + 10
cum_df = dero.pandas.cumulate(df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[-2,0])
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_method_first(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'first', periodvar='Date',
byvars=['PERMNO','byvar'])
assert_frame_equal(self.expect_first, cum_df, check_dtype=False)
def test_grossify(self):
df = self.df.copy() #don't overwrite original
df['RET'] -= 1 #ungrossify
expect_first_grossify = self.expect_first.copy()
expect_first_grossify['cum_RET'] -= 1
expect_first_grossify['RET'] -= 1
cum_df = dero.pandas.cumulate(df, 'RET', 'first', periodvar='Date',
byvars=['PERMNO','byvar'], grossify=True)
assert_frame_equal(expect_first_grossify, cum_df, check_dtype=False)
class TestGroupbyMerge(DataFrameTest):
def test_subset_max(self):
byvars = ['PERMNO','byvar']
out = dero.pandas.groupby_merge(self.df, byvars, 'max', subset='RET')
expect_df = pd.DataFrame(
[(10516, 'a', '1/1/2000', 1.01, 1.04),
(10516, 'a', '1/2/2000', 1.02, 1.04),
(10516, 'a', '1/3/2000', 1.03, 1.04),
(10516, 'a', '1/4/2000', 1.04, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.08),
(10516, 'b', '1/2/2000', 1.06, 1.08),
(10516, 'b', '1/3/2000', 1.07, 1.08),
(10516, 'b', '1/4/2000', 1.08, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.12),
(10517, 'a', '1/2/2000', 1.10, 1.12),
(10517, 'a', '1/3/2000', 1.11, 1.12),
(10517, 'a', '1/4/2000', 1.12, 1.12)],
columns = ['PERMNO','byvar','Date', 'RET', 'RET_max'])
assert_frame_equal(expect_df, out)
def test_subset_std(self):
byvars = ['PERMNO','byvar']
out = dero.pandas.groupby_merge(self.df, byvars, 'std', subset='RET')
expect_df = pd.DataFrame(
[(10516, 'a', '1/1/2000', 1.01, 0.012909944487358068),
(10516, 'a', '1/2/2000', 1.02, 0.012909944487358068),
(10516, 'a', '1/3/2000', 1.03, 0.012909944487358068),
(10516, 'a', '1/4/2000', 1.04, 0.012909944487358068),
(10516, 'b', '1/1/2000', 1.05, 0.012909944487358068),
(10516, 'b', '1/2/2000', 1.06, 0.012909944487358068),
(10516, 'b', '1/3/2000', 1.07, 0.012909944487358068),
(10516, 'b', '1/4/2000', 1.08, 0.012909944487358068),
(10517, 'a', '1/1/2000', 1.09, 0.012909944487358068),
(10517, 'a', '1/2/2000', 1.10, 0.012909944487358068),
(10517, 'a', '1/3/2000', 1.11, 0.012909944487358068),
(10517, 'a', '1/4/2000', 1.12, 0.012909944487358068)],
columns = ['PERMNO','byvar','Date', 'RET', 'RET_std'])
assert_frame_equal(expect_df, out)
def test_nan_byvar_transform(self):
expect_df = self.df_nan_byvar.copy()
expect_df['val_transform'] = expect_df['val']
out = dero.pandas.groupby_merge(self.df_nan_byvar, 'byvar', 'transform', (lambda x: x))
assert_frame_equal(expect_df, out)
def test_nan_byvar_and_nan_val_transform_numeric(self):
non_standard_index = self.df_nan_byvar_and_val.copy()
non_standard_index.index = [5,6,7,8]
expect_df = self.df_nan_byvar_and_val.copy()
expect_df['val_transform'] = expect_df['val'] + 1
expect_df.index = [5,6,7,8]
out = dero.pandas.groupby_merge(non_standard_index, 'byvar', 'transform', (lambda x: x + 1))
assert_frame_equal(expect_df, out)
def test_nan_byvar_and_nan_val_and_nonstandard_index_transform_numeric(self):
expect_df = self.df_nan_byvar_and_val.copy()
expect_df['val_transform'] = expect_df['val'] + 1
def test_nan_byvar_sum(self):
expect_df = pd.DataFrame(data = [
('a', 1, 1.0),
(nan, 2, nan),
('b', 3, 7.0),
('b', 4, 7.0),
], columns = ['byvar', 'val', 'val_sum'])
out = dero.pandas.groupby_merge(self.df_nan_byvar, 'byvar', 'sum')
assert_frame_equal(expect_df, out)
class TestLongToWide:
expect_df_with_colindex = pd.DataFrame(data = [
(10516, 'a', 1.01, 1.02, 1.03, 1.04),
(10516, 'b', 1.05, 1.06, 1.07, 1.08),
(10517, 'a', 1.09, 1.1, 1.11, 1.12),
], columns = ['PERMNO', 'byvar',
'RET1/1/2000', 'RET1/2/2000',
'RET1/3/2000', 'RET1/4/2000'])
expect_df_no_colindex = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/2/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/3/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/4/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/2/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/3/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/4/2000', 1.05, 1.06, 1.07, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/2/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/3/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/4/2000', 1.09, 1.1, 1.11, 1.12),
], columns = ['PERMNO', 'byvar', 'Date', 'RET0',
'RET1', 'RET2', 'RET3'])
input_data = DataFrameTest()
ltw_no_dup_colindex = dero.pandas.long_to_wide(input_data.df,
['PERMNO', 'byvar'], 'RET', colindex='Date')
ltw_dup_colindex = dero.pandas.long_to_wide(input_data.df_duplicate_row,
['PERMNO', 'byvar'], 'RET', colindex='Date')
ltw_no_dup_no_colindex = dero.pandas.long_to_wide(input_data.df,
['PERMNO', 'byvar'], 'RET')
ltw_dup_no_colindex = dero.pandas.long_to_wide(input_data.df_duplicate_row,
['PERMNO', 'byvar'], 'RET')
df_list = [ltw_no_dup_colindex, ltw_dup_colindex,
ltw_no_dup_no_colindex, ltw_dup_no_colindex]
def test_no_duplicates_with_colindex(self):
assert_frame_equal(self.expect_df_with_colindex, self.ltw_no_dup_colindex)
def test_duplicates_with_colindex(self):
assert_frame_equal(self.expect_df_with_colindex, self.ltw_dup_colindex)
def test_no_duplicates_no_colindex(self):
assert_frame_equal(self.expect_df_no_colindex, self.ltw_no_dup_no_colindex)
def test_duplicates_no_colindex(self):
assert_frame_equal(self.expect_df_no_colindex, self.ltw_dup_no_colindex)
def test_no_extra_vars(self):
for df in self.df_list:
assert ('__idx__','__key__') not in df.columns
class TestPortfolioAverages:
input_data = DataFrameTest()
expect_avgs_no_wt = pd.DataFrame(data = [
(1, 'a', 1.0250000000000001),
(1, 'b', 1.0550000000000002),
(2, 'a', 1.1050000000000002),
(2, 'b', 1.0750000000000002),
], columns = ['portfolio', 'byvar', 'RET'])
expect_avgs_wt = pd.DataFrame(data = [
(1, 'a', 1.0250000000000001, 1.025),
(1, 'b', 1.0550000000000002, 1.0550000000000002),
(2, 'a', 1.1050000000000002, 1.12),
(2, 'b', 1.0750000000000002, 1.0750000000000002),
], columns = ['portfolio', 'byvar', 'RET', 'RET_wavg'])
expect_ports = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0, 1),
(10516, 'a', '1/2/2000', 1.02, 1, 1),
(10516, 'a', '1/3/2000', 1.03, 1, 1),
(10516, 'a', '1/4/2000', 1.04, 0, 1),
(10516, 'b', '1/1/2000', 1.05, 1, 1),
(10516, 'b', '1/2/2000', 1.06, 1, 1),
(10516, 'b', '1/3/2000', 1.07, 1, 2),
(10516, 'b', '1/4/2000', 1.08, 1, 2),
(10517, 'a', '1/1/2000', 1.09, 0, 2),
(10517, 'a', '1/2/2000', 1.1, 0, 2),
(10517, 'a', '1/3/2000', 1.11, 0, 2),
(10517, 'a', '1/4/2000', 1.12, 1, 2),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight', 'portfolio'])
avgs, ports = dero.pandas.portfolio_averages(input_data.df_weight, 'RET', 'RET', ngroups=2,
byvars='byvar')
w_avgs, w_ports = dero.pandas.portfolio_averages(input_data.df_weight, 'RET', 'RET', ngroups=2,
byvars='byvar', wtvar='weight')
def test_simple_averages(self):
assert_frame_equal(self.expect_avgs_no_wt, self.avgs, check_dtype=False)
def test_weighted_averages(self):
assert_frame_equal(self.expect_avgs_wt, self.w_avgs, check_dtype=False)
def test_portfolio_construction(self):
print(self.ports)
assert_frame_equal(self.expect_ports, self.ports, check_dtype=False)
assert_frame_equal(self.expect_ports, self.w_ports, check_dtype=False)
class TestWinsorize(DataFrameTest):
def test_winsor_40_subset_byvars(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.022624),
(10516, 'a', '1/2/2000', 1.022624),
(10516, 'a', '1/3/2000', 1.02672),
(10516, 'a', '1/4/2000', 1.02672),
(10516, 'b', '1/1/2000', 1.062624),
(10516, 'b', '1/2/2000', 1.062624),
(10516, 'b', '1/3/2000', 1.06672),
(10516, 'b', '1/4/2000', 1.06672),
(10517, 'a', '1/1/2000', 1.102624),
(10517, 'a', '1/2/2000', 1.102624),
(10517, 'a', '1/3/2000', 1.10672),
(10517, 'a', '1/4/2000', 1.10672),
], columns = ['PERMNO', 'byvar', 'Date', 'RET'])
wins = dero.pandas.winsorize(self.df, .4, subset='RET', byvars=['PERMNO','byvar'])
assert_frame_equal(expect_df, wins, check_less_precise=True)
class TestRegBy(DataFrameTest):
def create_indf(self):
indf = self.df_weight.copy()
indf['key'] = indf['PERMNO'].astype(str) + '_' + indf['byvar']
return indf
def test_regby_nocons(self):
indf = self.create_indf()
expect_df = pd.DataFrame(data = [
(0.48774684748988806, '10516_a'),
(0.9388636664168903, '10516_b'),
(0.22929206076239614, '10517_a'),
], columns = ['coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key', cons=False)
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
def test_regby_cons(self):
indf = self.create_indf()
expect_df = pd.DataFrame(data = [
(0.49999999999999645, 5.329070518200751e-15, '10516_a'),
(0.9999999999999893, 1.0658141036401503e-14, '10516_b'),
(-32.89999999999997, 29.999999999999982, '10517_a'),
], columns = ['const', 'coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key')
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
def test_regby_cons_low_obs(self):
indf = self.create_indf().loc[:8,:] #makes it so that one byvar only has one obs
expect_df = pd.DataFrame(data = [
(0.49999999999999645, 5.329070518200751e-15, '10516_a'),
(0.9999999999999893, 1.0658141036401503e-14, '10516_b'),
(nan, nan, '10517_a'),
], columns = ['const', 'coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key')
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
class TestExpandMonths(DataFrameTest):
def test_expand_months_tradedays(self):
expect_df = pd.DataFrame(data = [
(Timestamp('2000-01-03 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-04 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-05 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-06 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-07 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-10 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-11 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-12 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-13 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-14 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-18 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-19 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-20 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-21 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-24 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-25 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-26 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-27 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-28 00:00:00'), 'a',
| Timestamp('2000-01-01 00:00:00') | pandas.Timestamp |
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=
| Index(["A", "B", "C"], name="exp") | pandas.Index |
# -*- coding: utf-8 -*-
# Copyright (c) 2016 by University of Kassel and Fraunhofer Institute for Wind Energy and Energy
# System Technology (IWES), Kassel. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
import numpy as np
import pandas as pd
from pandapower.std_types import add_basic_std_types, load_std_type
from pandapower.auxiliary import PandapowerNet, get_free_id, _preserve_dtypes
from pandapower.run import reset_results
def create_empty_network(name=None, f_hz=50.):
"""
This function initializes the pandapower datastructure.
OPTIONAL:
**f_hz** (float, 50.) - power system frequency in hertz
**name** (string, None) - name for the network
RETURN:
**net** (attrdict) - PANDAPOWER attrdict with empty tables:
- bus
- ext_grid
- gen
- impedance
- line
- load
- sgen
- shunt
- trafo
- trafo3w
- ward
- xward
EXAMPLE:
net = create_empty_network()
"""
net = PandapowerNet({
# structure data
"bus": [('name', np.dtype(object)),
('vn_kv', 'f8'),
('type', np.dtype(object)),
('zone', np.dtype(object)),
('in_service', 'bool'), ],
"load": [("name", np.dtype(object)),
("bus", "u4"),
("p_kw", "f8"),
("q_kvar", "f8"),
("sn_kva", "f8"),
("scaling", "f8"),
("in_service", 'bool'),
("type", np.dtype(object))],
"sgen": [("name", np.dtype(object)),
("bus", "i8"),
("p_kw", "f8"),
("q_kvar", "f8"),
("sn_kva", "f8"),
("scaling", "f8"),
("in_service", 'bool'),
("type", np.dtype(object))],
"gen": [("name", np.dtype(object)),
("bus", "u4"),
("p_kw", "f8"),
("vm_pu", "f8"),
("sn_kva", "f8"),
("min_q_kvar", "f8"),
("max_q_kvar", "f8"),
("scaling", "f8"),
("in_service", 'bool'),
("type", np.dtype(object))],
"switch": [("bus", "i8"),
("element", "i8"),
("et", np.dtype(object)),
("type", np.dtype(object)),
("closed", "bool"),
("name", np.dtype(object))],
"shunt": [("bus", "u4"),
("name", np.dtype(object)),
("q_kvar", "f8"),
("p_kw", "f8"),
("in_service", "i8")],
"ext_grid": [("name", np.dtype(object)),
("bus", "u4"),
("vm_pu", "f8"),
("va_degree", "f8"),
("in_service", 'bool')],
"line": [("name", np.dtype(object)),
("std_type", np.dtype(object)),
("from_bus", "u4"),
("to_bus", "u4"),
("length_km", "f8"),
("r_ohm_per_km", "f8"),
("x_ohm_per_km", "f8"),
("c_nf_per_km", "f8"),
("imax_ka", "f8"),
("df", "f8"),
("parallel", "u4"),
("type", np.dtype(object)),
("in_service", 'bool')],
"trafo": [("name", np.dtype(object)),
("std_type", np.dtype(object)),
("hv_bus", "u4"),
("lv_bus", "u4"),
("sn_kva", "f8"),
("vn_hv_kv", "f8"),
("vn_lv_kv", "f8"),
("vsc_percent", "f8"),
("vscr_percent", "f8"),
("pfe_kw", "f8"),
("i0_percent", "f8"),
("shift_degree", "f8"),
("tp_side", np.dtype(object)),
("tp_mid", "i4"),
("tp_min", "i4"),
("tp_max", "i4"),
("tp_st_percent", "f8"),
("tp_pos", "i4"),
("in_service", 'bool')],
"trafo3w": [("name", np.dtype(object)),
("std_type", np.dtype(object)),
("hv_bus", "u4"),
("mv_bus", "u4"),
("lv_bus", "u4"),
("sn_hv_kva", "u8"),
("sn_mv_kva", "u8"),
("sn_lv_kva", "u8"),
("vn_hv_kv", "f8"),
("vn_mv_kv", "f8"),
("vn_lv_kv", "f8"),
("vsc_hv_percent", "f8"),
("vsc_mv_percent", "f8"),
("vsc_lv_percent", "f8"),
("vscr_hv_percent", "f8"),
("vscr_mv_percent", "f8"),
("vscr_lv_percent", "f8"),
("pfe_kw", "f8"),
("i0_percent", "f8"),
("shift_mv_degree", "f8"),
("shift_lv_degree", "f8"),
("tp_side", np.dtype(object)),
("tp_mid", "i4"),
("tp_min", "i4"),
("tp_max", "i4"),
("tp_st_percent", "f8"),
("tp_pos", "i4"),
("in_service", 'bool')],
"impedance": [("name", np.dtype(object)),
("from_bus", "u4"),
("to_bus", "u4"),
("r_pu", "f8"),
("x_pu", "f8"),
("sn_kva", "f8"),
("in_service", 'bool')],
"ward": [("name", np.dtype(object)),
("bus", "u4"),
("ps_kw", "f8"),
("qs_kvar", "f8"),
("qz_kvar", "f8"),
("pz_kw", "f8"),
("in_service", "f8")],
"xward": [("name", np.dtype(object)),
("bus", "u4"),
("ps_kw", "f8"),
("qs_kvar", "f8"),
("qz_kvar", "f8"),
("pz_kw", "f8"),
("r_ohm", "f8"),
("x_ohm", "f8"),
("vm_pu", "f8"),
("in_service", "f8")],
# geodata
"line_geodata": [("coords", np.dtype(object))],
"bus_geodata": [("x", "f8"), ("y", "f8")],
# result tables
"_empty_res_bus": [("vm_pu", "f8"),
("va_degree", "f8"),
("p_kw", "f8"),
("q_kvar", "f8")],
"_empty_res_ext_grid": [("p_kw", "f8"),
("q_kvar", "f8")],
"_empty_res_line": [("p_from_kw", "f8"),
("q_from_kvar", "f8"),
("p_to_kw", "f8"),
("q_to_kvar", "f8"),
("pl_kw", "f8"),
("ql_kvar", "f8"),
("i_ka", "f8"),
("loading_percent", "f8")],
"_empty_res_trafo": [("p_hv_kw", "f8"),
("q_hv_kvar", "f8"),
("p_lv_kw", "f8"),
("q_lv_kvar", "f8"),
("pl_kw", "f8"),
("ql_kvar", "f8"),
("i_hv_ka", "f8"),
("i_lv_ka", "f8"),
("loading_percent", "f8")],
"_empty_res_trafo3w": [("p_hv_kw", "f8"),
("q_hv_kvar", "f8"),
("p_mv_kw", "f8"),
("q_mv_kvar", "f8"),
("p_lv_kw", "f8"),
("q_lv_kvar", "f8"),
("pl_kw", "f8"),
("ql_kvar", "f8"),
("i_hv_ka", "f8"),
("i_mv_ka", "f8"),
("i_lv_ka", "f8"),
("loading_percent", "f8")],
"_empty_res_load": [("p_kw", "f8"),
("q_kvar", "f8")],
"_empty_res_sgen": [("p_kw", "f8"),
("q_kvar", "f8")],
"_empty_res_gen": [("p_kw", "f8"),
("q_kvar", "f8"),
("va_degree", "f8")],
"_empty_res_shunt": [("p_kw", "f8"),
("q_kvar", "f8"),
("vm_pu", "f8")],
"_empty_res_impedance": [("p_from_kw", "f8"),
("q_from_kvar", "f8"),
("p_to_kw", "f8"),
("q_to_kvar", "f8"),
("pl_kw", "f8"),
("ql_kvar", "f8"),
("i_from_ka", "f8"),
("i_to_ka", "f8")],
"_empty_res_ward": [("p_kw", "f8"),
("q_kvar", "f8"),
("vm_pu", "f8")],
"_empty_res_xward": [("p_kw", "f8"),
("q_kvar", "f8"),
("vm_pu", "f8")],
# internal
"_ppc": None,
"version": 1.0,
"converged": False,
"name": name,
"f_hz": f_hz
})
for s in net:
if isinstance(net[s], list):
net[s] = pd.DataFrame(np.zeros(0, dtype=net[s]))
add_basic_std_types(net)
reset_results(net)
return net
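# Editor's note: a small hedged sketch (hypothetical helper, not in the
# original source) illustrating that create_empty_network() returns a net
# whose element tables are empty DataFrames with the dtypes declared above.
def _example_empty_network():
    net = create_empty_network(name="example_net", f_hz=50.)
    assert net["bus"].empty
    assert list(net["bus"].columns) == ["name", "vn_kv", "type", "zone", "in_service"]
    return net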
def create_bus(net, vn_kv, name=None, index=None, geodata=None, type="b",
zone=None, in_service=True, max_vm_pu=np.nan,
min_vm_pu=np.nan, **kwargs):
"""
Adds one bus in table net["bus"].
Busses are the nodes of the network that all other elements connect to.
INPUT:
**net** (PandapowerNet) - The pandapower network in which the element is created
**vn_kv** (float) - The grid voltage level.
OPTIONAL:
**name** (string, default None) - the name for this bus
**index** (int, default None) - Force a specified ID if it is available
**geodata** ((x, y)-tuple, default None) - coordinates used for plotting
**type** (string, default "b") - Type of the bus. "n" - auxiliary node,
"b" - busbar, "m" - muff
**zone** (string, None) - grid region
**in_service** (boolean) - True for in_service or False for out of service
OUTPUT:
**eid** (int) - The index of the created element
EXAMPLE:
create_bus(net, name = "bus1")
"""
if index and index in net["bus"].index:
raise UserWarning("A bus with index %s already exists" % index)
if index is None:
index = get_free_id(net["bus"])
# store dtypes
dtypes = net.bus.dtypes
net.bus.loc[index, ["name", "vn_kv", "type", "zone", "in_service"]] = \
[name, vn_kv, type, zone, bool(in_service)]
# and preserve dtypes
_preserve_dtypes(net.bus, dtypes)
if geodata:
if len(geodata) != 2:
raise UserWarning("geodata must be given as (x, y) tuple")
net["bus_geodata"].loc[index, ["x", "y"]] = geodata
if not np.isnan(min_vm_pu):
if "min_vm_pu" not in net.bus.columns:
net.bus.loc[:, "min_vm_pu"] = pd.Series()
net.bus.loc[index, "min_vm_pu"] = float(min_vm_pu)
if not np.isnan(max_vm_pu):
if "max_vm_pu" not in net.bus.columns:
net.bus.loc[:, "max_vm_pu"] = pd.Series()
net.bus.loc[index, "max_vm_pu"] = float(max_vm_pu)
return index
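# Editor's note: a hedged usage sketch (hypothetical helper, not in the
# original source) for create_bus(), combining the docstring example with the
# optional geodata and voltage-limit arguments shown in the signature.
def _example_create_bus():
    net = create_empty_network()
    b1 = create_bus(net, vn_kv=20., name="bus1")
    b2 = create_bus(net, vn_kv=0.4, name="bus2", geodata=(1.0, 2.0),
                    min_vm_pu=0.95, max_vm_pu=1.05)
    return net, b1, b2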
def create_buses(net, nr_buses, vn_kv=0.4, index=None, name=None, type="b", geodata=None,
zone=None, in_service=True):
"""
Adds several buses in table net["bus"] at once.
Busses are the nodal points of the network that all other elements connect to.
Input:
**net** (PandapowerNet) - The pandapower network in which the element is created
**nr_buses** (int) - The number of buses that is created
OPTIONAL:
**name** (string, default None) - the name for this bus
**index** (int, default None) - Force a specified ID if it is available
**vn_kv** (float, default 0.4) - The grid voltage level.
**geodata** ((x,y)-tuple, default None) - coordinates used for plotting
**type** (string, default "b") - Type of the bus. "n" - auxiliary node,
"b" - busbar, "m" - muff
**zone** (string, None) - grid region
**in_service** (boolean) - True for in_service or False for out of service
OUTPUT:
**eid** (int) - The indices of the created elements
EXAMPLE:
create_buses(net, nr_buses=2, name="bus")
"""
if index:
for idx in index:
if idx in net.bus.index:
raise UserWarning("A bus with index %s already exists" % index)
else:
bid = get_free_id(net["bus"])
index = np.arange(bid, bid + nr_buses, 1)
# TODO: not needed when concating anyways?
# store dtypes
# dtypes = net.bus.dtypes
dd = pd.DataFrame(index=index, columns=net.bus.columns)
dd["vn_kv"] = vn_kv
dd["type"] = type
dd["zone"] = zone
dd["in_service"] = in_service
dd["name"] = name
net["bus"] =
| pd.concat([net["bus"], dd], axis=0) | pandas.concat |
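# Editor's note: a hedged usage sketch (hypothetical helper, not in the
# original source) for create_buses(), which adds several identically
# parameterised buses in one call; capturing the returned indices follows the
# OUTPUT section of the docstring, since the function body is truncated above.
def _example_create_buses():
    net = create_empty_network()
    new_idx = create_buses(net, nr_buses=3, vn_kv=10., zone="zone_a")
    return net, new_idx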
import pandas as pd
import networkx as nx
import pytest
from kgextension.feature_selection import hill_climbing_filter, hierarchy_based_filter, tree_based_filter
from kgextension.generator import specific_relation_generator, direct_type_generator
class TestHillCLimbingFilter:
def test1_high_beta(self):
input_df = pd.read_csv("test/data/feature_selection/hill_climbing_test1_input.csv")
input_DG = nx.DiGraph()
labels = ['http://chancellor', 'http://president', 'http://European_politician',
'http://head_of_state', 'http://politician', 'http://man', 'http://person', 'http://being']
input_DG.add_nodes_from(labels)
input_DG.add_edges_from([('http://chancellor', 'http://politician'), ('http://president', 'http://politician'),
('http://chancellor', 'http://head_of_state'), ('http://president', 'http://head_of_state'), ('http://head_of_state', 'http://person'),
('http://European_politician', 'http://politician'), ('http://politician', 'http://person'),
('http://man', 'http://person'), ('http://person', 'http://being')])
expected_df = pd.read_csv("test/data/feature_selection/hill_climbing_test1_expected.csv")
output_df = hill_climbing_filter(input_df, 'uri_bool_http://class', G= input_DG, beta=0.5, k=2)
pd.testing.assert_frame_equal(output_df, expected_df, check_like=True)
def test2_generator_data_low_beta(self):
df = pd.DataFrame({
'entities': ['Paris', 'Buenos Aires', 'Mannheim', "München"],
'link': ['http://dbpedia.org/resource/Paris', 'http://dbpedia.org/resource/Buenos_Aires',
'http://dbpedia.org/resource/Mannheim', 'http://dbpedia.org/resource/Munich']
})
input_df = specific_relation_generator(
df, columns=['link'], hierarchy_relation='http://www.w3.org/2004/02/skos/core#broader')
expected_df = pd.read_csv("test/data/feature_selection/hill_climbing_test2_expected.csv")
output_df = hill_climbing_filter(input_df, 'link_in_boolean_http://dbpedia.org/resource/Category:Prefectures_in_France', beta=0.05, k=3)
pd.testing.assert_frame_equal(output_df, expected_df, check_like=True)
def test3_nan(self):
input_df = pd.read_csv("test/data/feature_selection/hill_climbing_test3_input.csv")
input_DG = nx.DiGraph()
labels = ['http://chancellor', 'http://president', 'http://European_politician',
'http://head_of_state', 'http://politician', 'http://man', 'http://person', 'http://being']
input_DG.add_nodes_from(labels)
input_DG.add_edges_from([('http://chancellor', 'http://politician'), ('http://president', 'http://politician'),
('http://chancellor', 'http://head_of_state'), ('http://president', 'http://head_of_state'), ('http://head_of_state', 'http://person'),
('http://European_politician', 'http://politician'), ('http://politician', 'http://person'),
('http://man', 'http://person'), ('http://person', 'http://being')])
expected_df = pd.read_csv("test/data/feature_selection/hill_climbing_test3_expected.csv")
output_df = hill_climbing_filter(input_df, 'class', G= input_DG, beta=0.5, k=2)
pd.testing.assert_frame_equal(output_df, expected_df, check_like=True)
def test4_callable_function(self):
input_df = pd.read_csv("test/data/feature_selection/hill_climbing_test1_input.csv")
input_DG = nx.DiGraph()
labels = ['http://chancellor', 'http://president', 'http://European_politician',
'http://head_of_state', 'http://politician', 'http://man', 'http://person', 'http://being']
input_DG.add_nodes_from(labels)
input_DG.add_edges_from([('http://chancellor', 'http://politician'), ('http://president', 'http://politician'),
('http://chancellor', 'http://head_of_state'), ('http://president', 'http://head_of_state'), ('http://head_of_state', 'http://person'),
('http://European_politician', 'http://politician'), ('http://politician', 'http://person'),
('http://man', 'http://person'), ('http://person', 'http://being')])
def fake_metric(df, class_col, param=5):
return 1/((df.sum(axis=1)*class_col).sum()/param)
expected_df = pd.read_csv("test/data/feature_selection/hill_climbing_test4_expected.csv")
output_df = hill_climbing_filter(input_df, 'uri_bool_http://class', metric=fake_metric, G= input_DG, param=6)
pd.testing.assert_frame_equal(output_df, expected_df, check_like=True)
def test5_no_graph(self):
input_df = pd.read_csv("test/data/feature_selection/hill_climbing_test3_input.csv")
with pytest.raises(RuntimeError) as excinfo:
_ = hill_climbing_filter(input_df, 'class', beta=0.5, k=2)
assert "df.attrs['hierarchy]" in str(excinfo.value)
class TestHierarchyBasedFilter():
def test1_no_pruning_info_gain_with_G(self):
df = pd.DataFrame({
'entities': ['Paris', 'Buenos Aires', 'Mannheim', "München"],
'link': ['http://dbpedia.org/resource/Paris', 'http://dbpedia.org/resource/Buenos_Aires',
'http://dbpedia.org/resource/Mannheim', 'http://dbpedia.org/resource/Munich']
})
expected_df = pd.read_csv("test/data/feature_selection/hierarchy_based_test1_expected.csv")
input_df = direct_type_generator(df, ["link"], regex_filter=['A'], result_type="boolean", bundled_mode=True, hierarchy=True)
input_DG = input_df.attrs['hierarchy']
output_df = hierarchy_based_filter(input_df, "link", threshold=0.99, G=input_DG, metric="info_gain", pruning=False)
pd.testing.assert_frame_equal(output_df, expected_df, check_like=True)
def test2_no_pruning_correlation(self):
df = pd.DataFrame({
'entities': ['Paris', 'Buenos Aires', 'Mannheim', "München"],
'link': ['http://dbpedia.org/resource/Paris', 'http://dbpedia.org/resource/Buenos_Aires',
'http://dbpedia.org/resource/Mannheim', 'http://dbpedia.org/resource/Munich']
})
expected_df = pd.read_csv("test/data/feature_selection/hierarchy_based_test2_expected.csv")
input_df = direct_type_generator(df, ["link"], regex_filter=['A'], result_type="boolean", bundled_mode=True, hierarchy=True)
input_DG = input_df.attrs['hierarchy']
output_df = hierarchy_based_filter(input_df, "link", threshold=0.99, G=input_DG, metric="correlation", pruning=False)
pd.testing.assert_frame_equal(output_df, expected_df, check_like=True)
def test3_pruning_info_gain_all_remove_True(self):
df = pd.DataFrame({
'entities': ['Paris', 'Buenos Aires', 'Mannheim', "München"],
'link': ['http://dbpedia.org/resource/Paris', 'http://dbpedia.org/resource/Buenos_Aires',
'http://dbpedia.org/resource/Mannheim', 'http://dbpedia.org/resource/Munich']
})
expected_df = pd.read_csv("test/data/feature_selection/hierarchy_based_test3_expected.csv")
input_df = direct_type_generator(df, ["link"], regex_filter=['A'], result_type="boolean", bundled_mode=True, hierarchy=True)
input_DG = input_df.attrs['hierarchy']
output_df = hierarchy_based_filter(input_df, "link", G=input_DG, threshold=0.99, metric="info_gain", pruning=True)
pd.testing.assert_frame_equal(output_df, expected_df, check_like=True)
def test4_pruning_correlation_all_remove_True(self):
df = pd.DataFrame({
'entities': ['Paris', 'Buenos Aires', 'Mannheim', "München"],
'link': ['http://dbpedia.org/resource/Paris', 'http://dbpedia.org/resource/Buenos_Aires',
'http://dbpedia.org/resource/Mannheim', 'http://dbpedia.org/resource/Munich']
})
expected_df = pd.read_csv("test/data/feature_selection/hierarchy_based_test4_expected.csv")
input_df = direct_type_generator(df, ["link"], regex_filter=['A'], result_type="boolean", bundled_mode=True, hierarchy=True)
input_DG = input_df.attrs['hierarchy']
output_df = hierarchy_based_filter(input_df, "link", G=input_DG, threshold=0.99, metric="correlation", pruning=True)
pd.testing.assert_frame_equal(output_df, expected_df, check_like = True)
def test5_pruning_info_gain_all_remove_False(self):
df = pd.DataFrame({
'entities': ['Paris', 'Buenos Aires', 'Mannheim', "München"],
'link': ['http://dbpedia.org/resource/Paris', 'http://dbpedia.org/resource/Buenos_Aires',
'http://dbpedia.org/resource/Mannheim', 'http://dbpedia.org/resource/Munich']
})
expected_df = pd.read_csv("test/data/feature_selection/hierarchy_based_test5_expected.csv")
input_df = direct_type_generator(df, ["link"], regex_filter=['A'], result_type="boolean", bundled_mode=True, hierarchy=True)
input_DG = input_df.attrs['hierarchy']
output_df = hierarchy_based_filter(input_df, "link", G=input_DG, threshold=0.99, metric="info_gain", pruning=True, all_remove=False)
pd.testing.assert_frame_equal(output_df, expected_df, check_like = True)
def test6_pruning_correlation_all_remove_False(self):
df = pd.DataFrame({
'entities': ['Paris', 'Buenos Aires', 'Mannheim', "München"],
'link': ['http://dbpedia.org/resource/Paris', 'http://dbpedia.org/resource/Buenos_Aires',
'http://dbpedia.org/resource/Mannheim', 'http://dbpedia.org/resource/Munich']
})
expected_df = pd.read_csv("test/data/feature_selection/hierarchy_based_test6_expected.csv")
input_df = direct_type_generator(df, ["link"], regex_filter=['A'], result_type="boolean", bundled_mode=True, hierarchy=True)
input_DG = input_df.attrs['hierarchy']
output_df = hierarchy_based_filter(input_df, "link", G=input_DG, threshold=0.99, metric="correlation", pruning=True,
all_remove=False)
pd.testing.assert_frame_equal(output_df, expected_df, check_like = True)
def test7_no_input_G(self):
df = pd.DataFrame({
'entities': ['Paris', 'Buenos Aires', 'Mannheim', "München"],
'link': ['http://dbpedia.org/resource/Paris', 'http://dbpedia.org/resource/Buenos_Aires',
'http://dbpedia.org/resource/Mannheim', 'http://dbpedia.org/resource/Munich']
})
expected_df = pd.read_csv("test/data/feature_selection/hierarchy_based_test7_expected.csv")
input_df = direct_type_generator(df, ["link"], regex_filter=['A'], result_type="boolean", bundled_mode=True, hierarchy=True)
output_df = hierarchy_based_filter(input_df, "link", threshold=0.99, metric="correlation", pruning=True,
all_remove=False)
pd.testing.assert_frame_equal(output_df, expected_df, check_like = True)
def test8_nan(self):
input_df = pd.read_csv("test/data/feature_selection/hill_climbing_test3_input.csv")
input_DG = nx.DiGraph()
labels = ['http://chancellor', 'http://president', 'http://European_politician',
'http://head_of_state', 'http://politician', 'http://man', 'http://person', 'http://being']
input_DG.add_nodes_from(labels)
input_DG.add_edges_from([('http://chancellor', 'http://politician'), ('http://president', 'http://politician'),
('http://chancellor', 'http://head_of_state'), ('http://president', 'http://head_of_state'), ('http://head_of_state', 'http://person'),
('http://European_politician', 'http://politician'), ('http://politician', 'http://person'),
('http://man', 'http://person'), ('http://person', 'http://being')])
expected_df = pd.read_csv("test/data/feature_selection/hierarchy_based_test8_expected.csv")
output_df = hierarchy_based_filter(input_df, 'class', G=input_DG, threshold=0.99, metric="info_gain", pruning=True)
| pd.testing.assert_frame_equal(output_df, expected_df, check_like=True) | pandas.testing.assert_frame_equal |