from .tdx_parser import TDXParser
import pandas as pd
import numpy as np
import json
from collections import deque
class Formula(object):
buy_kw = [r'买入', r'买', 'BUY', 'BUYIN', 'ENTERLONG']
sell_kw = [r'卖出', r'卖', 'SELL', 'SELLOUT', 'EXITLONG']
FIGURE_DATA_LEN = 200
def __init__(self, text, param_desc='', **kwargs):
self.figure_data_len = Formula.FIGURE_DATA_LEN
if 'json_len' in kwargs:
self.figure_data_len = kwargs['json_len']
self.text = text
self.param_desc = param_desc
parser = TDXParser()
self.ast = parser.parse(text)
self.const = {
'PERCENT_HLINE' : [0, 20, 50, 80, 100],
}
self._df = None
self.local = {}
self.buy = None
self.sell = None
self.builtin = {
'HHV': (self.HHV, r'%s的%s日内的最高价', 2),
'LLV': (self.LLV, r'%s的%s日内的最低价', 2),
'SUM': (self.SUM, r'%s的%s日数值之和', 2),
'REF': (self.REF, r'%s的%s日前的参考值', 2),
'CROSS': (self.CROSS, r'在%s上穿%s时触发', 2),
'NOT': (self.NOT, r'对%s取反逻辑', 1),
'IF' : (self.IF, r'IF(%s, %s, %s)', 3),
'IFF' : (self.IF, r'IFF(%s, %s, %s)', 3),
'EVERY' : (self.EVERY, r'%s周期内均满足%s', 2),
'EXIST' : (self.EXIST, r'EXIST(%s, %s)', 2),
'STD': (self.STD, r'%s周期%s的样本标准差', 2),
'VAR': (self.VAR, r'%s周期%s的样本方差', 2),
'STDP': (self.STDP, r'%s周期%s的总体标准差', 2),
'VARP': (self.VARP, r'%s周期%s的总体方差', 2),
'MAX': (self.MAX, r'取最大值(%s, %s)', 2),
'MIN': (self.MIN, r'取最小值(%s, %s)', 2),
'COUNT': (self.COUNT, r'满足条件%s在统计周期%s内的个数', 2),
'ABS': (self.ABS, r'%s的绝对值', 1),
'SQRT': (self.SQRT, r'%s的平方根', 1),
'POW': (self.POW, r'%s的%s次方', 2),
'LOG': (self.LOG, r'%s的对数', 1),
'CONST': (self.CONST, r'%s的最后值为常量', 1),
'INSIST': (self.INSIST, r'%s在周期%s到周期%s全为真', 3),
'LAST': (self.INSIST, r'%s在周期%s到周期%s全为真', 3),
'FILTER': (self.FILTER, r'过滤%s连续出现的%s个信号', 2),
'BARSLAST': (self.BARSLAST, r'满足条件%s到当前的周期数', 1),
'AVEDEV' : (self.AVEDEV, r'%s的周期%s的平均绝对偏差', 2),
'MA': (self.MA, r'%s的%s日简单移动平均', 2),
'EMA': (self.EMA, r'%s的%s日指数移动平均', 2),
'EXPEMA': (self.EMA, r'%s的%s日指数移动平均', 2),
'MEMA' : (self.MEMA, r'%s的%s周期平滑指数移动平均', 2),
'EXPMEMA' : (self.MEMA, r'%s的%s周期平滑指数移动平均', 2),
'DMA' : (self.DMA, r'%s的%s周期动态平均', 2),
'SMA' : (self.SMA, r'%s的%s周期(权重:%s)动态平均', 3),
'CONV': (self.CONV, r'%s与%s的%s周期卷积', 3),
'SAR' : (self.SAR, r'周期为%s步长为%s极值为%s的抛物转向', 3),
'SLOPE': (self.SLOPE, r'%s的周期为%s的线性回归斜率', 2),
'CLAMP': (self.CLAMP, r'限定%s的输出在(%s, %s)之间', 3),
'FORCAST': (self.FORCAST, r'%s的周期为%s的线性预测', 2),
'DRAWFUNC': (self.DRAWFUNC, r'DRAWFUNC(%s, %s, %s)', 3),
'DRAWICON': (self.DRAWICON, r'DRAWICON(%s, %s, %s)', 3),
'DRAWICONF': (self.DRAWICONF, r'DRAWICONF(%s, %s, %s)', 3),
'STICKLINE': (self.STICKLINE, r'STICKLINE(%s,%s,%s,%s,%s)', 5),
'DRAWKLINE' : (self.DRAWKLINE, r'DRAWKLINE(%s, %s, %s, %s)', 4),
'DRAWSKLINE' : (self.DRAWSKLINE, r'DRAWSKLINE(%s, %s, %s, %s)', 4),
'DRAWPKLINE' : (self.DRAWPKLINE, r'DRAWPKLINE(%s, %s, %s, %s, %s)', 5),
'DRAWNUMBER' : (self.DRAWNUMBER, r'DRAWNUMBER(%s, %s, %s)', 3),
'DRAWTEXT' : (self.DRAWTEXT, r'DRAWTEXT(%s, %s, %s)', 3),
'DRAWNULL' : (np.NaN, r'DRAWNULL', 0),
'DRAWGRID' : (self.DRAWGRID, r'DRAWGRID(%s)', 1),
'DRAWVOL' : (self.DRAWVOL, r'DRAWVOL(%s, %s)', 2),
'SIGNAL': (self.SIGNAL, r'从仓位数据%s导出指定的买入%s卖出%s信号指示',3),
'BASE': (self.BASE, r'创建%s的基准', 1),
'ASSET': (self.ASSET, r'根据价格%s,仓位%s和建仓比例%s创建资产', 3),
'SUGAR': (self.SUGAR, r'根据盈利情况进行调仓操作(仓位:%s, 价格:%s, 资产:%s)', 3),
'REMEMB': (self.REMEMB, r'记录价格(条件:%s, 值:%s)', 2),
'STOP':(self.STOP, r'止盈止损点(条件:%s, 价格:%s, 比例1:%s, 比例2:%s)', 4),
'HORIZON':(self.HORIZON, r'%s的周期为%s的地平线', 2)
}
def __mergeParam(self, param):
self.local = {}
#for k, v in self.const.items():
# self.local[k] = v
self.local.update(self.const)
if param is None:
return
#for k, v in param.items():
# self.local[k] = v
self.local.update(param)
def annotate(self, param = None):
"""
Get the annotation; returns a string.
"""
if self.ast is None:
print('This formula failed to be parsed.')
return None
self.__mergeParam(param)
return self.ast.annotate(self)
def paramDesc(self):
"""
Get the parameter descriptions; returns the parameter names and their descriptions.
"""
if self.param_desc is None:
return ''
if self.param_desc == '':
return ''
return json.dumps(self.param_desc)
def get_figure(self):
"""
Get the chart (figure) data.
The evaluation function must be called before calling this function;
calling the evaluation function again can also be used to refresh the chart data.
"""
if self.ast is None:
print('This formula failed to be parsed.')
return None
return self.ast.figure(self)
def evaluate(self, param, fields = None, is_last_value = False):
"""
Evaluate the formula: set the security code and parameters.
param: parameters are given as a dict whose keys are the parameter variable names.
"""
if self.ast is None:
print('This formula failed to be parsed.')
raise ValueError('The formula failed to be parsed')
if isinstance(param, dict):
self.__mergeParam(param)
if isinstance(param, pd.DataFrame):
self._df = param
default_retval = self.ast.evaluate(self)
if default_retval is None:
raise ValueError('Failed to evaluate. The formula failed to be parsed.')
if fields is None:
return default_retval
retv = {}
for col in fields:
retv[col] = self.resolveSymbol(col, 0)
if not is_last_value:
return retv
last_values = np.array([v.iloc[-1] for k, v in retv.items()])
return last_values
def asDataFrame(self, columns, data_index = None):
if data_index is None:
close = self.resolveSymbol('CLOSE', 0)
if close is None or len(close) == 0:
return None
df = pd.DataFrame(index = close.index)
else:
df = pd.DataFrame(index = data_index)
for col in columns:
s = self.resolveSymbol(col, 0)
if s is not None:
df[col] = s
return df
def setSymbol(self, symbol, func_def):
old_def = None
if symbol in self.builtin:
old_def = self.builtin[symbol]
self.builtin[symbol] = func_def
return old_def
def resolveSymbol(self, symbol, n):
if n == 0:
if self._df is not None:
if symbol in self._df.columns:
return self._df[symbol]
if symbol in self.local:
variable = self.local[symbol]
if type(variable) == tuple:
variable = variable[0]
if variable is not None:
if hasattr(variable, '__call__'):
return variable()
return variable
symdef = None
if symbol in self.builtin:
symdef = self.builtin[symbol]
if n == symdef[2]:
return symdef[0]
if symdef is not None:
print('function: %s is resolved, but it expects %d parameters and %d were given.' % (symbol, symdef[2], n))
if symbol in self.local:
funcdef = self.local[symbol]
func = None
if type(funcdef) == tuple:
if n == funcdef[2]:
func = funcdef[0]
else:
print('function: %s is resolved, but it expects %d parameters and %d were given.' % (symbol, funcdef[2], n))
else:
func = funcdef
return func
return None
def resolveAnnotate(self, symbol):
if symbol in self.local:
variable = self.local[symbol]
if type(variable) != tuple:
return '[%s]' % symbol
return variable[1]
if symbol in self.builtin:
symdef = self.builtin[symbol]
return symdef[1]
return None
def registerFunctor(self, name, func, n):
self.builtin[name] = (func, 'DefineFunction', n)
############# Internal functions #################
def addSymbol(self, symbol, value):
self.local[symbol] = value
if symbol in Formula.buy_kw:
if isinstance(value, pd.core.series.Series):
if value.dtype == bool:
self.buy = value
if symbol in Formula.sell_kw:
if isinstance(value, pd.core.series.Series):
if value.dtype == bool:
self.sell = value
### Formula Function Implementation ###
# Reference functions
def HHV(self, param):
if param[1] == 0:
return pd.expanding_max(param[0])
return pd.rolling_max(param[0], param[1])
def LLV(self, param):
if param[1] == 0:
return pd.expanding_min(param[0])
return pd.rolling_min(param[0], param[1])
def REF(self, param):
return param[0].shift(param[1])
def EMA(self, param):
return pd.ewma(param[0], span=param[1], adjust = True)
def MA(self, param):
if param[1] == 0:
return pd.rolling_mean(param[0])
return pd.rolling_mean(param[0], param[1])
def SUM(self, param):
if param[1] == 0:
return pd.expanding_sum(param[0])
return pd.rolling_sum(param[0], param[1])
def CONST(self, param):
ret = pd.Series(index = param[0].index)
ret[:] = param[0][-1:].values[0]
return ret
def MEMA(self, param):
return pd.ewma(param[0], span=param[1] * 2 - 1, adjust = False)
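# DMA(X, A): dynamic moving average. Each bar applies the recursion Y = A*X + (1-A)*Y',
# where A is a per-bar smoothing factor clamped to [0, 1] and Y' is the previous output.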
def DMA(self, param):
df = pd.DataFrame(index = param[0].index)
df['X'] = param[0]
df['A'] = param[1]
class Averager:
def __init__(self):
self.Y = 0
self.start = True
def handleInput(self, row):
if self.start:
self.start = False
self.Y = row['X']
return self.Y
X = row['X']
A = row['A']
if A > 1:
A = 1
if A < 0:
A = 0
self.Y = A * X + (1 - A) * self.Y
return self.Y
avger = Averager()
result = df.apply(avger.handleInput, axis = 1, reduce = True)
return result
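# SMA(X, N, M): TDX-style smoothed average with recursion Y = (M*X + (N-M)*Y') / N,
# i.e. an EMA with alpha = M/N. pandas ewma uses alpha = 2/(span+1), so choosing
# span = 2*N/M - 1 reproduces the same weighting.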
def SMA(self, param):
M = param[2]
if param[2] == 0:
M = 1
return pd.ewma(param[0], span = 2 * param[1] / M - 1)
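# CONV(X, W, N): weighted moving average sum(W*X) / sum(W) over a sliding window of N bars.
# The value returned on each bar is computed from the previous N bars; the current bar's
# contribution only enters the window for subsequent outputs. The first N bars return NaN.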
def CONV(self, param):
df = pd.DataFrame(index = param[0].index)
df['X'] = param[0]
df['W'] = param[1]
class Convolution:
def __init__(self, N):
self.N = N
self.q = deque([], self.N)
self.tq = deque([], self.N)
self.s = 0
self.t = 0
def handleInput(self, row):
if len(self.q) < self.N:
if pd.isnull(row['W']) or pd.isnull(row['X']):
return np.NaN
self.q.append(row['W'] * row['X'])
self.tq.append(row['W'])
self.s += row['W'] * row['X']
self.t += row['W']
return np.NaN
ret = self.s / self.t
self.s -= self.q[0]
self.t -= self.tq[0]
delta_s = row['W'] * row['X']
delta_t = row['W']
self.s += delta_s
self.t += delta_t
self.q.append(delta_s)
self.tq.append(delta_t)
return ret
conv = Convolution(param[2])
result = df.apply(conv.handleInput, axis = 1, reduce = True)
return result
# Arithmetic and logic functions
EPSLON = 0.0000001
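# CROSS(X, Y): true on bars where X crosses above Y, i.e. X <= Y on the previous bar
# and X > Y on the current bar; a scalar argument is treated as a constant series.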
def CROSS(self, param):
if not isinstance(param[0], pd.core.series.Series) and not isinstance(param[1], pd.core.series.Series):
print('Invalid data type is detected.')
return False
if not isinstance(param[0], pd.core.series.Series):
x1 = param[0]
x2 = param[0]
y1 = param[1].shift(1)
y2 = param[1]
if not isinstance(param[1], pd.core.series.Series):
x1 = param[0].shift(1)
x2 = param[0]
y1 = param[1]
y2 = param[1]
if isinstance(param[0], pd.core.series.Series) and isinstance(param[1], pd.core.series.Series):
x1 = param[0].shift(1)
x2 = param[0]
y1 = param[1].shift(1)
y2 = param[1]
return (x1 <= y1) & (x2 > y2)
def NOT(self, param):
if not isinstance(param[0], pd.core.series.Series):
if type(param[0]) != bool:
return (param[0] == 0)
else:
return not param[0]
if param[0].dtype == bool:
return (param[0] == False)
if param[0].dtype == float:
return (param[0] > -Formula.EPSLON) & (param[0] < Formula.EPSLON)
return (param[0] == 0)
def IF(self, param):
EPSLON = 0.0000001
if not isinstance(param[0], pd.core.series.Series):
if type(param[0]) == bool:
if param[0]:
return param[1]
else:
return param[2]
elif type(param[0]) == int:
if param[0] != 0:
return param[1]
else:
return param[2]
elif type(param[0]) == float:
if (param[0] < -Formula.EPSLON) or (param[0] > Formula.EPSLON):
return param[1]
else:
return param[2]
df = pd.DataFrame(index = param[0].index)
if param[0].dtype == bool:
df['X'] = param[0]
elif param[0].dtype == float:
df['X'] = ~ ((param[0] > -EPSLON) & (param[0] < EPSLON))
else:
df['X'] = (param[0] != 0)
df['A'] = param[1]
df['B'] = param[2]
def callback(row):
if row['X']:
return row['A']
else:
return row['B']
result = df.apply(callback, axis=1, reduce = True)
return result
def EVERY(self, param):
norm = self.IF([param[0], 1, 0])
result = pd.rolling_sum(norm, param[1])
return result == param[1]
def EXIST(self, param):
norm = self.IF([param[0], 1, 0])
result = pd.rolling_sum(norm, param[1])
return result > 0
def COUNT(self, param):
norm = self.IF([param[0], 1, 0])
result = pd.rolling_sum(norm, param[1])
return result
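# INSIST(X, N1, N2): true when X has been true on every bar in the span between N1 and
# N2 bars ago; the two rolling sums can differ by exactly |N1 - N2| only when every bar
# in the non-overlapping part of the two windows is true.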
def INSIST(self, param):
norm = self.IF([param[0], 1, 0])
x1 = pd.rolling_sum(norm, param[1])
x2 = pd.rolling_sum(norm, param[2])
ret =((x1 - x2) == np.abs(param[1] - param[2]))
return ret
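# FILTER(X, N): keeps the first true value of X and suppresses any further signals for
# the following N bars, so clustered signals collapse to a single one.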
def FILTER(self, param):
class Counter:
def __init__(self, N):
self.state = 0
self.count = 0
self.num = N
def handleInput(self, value):
if self.state == 0:
if value:
self.state = 1
self.count = 0
return True
else:
return False
else:
self.count += 1
if self.count >= self.num:
self.state = 0
return False
counter = Counter(param[1])
ret = param[0].apply(counter.handleInput)
return ret
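# BARSLAST(X): number of bars since X was last true (0 on the bar where it is true,
# and 0 as well before the first occurrence).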
def BARSLAST(self, param):
class Counter:
def __init__(self):
self.count = -1
def handleInput(self, value):
if value:
self.count = 0
return self.count
elif self.count != -1:
self.count += 1
return self.count
else:
return 0
counter = Counter()
ret = param[0].apply(counter.handleInput)
return ret
# Statistical functions
def STD(self, param):
return pd.rolling_std(param[0], param[1])
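# Usage sketch (illustrative only, not part of the original module): the formula text,
# column name and output field below are hypothetical and assume TDXParser accepts this
# expression and that the input DataFrame provides a CLOSE column.
# f = Formula('MA5:MA(CLOSE,5);')
# f.evaluate(df)                      # df: pandas DataFrame with a CLOSE column
# ma5 = f.resolveSymbol('MA5', 0)     # series produced by the formula
# print(f.annotate())                 # human-readable description of the formula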
import pandas as pd
import pytest
import woodwork as ww
from rayml.pipelines.components import EmailFeaturizer, URLFeaturizer
@pytest.mark.parametrize(
"component_class,params", [(URLFeaturizer, {}), (EmailFeaturizer, {})]
)
def test_init(component_class, params):
assert component_class().parameters == params
def make_data_email_fit_transform(df_with_url_and_email):
return df_with_url_and_email
def make_data_url_fit_transform(df_with_url_and_email):
return df_with_url_and_email
def make_data_email_fit_transform_missing_values(df_with_url_and_email):
df_with_missing_values = df_with_url_and_email.ww.copy()
original_ltypes = df_with_url_and_email.ww.schema.logical_types
df_with_missing_values.email.iloc[0:2] = pd.NA
df_with_missing_values.ww["email_2"] = df_with_missing_values.email
df_with_missing_values.ww["email_2"].iloc[-1] = pd.NA
original_ltypes.update({"email_2": "EmailAddress"})
df_with_missing_values.ww.init(logical_types=original_ltypes)
return df_with_missing_values
def make_data_url_fit_transform_missing_values(df_with_url_and_email):
df_with_missing_values = df_with_url_and_email.ww.copy()
original_ltypes = df_with_url_and_email.ww.schema.logical_types
df_with_missing_values.url.iloc[0:2] = pd.NA
df_with_missing_values.ww["url_2"] = df_with_missing_values.url
df_with_missing_values.ww["url_2"].iloc[-1] = pd.NA
original_ltypes.update({"url_2": "URL"})
df_with_missing_values.ww.init(logical_types=original_ltypes)
return df_with_missing_values
def make_answer_email_fit_transform(df_with_url_and_email):
expected = df_with_url_and_email.ww.copy()
expected.ww["EMAIL_ADDRESS_TO_DOMAIN(email)"] = pd.Series(
["gmail.com", "yahoo.com", "abalone.com", "hotmail.com", "email.org"],
dtype="category",
)
expected.ww["IS_FREE_EMAIL_DOMAIN(email)"] = pd.Series(
[True, True, False, True, True], dtype="category"
)
expected.ww.drop(["email"], inplace=True)
return expected
def make_answer_url_fit_transform(df_with_url_and_email):
expected = df_with_url_and_email.ww.copy()
expected.ww["URL_TO_DOMAIN(url)"] = pd.Series(
[
"rayml.alteryx.com",
"woodwork.alteryx.com",
"twitter.com",
"twitter.com",
"rayml.alteryx.com",
],
dtype="category",
)
expected.ww["URL_TO_PROTOCOL(url)"] = pd.Series(["https"] * 5, dtype="category")
expected.ww["URL_TO_TLD(url)"] = | pd.Series(["com"] * 5, dtype="category") | pandas.Series |
# --------------
import pandas as pd
from sklearn import preprocessing
#path : File path
# read the dataset
# Code starts here
dataset = pd.read_csv(path)
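# A possible continuation sketch (illustrative only, not part of the original solution):
# label-encode the object-typed columns with sklearn's preprocessing module so that
# downstream estimators receive numeric data.
# le = preprocessing.LabelEncoder()
# for col in dataset.select_dtypes(include='object').columns:
#     dataset[col] = le.fit_transform(dataset[col])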
'''
Model training with the entire training data
'''
# Libraries
import pandas as pd
import numpy as np
import keras
import tensorflow as tf
from keras.models import Model
from tensorflow.keras.models import load_model
import keras.backend as K
from keras import optimizers
from keras.layers import Dense, Dropout, BatchNormalization, Conv1D, Flatten, Input, GaussianNoise, LeakyReLU, Add
from keras.utils import to_categorical, np_utils
from keras.regularizers import l2
from sklearn.model_selection import train_test_split, KFold, cross_val_score, StratifiedKFold
from sklearn.preprocessing import StandardScaler, LabelEncoder, normalize
from sklearn.utils import shuffle
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score
import matplotlib.pyplot as plt
import pickle
from keras import regularizers
from keras import backend as K
from sklearn.utils import class_weight
# GPU
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
tf.keras.backend.clear_session()
config = ConfigProto()
config.gpu_options.allow_growth = True
gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.333)
sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options))
LIMIT = 3 * 1024
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
tf.config.experimental.set_virtual_device_configuration(
gpus[0],
[tf.config.experimental.VirtualDeviceConfiguration(memory_limit=LIMIT)])
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
# Virtual devices must be set before GPUs have been initialized
print(e)
# dataset import and preprocessing
ds = pd.read_csv("../dataset/dataset.csv")
X1 = ds.iloc[:,5:6] # (BF)
X1 = pd.DataFrame(X1)
X2 = ds.iloc[:,6:7] # rHpy
X = pd.concat([X1, X2], axis=1)
y = ds.iloc[:,7]
y_w = y
# Seed
seed = 1337
np.random.seed(1337)
# Features
# Secondary Structure Folds
infile = open('../features/SSF/feature/NF1_7.pickle','rb')
nf1_9 = pickle.load(infile)
infile.close()
# # Amino Acid Signatures in the Interaction Shells
infile = open('../features/AASIS/feature/NF2_8.pickle','rb')
nf2_8 = pickle.load(infile)
infile.close()
infile = open('../features/AASIS/feature/NF2_7.pickle','rb')
nf2_7 = pickle.load(infile)
infile.close()
infile = open('../features/AASIS/feature/NF2_6.pickle','rb')
nf2_6 = pickle.load(infile)
infile.close()
infile = open('../features/AASIS/feature/NF2_5.pickle','rb')
nf2_5 = pickle.load(infile)
infile.close()
# # Enzyme Class
infile = open('../features/EC/feature/NF3_le.pickle','rb')
nf3 = pickle.load(infile)
infile.close()
# # Motifs
infile = open('../features/Motifs/feature/NF4_13.pickle','rb')
nf4_13 = pickle.load(infile)
infile.close()
infile = open('../features/Motifs/feature/NF4_11.pickle','rb')
nf4_11 = pickle.load(infile)
infile.close()
infile = open('../features/Motifs/feature/NF4_9.pickle','rb')
nf4_9 = pickle.load(infile)
infile.close()
infile = open('../features/Motifs/feature/NF4_7.pickle','rb')
nf4_7 = pickle.load(infile)
infile.close()
infile = open('../features/Motifs/feature/NF4_5.pickle','rb')
nf4_5 = pickle.load(infile)
infile.close()
infile = open('../features/Motifs/feature/NF4_3.pickle','rb')
nf4_3 = pickle.load(infile)
infile.close()
# Feature Selection
nf1_9 = pd.DataFrame(nf1_9)
nf2_8 = pd.DataFrame(nf2_8)
nf2_7 = pd.DataFrame(nf2_7)
nf2_6 = pd.DataFrame(nf2_6)
nf2_5 = pd.DataFrame(nf2_5)
nf3 = pd.DataFrame(nf3)
nf4_3 = pd.DataFrame(nf4_3)
import pandas as pd
import os
import numpy as np
from fastprogress import master_bar,progress_bar
from tqdm import tqdm
import argparse
def getArgs():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_dir', type=str, default='/data/data20180901/processed/')
parser.add_argument('-c', '--city', type=str, default='Berlin')
parser.add_argument('-ch', '--channel', type=int, default=0)
args = parser.parse_args()
return args
if __name__=='__main__':
args = getArgs()
root = args.input_dir
city = args.city
channel = args.channel
meta = pd.read_csv(f'{root}/meta.csv')
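# Example invocation (the script name is hypothetical; the arguments mirror the defaults above):
#   python extract_city.py -i /data/data20180901/processed/ -c Berlin -ch 0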
"""
Copyright 2019 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime
import datetime as dt
import os
from typing import Union
import numpy as np
import pandas as pd
import pytest
import pytz
from gs_quant.target.common import XRef, PricingLocation, Currency as CurrEnum
from numpy.testing import assert_allclose
from pandas.testing import assert_series_equal
from pandas.tseries.offsets import CustomBusinessDay
from pytz import timezone
from testfixtures import Replacer
from testfixtures.mock import Mock
import gs_quant.timeseries.measures as tm
import gs_quant.timeseries.measures_rates as tm_rates
from gs_quant.api.gs.assets import GsTemporalXRef, GsAssetApi, GsIdType, IdList, GsAsset
from gs_quant.api.gs.data import GsDataApi, MarketDataResponseFrame
from gs_quant.api.gs.data import QueryType
from gs_quant.data.core import DataContext
from gs_quant.data.dataset import Dataset
from gs_quant.data.fields import Fields
from gs_quant.errors import MqError, MqValueError, MqTypeError
from gs_quant.markets.securities import AssetClass, Cross, Index, Currency, SecurityMaster, Stock, \
Swap, CommodityNaturalGasHub
from gs_quant.session import GsSession, Environment
from gs_quant.test.timeseries.utils import mock_request
from gs_quant.timeseries import Returns
from gs_quant.timeseries.measures import BenchmarkType
_index = [pd.Timestamp('2019-01-01')]
_test_datasets = ('TEST_DATASET',)
def mock_empty_market_data_response():
df = MarketDataResponseFrame()
df.dataset_ids = ()
return df
def map_identifiers_default_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-LIBOR-BBA" in ids:
return {"USD-LIBOR-BBA": "MAPDB7QNB2TZVQ0E"}
elif "EUR-EURIBOR-TELERATE" in ids:
return {"EUR-EURIBOR-TELERATE": "MAJNQPFGN1EBDHAE"}
elif "GBP-LIBOR-BBA" in ids:
return {"GBP-LIBOR-BBA": "MAFYB8Z4R1377A19"}
elif "JPY-LIBOR-BBA" in ids:
return {"JPY-LIBOR-BBA": "MABMVE27EM8YZK33"}
elif "EUR OIS" in ids:
return {"EUR OIS": "MARFAGXDQRWM07Y2"}
def map_identifiers_swap_rate_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m" in ids:
return {"USD-3m": "MAAXGV0GZTW4GFNC"}
elif "EUR-6m" in ids:
return {"EUR-6m": "MA5WM2QWRVMYKDK0"}
elif "KRW" in ids:
return {"KRW": 'MAJ6SEQH3GT0GA2Z'}
def map_identifiers_inflation_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "CPI-UKRPI" in ids:
return {"CPI-UKRPI": "MAQ7ND0MBP2AVVQW"}
elif "CPI-CPXTEMU" in ids:
return {"CPI-CPXTEMU": "MAK1FHKH5P5GJSHH"}
def map_identifiers_cross_basis_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m/JPY-3m" in ids:
return {"USD-3m/JPY-3m": "MA99N6C1KF9078NM"}
elif "EUR-3m/USD-3m" in ids:
return {"EUR-3m/USD-3m": "MAXPKTXW2D4X6MFQ"}
elif "GBP-3m/USD-3m" in ids:
return {"GBP-3m/USD-3m": "MA8BZHQV3W32V63B"}
def get_data_policy_rate_expectation_mocker(
start: Union[dt.date, dt.datetime] = None,
end: Union[dt.date, dt.datetime] = None,
as_of: dt.datetime = None,
since: dt.datetime = None,
fields: Union[str, Fields] = None,
asset_id_type: str = None,
**kwargs) -> pd.DataFrame:
if 'meetingNumber' in kwargs:
if kwargs['meetingNumber'] == 0:
return mock_meeting_spot()
elif 'meeting_date' in kwargs:
if kwargs['meeting_date'] == dt.date(2019, 10, 24):
return mock_meeting_spot()
return mock_meeting_expectation()
def test_parse_meeting_date():
assert tm.parse_meeting_date(5) == ''
assert tm.parse_meeting_date('') == ''
assert tm.parse_meeting_date('test') == ''
assert tm.parse_meeting_date('2019-09-01') == dt.date(2019, 9, 1)
def test_currency_to_default_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_default_mocker)
asset_id_list = ["MAZ7RWC904JYHYPS", "MAJNQPFGN1EBDHAE", "MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8",
"MA4B66MW5E27U8P32SB"]
correct_mapping = ["MAPDB7QNB2TZVQ0E", "MAJNQPFGN1EBDHAE", "MAFYB8Z4R1377A19", "MABMVE27EM8YZK33",
"MA4J1YB8XZP2BPT8", "MA4B66MW5E27U8P32SB"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_default_swap_rate_asset(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_swap_rate_mocker)
asset_id_list = ['MAZ7RWC904JYHYPS', 'MAJNQPFGN1EBDHAE', 'MAJ6SEQH3GT0GA2Z']
correct_mapping = ['MAAXGV0GZTW4GFNC', 'MA5WM2QWRVMYKDK0', 'MAJ6SEQH3GT0GA2Z']
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_swap_rate_asset(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_inflation_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_inflation_mocker)
asset_id_list = ["MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
correct_mapping = ["MAQ7ND0MBP2AVVQW", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_inflation_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.currency_to_inflation_benchmark_rate('MA66CZBQJST05XKG') == 'MA66CZBQJST05XKG'
def test_cross_to_basis(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_cross_basis_mocker)
asset_id_list = ["MAYJPCVVF2RWXCES", "MA4B66MW5E27U8P32SB", "nobbid"]
correct_mapping = ["MA99N6C1KF9078NM", "MA4B66MW5E27U8P32SB", "nobbid"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_basis(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.cross_to_basis('MAYJPCVVF2RWXCES') == 'MAYJPCVVF2RWXCES'
def test_currency_to_tdapi_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA25DW5ZGC1BSC8Y', 'NOK')
bbid_mock.return_value = 'NOK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAFRSWPAF5QPNTP2' == correct_id
bbid_mock.return_value = 'CHF'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAW25BGQJH9P6DPT' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAA9MVX15AJNQCVG' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA6QCAP9B7ABS9HA' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAEE219J5ZP0ZKRK' == correct_id
bbid_mock.return_value = 'SEK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAETMVTPNP3199A5' == correct_id
bbid_mock.return_value = 'HKD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MABRNGY8XRFVC36N' == correct_id
bbid_mock.return_value = 'NZD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAH16NHE1HBN0FBZ' == correct_id
bbid_mock.return_value = 'AUD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAY8147CRK0ZP53B' == correct_id
bbid_mock.return_value = 'CAD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MANJ8SS88WJ6N28Q' == correct_id
bbid_mock.return_value = 'KRW'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAP55AXG5SQVS6C5' == correct_id
bbid_mock.return_value = 'INR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA20JHJXN1PD5HGE' == correct_id
bbid_mock.return_value = 'CNY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA4K1D8HH2R0RQY5' == correct_id
bbid_mock.return_value = 'SGD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA5CQFHYBPH9E5BS' == correct_id
bbid_mock.return_value = 'DKK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAF131NKWVRESFYA' == correct_id
asset = Currency('MA890', 'PLN')
bbid_mock.return_value = 'PLN'
assert 'MA890' == tm_rates._currency_to_tdapi_swap_rate_asset(asset)
replace.restore()
def test_currency_to_tdapi_basis_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA890', 'NOK')
bbid_mock.return_value = 'NOK'
assert 'MA890' == tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAQB1PGEJFCET3GG' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAGRG2VT11GQ2RQ9' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAHCYNB3V75JC5Q8' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAXVRBEZCJVH0C4V' == correct_id
replace.restore()
def test_check_clearing_house():
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house('lch')
assert tm_rates._ClearingHouse.CME == tm_rates._check_clearing_house(tm_rates._ClearingHouse.CME)
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house(None)
invalid_ch = ['NYSE']
for ch in invalid_ch:
with pytest.raises(MqError):
tm_rates._check_clearing_house(ch)
def test_get_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
assert dict(csaTerms='USD-1') == tm_rates._get_swap_csa_terms('USD', fed_funds_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_swap_csa_terms('EUR', estr_index)
assert {} == tm_rates._get_swap_csa_terms('EUR', euribor_index)
assert {} == tm_rates._get_swap_csa_terms('USD', usd_libor_index)
def test_get_basis_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
sofr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.SOFR.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
eonia_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EONIA.value]
assert dict(csaTerms='USD-1') == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, sofr_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_basis_swap_csa_terms('EUR', estr_index, eonia_index)
assert {} == tm_rates._get_basis_swap_csa_terms('EUR', eonia_index, euribor_index)
assert {} == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, usd_libor_index)
def test_match_floating_tenors():
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='6m')
assert swap_args == tm_rates._match_floating_tenors(swap_args)
def test_get_term_struct_date(mocker):
today = datetime.datetime.today()
biz_day = CustomBusinessDay()
assert today == tm_rates._get_term_struct_date(tenor=today, index=today, business_day=biz_day)
date_index = datetime.datetime(2020, 7, 31, 0, 0)
assert date_index == tm_rates._get_term_struct_date(tenor='2020-07-31', index=date_index, business_day=biz_day)
assert date_index == tm_rates._get_term_struct_date(tenor='0b', index=date_index, business_day=biz_day)
assert datetime.datetime(2021, 7, 30, 0, 0) == tm_rates._get_term_struct_date(tenor='1y', index=date_index,
business_day=biz_day)
def test_cross_stored_direction_for_fx_vol(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_stored_direction_for_fx_vol(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_usd_based_cross_for_fx_forecast(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_usd_based_cross(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_usd_based_cross(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_to_usd_based_cross(Cross('FUN', 'EURUSD'))
replace.restore()
def test_cross_stored_direction(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_stored_direction_for_fx_vol(Cross('FUN', 'EURUSD'))
replace.restore()
def test_get_tdapi_rates_assets(mocker):
mock_asset_1 = GsAsset(asset_class='Rate', id='MAW25BGQJH9P6DPT', type_='Swap', name='Test_asset')
mock_asset_2 = GsAsset(asset_class='Rate', id='MAA9MVX15AJNQCVG', type_='Swap', name='Test_asset')
mock_asset_3 = GsAsset(asset_class='Rate', id='MANQHVYC30AZFT7R', type_='BasisSwap', name='Test_asset')
replace = Replacer()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1]
assert 'MAW25BGQJH9P6DPT' == tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict(asset_parameters_termination_date='10y', asset_parameters_effective_date='0b')
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = []
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict()
assert ['MAW25BGQJH9P6DPT', 'MAA9MVX15AJNQCVG'] == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
# test case will test matching sofr maturity with libor leg and flipping legs to get right asset
kwargs = dict(type='BasisSwap', asset_parameters_termination_date='10y',
asset_parameters_payer_rate_option=BenchmarkType.LIBOR,
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=BenchmarkType.SOFR,
asset_parameters_receiver_designated_maturity='1y',
asset_parameters_clearing_house='lch', asset_parameters_effective_date='Spot',
asset_parameters_notional_currency='USD',
pricing_location='NYC')
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_3]
assert 'MANQHVYC30AZFT7R' == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
def test_get_swap_leg_defaults():
result_dict = dict(currency=CurrEnum.JPY, benchmark_type='JPY-LIBOR-BBA', floating_rate_tenor='6m',
pricing_location=PricingLocation.TKO)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.JPY)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.USD, benchmark_type='USD-LIBOR-BBA', floating_rate_tenor='3m',
pricing_location=PricingLocation.NYC)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.USD)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.EUR, benchmark_type='EUR-EURIBOR-TELERATE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.EUR)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.SEK, benchmark_type='SEK-STIBOR-SIDE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.SEK)
assert result_dict == defaults
def test_check_forward_tenor():
valid_tenors = [datetime.date(2020, 1, 1), '1y', 'imm2', 'frb2', '1m', '0b']
for tenor in valid_tenors:
assert tenor == tm_rates._check_forward_tenor(tenor)
invalid_tenors = ['5yr', 'imm5', 'frb0']
for tenor in invalid_tenors:
with pytest.raises(MqError):
tm_rates._check_forward_tenor(tenor)
def mock_commod(_cls, _q):
d = {
'price': [30, 30, 30, 30, 35.929686, 35.636039, 27.307498, 23.23177, 19.020833, 18.827291, 17.823749, 17.393958,
17.824999, 20.307603, 24.311249, 25.160103, 25.245728, 25.736873, 28.425206, 28.779789, 30.519996,
34.896348, 33.966973, 33.95489, 33.686348, 34.840307, 32.674163, 30.261665, 30, 30, 30]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-05-01', periods=31, freq='H', tz=timezone('UTC')))
df.dataset_ids = _test_datasets
return df
def mock_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
11.6311,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 9))
df.dataset_ids = _test_datasets
return df
def mock_fair_price(_cls, _q):
d = {
'fairPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_natgas_forward_price(_cls, _q):
d = {
'forwardPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_fair_price_swap(_cls, _q):
d = {'fairPrice': [2.880]}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)]))
df.dataset_ids = _test_datasets
return df
def mock_implied_volatility(_cls, _q):
d = {
'impliedVolatility': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_missing_bucket_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"J20",
"K20",
"M20",
]
}
return pd.DataFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 8))
def mock_fx_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=[pd.Timestamp('2019-01-04T12:00:00Z')])
d = {
'strikeReference': ['delta', 'spot', 'forward'],
'relativeStrike': [25, 100, 100],
'impliedVolatility': [5, 1, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-01-01', periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_fx_forecast(_cls, _q):
d = {
'fxForecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_delta(_cls, _q):
d = {
'relativeStrike': [25, -25, 0],
'impliedVolatility': [1, 5, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_empty(_cls, _q):
d = {
'strikeReference': [],
'relativeStrike': [],
'impliedVolatility': []
}
df = MarketDataResponseFrame(data=d, index=[])
df.dataset_ids = _test_datasets
return df
def mock_fx_switch(_cls, _q, _n):
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_empty)
replace.restore()
return Cross('MA1889', 'ABC/XYZ')
def mock_curr(_cls, _q):
d = {
'swapAnnuity': [1, 2, 3],
'swapRate': [1, 2, 3],
'basisSwapRate': [1, 2, 3],
'swaptionVol': [1, 2, 3],
'atmFwdRate': [1, 2, 3],
'midcurveVol': [1, 2, 3],
'capFloorVol': [1, 2, 3],
'spreadOptionVol': [1, 2, 3],
'inflationSwapRate': [1, 2, 3],
'midcurveAtmFwdRate': [1, 2, 3],
'capFloorAtmFwdRate': [1, 2, 3],
'spreadOptionAtmFwdRate': [1, 2, 3],
'strike': [0.25, 0.5, 0.75]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_cross(_cls, _q):
d = {
'basis': [1, 2, 3],
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq(_cls, _q):
d = {
'relativeStrike': [0.75, 0.25, 0.5],
'impliedVolatility': [5, 1, 2],
'impliedCorrelation': [5, 1, 2],
'realizedCorrelation': [3.14, 2.71828, 1.44],
'averageImpliedVolatility': [5, 1, 2],
'averageImpliedVariance': [5, 1, 2],
'averageRealizedVolatility': [5, 1, 2],
'impliedVolatilityByDeltaStrike': [5, 1, 2],
'fundamentalMetric': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
idx = [pd.Timestamp(datetime.datetime.now(pytz.UTC))]
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=idx)
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.datetime.now(pytz.UTC).date() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_err(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
raise MqValueError('error while getting last')
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_empty(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame()
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_norm(_cls, _q):
d = {
'relativeStrike': [-4.0, 4.0, 0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_spot(_cls, _q):
d = {
'relativeStrike': [0.75, 1.25, 1.0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_inc(_cls, _q):
d = {
'relativeStrike': [0.25, 0.75],
'impliedVolatility': [5, 1]
}
df = MarketDataResponseFrame(data=d, index=_index * 2)
df.dataset_ids = _test_datasets
return df
def mock_meeting_expectation():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2020, 1, 29)],
'endingDate': [dt.date(2020, 1, 29)],
'meetingNumber': [2],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2020, 1, 23)],
'value': [-0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_spot():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2019, 10, 30)],
'endingDate': [dt.date(2019, 12, 18)],
'meetingNumber': [0],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2019, 10, 24)],
'value': [-0.004522570525]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_absolute():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2', 'MARFAGXDQRWM07Y2'],
'location': ['NYC', 'NYC'],
'rateType': ['Meeting Forward', 'Meeting Forward'],
'startingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'endingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'meetingNumber': [0, 2],
'valuationDate': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 10, 24), datetime.date(2020, 1, 23)],
'value': [-0.004522570525, -0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_ois_spot():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Spot'],
'startingDate': [datetime.date(2019, 12, 6)],
'endingDate': [datetime.date(2019, 12, 7)],
'meetingNumber': [-1],
'valuationDate': [datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 12, 6)],
'value': [-0.00455]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_esg(_cls, _q):
d = {
"esNumericScore": [2, 4, 6],
"esNumericPercentile": [81.2, 75.4, 65.7],
"esPolicyScore": [2, 4, 6],
"esPolicyPercentile": [81.2, 75.4, 65.7],
"esScore": [2, 4, 6],
"esPercentile": [81.2, 75.4, 65.7],
"esProductImpactScore": [2, 4, 6],
"esProductImpactPercentile": [81.2, 75.4, 65.7],
"gScore": [2, 4, 6],
"gPercentile": [81.2, 75.4, 65.7],
"esMomentumScore": [2, 4, 6],
"esMomentumPercentile": [81.2, 75.4, 65.7],
"gRegionalScore": [2, 4, 6],
"gRegionalPercentile": [81.2, 75.4, 65.7],
"controversyScore": [2, 4, 6],
"controversyPercentile": [81.2, 75.4, 65.7],
"esDisclosurePercentage": [49.2, 55.7, 98.4]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_index_positions_data(
asset_id,
start_date,
end_date,
fields=None,
position_type=None
):
return [
{'underlyingAssetId': 'MA3',
'netWeight': 0.1,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA1',
'netWeight': 0.6,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA2',
'netWeight': 0.3,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
}
]
def mock_rating(_cls, _q):
d = {
'rating': ['Buy', 'Sell', 'Buy', 'Neutral'],
'convictionList': [1, 0, 0, 0]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_gsdeer_gsfeer(_cls, assetId, start_date):
d = {
'gsdeer': [1, 1.2, 1.1],
'gsfeer': [2, 1.8, 1.9],
'year': [2000, 2010, 2020],
'quarter': ['Q1', 'Q2', 'Q3']
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
return df
def mock_factor_profile(_cls, _q):
d = {
'growthScore': [0.238, 0.234, 0.234, 0.230],
'financialReturnsScore': [0.982, 0.982, 0.982, 0.982],
'multipleScore': [0.204, 0.192, 0.190, 0.190],
'integratedScore': [0.672, 0.676, 0.676, 0.674]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_commodity_forecast(_cls, _q):
d = {
'forecastPeriod': ['3m', '3m', '3m', '3m'],
'forecastType': ['spotReturn', 'spotReturn', 'spotReturn', 'spotReturn'],
'commodityForecast': [1700, 1400, 1500, 1600]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def test_skew():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_norm)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.NORMALIZED, 4)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_spot)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock.return_value = mock_empty_market_data_response()
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert actual.empty
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_inc)
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
replace.restore()
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', None, 25)
def test_skew_fx():
replace = Replacer()
cross = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = cross
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_delta)
mock = cross
actual = tm.skew(mock, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.DELTA, 25, real_time=True)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.SPOT, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.FORWARD, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', None, 25)
replace.restore()
def test_implied_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol)
idx = pd.date_range(end=datetime.datetime.now(pytz.UTC).date(), periods=4, freq='D')
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_NEUTRAL)
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL)
replace.restore()
def test_implied_vol_no_last():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
idx = pd.date_range(end=datetime.date.today() - datetime.timedelta(days=1), periods=3, freq='D')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_err)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_empty)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace.restore()
def test_implied_vol_fx():
replace = Replacer()
mock = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
# for different delta strikes
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_vol)
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_CALL, 25)
expected = pd.Series([5, 1, 2, 3], index=pd.date_range('2019-01-01', periods=4, freq='D'), name='impliedVolatility')
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_PUT, 25)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_NEUTRAL)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.FORWARD, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.SPOT, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
# NORMALIZED not supported
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_CALL)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.SPOT, 25)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.FORWARD, 25)
replace.restore()
def test_fx_forecast():
replace = Replacer()
mock = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_forecast)
actual = tm.fx_forecast(mock, '12m')
assert_series_equal(pd.Series([1.1, 1.1, 1.1], index=_index * 3, name='fxForecast'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.fx_forecast(mock, '3m')
assert_series_equal(pd.Series([1.1, 1.1, 1.1], index=_index * 3, name='fxForecast'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.fx_forecast(mock, '3m', real_time=True)
replace.restore()
def test_fx_forecast_inverse():
replace = Replacer()
get_cross = replace('gs_quant.timeseries.measures.cross_to_usd_based_cross', Mock())
get_cross.return_value = "MATGYV0J9MPX534Z"
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_forecast)
mock = Cross("MAYJPCVVF2RWXCES", 'USD/JPY')
actual = tm.fx_forecast(mock, '3m')
assert_series_equal(pd.Series([1 / 1.1, 1 / 1.1, 1 / 1.1], index=_index * 3, name='fxForecast'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_vol_smile():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.FORWARD, '5d')
assert_series_equal(pd.Series([5, 1, 2], index=[0.75, 0.25, 0.5]), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '5d')
assert_series_equal(pd.Series([5, 1, 2], index=[0.75, 0.25, 0.5]), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '1d')
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
with pytest.raises(NotImplementedError):
tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '1d', real_time=True)
replace.restore()
def test_impl_corr():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.implied_correlation(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedCorrelation'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_correlation(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedCorrelation'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.implied_correlation(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
with pytest.raises(MqError):
tm.implied_correlation(..., '1m', tm.EdrDataReference.DELTA_CALL, 50, '')
replace.restore()
def test_impl_corr_n():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(MqValueError):
tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5,
composition_date=datetime.date.today())
with pytest.raises(MqValueError):
tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5, 200)
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
i_vol = pd.read_csv(os.path.join(resources, 'SPX_50_icorr_in.csv'))
i_vol.index = pd.to_datetime(i_vol['date'])
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
market_data = replace('gs_quant.timeseries.econometrics.GsDataApi.get_market_data', Mock())
market_data.return_value = i_vol
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
expected = pd.read_csv(os.path.join(resources, 'SPX_50_icorr_out.csv'))
expected.index = pd.to_datetime(expected['date'])
expected = expected['value']
actual = tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5, 50, datetime.date(2020, 8, 31),
source='PlotTool')
pd.testing.assert_series_equal(actual, expected, check_names=False)
replace.restore()
def test_real_corr():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(NotImplementedError):
tm.realized_correlation(spx, '1m', real_time=True)
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.realized_correlation(spx, '1m')
assert_series_equal(pd.Series([3.14, 2.71828, 1.44], index=_index * 3), pd.Series(actual), check_names=False)
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_real_corr_missing():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
d = {
'assetId': ['MA4B66MW5E27U8P32SB'] * 3,
'spot': [3000, 3100, 3050],
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2020-08-01', periods=3, freq='D'))
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', lambda *args, **kwargs: df)
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', 50)
replace.restore()
def test_real_corr_n():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', composition_date=datetime.date.today())
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', 200)
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
r_vol = pd.read_csv(os.path.join(resources, 'SPX_50_rcorr_in.csv'))
r_vol.index = pd.to_datetime(r_vol['date'])
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
market_data = replace('gs_quant.timeseries.econometrics.GsDataApi.get_market_data', Mock())
market_data.return_value = r_vol
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
expected = pd.read_csv(os.path.join(resources, 'SPX_50_rcorr_out.csv'))
expected.index = pd.to_datetime(expected['date'])
expected = expected['value']
actual = tm.realized_correlation(spx, '1m', 50, datetime.date(2020, 8, 31), source='PlotTool')
pd.testing.assert_series_equal(actual, expected, check_names=False)
replace.restore()
def test_cds_implied_vol():
replace = Replacer()
mock_cds = Index('MA890', AssetClass.Equity, 'CDS')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.cds_implied_volatility(mock_cds, '1m', '5y', tm.CdsVolReference.DELTA_CALL, 10)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedVolatilityByDeltaStrike'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.cds_implied_volatility(mock_cds, '1m', '5y', tm.CdsVolReference.FORWARD, 100)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedVolatilityByDeltaStrike'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.cds_implied_volatility(..., '1m', '5y', tm.CdsVolReference.DELTA_PUT, 75, real_time=True)
replace.restore()
def test_avg_impl_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
df1 = pd.DataFrame(data={'impliedVolatility': [1, 2, 3], 'assetId': ['MA1', 'MA1', 'MA1']},
index=pd.date_range(start='2020-01-01', periods=3))
df2 = pd.DataFrame(data={'impliedVolatility': [2, 3, 4], 'assetId': ['MA2', 'MA2', 'MA2']},
index=pd.date_range(start='2020-01-01', periods=3))
df3 = pd.DataFrame(data={'impliedVolatility': [2, 5], 'assetId': ['MA3', 'MA3']},
index=pd.date_range(start='2020-01-01', periods=2))
replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', mock_index_positions_data)
market_data_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock_implied_vol = MarketDataResponseFrame(pd.concat([df1, df2, df3], join='inner'))
mock_implied_vol.dataset_ids = _test_datasets
market_data_mock.return_value = mock_implied_vol
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25, 3, '1d')
assert_series_equal(pd.Series([1.4, 2.6, 3.33333],
                                  index=pd.date_range(start='2020-01-01', periods=3), name='averageImpliedVolatility'), pd.Series(actual))
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
import pandas as pd
from pandas import Series
def test_read_csv():
df = pd.read_csv('ex1.csv')
# print(df)
df = pd.read_table('ex1.csv', sep=',')
# print(df)
df = pd.read_csv('ex2.csv', header=None)
print(df)
def test_read_big_data():
    result = pd.read_csv('ex6.csv', nrows=5)
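def test_read_in_chunks():
    # Hedged sketch added for illustration (assumes the same ex6.csv used above):
    # stream the file in fixed-size chunks instead of loading it all at once.
    total_rows = 0
    for chunk in pd.read_csv('ex6.csv', chunksize=1000):
        total_rows += len(chunk)
    print(total_rows)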
# Recurrent Neural Network to predict upward/downward trends in Stock Prices
# Data Preprocessing
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from keras.models import model_from_json
# Importing the training set
dataset_train = pd.read_csv('dataset/Google_Stock_Price_Train.csv')
training_set = dataset_train.iloc[:, 1:2].values
# Feature Scaling
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range = (0, 1))
training_set_scaled = sc.fit_transform(training_set)
# Creating a data structure with 60 timesteps and 1 output
X_train = []
y_train = []
for i in range(60, 1258):
X_train.append(training_set_scaled[i-60:i, 0])
y_train.append(training_set_scaled[i, 0])
X_train, y_train = np.array(X_train), np.array(y_train)
# Reshaping
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
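# Shape note (derived from the loop above): with 1258 scaled prices and a 60-step
# lookback window there are 1258 - 60 = 1198 samples, so X_train is reshaped to
# (1198, 60, 1) -- samples x timesteps x features -- the 3-D input the LSTM layers expect.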
# Building the RNN
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
# Initialising the RNN
regressor = Sequential()
# Adding the first LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = 50, return_sequences = True, input_shape = (X_train.shape[1], 1)))
regressor.add(Dropout(0.2))
# Adding a second LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.2))
# Adding a third LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.2))
# Adding a fourth LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = 50))
regressor.add(Dropout(0.2))
# Adding the output layer
regressor.add(Dense(units = 1))
# Compiling the RNN
regressor.compile(optimizer = 'adam', loss = 'mean_squared_error')
# Fitting the RNN to the Training set
regressor.fit(X_train, y_train, epochs = 100, batch_size = 32)
# Saving trained model to disk
# serialize model to JSON
model_json = regressor.to_json()
with open("model/model.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
regressor.save_weights("model/model.h5")
print("Saved model to disk")
# Loading trained model from disk
# load json and create model
json_file = open("model/model.json", "r")
loaded_model_json = json_file.read()
json_file.close()
loaded_regressor_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_regressor_model.load_weights("model/model.h5")
# Making the predictions and visualising the results
# Getting the real stock price of 2017
dataset_test = pd.read_csv('dataset/Google_Stock_Price_Test.csv')
#!/usr/bin/env python3
import logging
import luigi
import os
import pandas as pd
from PIL import Image
from core.base_tasks import JobSystemTask
from tasks.image_collection_task import ImageCollectionTask
class ImageCompressionTask(JobSystemTask):
task_namespace = 'demo'
source_path = luigi.Parameter(default="")
"""
source path containing all images to compress.
"""
recursive = luigi.BoolParameter(default=False)
"""
indicates wherever to search recursively for image files.
"""
group_id = luigi.IntParameter(default=0)
"""
ID of the current task junk group.
"""
def requires(self):
"""
Method which returns a list of tasks which have to exist before
running ImageCompressionTask.
:return: the list of required tasks
"""
return ImageCollectionTask(
source_path=self.source_path, recursive=self.recursive
)
def run(self):
"""
Main method.
"""
res_path = os.path.join(self.source_path, "res")
if not os.path.exists(res_path):
os.makedirs(res_path)
# gather data
with self.input().open('r') as fp:
image_frame = pd.read_csv(fp, sep='\t', encoding='utf-8')
partitions = len(image_frame) / 10
processed_files = 0
total_files = len(image_frame[image_frame["group_id"] == self.group_id])
progress_fraction_per_file = (
float(self.progress_fraction)/total_files if total_files > 0 else 0
)
        compr_image_frame = pd.DataFrame(columns=["file_path"])
import os
import time
import pandas as pd
from decouple import config
import spotipy
import spotipy.util as util
def login():
'''Get and set Spotify credentials.'''
os.environ['SPOTIPY_CLIENT_ID'] = config('CLIENT_ID')
os.environ['SPOTIPY_CLIENT_SECRET'] = config('CLIENT_SECRET')
os.environ['SPOTIPY_REDIRECT_URI'] = config('REDIRECT_URI')
token = util.prompt_for_user_token('<PASSWORD>','playlist-modify-public')
return token
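# Hedged sketch (not part of the original script): the helpers below assume a
# module-level Spotipy client named `sp`. One plausible way to build it from the
# token returned by login() would be:
#   token = login()
#   sp = spotipy.Spotify(auth=token)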
def get_artist_id(name):
'''Get an artist ID.'''
artist = sp.search(q='artist:'+name, type='artist')
return artist['artists']['items'][0]['id']
def get_albums(artist_id):
'''Get an artist discography (full songs with features).'''
albums = sp.artist_albums(artist_id, album_type='album', country='US')['items']
discography = []
for album in albums:
tracks = get_album_tracks(album['id'], album['name'])
        features = get_track_features([t['id'] for t in tracks], sp)  # pass track IDs plus the shared client
full = merge_tracks_features(tracks, features)
discography.append(full)
time.sleep(1)
return discography
def get_album_tracks(album_id, album_name):
'''Get all tracks from an album.'''
album_tracks = sp.album_tracks(album_id)['items']
return [{'id': t['id'], 'name': t['name'], 'album': album_name, 'artist': t['artists'][0]['name']}
for t in album_tracks]
def get_track_features(tracks, sp):
'''Get features of a list of tracks.'''
features = sp.audio_features(tracks=tracks)
return features
def get_mult_features(track_ids):
'''Get features (in chunks) of a long playlist.'''
features = []
batch = 50
for i in range(0, len(track_ids), batch):
features = features + sp.audio_features(tracks=track_ids[i:i+batch])
return features
def merge_tracks_features(tracks, features):
'''Merge track info and track features.'''
merged = [{**track, **features[i]} for i, track in enumerate(tracks)]
return merged
def normalize(df):
'''Normalize features to avoid bias.'''
df[['tempo']]= df[['tempo']] / df[['tempo']].max()
df[['loudness']] = df[['loudness']] / df[['loudness']].min()
df[['duration_ms']] = df[['duration_ms']] / df[['duration_ms']].max()
return df
def to_csv(df, name):
'''Pandas dataframe to csv file.'''
df.to_csv(name, index=False)
def to_dataframe(data):
'''List of tracks into Pandas dataframe.'''
dataframes = [pd.DataFrame(album) for album in data]
return pd.concat(dataframes)
def get_full_playlist(user, playlist_id, sp):
'''Get tracks (with features) from a playlist, and turn it into a dataframe.'''
t = get_playlist_tracks(user, playlist_id, sp)
t_ids = [track['track']['id'] for track in t]
t_info = [{'album': track['track']['album']['name'], 'name': track['track']['name']} for track in t]
t_features = get_track_features(t_ids, sp)
tracks = [{**track, **t_features[i]} for i, track in enumerate(t_info)]
    return pd.DataFrame(tracks)
# Open the dataframe
import pandas as pd
df_sv = pd.read_csv("rayonnement-solaire-vitesse-vent-tri-horaires-regionaux.csv", sep=";")
import sys
import os
import torch
import numpy as np
import torch_geometric.datasets
import pyximport
from torch_geometric.data import InMemoryDataset, download_url
import pandas as pd
from sklearn import preprocessing
pyximport.install(setup_args={'include_dirs': np.get_include()})
import os.path as osp
from torch_geometric.data import Data
import time
from torch_geometric.utils import add_self_loops, negative_sampling
from torch_geometric.data import Dataset
from functools import lru_cache
import copy
from fairseq.data import (
NestedDictionaryDataset,
NumSamplesDataset,
)
import json
import pathlib
from pathlib import Path
BASE = Path(os.path.realpath(__file__)).parent
GLOBAL_ROOT = str(BASE / 'graphormer_repo' / 'graphormer')
sys.path.insert(1, (GLOBAL_ROOT))
from data.wrapper import preprocess_item
import datetime
def find_part(hour):
    # Map an hour of the day to a coarse period: 1 = morning, 2 = daytime, 3 = evening/night.
    if hour < 11:
        part = 1
    elif 11 < hour < 20:
        part = 2
    else:
        part = 3
    return part
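# Minimal sanity check for find_part (illustrative only, not used by the dataset
# builders below); note that hour 11 itself falls through to part 3 as written.
def _example_find_part():
    assert find_part(9) == 1    # morning
    assert find_part(15) == 2   # daytime
    assert find_part(22) == 3   # evening/night
    return True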
def prepare_raw_dataset_edge(dataset_name):
if dataset_name == 'abakan':
raw_data = pd.read_csv('datasets/abakan/raw/abakan_full_routes_final_weather_L_NaN_filtered_FIXED.csv')
all_roads_graph = list(pd.read_pickle('datasets/abakan/raw/all_roads_graph.pickle').to_networkx().edges())
all_nodes = pd.read_pickle('datasets/abakan/raw/clear_nodes.pkl')
init = pd.read_csv('datasets/abakan/raw/graph_abakan_init.csv')
elif dataset_name == 'omsk':
raw_data = pd.read_csv('datasets/omsk/raw/omsk_full_routes_final_weather_L_NaN_filtered_FIXED.csv')
all_roads_graph = list(pd.read_pickle('datasets/omsk/raw/all_roads_graph.pickle').to_networkx().edges())
# all_nodes = pd.read_pickle('datasets/omsk/raw/clear_nodes.pkl')
init = pd.read_csv('datasets/omsk/raw/graph_omsk_init.csv')
all_roads_dataset = pd.DataFrame()
    all_edge_list = [list(edge) for edge in all_roads_graph]
all_roads_dataset['edge_id']= range(0,len(init['edge_id'].unique()))
all_roads_dataset['speed'] = ' 1'
all_roads_dataset['length'] = ' 1'
all_roads_dataset[' start_point_part'] = init['quartal_id'] / len(init['quartal_id'].unique())
all_roads_dataset['finish_point_part'] = init['quartal_id'] / len(init['quartal_id'].unique())
all_roads_dataset_edges = pd.DataFrame()
all_roads_dataset_edges['source'] = [x[0] for x in all_edge_list]
all_roads_dataset_edges['target'] = [x[1] for x in all_edge_list]
# all_roads_dataset_edges = all_roads_dataset_edges.drop_duplicates().reset_index(drop = True)
trip_part = all_roads_dataset[['edge_id', 'speed', 'length', ' start_point_part', 'finish_point_part']].copy()
source_merge = pd.merge(all_roads_dataset_edges, trip_part.rename(columns = {'edge_id':'source'}), on = ['source'], how = 'left')
target_merge = pd.merge(all_roads_dataset_edges, trip_part.rename(columns = {'edge_id':'target'}), on = ['target'], how = 'left')
total_table = pd.DataFrame()
total_table['speed'] = (source_merge['speed'].apply(lambda x: [x]) + target_merge['speed'].apply(lambda x: [x]))
total_table['length'] = (source_merge['length'].apply(lambda x: [x]) + target_merge['length'].apply(lambda x: [x]))
total_table['edges'] = (source_merge['source'].apply(lambda x: [x]) + target_merge['target'].apply(lambda x: [x]))
total_table[' start_point_part'] = source_merge[' start_point_part']
total_table['finish_point_part'] = target_merge['finish_point_part']
    total_table['week_period'] = datetime.datetime.now().weekday()
    total_table['hour'] = datetime.datetime.now().hour
total_table['day_period'] = total_table['hour'].apply(lambda x: find_part(x))
total_table['RTA'] = 1
total_table['clouds'] = 1
total_table['snow'] = 0
total_table['temperature'] = 10
total_table['wind_dir'] = 180
total_table['wind_speed'] = 3
total_table['pressure'] = 747
total_table['source'] = source_merge['source']
total_table['target'] = source_merge['target']
# total_table = total_table.drop_duplicates().reset_index(drop = True)
return total_table
def prepare_raw_dataset_node(dataset_name):
if dataset_name == 'abakan':
raw_data = pd.read_csv('datasets/abakan/raw/abakan_full_routes_final_weather_L_NaN_filtered_FIXED.csv')
all_roads_graph = list(pd.read_pickle('datasets/abakan/raw/all_roads_graph.pickle').to_networkx().edges())
all_nodes = pd.read_pickle('datasets/abakan/raw/clear_nodes.pkl')
init = pd.read_csv('datasets/abakan/raw/graph_abakan_init.csv')
elif dataset_name == 'omsk':
        raw_data = pd.read_csv('datasets/omsk/raw/omsk_full_routes_final_weather_L_NaN_filtered_FIXED.csv')
import time
import csv
import gensim
import nltk
import numpy as np
import pandas as pd
from datetime import datetime
from gensim.models import Word2Vec
from gensim.models.callbacks import CallbackAny2Vec
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk.stem import WordNetLemmatizer
from progress.bar import Bar
from scipy import spatial
from tqdm import tqdm

# Lemmatizer used by load_csv_and_preprocess when lemmatise=True; it was
# referenced further down but never defined in the original snippet
# (nltk's 'wordnet' corpus must be downloaded for it to work).
wordnet_lemmatizer = WordNetLemmatizer()
class EpochLogger(CallbackAny2Vec):
def __init__(self):
self.starttime = 0.0
self.epoch = 0
self.tot_epochs = 100
self.single_epoch_time = 0
def on_epoch_begin(self, model):
if self.epoch != 0:
print(f"Started epoch {self.epoch}.")
def on_epoch_end(self, model):
if self.epoch == 0:
self.starttime = time.time()
self.single_epoch_time = time.time()
else:
if self.epoch != self.tot_epochs:
print(f"Finished epoch {self.epoch} in {time.time() - self.single_epoch_time}")
self.single_epoch_time = time.time()
else:
print(f"Training finished in {time.time() - self.starttime}s")
self.epoch += 1
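# Usage note (an assumption, not taken from the original file): EpochLogger is the
# kind of object gensim expects in the `callbacks` list, e.g.
#   model = Word2Vec(sentences, window=4, min_count=10, callbacks=[EpochLogger()])
# Parameter names for vector size / epochs differ between gensim 3.x (`size`, `iter`)
# and 4.x (`vector_size`, `epochs`), so they are omitted here.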
class BiasModel:
nltk.download('averaged_perceptron_tagger')
nltk.download('vader_lexicon')
def __init__(self, comments_document, comment_column='body', output_name='outputModel',
window=4, min_frequency=10, out_dimension=200):
self.comments_document = comments_document
self.comment_column = comment_column
self.output_name = output_name
self.window = window
self.min_frequency = min_frequency
self.out_dimension = out_dimension
self.sentiment_analyzer = SentimentIntensityAnalyzer()
@staticmethod
def calculate_centroid(model, words):
embeddings = [np.array(model[w]) for w in words if w in model]
centroid = np.zeros(len(embeddings[0]))
for e in embeddings:
centroid += e
return centroid / len(embeddings)
@staticmethod
def get_cosine_distance(embedding1, embedding2):
return spatial.distance.cosine(embedding1, embedding2)
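    # Illustrative usage (an assumption about intent, not from the original code):
    # a word's bias between two attribute sets could be scored by comparing its
    # embedding with each set's centroid, e.g.
    #   c1 = BiasModel.calculate_centroid(model.wv, ['he', 'him', 'his'])
    #   c2 = BiasModel.calculate_centroid(model.wv, ['she', 'her', 'hers'])
    #   score = (BiasModel.get_cosine_distance(model.wv['doctor'], c1)
    #            - BiasModel.get_cosine_distance(model.wv['doctor'], c2))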
    def load_csv_and_preprocess(self, path, nrowss=None, lemmatise=False):
        """
        input:
            nrowss <int> : number of rows to process, leave None if all
            lemmatise <True/False> : lemmatise tokens (nouns) before writing
        returns:
            List of preprocessed sentences, i.e. the input to train
        """
        print(f"Processing document: {self.comments_document}")
        trpCom = pd.read_csv(self.comments_document, lineterminator='\n', nrows=nrowss)
        trpCom = trpCom.fillna(0)
        documents = []
        with open(path, 'a', encoding='utf-8') as file:
            for i, row in enumerate(trpCom[self.comment_column]):
                if i % 500000 == 0:
                    print(f'Processing line {i}')
                    # flush the buffered sentences periodically to keep memory bounded
                    for word in documents:
                        file.write("%s\n" % word)
                    documents = []
                try:
                    pp = gensim.utils.simple_preprocess(row)
                    if lemmatise:
                        pp = [wordnet_lemmatizer.lemmatize(w, pos="n") for w in pp]
                    documents.append(pp)
                except TypeError:
                    print(f'Row {i} threw a type error.')
            # write whatever is still buffered once the loop finishes
            for word in documents:
                file.write("%s\n" % word)
        print(f'Wrote corpus to file: {path}.')
        return documents
def stream_load_csv_and_preprocess(self, csv_in, csv_out, corpus_out, subsample=False, fraction=None):
if subsample and fraction is None:
print("If subsampling is enabled a fraction must be specified.")
return
f_in = open(csv_in, encoding="utf-8")
reader = csv.DictReader(f_in)
        tmp_df = pd.DataFrame()
import sys
sys.path.append('../')
def WriteAriesScenarioToDB(scenarioName, ForecastName, ForecastYear, start_date, end_date, User, Area, GFO = False, CorpID = ['ALL']):
from Model import ImportUtility as i
from Model import BPXDatabase as bpxdb
from Model import ModelLayer as m
import datetime as dt
Success = True
Messages = []
try:
#Query the Aries database using import methods
scenario_results, Success, Messages = i.ImportAriesByScenario(scenarioName, start_date, end_date, Area)
#Create NF columns for oil and gas (replace nan with 0)
scenario_results['OilNF'] = scenario_results['C754'] / scenario_results['GasProduction']
scenario_results['GasNF'] = scenario_results['C753'] / scenario_results['OilProduction']
scenario_results = scenario_results.fillna(0)
#Obtain list from scenario query results
CorpID_list = scenario_results['CorpID'].to_list()
CorpID_list = list(set(CorpID_list))
config = m.GetConfig()
DBObj = bpxdb.BPXDatabase(config['server'], config['database'], config['UID'])
#Linearly regress the data
#Two segments: previous month's mid average and next month's mid average - regress to both to get the values.
count = 1
for corpID in CorpID_list:
#Get the subset of results that match this wellflac
corpid_scenario_df = scenario_results.query('CorpID == @corpID')
corpid_scenario_df = corpid_scenario_df.sort_values(by = ['Date'], ascending = True)
if corpid_scenario_df.shape[0] > 1:
df_previous_row = (0, corpid_scenario_df.iloc[1])
wellflac_count = 1
header_corpID = ''
for df_row in corpid_scenario_df.iterrows():
if wellflac_count == 1:
df_next_row = corpid_scenario_df.iloc[wellflac_count]
results = InterpolateDailyRatesFromMonthlyVolumes(CurrentMonthVal = df_row[1], NextMonthVal = df_next_row)
else:
results = InterpolateDailyRatesFromMonthlyVolumes(CurrentMonthVal = df_row[1], PreviousMonthVal = df_previous_row[1])
Success, Message = WriteInterpolatedForecastToDB(df_row[1]['WellName'], corpID, ForecastName, ForecastYear, scenarioName, GFO, User, results)
if not Success:
Messages.append(Message)
break
df_previous_row = df_row
wellflac_count = wellflac_count + 1
callprogressbar(count, len(CorpID_list))
count = count + 1
except Exception as ex:
Success = False
Messages.append('Failed to write the results from chosen scenario in Aries database. ' + str(ex))
return Success, Messages
def SOHA_WriteGFOToDB_2019Database(ForecastName, ForecastYear, User, start_date, end_date, WellFlac = ['ALL'], GFO = False):
#Part of to be deprecated methods to convert SoHa internal GFO data to standard
from Model import BPXDatabase as bpxdb
from Model import QueryFile as qf
from Model import ImportUtility as imp
from Model import ModelLayer as m
import datetime as dt
import numpy as np
    Success = True
Messages = []
try:
config = m.GetConfig()
#Create DB Object
return_df, Success, Message = imp.ImportGFOFromDB2019(start_date, end_date, WellFlac)
if not Success:
Messages.append(Message)
Production_Column_Name = '2019Zmcfd'
Success, Message = WriteInternalForecasttoDB(return_df, ForecastName, ForecastYear, Production_Column_Name, User, GFO)
if not Success:
Messages.append(Message)
except Exception as ex:
Success = False
Messages.append('Error writing GFO to DB. ' + str(ex))
return Success, Messages
def SOHA_WriteGFOToDB_2018Database(ForecastName, ForecastYear, User, start_date, end_date, WellFlac = ['ALL'], GFO = False):
#Part of to be deprecated methods to convert SoHa internal GFO data to standard
from Model import BPXDatabase as bpxdb
from Model import QueryFile as qf
from Model import ImportUtility as imp
from Model import ModelLayer as m
import datetime as dt
import numpy as np
    Success = True
Messages = []
try:
config = m.GetConfig()
#Create DB Object
return_df, Success, Message = imp.ImportGFOFromDB2019(start_date, end_date, WellFlac)
if not Success:
Messages.append(Message)
Production_Column_Name = '2018Zmcfd'
Success, Message = WriteInternalForecasttoDB(return_df, ForecastName, ForecastYear, Production_Column_Name, User, GFO)
if not Success:
Messages.append(Message)
except Exception as ex:
Success = False
Messages.append('Error writing GFO to DB. ' + str(ex))
return Success, Messages
def SOHA_WriteInternalForecasttoDB(df,ForecastName, ForecastYear, Production_Column_Name, User, GFO=True):
#Part of to be deprecated methods to convert SoHa internal GFO data to standard
from Model import BPXDatabase as bpx
from Model import ModelLayer as m
import datetime as dt
from Model import QueryFile as qf
Success = True
Messages = []
try:
config = m.GetConfig()
DBObj = bpx.BPXDatabase(config['server'], config['database'], config['UID'])
EDWObj = bpx.GetDBEnvironment('ProdEDW', 'OVERRIDE')
wellname_list = df['WellName'].unique()
wellname_list = list(wellname_list)
if '' in wellname_list:
wellname_list.remove('')
count = 1
for name in wellname_list:
monthly_df = df.query('WellName == @name')
monthly_df = monthly_df.sort_values(by = ['Date'], ascending = True)
df_previous_row = (0, monthly_df.iloc[1])
nettingFactor = monthly_df['NettingFactor'].values[0]
well_count = 1
header_corpid = ''
for df_row in monthly_df.iterrows():
if well_count == 1:
df_next_row = monthly_df.iloc[well_count]
results = InterpolateDailyRatesFromMonthlyRates(CurrentMonthVal = df_row[1], NextMonthVal = df_next_row, GasRateField=Production_Column_Name)
elif well_count != monthly_df.shape[0] and well_count != 1:
df_next_row = monthly_df.iloc[well_count]
results = InterpolateDailyRatesFromMonthlyRates(CurrentMonthVal = df_row[1], NextMonthVal = df_next_row, PreviousMonthVal = df_previous_row[1], GasRateField=Production_Column_Name)
elif well_count == monthly_df.shape[0]:
results = InterpolateDailyRatesFromMonthlyRates(CurrentMonthVal = df_row[1], PreviousMonthVal = df_previous_row[1], GasRateField=Production_Column_Name)
for row in results.iterrows():
corpid_query = qf.EDWKeyQueryFromWellName([name])
corpid_results = EDWObj.Query(corpid_query)
if not corpid_results[1].empty:
CorpID = corpid_results[1].at[0,'CorpID']
else:
CorpID = name
WellName = name
Update_Date = dt.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
Update_User = User
if header_corpid != CorpID:
#Create Header entry
header_corpid = CorpID
ForecastHeaderObj = m.ForecastHeaderRow(WellName, CorpID, ForecastName, ForecastYear, '', [], GFO, DBObj)
Success, Message = ForecastHeaderObj.Write(Update_User, Update_Date)
if not Success:
Messages.append(Message)
Date_Key = row[1]['Date'].strftime('%m/%d/%Y')
Gas_Production = row[1]['GasProduction']
GasNF = row[1]['GasNF']
if Gas_Production >= 0 and Date_Key:
ForecastDataObj = m.ForecastDataRow(ForecastName, CorpID, Date_Key, Gas_Production, 0, 0, GasNF, 0, 0, DBObj)
Success, Message = ForecastDataObj.Write(Update_User, Update_Date)
if not Success:
Messages.append(Message)
df_previous_row = df_row
well_count = well_count + 1
callprogressbar(count, len(wellname_list))
count = count + 1
except Exception as ex:
Success = False
Messages.append('Error writing Forecast to Database. ' + str(ex))
return Success, Messages
def SOHA_WriteGasNettingFactorsFromDB(Update_User, Update_Date, wellnames = []):
from Model import BPXDatabase as bpx
from Model import QueryFile as qf
from Model import ModelLayer as m
import datetime as datetime
Success = True
Messages = []
try:
config = m.GetConfig()
DBObj = bpx.BPXDatabase(config['server'], config['database'], config['UID'])
TeamOpsObj = bpx.GetDBEnvironment('OnPrem', 'OVERRIDE')
EDWObj = bpx.GetDBEnvironment('ProdEDW', 'OVERRIDE')
#Get Well List of required netting values from data that is already in database.
query = qf.GetNettingFactorsfromDB(wellnames)
res, res_df = TeamOpsObj.Query(query)
count = 1
for idx, item in res_df.iterrows():
wellquery = qf.EDWKeyQueryFromWellName([item['WellName']])
res, well_row = EDWObj.Query(wellquery)
if not well_row.empty:
corpID = well_row['CorpID'].values[0]
NettingObj = m.GasNettingRow(item['WellName'], corpID, item['NF'], item['FirstSalesDateInput'], DBObj)
Success, Message = NettingObj.Write(Update_User, Update_Date)
if not Success:
Messages.append(Message)
callprogressbar(count, res_df.shape[0])
count = count + 1
except Exception as ex:
Success = False
Messages.append('Error during write of netting factors to DB. ' + str(ex))
return Success, Messages
def WriteDefaultMultipliers(LE_Name, DefaultValue, Update_User, Update_Date, SuppressMessages):
import datetime as datetime
from Model import BPXDatabase as bpx
from Model import ModelLayer as m
Success = True
Messages = []
try:
config = m.GetConfig()
DBObj = bpx.BPXDatabase(config['server'], config['database'], config['UID'])
#Query the LE results
LE_query = 'select * from [LEForecastDatabase].[dbo].[LE_Data] where HeaderName = \'' + LE_Name + '\''
res, df = DBObj.Query(LE_query)
count = 1
for idx, row in df.iterrows():
FracHitObj = m.FracHitMultipliersRow(row['HeaderName'], row['CorpID'], row['Date_Key'], str(DefaultValue), DBObj)
Success, Message = FracHitObj.Write(Update_User, Update_Date)
if not Success:
Messages.append(Message)
if not SuppressMessages:
callprogressbar(count, df.shape[0])
count = count + 1
except Exception as ex:
Success = False
Messages.append('Error during write of default frac hit multipliers. ' + str(ex))
return Success, Messages
def WriteLEFromExcel(LEName, LE_Date,filename, sheetname, IDstartrow, corpID_col, wellName_col, date_row, date_startcol, date_endcol, InterpolationMethod, Phase, Update_User, Update_Date, IDs = ['ALL'] ):
from datetime import datetime, date
import pandas as pd
from Model import QueryFile as qf
from Model import BPXDatabase as bpx
from Model import ImportUtility as i
Messages = []
Success = True
try:
all_data_df, Success, Message = i.ImportForecastFromExcel(filename, sheetname, IDstartrow, corpID_col, wellName_col, date_row, date_startcol, date_endcol, Phase, '', '', ['ALL'])
if Success:
if corpID_col:
IDCol = 'CorpID'
else:
IDCol = 'WellName'
Success, Message = WriteLEFromTemplate(all_data_df, InterpolationMethod, LEName, LE_Date, Update_User, IDCol)
if not Success:
Messages.append(Message)
else:
Messages.append(Message)
except Exception as ex:
Success = False
Messages.append('Error during write of LE data from Excel sheet. ' + str(ex))
return Success, Messages
def WriteForecastFromExcel(ForecastName, ForecastYear,scenarioName, GFO, filename, sheetname, IDstartrow, corpID_col, wellName_col, date_row, date_startcol, date_endcol, InterpolationMethod, Phase, Update_User, Update_Date, IDs = ['ALL'] ):
from datetime import datetime, date
import pandas as pd
from Model import QueryFile as qf
from Model import BPXDatabase as bpx
from Model import ImportUtility as i
Messages = []
Success = True
try:
all_data_df, Success, Message = i.ImportForecastFromExcel(filename, sheetname, IDstartrow, corpID_col, wellName_col, date_row, date_startcol, date_endcol, Phase, '', '', ['ALL'])
if Success:
if corpID_col:
IDCol = 'CorpID'
else:
IDCol = 'WellName'
Success, Message = WriteForecastFromTemplate(all_data_df, InterpolationMethod, ForecastName, ForecastYear, scenarioName, GFO, Update_User, IDCol)
if not Success:
Messages.append(Message)
else:
Messages.append(Message)
if not Success:
Messages.append(Message)
except Exception as ex:
Success = False
Messages.append('Error during the write of Forecast from Excel sheet. ' + str(ex))
return Success, Messages
def WriteForecastFromTemplate(all_data_df, InterpolationMethod, ForecastName, ForecastYear, scenarioName, GFO, Update_User, IDCol='WellName'):
from datetime import datetime, date
import pandas as pd
from Model import QueryFile as qf
from Model import BPXDatabase as bpx
from Model import ImportUtility as i
Success = True
Messages = []
results = []
try:
#Data Frame must be the same structure as the output from the 'Read From Excel Function
#'CorpID', 'WellName', 'Wedge', 'Date', 'Gas', 'Oil', 'Water', 'OilNF', 'GasNF'
wellname = ''
if not Success:
Messages.append(Message)
if IDCol == 'CorpID':
corpid_list = list(all_data_df['CorpID'].unique())
corpid_query = qf.EDWKeyQueryFromCorpID(corpid_list)
corpid_results, corpid_df = bpx.GetDBEnvironment('ProdEDW', 'OVERRIDE').Query(corpid_query)
well_list = list(corpid_df['WellName'].unique())
well_query = 'CorpID == @corpid'
else:
well_list = list(all_data_df['WellName'].unique())
well_query = 'WellName == @wellname'
well_list = [i for i in well_list if i]
for wellname in well_list:
wellname, corpid = i.GetWellandCorpID(wellname, '')
if not corpid:
corpid = wellname
data_df = all_data_df.query(well_query)
row_count = 1
if not data_df.empty:
df_previous_row = (0, data_df.iloc[1])
for idx, df_row in data_df.iterrows():
if InterpolationMethod == 'MonthlyRates':
if row_count == 1:
df_next_row = data_df.iloc[row_count]
results = InterpolateDailyRatesFromMonthlyRates(CurrentMonthVal = df_row, NextMonthVal = df_next_row, GasProduction='Gas', OilProduction='Oil')
elif row_count != data_df.shape[0] and row_count != 1:
df_next_row = data_df.iloc[row_count]
results = InterpolateDailyRatesFromMonthlyRates(CurrentMonthVal = df_row, NextMonthVal = df_next_row, PreviousMonthVal = df_previous_row, GasProduction='Gas', OilProduction='Oil')
elif row_count == data_df.shape[0]:
results = InterpolateDailyRatesFromMonthlyRates(CurrentMonthVal = df_row, PreviousMonthVal = df_previous_row, GasProduction='Gas', OilProduction='Oil')
elif InterpolationMethod == 'MonthlyVolume':
if row_count == 1:
df_next_row = data_df.iloc[row_count]
results = InterpolateDailyRatesFromMonthlyVolumes(CurrentMonthVal = df_row[1], NextMonthVal = df_next_row)
else:
results = InterpolateDailyRatesFromMonthlyVolumes(CurrentMonthVal = df_row[1], PreviousMonthVal = df_previous_row[1])
elif InterpolationMethod == 'None':
results = ConvertNonInterpolatedResults(df_row)
Success, Message = WriteInterpolatedForecastToDB(wellname, corpid, ForecastName, ForecastYear, scenarioName, GFO, Update_User, results)
if not Success:
Messages.append(Message)
df_previous_row = df_row
row_count = row_count + 1
except Exception as ex:
Success = False
Messages.append('Error during the writing of the forecast from template. ' + str(ex))
return Success, Messages
def WriteLEFromTemplate(all_data_df, InterpolationMethod, LEName, LE_Date, Update_User, IDCol = 'WellName'):
from datetime import datetime, date
import pandas as pd
from Model import QueryFile as qf
from Model import BPXDatabase as bpx
from Model import ImportUtility as i
Success = True
Messages = []
results = []
try:
#Data Frame must be the same structure as the output from the 'Read From Excel Function
#'CorpID', 'WellName', 'Wedge', 'Date', 'Gas', 'Oil', 'Water', 'OilNF', 'GasNF'
wellname = ''
if not Success:
Messages.append(Message)
if IDCol == 'CorpID':
corpid_list = list(all_data_df['CorpID'].unique())
corpid_query = qf.EDWKeyQueryFromCorpID(corpid_list)
corpid_results, corpid_df = bpx.GetDBEnvironment('ProdEDW', 'OVERRIDE').Query(corpid_query)
well_list = list(corpid_df['WellName'].unique())
well_query = 'CorpID == @corpid'
else:
well_list = list(all_data_df['WellName'].unique())
well_query = 'WellName == @wellname'
well_list = [i for i in well_list if i]
for wellname in well_list:
wellname, corpid = i.GetWellandCorpID(wellname, '')
if not corpid:
corpid = wellname
data_df = all_data_df.query(well_query)
row_count = 1
if not data_df.empty:
df_previous_row = (0, data_df.iloc[1])
for idx, df_row in data_df.iterrows():
if InterpolationMethod == 'MonthlyRates':
if row_count == 1:
df_next_row = data_df.iloc[row_count]
results = InterpolateDailyRatesFromMonthlyRates(CurrentMonthVal = df_row, NextMonthVal = df_next_row, GasProduction='Gas', OilProduction='Oil')
elif row_count != data_df.shape[0] and row_count != 1:
df_next_row = data_df.iloc[row_count]
results = InterpolateDailyRatesFromMonthlyRates(CurrentMonthVal = df_row, NextMonthVal = df_next_row, PreviousMonthVal = df_previous_row, GasProduction='Gas', OilProduction='Oil')
elif row_count == data_df.shape[0]:
results = InterpolateDailyRatesFromMonthlyRates(CurrentMonthVal = df_row, PreviousMonthVal = df_previous_row, GasProduction='Gas', OilProduction='Oil')
elif InterpolationMethod == 'MonthlyVolume':
if row_count == 1:
df_next_row = data_df.iloc[row_count]
results = InterpolateDailyRatesFromMonthlyVolumes(CurrentMonthVal = df_row[1], NextMonthVal = df_next_row)
else:
results = InterpolateDailyRatesFromMonthlyVolumes(CurrentMonthVal = df_row[1], PreviousMonthVal = df_previous_row[1])
elif InterpolationMethod == 'None':
results = ConvertNonInterpolatedResults(df_row)
Wedge, Message = i.GetWedgeData(corpid, True)
Success, Message = WriteInterpolatedLEToDB(LEName, wellname, corpid, '', Wedge, LE_Date, Update_User, results)
if not Success:
Messages.append(Message)
df_previous_row = df_row
row_count = row_count + 1
except Exception as ex:
Success = False
Messages.append('Error during the writing of the LE from template. ' + str(ex))
return Success, Messages
def WriteInterpolatedForecastToDB(WellName, corpID, ForecastName, ForecastYear, scenarioName, GFO, UserName, results):
import datetime as dt
import pandas as pd
from Model import ModelLayer as m
header_corpID = ''
Messages = []
for item in results.iterrows():
idx = item[0]
UpdateDate = dt.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        if header_corpID != corpID:
            # write the forecast header only once per well, then remember the CorpID
            header_corpID = corpID
            ForecastHeaderObj = m.ForecastHeaderRow(WellName, corpID, ForecastName, ForecastYear, scenarioName, [], GFO, '')
            Success, Message = ForecastHeaderObj.Write(UserName, UpdateDate)
            if not Success:
                Messages.append(Message)
Date_Key = item[1]['Date'].strftime('%m/%d/%Y')
Gas_Production = item[1]['GasProduction']
Oil_Production = item[1]['OilProduction']
GasNF = item[1]['GasNF']
OilNF = item[1]['OilNF']
ForecastDataObj = m.ForecastDataRow(ForecastName, corpID, Date_Key, Gas_Production, Oil_Production, 0, GasNF, OilNF, 0, '')
Success, Message = ForecastDataObj.Write(UserName, UpdateDate)
if not Success:
Messages.append(Message)
return Success, Messages
def WriteInterpolatedLEToDB(LEName, WellName, CorpID, ForecastGeneratedFrom, Wedge, LE_Date, UserName, results):
import datetime as dt
import pandas as pd
from Model import ModelLayer as m
header_corpID = ''
Messages = []
for item in results.iterrows():
idx = item[0]
UpdateDate = dt.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        if header_corpID != CorpID:
            # write the LE header only once per well, then remember the CorpID
            header_corpID = CorpID
            LEHeaderObj = m.LEHeaderRow(LEName, WellName, CorpID, ForecastGeneratedFrom, Wedge, LE_Date, '')
            Success, Message = LEHeaderObj.Write(UserName, UpdateDate)
            if not Success:
                Messages.append(Message)
Date_Key = item[1]['Date'].strftime('%m/%d/%Y')
Gas_Production = item[1]['GasProduction']
Oil_Production = item[1]['OilProduction']
LEDataObj = m.LEDataRow(LEName, CorpID, Date_Key, Gas_Production, Oil_Production, 0, '')
Success, Message = LEDataObj.Write(UserName, UpdateDate)
if not Success:
Messages.append(Message)
return Success, Messages
def callprogressbar(iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\r')
# Print New Line on Complete
if iteration == total:
print()
def InterpolateDailyRatesFromMonthlyVolumes(**kwargs):
#Take in the monthly cumulative volumes that are assinged at the 'end' of the month in Aries
#Assign daily production and return the results
from datetime import timedelta
import pandas as pd
import math
previous_month_val = ''
current_month_val = ''
next_month_val = ''
return_df = pd.DataFrame(columns = ['GasProduction', 'OilProduction', 'GasNF', 'OilNF', 'Date'])
previous_month_bool = False
current_month_bool = False
next_month_bool = False
for key, value in kwargs.items():
if key=='PreviousMonthVal':
previous_month_val = value
previous_month_bool = True
elif key=='CurrentMonthVal':
current_month_val = value
current_month_bool = True
elif key == 'NextMonthVal':
next_month_val = value
next_month_bool = True
if previous_month_bool and current_month_bool and not next_month_bool:
#Get number of days between previous val and current val (should be roughly 1 month)
previous_date = previous_month_val['Date']
current_month_date = current_month_val['Date']
diff = current_month_date - previous_date
days = diff.days
normal_days = 30.42
#Get slope between the two values
previous_gas_volume = previous_month_val['GasProduction']
current_gas_volume = current_month_val['GasProduction']
gas_slope = (current_gas_volume - previous_gas_volume) / normal_days #Average days in a month
previous_oil_volume = previous_month_val['OilProduction']
current_oil_volume = current_month_val['OilProduction']
oil_slope = (current_oil_volume - previous_oil_volume) / normal_days
if current_gas_volume > 0:
gasnettingFactor = current_month_val['GasNF']
else:
gasnettingFactor = 0
if current_oil_volume > 0:
oilnettingFactor = current_month_val['OilNF']
else:
oilnettingFactor = 0
return_row = {}
for day in range(days):
#Add an entry to the return data frame
gas_production = previous_gas_volume + (day + 1) * gas_slope
if gas_production > 0:
return_row['GasProduction'] = gas_production / normal_days
else:
return_row['GasProduction'] = 0
oil_production = previous_oil_volume + (day + 1) * oil_slope
if gas_production > 0:
return_row['OilProduction'] = oil_production / normal_days
else:
return_row['OilProduction'] = 0
return_row['Date'] = previous_date + timedelta(days = (day+1))
return_row['GasNF'] = gasnettingFactor
return_row['OilNF'] = oilnettingFactor
return_df = return_df.append(return_row, ignore_index = True)
elif current_month_bool and next_month_bool and not previous_month_bool:
current_month_date = current_month_val['Date']
next_month_date = next_month_val['Date']
diff = next_month_date - current_month_date
days =current_month_date.day
normal_days = 30.42
current_gas_volume = current_month_val['GasProduction']
next_gas_volume = next_month_val['GasProduction']
gas_slope = (next_gas_volume - current_gas_volume) / normal_days
current_oil_volume = current_month_val['OilProduction']
next_oil_volume = next_month_val['OilProduction']
oil_slope = (next_oil_volume - current_oil_volume) / normal_days
if current_gas_volume > 0:
gasnettingFactor = current_month_val['GasNF']
else:
gasnettingFactor = 0
if current_oil_volume > 0:
oilnettingFactor = current_month_val['OilNF']
else:
oilnettingFactor = 0
return_row = {}
for day in range(days):
gas_production = current_gas_volume - day * gas_slope
if gas_production > 0:
return_row['GasProduction'] = gas_production / normal_days
else:
return_row['GasProduction'] = 0
oil_production = current_oil_volume - day * oil_slope
if oil_production > 0:
return_row['OilProduction'] = oil_production / normal_days
else:
return_row['OilProduction'] = 0
return_row['Date'] = current_month_date - timedelta(days = day)
return_row['GasNF'] = gasnettingFactor
return_row['OilNF'] = oilnettingFactor
return_df = return_df.append(return_row, ignore_index = True)
return return_df
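# Hedged usage sketch (not called anywhere in this module): spread two monthly
# Aries volumes across the days between them. Field names match what the function
# expects; the numbers themselves are invented purely for illustration.
def _example_interpolate_monthly_volumes():
    import pandas as pd
    previous_row = pd.Series({'Date': pd.Timestamp('2020-01-31'), 'GasProduction': 3000.0,
                              'OilProduction': 300.0, 'GasNF': 0.85, 'OilNF': 0.80})
    current_row = pd.Series({'Date': pd.Timestamp('2020-02-29'), 'GasProduction': 2800.0,
                             'OilProduction': 250.0, 'GasNF': 0.85, 'OilNF': 0.80})
    # returns one row per day from 2020-02-01 through 2020-02-29
    return InterpolateDailyRatesFromMonthlyVolumes(PreviousMonthVal=previous_row,
                                                   CurrentMonthVal=current_row)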
def ConvertNonInterpolatedResults(df_row):
from datetime import datetime, timedelta
import pandas as pd
return_df = pd.DataFrame(columns = ['GasProduction', 'OilProduction', 'GasNF', 'OilNF', 'Date'])
return_row = {}
return_row['GasProduction'] = df_row['Gas']
return_row['OilProduction'] = df_row['Oil']
return_row['GasNF'] = df_row['GasNF']
return_row['OilNF'] = df_row['OilNF']
return_row['Date'] = df_row['Date']
return_df = return_df.append(return_row, ignore_index = True)
return return_df
def InterpolateDailyRatesFromMonthlyRates(**kwargs):
#Take in the monthly average rates that are assinged at the 'beginning' of the month in internal databases
#Assign daily production and return the results
from datetime import datetime, timedelta
import pandas as pd
import math
previous_month_val = ''
current_month_val = ''
next_month_val = ''
return_df = pd.DataFrame(columns = ['GasProduction', 'OilProduction', 'GasNF', 'OilNF', 'Date'])
previous_month_bool = False
current_month_bool = False
next_month_bool = False
for key, value in kwargs.items():
if key=='PreviousMonthVal':
previous_month_val = value
previous_month_bool = True
elif key=='CurrentMonthVal':
current_month_val = value
current_month_bool = True
elif key == 'NextMonthVal':
next_month_val = value
next_month_bool = True
elif key == 'GasProduction':
gas_rate = value
elif key == 'OilProduction':
oil_rate = value
if previous_month_bool and current_month_bool and not next_month_bool:
#Scenario for the end of the analysis when no next month's data exists
current_month_date = current_month_val['Date']
previous_date = previous_month_val['Date']
current_gas_rate = current_month_val[gas_rate]
previous_gas_rate = previous_month_val[gas_rate]
current_oil_rate = current_month_val[oil_rate]
previous_oil_rate = previous_month_val[oil_rate]
days_in_month = pd.to_datetime(current_month_date + pd.tseries.offsets.MonthEnd(1)).date().day
days_in_previous = pd.to_datetime(previous_date + pd.tseries.offsets.MonthEnd(1)).date().day
mid_current = datetime(year = current_month_date.year, month = current_month_date.month, day= math.floor(days_in_month / 2))
mid_previous = datetime(year = previous_date.year, month = previous_date.month, day= math.floor(days_in_previous / 2))
backward_days = (mid_current - mid_previous).days
gas_backward_slope = (current_gas_rate - previous_gas_rate) / backward_days
oil_backward_slope = (current_oil_rate - previous_oil_rate) / backward_days
return_row = {}
for day in range(days_in_month):
if day < mid_current.day:
applied_days = mid_current.day - day
gas_production = current_gas_rate - applied_days * gas_backward_slope
oil_production = current_oil_rate - applied_days * oil_backward_slope
elif day == mid_current.day:
gas_production = current_gas_rate
oil_production = current_oil_rate
elif day > mid_current.day:
applied_days = day - mid_current.day
gas_production = current_gas_rate + applied_days * gas_backward_slope
oil_production = current_oil_rate + applied_days * oil_backward_slope
if gas_production > 0:
return_row['GasProduction'] = gas_production
else:
return_row['GasProduction'] = 0
if oil_production > 0:
return_row['OilProduction'] = oil_production
else:
return_row['OilProduction'] = 0
return_row['Date'] = current_month_date + timedelta(days = day)
return_row['GasNF'] = current_month_val['GasNF']
return_row['OilNF'] = current_month_val['OilNF']
return_df = return_df.append(return_row, ignore_index = True)
elif current_month_bool and next_month_bool and not previous_month_bool:
#Scenario for the beginning of the analysis when no previous month's data exists
current_month_date = current_month_val['Date']
next_month_date = next_month_val['Date']
current_gas_rate = current_month_val[gas_rate]
next_gas_rate = next_month_val[gas_rate]
current_oil_rate = current_month_val[oil_rate]
next_oil_rate = next_month_val[oil_rate]
days_in_month = pd.to_datetime(current_month_date + pd.tseries.offsets.MonthEnd(1)).date().day
        days_in_next = pd.to_datetime(next_month_date + pd.tseries.offsets.MonthEnd(1)).date().day
import logging
import os
import sys
import pandas as pd
import numpy as np
def bp(billing_period, period, pf="", sf=""):
return "%s%s%.2d%s" % (pf, billing_period, period, sf)
def avg_bp(billing_period, period):
return bp(billing_period, period, sf="_mean")
def rate(billing_period, period):
return bp(billing_period, period, "rate_", "_cum")
def rrate(billing_period, period):
return bp(billing_period, period, "rrate_", "_cum")
def weight(period):
return bp("weight", period)
def cohort_size_col(billing_period):
return bp(billing_period, 1, sf="_count")
def cohort_safe_name(cohort_list):
return "+".join(cohort_list) # TODO: make it safer
def get_dtypes(input_file, periodicity):
# improve pandas memory usage by specifying df data types on import
periodCols=pd.read_csv(input_file, sep=";" , nrows=1) \
.filter(regex='^'+periodicity).columns.to_list()
input_types= {'tracking_id':'str',
'joined_date':'str',
'region':'str',
'country':'str',
'network_operator':'str',
'pid' : 'int',
'advertiser':'str',
'advertiser_id':'int',
'periodicity':'str',
'price_point ':'float',
'joined_week':'int'}
input_types.update({x : 'float' for x in periodCols })
return input_types
# TODO: Add configuration
class SurvivalCurveModel:
periods = {'day': 365 + 1,
'week': 52 + 1,
'month': 12 + 1}
log_config = {
'format': '%(asctime)s %(levelname)s [%(filename)s:%(funcName)s:%(lineno)d] %(message)s',
'datefmt': '%Y-%m-%dT%T',
'level': 'INFO',
}
cohort_levels = [
["country"],
["country", "network_operator"],
["country", "network_operator", "advertiser"]
]
@staticmethod
def get_region_curves(periodicity):
try:
#return pd.read_pickle(os.path.join(os.path.abspath(os.path.dirname(__file__)), "data/REGION")) # TODO:must add option for monthly/weekly REGION
if periodicity == "month":
return pd.read_pickle(os.path.join(os.path.abspath(os.path.dirname(__file__)), "data/REGION_MONTH"))
else:
if periodicity == "week":
return pd.read_pickle(os.path.join(os.path.abspath(os.path.dirname(__file__)), "data/REGION_WEEK"))
else:
print("periodicity must be month or week")
except FileNotFoundError:
return None
@staticmethod
def get_cohort_level_curves(raw_training, cohort, periodicity):
timeserie = "joined_%s" % (periodicity)
cohort_weight = 10 ** len(cohort)
#cohort_weight = 1 * len(cohort)
period_count = SurvivalCurveModel.periods[periodicity]
# Group the data by the timeserie and cohort features
grouping_aggregation = {bp(periodicity, 1): ["count", "mean"]}
for period in range(2, period_count):
grouping_aggregation[bp(periodicity, period)] = ["mean"]
        grouped = raw_training.groupby([timeserie] + cohort).agg(grouping_aggregation)
grouped.columns = ['_'.join(tup).rstrip('_') for tup in grouped.columns.values]
gr = grouped.reset_index()
# IF max billing period not in the train set yet
max_time = gr[timeserie].max()
def get_max_bp(current_period, max_time, periodicity):
max_year = int(max_time / 100)
max_period = max_time - max_year * 100
year = int(current_period / 100)
period = current_period - year * 100
if periodicity == "month":
return (max_year - year) * 12 + (max_period - period)
else:
if periodicity == "week":
return (max_year - year) * 52 + (max_period - period)
else:
#TODO: USE LOGGING instead of plain print
print("periodicity must be month or week")
gr["max_bp"] = gr.apply(lambda x: get_max_bp(x[timeserie], max_time, periodicity), axis=1)
g = gr.set_index([timeserie] + cohort)
# Remove when max billing period is in the train set.
g[rate(periodicity, 1)] = 1.0
g[rrate(periodicity, 1)] = 1.0
g.loc[g[avg_bp(periodicity, 1)] == 0, avg_bp(periodicity, 1)] = grouped[avg_bp(periodicity, 1)].mean()
for period in range(2, period_count):
# applies retention rate
g[rrate(periodicity, period)] = g[avg_bp(periodicity, period)] \
/ g[avg_bp(periodicity, period-1)]
g[rate(periodicity, period)] = g[rrate(periodicity, period)] \
* g[rate(periodicity, period-1)]
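# Numeric illustration (hypothetical means): with month01_mean=10, month02_mean=8
# and month03_mean=6, the retention rates are rrate_month02_cum=0.8 and
# rrate_month03_cum=0.75, giving cumulative survival rates rate_month02_cum=0.8
# and rate_month03_cum=0.6 relative to the first billing period.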
#applies attrition rate
# for period in range(2, period_count):
# g[rate(periodicity, period)] = g[avg_bp(periodicity, period)] \
# / g[avg_bp(periodicity, 1)]
# for period in range(2, period_count):
# g[rate(periodicity, period)] = g[rrate(periodicity, period)] \
# * g[rate(periodicity, period-1)]
rate_list = [weight(period) for period in range(2, period_count)] + \
[rate(periodicity, period) for period in range(1, period_count)]
averages = pd.DataFrame()
weight_column = cohort_size_col(periodicity)
gr = g.reset_index()
for period in range(2, period_count):
rate_col = rate(periodicity, period)
#TODO: add a recent parameter for different business models (e.g. monthly)
if period <= (period_count/2) :
if periodicity != "month":
#use 4 more recent weeks to replicate finance model
recency_condition = (gr["max_bp"] >= period) & (gr["max_bp"] <= 4 + period)
else:
recency_condition = (gr["max_bp"] >= period) & (gr["max_bp"] <= 1 + period)
else:
recency_condition = (gr["max_bp"] >= period)
valid_scope = gr[recency_condition]
averages[rate_col] = valid_scope.groupby(cohort)[rate_col].mean()
# weight the curve by cohort volume
# if cohort == ["country"]:
# global country_baseline
# country_baseline= valid_scope.groupby(cohort)[weight_column].sum()
# cohort_count=country_baseline
# else:
# cohort_count= valid_scope.groupby(cohort)[weight_column].sum()
# print(country_baseline, cohort_count)
# if country_baseline/cohort_count < 0.1:
# cohort_weight= cohort_weight/100
averages[weight(period)] = valid_scope.groupby(cohort)[weight_column].sum() * cohort_weight
averages[rate(periodicity, 1)] = 1.0
return averages[rate_list].fillna(0.0)
@staticmethod
def shift_late_bill(df):
#replace zero with nulls across DataFrame
df.replace(0, np.nan, inplace=True)
idx = np.isnan(df.values).argsort(axis=1)
return pd.DataFrame(
df.values[np.arange(df.shape[0])[:, None], idx],
index=df.index,
columns=df.columns,
).fillna(0)
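# Illustrative behaviour (hypothetical row): [10.0, 0.0, 5.0] becomes [10.0, 5.0, 0.0]
# -- zero (late) billings are turned into NaN, pushed towards the end of each row
# via the per-row argsort of np.isnan, and finally filled back with 0. Note that
# argsort's default sort is not stable, so the relative order of the non-NaN
# values is only typically preserved.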
@staticmethod
def train(input_file, output_file, periodicity):
input_types=get_dtypes(input_file, periodicity)
logging.info("training on [{}]".format(input_file))
try:
raw_training = pd.read_csv(input_file, sep=";" , index_col='tracking_id', dtype=input_types)
except ValueError:
print(pd.read_csv(input_file, sep=";").loc[[0]])
sys.exit(1)
for cohort in SurvivalCurveModel.cohort_levels:
logging.info("get curves for cohort {} (periodicity level {})".format(cohort, periodicity))
avg = SurvivalCurveModel.get_cohort_level_curves(raw_training, cohort, periodicity)
logging.info("saving model to [{}]".format(output_file))
os.makedirs(output_file, exist_ok=True)
avg = avg.reset_index()
avg.to_pickle(output_file + "/" + cohort_safe_name(cohort))
@staticmethod
def predict(input_file, model, output_file, periodicity):
period_count = SurvivalCurveModel.periods[periodicity]
def get_result_for_curves(df, curves, cohort, periodicity):
logging.info("joining input data")
#with pd.option_context('display.max_rows', 100):
tdf = df.reset_index().merge(curves, on=cohort, how='left')
#tdf.to_pickle("/tmp/"+cohort_safe_name(cohort))
logging.info("Calculating subscriptions curves")
for period in range(2, period_count):
tdf[bp(periodicity, period)] = tdf[bp(periodicity, 1)] * \
tdf[rate(periodicity, period)] * \
tdf[weight(period)]
return tdf.fillna(0.0)
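# In other words, the predicted value for period p is:
# initial cohort value (period 1) * cumulative survival rate for p * cohort weight.
# The weights are summed and divided back out after aggregating across cohort
# levels (see the weighted-average step further below).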
logging.info("predicting [{}] using [{}] model [{}]".format(input_file, periodicity, model))
df = pd.read_csv(input_file, sep=";" , index_col='tracking_id')
output_fields = ["tracking_id"] + \
[bp(periodicity, period) for period in range(1, period_count)]
weights = [weight(period) for period in range(2, period_count)]
results = []
logging.info("loading region curves.")
curves = SurvivalCurveModel.get_region_curves(periodicity)
logging.info("getting results for curves")
r = get_result_for_curves(df, curves, ["region"], periodicity)
results.append(r[output_fields + weights])
for cohort in SurvivalCurveModel.cohort_levels:
logging.info("loading curves for cohort level {}.".format(cohort))
curves = pd.read_pickle(model + "/" + cohort_safe_name(cohort))
logging.info("getting results for curves")
r = get_result_for_curves(df, curves, cohort, periodicity)
logging.info("Appending results")
results.append(r[output_fields + weights])
logging.info("Concatenating results")
r = pd.concat(results, sort=False)
#r.to_pickle("/tmp/results_df.pickle")
logging.info("Applying weighted averages")
result_agg = {bp(periodicity, 1): 'mean'}
for period in range(2, period_count):
result_agg[bp(periodicity, period)] = 'sum'
result_agg[weight(period)] = 'sum'
results = r.groupby(["tracking_id"]).agg(result_agg)
for period in range(2, period_count):
results[bp(periodicity, period)] = results[bp(periodicity, period)] / results[weight(period)]
# final weighted rate per transaction id
results[bp('rate', period, sf='final')]= results[bp(periodicity, period)]/results[bp(periodicity, 1)]
# final rate columns
rate_final= [bp('rate', period, sf='final') for period in range(2, period_count)]
logging.info("saving results to [{}]".format(output_file))
results.fillna(0.0).reset_index()[output_fields + rate_final].to_csv(output_file, sep=";", index=False)
@staticmethod
def consolidate(input_file, predictions_file, periodicity, model_version):
# define output dimensions list and periods
output_fields = ['tracking_id','joined_%s'%(periodicity),'region','country','network_operator','pid','advertiser','advertiser_id','periodicity','price_point']
output_period = [bp(periodicity, period) for period in range(1, SurvivalCurveModel.periods[periodicity])]
rate_final= [bp('rate', period, sf='final') for period in range(2, SurvivalCurveModel.periods[periodicity])]
#loading prediction and input prediction file
df1 = pd.read_csv(predictions_file, sep=';')
df2 = pd.read_csv(input_file, sep=';')
from flask import Response, url_for, current_app
from flask_restful import Resource
import pandas as pd
import os
from pathlib import Path
from flask_mysqldb import MySQL
db = MySQL()
class FireList(Resource):
def get(self):
cur = db.connection.cursor()
cur.execute(
"""
Select * from fire_data where incident_type = 'Wildfire';
"""
)
data = cur.fetchall()
row_headers=[x[0] for x in cur.description]
json_data=[]
for result in data:
json_data.append(dict(zip(row_headers,result)))
cur.close()
df = pd.DataFrame(json_data)
import os, shutil, socket
from glob import glob
import pandas as pd, numpy as np
from functools import reduce
def main():
isfull = 0
iscollabsubclass = 1
organize_PCs = 0
sector = 2
today = '20200612'
if isfull:
given_full_classifications_organize(sector=sector, today=today)
if iscollabsubclass:
given_collab_subclassifications_merge(sector=sector)
if organize_PCs:
given_merged_organize_PCs(sector=sector)
def ls_to_df(classfile, classifier='LGB'):
indf = pd.read_csv(classfile, names=['lsname'])
pdfpaths = np.array(indf['lsname'])
# e.g.,
# vet_hlsp_cdips_tess_ffi_gaiatwo0002890062263458259968-0006_tess_v01_llc[gold PC].pdf
classes = [p.split('[')[1].replace('].pdf','') for p in pdfpaths]
pdfnames = [p.split('[')[0]+'.pdf' for p in pdfpaths]
df = pd.DataFrame({'Name':pdfnames, classifier+'_Tags':classes})
return df
def hartmanformat_to_df(classfile, classifier="JH"):
with open(classfile, 'r') as f:
lines = f.readlines()
pdfnames = [l.split(' ')[0] for l in lines]
classes = [' '.join(l.split(' ')[1:]).rstrip('\n') for l in lines]
df = pd.DataFrame({'Name':pdfnames, classifier+'_Tags':classes})
return df
def given_collab_subclassifications_merge(sector=6):
"""
LGB or JH or JNW has done classifications. merge them, save to csv.
"""
datadir = os.path.join(
os.path.expanduser('~'),
'Dropbox/proj/cdips/results/vetting_classifications/'
)
if sector==2:
classfiles = [
os.path.join(datadir, '20200612_sector-2_PCs_LGB_class.txt'),
os.path.join(datadir, '20200612_sector-2_PCs_JH_class.txt'),
os.path.join(datadir, '20200612_sector-2_PCs_JNW_class.txt')
]
elif sector==5:
classfiles = [
os.path.join(datadir, '20200604_sector-5_PCs_LGB_class.txt'),
os.path.join(datadir, '20200604_sector-5_PCs_JH_class.txt'),
os.path.join(datadir, '20200604_sector-5_PCs_JNW_class.txt')
]
elif sector==6:
classfiles = [
os.path.join(datadir, '20190621_sector-6_PCs_LGB_class.txt'),
os.path.join(datadir, '20190621_sector-6_PCs_JH_class.txt'),
os.path.join(datadir, '20190621_sector-6_PCs_JNW_class.txt')
]
elif sector==7:
classfiles = [
os.path.join(datadir, '20190621_sector-7_PCs_LGB_class.txt'),
os.path.join(datadir, '20190621_sector-7_PCs_JH_class.txt')
]
elif sector==8:
classfiles = [
os.path.join(datadir, '20191121_sector-8_PCs_LGB_class.txt'),
os.path.join(datadir, '20191121_sector-8_PCs_JH_class.txt'),
os.path.join(datadir, '20191121_sector-8_PCs_JNW_class.txt')
]
elif sector==9:
classfiles = [
os.path.join(datadir, '20191101_sector-9_PCs_LGB_class.txt'),
os.path.join(datadir, '20191101_sector-9_PCs_JH_class.txt'),
os.path.join(datadir, '20191101_sector-9_PCs_JNW_class.txt')
]
elif sector==10:
classfiles = [
os.path.join(datadir, '20191118_sector-10_PCs_LGB_class.txt'),
os.path.join(datadir, '20191118_sector-10_PCs_JH_class.txt'),
os.path.join(datadir, '20191118_sector-10_PCs_JNW_class.txt')
]
elif sector==11:
classfiles = [
os.path.join(datadir, '20191205_sector-11_PCs_LGB_class.txt'),
os.path.join(datadir, '20191205_sector-11_PCs_JH_class.txt'),
os.path.join(datadir, '20191205_sector-11_PCs_JNW_class.txt')
]
elif sector==12:
classfiles = [
os.path.join(datadir, '20200317_sector-12_PCs_LGB_class.txt'),
os.path.join(datadir, '20200317_sector-12_PCs_JH_class.txt'),
os.path.join(datadir, '20200317_sector-12_PCs_JNW_class.txt')
]
elif sector==13:
classfiles = [
os.path.join(datadir, '20200320_sector-13_PCs_LGB_class.txt'),
os.path.join(datadir, '20200320_sector-13_PCs_JH_class.txt'),
os.path.join(datadir, '20200320_sector-13_PCs_JNW_class.txt')
]
outpath = os.path.join(
datadir, 'sector-{}_PCs_MERGED_SUBCLASSIFICATIONS.csv'.format(sector)
)
print('merging {}'.format(repr(classfiles)))
dfs = []
for classfile in classfiles:
if 'LGB' in classfile:
df = ls_to_df(classfile, classifier='LGB')
elif 'JNW' in classfile:
df = ls_to_df(classfile, classifier='JNW')
elif 'JH' in classfile:
df = hartmanformat_to_df(classfile, classifier='JH')
dfs.append(df)
# merge all the classification dataframes on Name into one dataframe
mdf = reduce(lambda x, y: pd.merge(x, y, on='Name'), dfs)
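# Note: pd.merge defaults to an inner join, so only PDFs that received a
# classification from every rater survive the merged dataframe.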
mdf.to_csv(outpath, sep=';', index=False)
print('wrote {}'.format(outpath))
def given_merged_organize_PCs(sector=None):
"""
Using output from given_collab_subclassifications_merge, assign gold=2,
maybe=1, junk, and look closer at anything with average rating >1 (note:
not >=). The one exception: things classified as "not_cdips_still_good",
which go in their own pile.
"""
datadir = os.path.join(
os.path.expanduser('~'),
'Dropbox/proj/cdips/results/vetting_classifications/'
)
inpath = os.path.join(
datadir, 'sector-{}_PCs_MERGED_SUBCLASSIFICATIONS.csv'.format(sector)
)
df = pd.read_csv(inpath, sep=';')
tag_colnames = [c for c in df.columns if 'Tags' in c]
# iterate over ["LGB_tags", "JH_tags", "JNW_tags"] to get scores assigned
# by each
for tag_colname in tag_colnames:
newcol = tag_colname.split('_')[0]+'_score'
classifier_isgold = np.array(
df[tag_colname].str.lower().str.contains('gold')
)
classifier_ismaybe = np.array(
df[tag_colname].str.lower().str.contains('maybe')
)
classifier_isjunk = np.array(
df[tag_colname].str.lower().str.contains('junk')
)
df[newcol] = (
2*classifier_isgold +
1*classifier_ismaybe +
0*classifier_isjunk
)
df['average_score'] = (
df['LGB_score'] + df['JH_score'] + df['JNW_score']
) / 3
threshold_cutoff = 1.0
df['clears_threshold'] = (df['average_score'] > threshold_cutoff)
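# Worked example: two "gold" votes and one "maybe" give (2+2+1)/3 ~= 1.67, which
# clears the threshold; one "gold" and two "junk" give 2/3 ~= 0.67, which does not
# (the comparison is strictly greater than 1.0).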
#
# nb. not_cdips_still_good will go in a special pile!
#
classifier_isnotcdipsstillgood = np.array(
df["LGB_Tags"].str.lower().str.contains('not_cdips_still_good')
)
df['is_not_cdips_still_good'] = classifier_isnotcdipsstillgood
outpath = os.path.join(
datadir, 'sector-{}_PCs_MERGED_RATINGS.csv'.format(sector)
)
df.to_csv(outpath, sep=';', index=False)
print('made {}'.format(outpath))
#
# output:
# 1) things that clear threshold, and are CDIPS objects (not field stars)
# 2) things that are in the "not CDIPS, still good" pile
#
df_clears_threshold = df[df.clears_threshold & ~df.is_not_cdips_still_good]
df_is_not_cdips_still_good = df[df.is_not_cdips_still_good]
#
# 1) CDIPS OBJECTS
#
outpath = os.path.join(
datadir, 'sector-{}_PCs_CLEAR_THRESHOLD.csv'.format(sector)
)
df_clears_threshold.to_csv(outpath, sep=';', index=False)
print('made {}'.format(outpath))
# now copy to new directory
outdir = os.path.join(datadir, 'sector-{}_CLEAR_THRESHOLD'.format(sector))
if not os.path.exists(outdir):
os.mkdir(outdir)
if sector==6:
srcdir = '/nfs/phtess2/ar0/TESS/PROJ/lbouma/cdips/results/vetting_classifications/20190617_sector-6_PC_cut'
elif sector==7:
srcdir = '/nfs/phtess2/ar0/TESS/PROJ/lbouma/cdips/results/vetting_classifications/20190618_sector-7_PC_cut'
elif sector in [8,9,10,11]:
# NB. I "remade" these vetting plots to add the neighborhood charts
srcdir = glob(
'/nfs/phtess2/ar0/TESS/PROJ/lbouma/cdips/results/vetting_classifications/2019????_sector-{}_PC_cut_remake'.
format(sector)
)
assert len(srcdir) == 1
srcdir = srcdir[0]
elif sector in [1,2,3,4,5,12,13,14]:
srcdir = glob(
'/nfs/phtess2/ar0/TESS/PROJ/lbouma/cdips/results/vetting_classifications/2020????_sector-{}_PC_cut'.
format(sector)
)
assert len(srcdir) == 1
srcdir = srcdir[0]
for n in df_clears_threshold['Name']:
src = os.path.join(srcdir, str(n))
dst = os.path.join(outdir, str(n))
if not os.path.exists(dst):
try:
shutil.copyfile(src, dst)
print('copied {} -> {}'.format(src, dst))
except FileNotFoundError:
print('WRN! DID NOT FIND {}'.format(src))
else:
print('found {}'.format(dst))
#
# 2) NOT_CDIPS_STILL_GOOD
#
outpath = os.path.join(
datadir, 'sector-{}_PCs_NOT_CDIPS_STILL_GOOD.csv'.format(sector)
)
df_is_not_cdips_still_good.to_csv(outpath, sep=';', index=False)
print('made {}'.format(outpath))
# now copy to new directory
outdir = os.path.join(
datadir, 'sector-{}_NOT_CDIPS_STILL_GOOD'.format(sector)
)
if not os.path.exists(outdir):
os.mkdir(outdir)
if sector in [6,7]:
raise NotImplementedError
elif sector in [8,9,10,11]:
srcdir = glob(
'/nfs/phtess2/ar0/TESS/PROJ/lbouma/cdips/results/vetting_classifications/2019????_sector-{}_PC_cut_remake'.
format(sector)
)
assert len(srcdir) == 1
srcdir = srcdir[0]
elif sector in [1,2,3,4,5,12,13,14]:
srcdir = glob(
'/nfs/phtess2/ar0/TESS/PROJ/lbouma/cdips/results/vetting_classifications/2020????_sector-{}_PC_cut'.
format(sector)
)
assert len(srcdir) == 1
srcdir = srcdir[0]
for n in df_is_not_cdips_still_good['Name']:
src = os.path.join(srcdir, str(n))
dst = os.path.join(outdir, str(n))
if not os.path.exists(dst):
shutil.copyfile(src, dst)
print('copied {} -> {}'.format(src, dst))
else:
print('found {}'.format(dst))
def given_full_classifications_organize(
sector=7,
today='20190618'
):
"""
given a directory classified w/ TagSpaces tags, produce a CSV file with the
classifications
also produce CSV file with PC only classifications
also produce directories with cuts to send to vetting collaborators
"""
##########################################
# modify these
outdir = os.path.join(
os.path.expanduser('~'),
'Dropbox/proj/cdips/results/vetting_classifications/'
)
classified_dir = os.path.join(
outdir, 'sector_{}_{}_LGB_DONE/'.format(sector,today)
)
outtocollabdir = os.path.join(
outdir, '{}_sector-{}_{}'
)
outname = '{}_LGB_sector{}_classifications.csv'.format(today,sector)
##########################################
outpath = os.path.join(outdir, outname)
pdfpaths = glob(os.path.join(classified_dir,'*pdf'))
if not len(pdfpaths) > 1:
raise AssertionError('bad pdfpaths. no glob matches.')
for p in pdfpaths:
if '[' not in p:
raise AssertionError('got {} with no classification'.format(p))
classes = [p.split('[')[1].replace('].pdf','') for p in pdfpaths]
pdfnames = list(map(os.path.basename,
[p.split('[')[0].replace('vet_','').replace('_llc','') for p in pdfpaths]))
df = pd.DataFrame({'Name':pdfnames, 'Tags':classes})
import pandas as pd
import numpy as np
import os
import json
DATA_DIR = "data/"
FILE_NAME = "data.csv"
FINAL_DATA = "rearranged_data.xlsx"
DATA_SPECS = "data_specs.json"
with open(DATA_SPECS, 'r') as f:
DATA_SPECS_DICT = json.load(f)
# Load data
df = pd.read_csv(os.path.join(DATA_DIR, FILE_NAME), delimiter=";")
# function to copy serial
def copy_serial(row):
if not pd.isnull(row["ZG04"]):
row["SERIAL"] = row["ZG04"]
elif not pd.isnull(row["ZG05"]):
row["SERIAL"] = row["ZG05"]
return row
# move serial to serial from w01
df = df.apply(lambda row: copy_serial(row), axis=1)
# Drop lines where we have no serial number
df = df[~pd.isnull(df["SERIAL"])]
# Function to extract group
serial_group = dict()
def extract_variable(row):
if not pd.isnull(row["ZG04"]):
serial_group.update({row["SERIAL"]:"MS"})
elif not pd.isnull(row["ZG05"]):
import pandas as pd
def print_stage(stage_str):
count = 100
occupied_count = len(stage_str)
separator_num = int((count - occupied_count) / 2)
separator_str = "=" * separator_num
print_str = f"{separator_str}{stage_str}{separator_str}"
print(print_str)
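# e.g. print_stage("TRAIN") prints "TRAIN" centred in a line of "=" padding of
# roughly 100 characters (47 "=" on each side for a 5-character label).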
def epoch_time(start_time, end_time):
elapsed_time = end_time - start_time
elapsed_mins = int(elapsed_time / 60)
elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
return elapsed_mins, elapsed_secs
def read_files(file_name, lines_constraint=None):
results = []
with open(file_name) as f:
count = 0
for line in f:
results.append(line.replace("\n", ""))
if lines_constraint:
count += 1
if count >= lines_constraint:
break
return results
def write_predictions(preds, split, name):
with open(f"./{name}.{split}.pred", "w") as f:
f.write("\n".join(preds))
def write_scores(scores, split, name):
report = {}
for k in ["1", "2", "l"]:
for m in ["precision", "recall", "f1"]:
report[f"rouge-{k}-{m}"] = [scores[f"rouge-{k}-{m[0]}"]]
df = pd.DataFrame(report)
# ' % kmergrammar
# ' % <NAME> mm2842
# ' % 15th May 2017
# ' # Introduction
# ' Some of the code below is still under active development
# ' ## Required libraries
# + name = 'import_libraries', echo=False
import os
import sys
import numpy as np
import pandas as pd
import sqlalchemy
import logging
import time
from math import log
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from itertools import product
from sklearn import metrics
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.externals import joblib
# + name= 'hello_world', echo=False
def hello_world():
print("Aksunai qanuipit!")
def createKmerSet(kmersize):
"""
write all possible kmers
:param kmersize: integer, 8
:return uniq_kmers: list of sorted unique kmers
"""
kmerSet = set()
nucleotides = ["a", "c", "g", "t"]
kmerall = product(nucleotides, repeat=kmersize)
for i in kmerall:
kmer = ''.join(i)
kmerSet.add(kmer)
uniq_kmers = sorted(list(kmerSet))
return uniq_kmers
def compute_kmer_entropy(kmer):
"""
compute shannon entropy for each kmer
:param kmer: string
:return entropy: float
"""
prob = [float(kmer.count(c)) / len(kmer) for c in dict.fromkeys(list(kmer))]
entropy = - sum([p * log(p) / log(2.0) for p in prob])
return round(entropy, 2)
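# Worked examples: compute_kmer_entropy("aaaaaaaa") == 0.0 (a single repeated
# base carries no uncertainty), while compute_kmer_entropy("acgtacgt") == 2.0
# (four equally frequent bases: -4 * 0.25 * log2(0.25) = 2 bits).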
def make_stopwords(kmersize):
"""
write filtered out kmers
:param kmersize: integer, 8
:return stopwords: list of sorted low-complexity kmers
"""
kmersize_filter = {5: 1.3, 6: 1.3, 7: 1.3, 8: 1.3, 9: 1.3, 10: 1.3}
limit_entropy = kmersize_filter.get(kmersize)
kmerSet = set()
nucleotides = ["a", "c", "g", "t"]
kmerall = product(nucleotides, repeat=kmersize)
for n in kmerall:
kmer = ''.join(n)
if compute_kmer_entropy(kmer) < limit_entropy:
kmerSet.add(make_newtoken(kmer))
else:
continue
stopwords = sorted(list(kmerSet))
return stopwords
def createNewtokenSet(kmersize):
"""
write all possible newtokens
:param kmersize: integer, 8
:return uniq_newtokens: list of sorted unique newtokens
"""
newtokenSet = set()
uniq_kmers = createKmerSet(kmersize)
for kmer in uniq_kmers:
newtoken = make_newtoken(kmer)
newtokenSet.add(newtoken)
uniq_newtokens = sorted(list(newtokenSet))
return uniq_newtokens
def make_newtoken(kmer):
"""
write a collapsed kmer and kmer reverse complementary as a newtoken
:param kmer: string e.g., "AG"
:return newtoken: string e.g., "agnct"
:param kmer: string e.g., "CT"
:return newtoken: string e.g., "agnct"
"""
kmer = str(kmer).lower()
newtoken = "n".join(sorted([kmer, kmer.translate(str.maketrans('tagc', 'atcg'))[::-1]]))
return newtoken
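# e.g. make_newtoken("AG") and make_newtoken("CT") both return "agnct", so a
# k-mer and its reverse complement collapse to one double-strand-aware token;
# a palindromic k-mer such as "ACGT" yields "acgtnacgt" (both halves identical).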
def write_ngrams(sequence):
"""
write a bag of newtokens of size n
:param sequence: string e.g., "ATCG"
:param (intern) kmerlength e.g., 2
:return newtoken_string: string e.g., "atnat gantc cgncg"
"""
seq = str(sequence).lower()
finalstart = (len(seq) - kmerlength) + 1
allkmers = [seq[start:(start + kmerlength)] for start in range(0, finalstart)]
tokens = [make_newtoken(kmer) for kmer in allkmers if len(kmer) == kmerlength and "n" not in kmer]
newtoken_string = " ".join(tokens)
return newtoken_string
def save_plot_prc(precision, recall, avg_prec, figure_file, name):
"""
make plot for precission recall
:param precission: precission
:param recall: recall
:param avg_prec: avg_prec
:param figure_file: figure_file
:param name: name
:return plot precission recall curve
"""
plt.clf()
title = 'Precision Recall Curve - double strand ' + name
plt.title(title)
plt.plot(recall, precision, label='Precission = %0.2f' % avg_prec)
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.savefig(figure_file)
def save_plot_roc(false_positive_rate, true_positive_rate, roc_auc, figure_file, name):
"""
make plot for roc_auc
:param false_positive_rate: false_positive_rate
:param true_positive_rate: true_positive_rate
:param roc_auc: roc_auc
:param figure_file: figure_file
:param name: name
:return roc_auc
"""
plt.clf()
title = 'Receiver Operating Characteristic - double strand ' + name
plt.title(title)
plt.plot(false_positive_rate, true_positive_rate, 'b', label='AUC = %0.2f' % roc_auc)
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([-0.1, 1.2])
plt.ylim([-0.1, 1.2])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.savefig(figure_file)
if sys.argv[1] == "-help":
print(
"Usage: python kgrammar_bag-of-k-mer_training_testing.py [kmersize, integer] [mode filtered, 'True', or 'False' (i.e., mode full)] [dataset_name, string]")
print("Example: python kgrammar_bag-of-k-mer_training_testing.py 8 False FEA4")
quit()
else:
kmersize = sys.argv[1] # e.g 8
if sys.argv[2] == 'True':
filtered = True
full = False
mode = "_mode_filtered_"
elif sys.argv[2] == 'False':
filtered = False
full = True
mode = "_mode_full_"
dataset_name = sys.argv[3] # e.g "KN1"
kmerlength = int(kmersize)
newtoken_size = 1 + (kmerlength * 2)
pathname = os.path.dirname(sys.argv[0])
WORKING_DIR = os.path.abspath(pathname)
all_tokens = createNewtokenSet(kmerlength)
if kmerlength > 4:
stpwrds = make_stopwords(kmerlength)
else:
filtered = False
full = True
mode = "_mode_full_"
print("for k < 5 only full mode is available!")
expected_tokens = len(all_tokens)
run_id = str(int(time.time()))
file_name = WORKING_DIR + '/output/bag-of-k-mers/' + dataset_name + '/kgrammar_bag-of-k-mers_model_' + run_id + '_' + dataset_name + '_' + str(
kmerlength) + '_' + mode + '.txt'
logging.basicConfig(level=logging.INFO, filename=file_name, filemode="a+",
format="%(asctime)-15s %(levelname)-8s %(message)s")
logging.info("kmer_grammar_bag-of-k-mers RUN ID")
logging.info(run_id)
logging.info("WORKING_DIR")
logging.info(WORKING_DIR)
logging.info("input: kmerlength")
logging.info(str(kmersize))
logging.info("input: dataset")
logging.info(str(dataset_name))
logging.info("input: filtered")
logging.info(filtered)
inengine = 'sqlite:///' + WORKING_DIR + '/input_databases/' + dataset_name + '/data_model.db'
dbcon = sqlalchemy.create_engine(inengine)
logging.info(inengine)
print('*' * 80)
print("Kgrammer run id: ", run_id)
print("-d %s -k %d -filtered %s" % (dataset_name, kmerlength, str(sys.argv[2])))
trainquery = "SELECT * FROM train ORDER BY RANDOM()"
dftrain = pd.read_sql_query(trainquery, dbcon)
import os
import subprocess
from glob import glob
import argparse
import sys
from em import molecule
from em.dataset import metrics
from mpi4py import MPI
from mpi4py.futures import MPICommExecutor
from concurrent.futures import wait
from scipy.spatial import cKDTree
import numpy as np
import pandas as pd
import traceback
import random
import json
from json import encoder
from skimage.measure import regionprops
from scipy.ndimage import distance_transform_edt, gaussian_filter
from Bio.PDB import PDBParser, PDBIO
def convert(o):
if isinstance(o, np.generic): return o.item()
raise TypeError
# Intersect the simulated per-chain maps with the original map
# Any overlap must be annotated
# Produces the annotated map according to each label, as float
# Revisits unassigned pieces using a slack (fullness) distance in a single pass
# Computes stats
# Saves the result to disk
def annotateSample(map_id, indexes, df, fullness,columns, output_dir):
map_path = df.at[indexes[0], columns['map_path']]
annotated_path = os.path.join(output_dir,map_path.replace('.','_gt.'))
contourLvl = float(df.at[indexes[0], columns['contourLevel']])
map_to_annotate = molecule.Molecule(map_path, recommendedContour=contourLvl)
data_map = map_to_annotate.emMap.data()
map_mask = map_to_annotate.getContourMasks()[1]
result = {}
result['map_path'] = map_path
result['contourLevel'] = contourLvl
result['total'] = map_to_annotate.getVolume()[1]
# Set to 0 all voxels outside contour level, otherwise fill with a marker
marker = 10000
data_map[np.logical_not(map_mask)] = 0
data_map[map_mask] = marker
labels = []
chain_label_id_dict = {}
print('Tagging em map {}'.format(os.path.basename(map_path)))
for i in indexes:
segment_path = df.at[i, columns['subunit_path']]
if os.path.exists(segment_path):
segment_label = int(float(df.at[i, columns['chain_label']]))
chain_label_id_dict[df.at[i,columns['chain_label']]] = df.at[i,columns['chain_id']]
segment_map = molecule.Molecule(segment_path, recommendedContour=0.001)
segment_mask = segment_map.getContourMasks()[1]
print("Number of voxels in segment {}".format(np.sum(segment_mask)))
masks_intersec = np.logical_and(map_mask, segment_mask)
print("Number of voxels in intersection {}".format(np.sum(masks_intersec)))
data_map[masks_intersec] = segment_label
labels.append(segment_label)
print("Chain {}, voxels {}".format(segment_label,segment_map.getVolume()[1]))
print(" Matching {} of {} voxels".format(np.sum(masks_intersec), np.sum(segment_mask)))
else:
raise ValueError('There is a problem getting segments for {}'.format(segment_path))
# Get non assigned voxels
dim1,dim2,dim3 = np.where(data_map == marker)
nonassigned_points = np.array(list(map(list,zip(dim1,dim2,dim3))))
# Get assigned voxels coords
dim1,dim2,dim3 = np.where(np.logical_and((data_map != marker), (data_map != 0)))
# Combine list of indexes into a list of points in 3D space
assigned_points = list(map(list,zip(dim1,dim2,dim3)))
print("Asigned voxels : {}".format(len(assigned_points)))
print("Non asigned voxels : {}".format(len(nonassigned_points)))
print("Total number of voxels: {}".format(map_to_annotate.getVolume()[1]))
# If any voxel remain
if (len(nonassigned_points) > 0) & (len(assigned_points)>0):
# Create KDTree with assigned points
tree = cKDTree(assigned_points)
# Search for nearest point
d,i = tree.query(nonassigned_points)
neighbors_index = tree.data[i].astype(int)
# Use voxels inside fullnes value only
mask = d <= fullness
mask_inv = np.logical_not(mask)
points_to_reassign = nonassigned_points[mask]
points_to_discard = nonassigned_points[mask_inv]
neighbors_index = neighbors_index[mask]
d1_i, d2_i, d3_i = neighbors_index[:,0], neighbors_index[:,1], neighbors_index[:,2]
# Replace values in map with search result
values_to_map = data_map[d1_i,d2_i,d3_i]
for point,value in zip(points_to_reassign,values_to_map):
data_map[point[0],point[1],point[2]] = value
# Set voxels outside fullness value to 0
for point in points_to_discard:
data_map[point[0],point[1],point[2]] = 0
result['voxels_reasigned'] = np.sum(mask)
result['voxels_discarted'] = np.sum(mask_inv)
else:
print(" No more voxels to assign")
result['voxels_reasigned'] = 0
result['voxels_discarted'] = 0
dim1,dim2,dim3 = np.where(data_map == marker)
if len(dim1)>0:
print("there shuldnt be markers in array of labels.. check this {}".format(os.path.basename(map_path)))
# print labels
voxels_dict = {}
for l in labels:
voxels_dict[l]=np.sum(data_map==l)
filename = map_path.replace(str(map_path[-4:]), '_'+chain_label_id_dict[l]+'.npy')
map_masked = np.copy(data_map)
print("Voxels for label {} :{}".format(l, voxels_dict[l]))
map_masked[data_map==l] = 1.0
map_masked[data_map!=l] = 0.0
print("saved volume of {}".format(map_masked.sum()))
np.save(filename, map_masked)
print("saved {}".format(filename))
# Compute euler numbers
euler_dict = {}
for region in regionprops(data_map.astype(np.int32)):
euler_dict[region.label] = region.euler_number
# Save map
result['euler_segments'] = json.dumps(euler_dict, default=convert)
result['voxels_assigned'] = json.dumps(voxels_dict, default=convert)
result['tag_path'] = annotated_path
result['map_id'] = map_id
map_to_annotate.setData(data_map)
map_to_annotate.save(annotated_path)
return result
def annotatePoints(df, i, output_path, number_points=3, gaussian_std=3):
output_df = pd.DataFrame(columns=['id','map_path','contourLevel','subunit', 'tagged_path', 'number_points','tagged_points_path'])
#print("aa{}".format(df.iloc[i]['tagged_path']))
tagged_map = molecule.Molecule(df.iloc[i]['tagged_path'], 0.001).getEmMap().data()
#print("unique",np.unique(tagged_map))
for region in regionprops(tagged_map.astype(np.int32)):
label = int(region.label)
region_gt = np.copy(tagged_map)
region_gt[ region_gt != label ] = 0.0
region_gt[ region_gt == label ] = 1.0
#print("number",np.sum(region_gt==1.0))
#print("in label {}".format(label))
basename = df.iloc[i]['id']+'_'+str(label)+'.npy'
region_path = os.path.join(output_path,basename)
#print("pathh {}".format(region_path))
distance = distance_transform_edt(region_gt)
distance[distance != 1] = 0
index_x, index_y, index_z = np.where(distance == 1)
chosen_indexes = np.random.choice(len(index_x), number_points, replace=False)
#print("indexes:",chosen_indexes)
index_x = index_x[chosen_indexes]
index_y = index_y[chosen_indexes]
index_z = index_z[chosen_indexes]
point_array = np.zeros_like(region_gt)
point_array[index_x,index_y,index_z] = 1.0
point_array = gaussian_filter(point_array, gaussian_std)
np.save(region_path,point_array)
#print("saved {}".format(np.sum(point_array)))
output_df = output_df.append({'id':df.iloc[i]['id'], 'map_path':df.iloc[i]['map_path'], 'contourLevel':df.iloc[i]['contourLevel'], 'subunit':label, 'tagged_path':df.iloc[i]['tagged_path'], 'number_points':number_points, 'tagged_points_path':region_path}, ignore_index=True)
#print("output_df: ", output_df)
return output_df
def compute_adjacency(df, i):
# Get EM map id
map_id = df.iloc[i]['id']
# Get pdb path and chain id
pdb_path = df.iloc[i]['pdb_path']
chain = df.iloc[i]['fitted_entries']
# Create parser and get readed object
parser = PDBParser(PERMISSIVE = True, QUIET = True)
pdb_obj = parser.get_structure(chain, pdb_path)
# Compute dictionary to translate chain id (letter) to chain label (number)
chain_id_list = [chain._id for chain in pdb_obj.get_chains()]
chain_label_list = [i for i in range(1,len(chain_id_list)+1)]
dict_label_id_chain = dict(zip(chain_id_list,chain_label_list))
# Create dictionaries to store coords and kdtree for each chain
dict_chain_kdtree = dict()
# Create dictionary to store final adjency data
adjacency_dict = dict()
# Compute kdtree for each chain and assign it along with their coords to the corresponding chain label in dict
for c in pdb_obj.get_chains():
ca_coord_list = [atom.coord for atom in c.get_atoms() if atom.name=="CA"]
chain_id = c.id
print("get {} atoms for chain {}".format(len(ca_coord_list), chain_id))
if len(ca_coord_list) == 0:
continue
else:
kdtree = cKDTree(ca_coord_list)
dict_chain_kdtree[dict_label_id_chain[chain_id]] = kdtree
# Loop over chains again to compute adjacency (a chain is adjacent if any CA atom of another chain lies within 5 Angstroms)
for c in dict_chain_kdtree.keys():
# Get atoms coords for current chain from dict
current_chain_adjacency_dict = dict()
current_kdtree = dict_chain_kdtree[c]
# For every other chain, loop atoms to find adjacency or until atom list is empty.
for c_i in dict_chain_kdtree.keys():
if c == c_i:
continue
else:
print("Comparing {} against {}".format(c,c_i))
# Get kdtree to compare with
chain_kdtree = dict_chain_kdtree[c_i]
# Get adjacent atoms within a radius of 5 Angstroms
adjacent_atoms = current_kdtree.query_ball_tree(chain_kdtree, r=5)
number_adjacencies = np.sum([len(adjacent) for adjacent in adjacent_atoms])
if number_adjacencies > 0:
current_chain_adjacency_dict[c_i] = 1
else:
current_chain_adjacency_dict[c_i] = 0
adjacency_dict[c] = current_chain_adjacency_dict
label_id_chain = json.dumps(dict_label_id_chain, default=convert)
adjacency = json.dumps(adjacency_dict, default=convert)
return pd.Series( [map_id, label_id_chain, adjacency], index=['map_id','chain_id_to_label','adjacency'])
def mapMetricsCompute(row,match_dict):
map_id = row['id']
tagged_path = row['tagged_path']
contour = 0.001
compare_path = match_dict[map_id]
sample = molecule.Molecule(tagged_path, contour)
labeled = molecule.Molecule(compare_path, contour)
iou = metrics.intersection_over_union(sample, labeled)
h = metrics.homogenity(sample, labeled)
p = metrics.proportion(sample, labeled)
c = metrics.consistency(sample, labeled)
return pd.Series( [map_id, row['map_path'], tagged_path, row['contourLevel'], compare_path, iou, h, p, c ], index=['id', 'map_path','tagged_path', 'contourLevel', 'reference_path', 'iou', 'homogenity', 'proportion', 'consistency'])
def doParallelTagging(df, fullness, gt_path, columns):
unique_id_list = df[columns['id']].unique().tolist()
# Construct dataframe to store results
output_df = pd.DataFrame(columns=['id','map_path','contourLevel','tagged_path','subunits','matched_subunits','voxels','voxels_matched','voxels_discarted','voxels_reassigned','voxels_assigned','euler_segments'])
print("Spawn procecess...")
comm = MPI.COMM_WORLD
size = comm.Get_size()
with MPICommExecutor(comm, root=0, worker_size=size) as executor:
if executor is not None:
futures = []
# For each map, perform annotation
for i in unique_id_list:
subunit_indexes = df.loc[df[columns['id']]==i].index.tolist()
futures.append(executor.submit(annotateSample,i, subunit_indexes, df, fullness, columns, gt_path))
wait(futures)
for f in futures:
try:
res = f.result()
map_id = res['map_id']
voxels_assigned = json.loads(res['voxels_assigned'])
euler_segments = json.loads(res['euler_segments'])
voxels_reassigned = res['voxels_reasigned']
voxels_discarted = res['voxels_discarted']
tagged_path = res['tag_path']
map_path = res['map_path']
contour = res['contourLevel']
voxels_num = res['total']
print("Received {}".format(res))
# Get number of segments matched
segments_matched = 0
voxels_matched = 0
for key in voxels_assigned.keys():
matched_num = voxels_assigned[key]
if matched_num > 0:
segments_matched+=1
voxels_matched += matched_num
#'tagged_path', 'subunits','matched_subunits', 'voxels', 'voxels_matched', 'matched_per_segment'
output_df = output_df.append({'id':map_id, 'map_path':map_path, 'contourLevel':contour, 'tagged_path':tagged_path, 'subunits':len(voxels_assigned.keys()), 'matched_subunits':segments_matched, 'voxels':voxels_num, 'voxels_matched':voxels_matched, 'voxels_discarted':voxels_discarted, 'voxels_reassigned':voxels_reassigned, 'voxels_assigned':voxels_assigned, 'euler_segments':euler_segments}, ignore_index=True)
except ValueError as error:
print("Error asignating segments for {}".format(map_id))
return output_df
def doParallelAdjacency(df):
id_list = df.index.tolist()
print("Spawn procecess...")
comm = MPI.COMM_WORLD
size = comm.Get_size()
output_df = pd.DataFrame(columns=['map_id', 'chain_id_to_label', 'adjacency'])
# coding=UTF-8
import pandas as pd
from sklearn.model_selection import train_test_split
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'
from torch.utils.data import DataLoader
import torch
from model import Model
from util import extract_coords, coords2str  # import helper functions
from util import PATH, train, test  # import constants (data path, train/test DataFrame objects)
from dataset import CarDataset
from model import criterion
from mAP import calculate_mAP
import torch.optim as optim
from torch.optim import lr_scheduler
from tqdm import tqdm
import torch.nn
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
# Build a DataFrame in which every row holds the coordinates and pose information of one car.
train_images_dir = PATH + '/train_images/{}.jpg'
test_images_dir = PATH + '/test_images/{}.jpg'
df_train, df_dev = train_test_split(train, test_size=0.1,
random_state=42)  # split into training and validation sets
df_test = test
df_dev_1 = df_dev.copy()
df_dev_pred_1 = pd.DataFrame()
df_dev_pred_2 = pd.DataFrame()
'''
Single cell tracking data processing script
<NAME> (<EMAIL>), <NAME>(<EMAIL>), <NAME>(<EMAIL>)
purpose:
this notebook aims to be a general tool for analysis of single cell migration data with use of opensource tools.
Input data:
the script can process cell tracking data from ImageJ, Lineage Mapper, Metamorph, or Usiigaci tracker.
If you use this code, please cite the following paper:
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, Usiigaci: Label-free instance-aware cell tracking in phase contrast microscopy using Mask R-CNN.
Version:
v1.0 2018.08.19
License:
This script is released under MIT license
Copyright <2018> <Okinawa Institute of Science and Technology Graduate University>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
#import libraries
import numpy as np
import pandas as pd
import scipy
from IPython.core.display import display
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.art3d import Line3DCollection
from matplotlib.collections import LineCollection
from matplotlib import colors as mcolors
from matplotlib.colors import ListedColormap, BoundaryNorm
import seaborn as sns
import os
from itertools import groupby
from operator import itemgetter
import imageio
from read_roi import read_roi_file
from read_roi import read_roi_zip
#Definition
#define the frames throughout the experiments
n_frames = 61
# define the time interval between each frame
t_inc = 10 # in minutes
print("Total frame of time lapse is %d" %(n_frames))
print("Time interval is %d minutes"%(t_inc))
#define the data location
location = r'C:\Users\Davince\Dropbox (OIST)\Deeplearning_system\tracking project\Testautomaticfinding'
#define the location type = 'folder' or 'csv'
location_type ='folder'
#define the data_type = 'ImageJ', 'Usiigaci', 'LineageMapper', or 'Metamorph'
data_type = 'Usiigaci'
#input data loading
if data_type=='ImageJ':
if location_type == 'csv':
df_ij = pd.read_csv(location)
n_cells_ij = int(len(df_ij) / n_frames)
timestamps = np.linspace(0, n_frames*t_inc, n_frames+1)
print("Cell track numbers is %d"%(n_cells_ij))
elif data_type=='LineageMapper':
if location_type=='csv':
df_LM = pd.read_csv(location)
count = df_LM['Cell ID'].value_counts()
cell_ids_LM = count[count==n_frames].index.tolist()
n_cells_LM = int(len(cell_ids_LM))
timestamps = np.linspace(0, n_frames*t_inc, n_frames+1)
print("Cell track number is: " + str(n_cells_LM))
col_names = df_LM.columns.tolist()
selected_df = pd.DataFrame(columns=col_names)
for i in cell_ids_LM:
selected_df = selected_df.append(df_LM.loc[df_LM['Cell ID']==i].copy())
selected_df.reset_index(drop=True, inplace=True)
elif data_type=='Metamorph':
if location_type=='csv':
df_meta = pd.read_csv(location)
count = df_meta['Object #'].value_counts()
cell_ids_meta = count[count==n_frames].index.tolist()
n_cells_meta = int(len(cell_ids_meta))
timestamps = np.linspace(0, n_frames*t_inc, n_frames+1)
print("Cell track number is:" + str(n_cells_meta))
col_names = df_meta.columns.tolist()
selected_df = pd.DataFrame(columns=col_names)
for i in cell_ids_meta:
selected_df = selected_df.append(df_meta.loc[df_meta['Object #']==i].copy())
selected_df.reset_index(drop=True, inplace=True)
elif data_type=='Usiigaci':
if location_type=='csv':
df_usiigaci = pd.read_csv(location)
count = df_usiigaci['particle'].value_counts()
cell_ids_usiigaci = count[count==n_frames].index.tolist() # keep only cells that exist through all the frames
n_cells_usiigaci = int(len(cell_ids_usiigaci))
timestamps = np.linspace(0, n_frames*t_inc, n_frames+1)
print("Cell track number is:" + str(n_cells_usiigaci))
col_names = df_usiigaci.columns.tolist()
selected_df = pd.DataFrame(columns=col_names)
for i in cell_ids_usiigaci:
selected_df = selected_df.append(df_usiigaci.loc[df_usiigaci['particle']==i].copy())
selected_df.reset_index(drop=True, inplace=True)
if location_type == 'folder':
#looks for tracks.csv in nested folders
all_files = []
sub_directory = []
for root, dirs, files in os.walk(location):
for file in files:
if file.endswith("tracks.csv"):
relativePath = os.path.relpath(root, location)
if relativePath == ".":
relativePath = ""
all_files.append((relativePath.count(os.path.sep),relativePath, file))
all_files.sort(reverse=True)
for (count, folder), files in groupby(all_files, itemgetter(0, 1)):
sub_directory.append(folder)
print("Found the following directories containing Usiigaci tracked results:")
print("\n".join(str(x) for x in sub_directory))
print("Making new ids and concatenate dataframe")
frame_list = []
for i in range(0, len(sub_directory)):
path = os.path.join(location, str(sub_directory[i]+"\\tracks.csv"))
replicate_id = sub_directory[i].split('_')[0]
df_usiigaci = pd.read_csv(path)
#number of index is
cell_number = df_usiigaci.index.size
new_id_list = []
for i in range(0, df_usiigaci.index.size):
new_id = replicate_id + "_" + str(df_usiigaci.iloc[i, 0])
new_id_list.append(new_id)
df_usiigaci['newid'] = new_id_list
frame_list.append(df_usiigaci)
#display(df)
#create new pandas dataframe with all the csv data.
df_combined = pd.concat(frame_list, ignore_index=True)
"""
.. _serp:
Import Search Engine Results Pages (SERPs) for Google and YouTube
=================================================================
"""
__all__ = ['SERP_GOOG_VALID_VALS', 'YOUTUBE_TOPIC_IDS',
'YOUTUBE_VID_CATEGORY_IDS', 'serp_goog', 'serp_youtube',
'set_logging_level', 'youtube_channel_details',
'youtube_video_details']
import datetime
import logging
from itertools import product
import pandas as pd
if int(pd.__version__[0]) >= 1:
from pandas import json_normalize
else:
from pandas.io.json import json_normalize
import requests
SERP_GOOG_LOG_FMT = ('%(asctime)s | %(levelname)s | %(filename)s:%(lineno)d '
'| %(funcName)s | %(message)s')
logging.basicConfig(format=SERP_GOOG_LOG_FMT)
##############################################################################
# Google variables
##############################################################################
SERP_GOOG_VALID_VALS = dict(
fileType={
'bas', 'c', 'cc', 'cpp', 'cs', 'cxx', 'doc', 'docx', 'dwf', 'gpx',
'h', 'hpp', 'htm', 'html', 'hwp', 'java', 'kml', 'kmz', 'odp', 'ods',
'odt', 'pdf', 'pl', 'ppt', 'pptx', 'ps', 'py', 'rtf', 'svg', 'swf',
'tex', 'text', 'txt', 'wap', 'wml', 'xls', 'xlsx', 'xml',
},
c2coff={0, 1},
cr={
'countryAF', 'countryAL', 'countryDZ', 'countryAS', 'countryAD',
'countryAO', 'countryAI', 'countryAQ', 'countryAG', 'countryAR',
'countryAM', 'countryAW', 'countryAU', 'countryAT', 'countryAZ',
'countryBS', 'countryBH', 'countryBD', 'countryBB', 'countryBY',
'countryBE', 'countryBZ', 'countryBJ', 'countryBM', 'countryBT',
'countryBO', 'countryBA', 'countryBW', 'countryBV', 'countryBR',
'countryIO', 'countryBN', 'countryBG', 'countryBF', 'countryBI',
'countryKH', 'countryCM', 'countryCA', 'countryCV', 'countryKY',
'countryCF', 'countryTD', 'countryCL', 'countryCN', 'countryCX',
'countryCC', 'countryCO', 'countryKM', 'countryCG', 'countryCD',
'countryCK', 'countryCR', 'countryCI', 'countryHR', 'countryCU',
'countryCY', 'countryCZ', 'countryDK', 'countryDJ', 'countryDM',
'countryDO', 'countryTP', 'countryEC', 'countryEG', 'countrySV',
'countryGQ', 'countryER', 'countryEE', 'countryET', 'countryEU',
'countryFK', 'countryFO', 'countryFJ', 'countryFI', 'countryFR',
'countryFX', 'countryGF', 'countryPF', 'countryTF', 'countryGA',
'countryGM', 'countryGE', 'countryDE', 'countryGH', 'countryGI',
'countryGR', 'countryGL', 'countryGD', 'countryGP', 'countryGU',
'countryGT', 'countryGN', 'countryGW', 'countryGY', 'countryHT',
'countryHM', 'countryVA', 'countryHN', 'countryHK', 'countryHU',
'countryIS', 'countryIN', 'countryID', 'countryIR', 'countryIQ',
'countryIE', 'countryIL', 'countryIT', 'countryJM', 'countryJP',
'countryJO', 'countryKZ', 'countryKE', 'countryKI', 'countryKP',
'countryKR', 'countryKW', 'countryKG', 'countryLA', 'countryLV',
'countryLB', 'countryLS', 'countryLR', 'countryLY', 'countryLI',
'countryLT', 'countryLU', 'countryMO', 'countryMK', 'countryMG',
'countryMW', 'countryMY', 'countryMV', 'countryML', 'countryMT',
'countryMH', 'countryMQ', 'countryMR', 'countryMU', 'countryYT',
'countryMX', 'countryFM', 'countryMD', 'countryMC', 'countryMN',
'countryMS', 'countryMA', 'countryMZ', 'countryMM', 'countryNA',
'countryNR', 'countryNP', 'countryNL', 'countryAN', 'countryNC',
'countryNZ', 'countryNI', 'countryNE', 'countryNG', 'countryNU',
'countryNF', 'countryMP', 'countryNO', 'countryOM', 'countryPK',
'countryPW', 'countryPS', 'countryPA', 'countryPG', 'countryPY',
'countryPE', 'countryPH', 'countryPN', 'countryPL', 'countryPT',
'countryPR', 'countryQA', 'countryRE', 'countryRO', 'countryRU',
'countryRW', 'countrySH', 'countryKN', 'countryLC', 'countryPM',
'countryVC', 'countryWS', 'countrySM', 'countryST', 'countrySA',
'countrySN', 'countryCS', 'countrySC', 'countrySL', 'countrySG',
'countrySK', 'countrySI', 'countrySB', 'countrySO', 'countryZA',
'countryGS', 'countryES', 'countryLK', 'countrySD', 'countrySR',
'countrySJ', 'countrySZ', 'countrySE', 'countryCH', 'countrySY',
'countryTW', 'countryTJ', 'countryTZ', 'countryTH', 'countryTG',
'countryTK', 'countryTO', 'countryTT', 'countryTN', 'countryTR',
'countryTM', 'countryTC', 'countryTV', 'countryUG', 'countryUA',
'countryAE', 'countryUK', 'countryUS', 'countryUM', 'countryUY',
'countryUZ', 'countryVU', 'countryVE', 'countryVN', 'countryVG',
'countryVI', 'countryWF', 'countryEH', 'countryYE', 'countryYU',
'countryZM', 'countryZW'
},
gl={
'ad', 'ae', 'af', 'ag', 'ai', 'al', 'am', 'an', 'ao', 'aq', 'ar',
'as', 'at', 'au', 'aw', 'az', 'ba', 'bb', 'bd', 'be', 'bf', 'bg',
'bh', 'bi', 'bj', 'bm', 'bn', 'bo', 'br', 'bs', 'bt', 'bv', 'bw',
'by', 'bz', 'ca', 'cc', 'cd', 'cf', 'cg', 'ch', 'ci', 'ck', 'cl',
'cm', 'cn', 'co', 'cr', 'cs', 'cu', 'cv', 'cx', 'cy', 'cz', 'de',
'dj', 'dk', 'dm', 'do', 'dz', 'ec', 'ee', 'eg', 'eh', 'er', 'es',
'et', 'fi', 'fj', 'fk', 'fm', 'fo', 'fr', 'ga', 'gd', 'ge', 'gf',
'gh', 'gi', 'gl', 'gm', 'gn', 'gp', 'gq', 'gr', 'gs', 'gt', 'gu',
'gw', 'gy', 'hk', 'hm', 'hn', 'hr', 'ht', 'hu', 'id', 'ie', 'il',
'in', 'io', 'iq', 'ir', 'is', 'it', 'jm', 'jo', 'jp', 'ke', 'kg',
'kh', 'ki', 'km', 'kn', 'kp', 'kr', 'kw', 'ky', 'kz', 'la', 'lb',
'lc', 'li', 'lk', 'lr', 'ls', 'lt', 'lu', 'lv', 'ly', 'ma', 'mc',
'md', 'mg', 'mh', 'mk', 'ml', 'mm', 'mn', 'mo', 'mp', 'mq', 'mr',
'ms', 'mt', 'mu', 'mv', 'mw', 'mx', 'my', 'mz', 'na', 'nc', 'ne',
'nf', 'ng', 'ni', 'nl', 'no', 'np', 'nr', 'nu', 'nz', 'om', 'pa',
'pe', 'pf', 'pg', 'ph', 'pk', 'pl', 'pm', 'pn', 'pr', 'ps', 'pt',
'pw', 'py', 'qa', 're', 'ro', 'ru', 'rw', 'sa', 'sb', 'sc', 'sd',
'se', 'sg', 'sh', 'si', 'sj', 'sk', 'sl', 'sm', 'sn', 'so', 'sr',
'st', 'sv', 'sy', 'sz', 'tc', 'td', 'tf', 'tg', 'th', 'tj', 'tk',
'tl', 'tm', 'tn', 'to', 'tr', 'tt', 'tv', 'tw', 'tz', 'ua', 'ug',
'uk', 'um', 'us', 'uy', 'uz', 'va', 'vc', 've', 'vg', 'vi', 'vn',
'vu', 'wf', 'ws', 'ye', 'yt', 'za', 'zm', 'zw',
},
filter={0, 1},
hl={
'af', 'sq', 'sm', 'ar', 'az', 'eu', 'be', 'bn', 'bh', 'bs', 'bg',
'ca', 'zh-CN', 'zh-TW', 'hr', 'cs', 'da', 'nl', 'en', 'eo', 'et',
'fo', 'fi', 'fr', 'fy', 'gl', 'ka', 'de', 'el', 'gu', 'iw', 'hi',
'hu', 'is', 'id', 'ia', 'ga', 'it', 'ja', 'jw', 'kn', 'ko', 'la',
'lv', 'lt', 'mk', 'ms', 'ml', 'mt', 'mr', 'ne', 'no', 'nn', 'oc',
'fa', 'pl', 'pt-BR', 'pt-PT', 'pa', 'ro', 'ru', 'gd', 'sr', 'si',
'sk', 'sl', 'es', 'su', 'sw', 'sv', 'tl', 'ta', 'te', 'th', 'ti',
'tr', 'uk', 'ur', 'uz', 'vi', 'cy', 'xh', 'zu'
},
imgColorType={
'color', 'gray', 'mono', 'trans'
},
imgDominantColor={
'black',
'blue',
'brown',
'gray',
'green',
'orange',
'pink',
'purple',
'red',
'teal',
'white',
'yellow',
},
imgSize={
'huge',
'icon',
'large',
'medium',
'small',
'xlarge',
'xxlarge',
},
imgType={
'clipart',
'face',
'lineart',
'stock',
'photo',
'animated'
},
lr={
'lang_ar', 'lang_bg', 'lang_ca', 'lang_zh-CN', 'lang_zh-TW',
'lang_hr', 'lang_cs', 'lang_da', 'lang_nl', 'lang_en', 'lang_et',
'lang_fi', 'lang_fr', 'lang_de', 'lang_el', 'lang_iw', 'lang_hu',
'lang_is', 'lang_id', 'lang_it', 'lang_ja', 'lang_ko', 'lang_lv',
'lang_lt', 'lang_no', 'lang_pl', 'lang_pt', 'lang_ro', 'lang_ru',
'lang_sr', 'lang_sk', 'lang_sl', 'lang_es', 'lang_sv', 'lang_tr',
},
num={1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
rights={
'cc_publicdomain', 'cc_attribute', 'cc_sharealike',
'cc_noncommercial', 'cc_nonderived'
},
safe={'active', 'off'},
searchType={None, 'image'},
siteSearchFilter={'e', 'i'},
start=range(1, 92)
)
##############################################################################
# YouTube variables
##############################################################################
YOUTUBE_TOPIC_IDS = {
'Entertainment topics': {'Entertainment (parent topic)': '/m/02jjt',
'Humor': '/m/09kqc',
'Movies': '/m/02vxn',
'Performing arts': '/m/05qjc',
'Professional wrestling': '/m/066wd',
'TV shows': '/m/0f2f9'},
'Gaming topics': {'Action game': '/m/025zzc',
'Action-adventure game': '/m/02ntfj',
'Casual game': '/m/0b1vjn',
'Gaming (parent topic)': '/m/0bzvm2',
'Music video game': '/m/02hygl',
'Puzzle video game': '/m/04q1x3q',
'Racing video game': '/m/01sjng',
'Role-playing video game': '/m/0403l3g',
'Simulation video game': '/m/021bp2',
'Sports game': '/m/022dc6',
'Strategy video game': '/m/03hf_rm'},
'Lifestyle topics': {'Fashion': '/m/032tl',
'Fitness': '/m/027x7n',
'Food': '/m/02wbm',
'Hobby': '/m/03glg',
'Lifestyle (parent topic)': '/m/019_rr',
'Pets': '/m/068hy',
'Physical attractiveness [Beauty]': '/m/041xxh',
'Technology': '/m/07c1v',
'Tourism': '/m/07bxq',
'Vehicles': '/m/07yv9'},
'Music topics': {'Christian music': '/m/02mscn',
'Classical music': '/m/0ggq0m',
'Country': '/m/01lyv',
'Electronic music': '/m/02lkt',
'Hip hop music': '/m/0glt670',
'Independent music': '/m/05rwpb',
'Jazz': '/m/03_d0',
'Music (parent topic)': '/m/04rlf',
'Music of Asia': '/m/028sqc',
'Music of Latin America': '/m/0g293',
'Pop music': '/m/064t9',
'Reggae': '/m/06cqb',
'Rhythm and blues': '/m/06j6l',
'Rock music': '/m/06by7',
'Soul music': '/m/0gywn'},
'Other topics': {'Knowledge': '/m/01k8wb'},
'Society topics': {'Business': '/m/09s1f',
'Health': '/m/0kt51',
'Military': '/m/01h6rj',
'Politics': '/m/05qt0',
'Religion': '/m/06bvp',
'Society (parent topic)': '/m/098wr'},
'Sports topics': {'American football': '/m/0jm_',
'Baseball': '/m/018jz',
'Basketball': '/m/018w8',
'Boxing': '/m/01cgz',
'Cricket': '/m/09xp_',
'Football': '/m/02vx4',
'Golf': '/m/037hz',
'Ice hockey': '/m/03tmr',
'Mixed martial arts': '/m/01h7lh',
'Motorsport': '/m/0410tth',
'Sports (parent topic)': '/m/06ntj',
'Tennis': '/m/07bs0',
'Volleyball': '/m/07_53'}
}
YOUTUBE_VID_CATEGORY_IDS = {
'Action/Adventure': '32',
'Anime/Animation': '31',
'Autos & Vehicles': '2',
'Classics': '33',
'Comedy': '34',
'Documentary': '35',
'Drama': '36',
'Education': '27',
'Entertainment': '24',
'Family': '37',
'Film & Animation': '1',
'Foreign': '38',
'Gaming': '20',
'Horror': '39',
'Howto & Style': '26',
'Movies': '30',
'Music': '10',
'News & Politics': '25',
'Nonprofits & Activism': '29',
'People & Blogs': '22',
'Pets & Animals': '15',
'Sci-Fi/Fantasy': '40',
'Science & Technology': '28',
'Short Movies': '18',
'Shorts': '42',
'Shows': '43',
'Sports': '17',
'Thriller': '41',
'Trailers': '44',
'Travel & Events': '19',
'Videoblogging': '21'
}
SERP_YTUBE_VALID_VALS = dict(
channelType={'any', 'show'},
eventType={'completed', 'live', 'upcoming'},
forContentOwner={True, False, 'true', 'false'},
forDeveloper={True, False, 'true', 'false'},
forMine={True, False, 'true', 'false'},
maxResults=range(51),
order={'date', 'rating', 'relevance', 'title',
'videoCount', 'viewCount'},
regionCode={
'ad', 'ae', 'af', 'ag', 'ai', 'al', 'am', 'an', 'ao', 'aq', 'ar',
'as', 'at', 'au', 'aw', 'az', 'ba', 'bb', 'bd', 'be', 'bf', 'bg',
'bh', 'bi', 'bj', 'bm', 'bn', 'bo', 'br', 'bs', 'bt', 'bv', 'bw',
'by', 'bz', 'ca', 'cc', 'cd', 'cf', 'cg', 'ch', 'ci', 'ck', 'cl',
'cm', 'cn', 'co', 'cr', 'cs', 'cu', 'cv', 'cx', 'cy', 'cz', 'de',
'dj', 'dk', 'dm', 'do', 'dz', 'ec', 'ee', 'eg', 'eh', 'er', 'es',
'et', 'fi', 'fj', 'fk', 'fm', 'fo', 'fr', 'ga', 'gd', 'ge', 'gf',
'gh', 'gi', 'gl', 'gm', 'gn', 'gp', 'gq', 'gr', 'gs', 'gt', 'gu',
'gw', 'gy', 'hk', 'hm', 'hn', 'hr', 'ht', 'hu', 'id', 'ie', 'il',
'in', 'io', 'iq', 'ir', 'is', 'it', 'jm', 'jo', 'jp', 'ke', 'kg',
'kh', 'ki', 'km', 'kn', 'kp', 'kr', 'kw', 'ky', 'kz', 'la', 'lb',
'lc', 'li', 'lk', 'lr', 'ls', 'lt', 'lu', 'lv', 'ly', 'ma', 'mc',
'md', 'mg', 'mh', 'mk', 'ml', 'mm', 'mn', 'mo', 'mp', 'mq', 'mr',
'ms', 'mt', 'mu', 'mv', 'mw', 'mx', 'my', 'mz', 'na', 'nc', 'ne',
'nf', 'ng', 'ni', 'nl', 'no', 'np', 'nr', 'nu', 'nz', 'om', 'pa',
'pe', 'pf', 'pg', 'ph', 'pk', 'pl', 'pm', 'pn', 'pr', 'ps', 'pt',
'pw', 'py', 'qa', 're', 'ro', 'ru', 'rw', 'sa', 'sb', 'sc', 'sd',
'se', 'sg', 'sh', 'si', 'sj', 'sk', 'sl', 'sm', 'sn', 'so', 'sr',
'st', 'sv', 'sy', 'sz', 'tc', 'td', 'tf', 'tg', 'th', 'tj', 'tk',
'tl', 'tm', 'tn', 'to', 'tr', 'tt', 'tv', 'tw', 'tz', 'ua', 'ug',
'uk', 'um', 'us', 'uy', 'uz', 'va', 'vc', 've', 'vg', 'vi', 'vn',
'vu', 'wf', 'ws', 'ye', 'yt', 'za', 'zm', 'zw',
},
relevanceLanguage={
'af', 'sq', 'sm', 'ar', 'az', 'eu', 'be', 'bn', 'bh', 'bs', 'bg',
'ca', 'zh-CN', 'zh-TW', 'zh-Hans', 'zh-Hant', 'hr', 'cs', 'da',
'nl', 'en', 'eo', 'et', 'fo', 'fi', 'fr', 'fy', 'gl', 'ka', 'de',
'el', 'gu', 'iw', 'hi', 'hu', 'is', 'id', 'ia', 'ga', 'it', 'ja',
'jw', 'kn', 'ko', 'la', 'lv', 'lt', 'mk', 'ms', 'ml', 'mt', 'mr',
'ne', 'no', 'nn', 'oc', 'fa', 'pl', 'pt-BR', 'pt-PT', 'pa', 'ro',
'ru', 'gd', 'sr', 'si', 'sk', 'sl', 'es', 'su', 'sw', 'sv', 'tl',
'ta', 'te', 'th', 'ti', 'tr', 'uk', 'ur', 'uz', 'vi', 'cy', 'xh',
'zu'
},
safeSearch={'moderate', 'none', 'strict'},
topicId={
'/m/04rlf', '/m/02mscn', '/m/0ggq0m', '/m/01lyv', '/m/02lkt',
'/m/0glt670', '/m/05rwpb', '/m/03_d0', '/m/028sqc', '/m/0g293',
'/m/064t9', '/m/06cqb', '/m/06j6l', '/m/06by7', '/m/0gywn',
'/m/0bzvm2', '/m/025zzc', '/m/02ntfj', '/m/0b1vjn', '/m/02hygl',
'/m/04q1x3q', '/m/01sjng', '/m/0403l3g', '/m/021bp2', '/m/022dc6',
'/m/03hf_rm', '/m/06ntj', '/m/0jm_', '/m/018jz', '/m/018w8',
'/m/01cgz', '/m/09xp_', '/m/02vx4', '/m/037hz', '/m/03tmr',
'/m/01h7lh', '/m/0410tth', '/m/07bs0', '/m/07_53', '/m/02jjt',
'/m/09kqc', '/m/02vxn', '/m/05qjc', '/m/066wd', '/m/0f2f9',
'/m/019_rr', '/m/032tl', '/m/027x7n', '/m/02wbm', '/m/03glg',
'/m/068hy', '/m/041xxh', '/m/07c1v', '/m/07bxq', '/m/07yv9',
'/m/098wr', '/m/09s1f', '/m/0kt51', '/m/01h6rj', '/m/05qt0',
'/m/06bvp', '/m/01k8wb'
},
type={'channel', 'playlist', 'video'},
videoCaption={'any', 'closedCaption', 'none'},
videoCategoryId={
'1', '2', '10', '15', '17', '18', '19', '20', '21', '22', '23',
'24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34',
'35', '36', '37', '38', '39', '40', '41', '42', '43', '44'
},
videoDefinition={'any', 'high', 'standard'},
videoDimension={'2d', '3d', 'any'},
videoDuration={'any', 'long', 'medium', 'short'},
videoEmbeddable={'any', True, 'true'},
videoLicense={'any', 'creativeCommon', 'youtube'},
videoSyndicated={'any', True, 'true'},
videoType={'any', 'episode', 'movie'},
)
def _split_by_comma(s, length=50):
"""Group a comma-separated string into a list of at-most
``length``-length words each."""
str_split = s.split(',')
str_list = []
for i in range(0, len(str_split) + length, length):
temp_str = ','.join(str_split[i:i+length])
if temp_str:
str_list.append(temp_str)
return str_list
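# Illustrative example: _split_by_comma('a,b,c,d,e', length=2)
# returns ['a,b', 'c,d', 'e'], i.e. groups of at most two values each.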
def youtube_video_details(key, vid_ids):
"""Return details of videos for which the ids are given.
Assumes ``ids`` is a comma-separated list of video ids with
no spaces."""
base_url = ('https://www.googleapis.com/youtube/v3/videos?part='
'contentDetails,id,liveStreamingDetails,localizations,player,'
'recordingDetails,snippet,statistics,status,topicDetails')
vid_ids = _split_by_comma(vid_ids, length=50)
final_df = pd.DataFrame()
for vid_id in vid_ids:
params = {'id': vid_id, 'key': key}
logging.info(msg='Requesting: ' + 'video details')
video_resp = requests.get(base_url, params=params)
if video_resp.status_code >= 400:
raise Exception(video_resp.json())
items_df = pd.DataFrame(video_resp.json()['items'])
details = ['snippet', 'topicDetails', 'statistics',
'status', 'contentDetails']
detail_df = pd.DataFrame()
for detail in details:
try:
detail_df = pd.concat([
detail_df,
pd.DataFrame([x[detail] for x in
video_resp.json()['items']])
], axis=1)
except KeyError:
continue
temp_df = pd.concat([items_df, detail_df], axis=1)
final_df = final_df.append(temp_df, sort=False, ignore_index=True)
return final_df
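# Illustrative usage (a sketch; 'YOUR_KEY' and the video ids are placeholders):
# vid_df = youtube_video_details(key='YOUR_KEY', vid_ids='id_1,id_2,id_3')
# Each returned row combines the top-level item fields with the expanded
# 'snippet', 'topicDetails', 'statistics', 'status' and 'contentDetails' objects.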
def youtube_channel_details(key, channel_ids):
"""Return details of channels for which the ids are given.
Assumes ``ids`` is a comma-separated list of channel ids with
no spaces."""
base_url = ('https://www.googleapis.com/youtube/v3/channels?part='
'snippet,contentDetails,statistics')
channel_ids = _split_by_comma(channel_ids, length=50)
final_df = pd.DataFrame()
for channel_id in channel_ids:
params = {'id': channel_id, 'key': key}
logging.info(msg='Requesting: ' + 'channel details')
channel_resp = requests.get(base_url, params=params)
if channel_resp.status_code >= 400:
raise Exception(channel_resp.json())
items_df = pd.DataFrame(channel_resp.json()['items'])
details = ['snippet', 'statistics', 'contentDetails']
detail_df = pd.DataFrame()
for detail in details:
try:
detail_df = pd.concat([
detail_df,
pd.DataFrame([x[detail] for x in
channel_resp.json()['items']])
], axis=1)
except KeyError:
continue
temp_df = pd.concat([items_df, detail_df], axis=1)
final_df = final_df.append(temp_df, sort=False, ignore_index=True)
return final_df
def _dict_product(d):
"""Return the product of all values of a dict, while
coupling each value with its key.
This is used to generate multiple queries out of
possibly multiple arguments in serp_goog.
>>> d = {'a': [1], 'b': [2, 3, 4], 'c': [5, 6]}
    >>> _dict_product(d)
    [{'a': 1, 'b': 2, 'c': 5},
     {'a': 1, 'b': 2, 'c': 6},
     {'a': 1, 'b': 3, 'c': 5},
     {'a': 1, 'b': 3, 'c': 6},
     {'a': 1, 'b': 4, 'c': 5},
     {'a': 1, 'b': 4, 'c': 6}]
"""
items = list(d.items())
keys = [x[0] for x in items]
values = [x[1] for x in items]
dicts = []
for prod in product(*values):
tempdict = dict(zip(keys, prod))
dicts.append(tempdict)
return dicts
def serp_goog(q, cx, key, c2coff=None, cr=None,
dateRestrict=None, exactTerms=None, excludeTerms=None,
fileType=None, filter=None, gl=None, highRange=None,
hl=None, hq=None, imgColorType=None, imgDominantColor=None,
imgSize=None, imgType=None, linkSite=None, lowRange=None,
lr=None, num=None, orTerms=None, relatedSite=None,
rights=None, safe=None, searchType=None, siteSearch=None,
siteSearchFilter=None, sort=None, start=None):
"""Query Google and get search results in a DataFrame.
For each parameter, you can supply single or multiple values / arguments.
If you pass multiple arguments, all the possible combinations of
arguments (the product) will be requested, and you will get one
DataFrame combining all queries. See examples below.
:param q: The search expression.
:param cx: The custom search engine ID to use for this
request.
:param key: The API key of your custom search engine.
:param c2coff: Enables or disables Simplified and
Traditional Chinese Search. The default value for this
parameter is 0 (zero), meaning that the feature is enabled.
        Supported values are: 1: Disabled, 0: Enabled (default).
:param cr: Restricts search results to documents
originating in a particular country. You may use Boolean
        operators in the cr parameter's value. Google Search
        determines the country of a document by analyzing the top-level
        domain (TLD) of the document's URL and the geographic location
        of the Web server's IP address. See the Country Parameter
        Values page for a list of valid values for this parameter.
:param dateRestrict: Restricts results to URLs based on
        date. Supported values include:
- d[number]: requests results from the specified number of past days.
- w[number]: requests results from the specified number of past weeks.
- m[number]: requests results from the specified number of past months.
- y[number]: requests results from the specified number of past years.
:param exactTerms: Identifies a phrase that all
documents in the search results must contain.
:param excludeTerms: Identifies a word or phrase that
should not appear in any documents in the search results.
:param fileType: Restricts results to files of a
specified extension. A list of file types indexable by
Google can be found in Search Console Help Center.
:param filter: Controls turning on or off the duplicate
        content filter. See Automatic Filtering for more information
        about Google's search results filters. Note that host
        crowding filtering applies only to multi-site searches. By
        default, Google applies filtering to all search results to
improve the quality of those results. Acceptable values
are: "0": Turns off duplicate content filter. "1": Turns
on duplicate content filter.
:param gl: Geolocation of end user. The gl parameter
value is a two-letter country code. The gl parameter boosts
search results whose country of origin matches the parameter
value. See the Country Codes page for a list of valid
        values. Specifying a gl parameter value should lead to more
        relevant results. This is particularly true for
        international customers and, even more specifically, for
        customers in English-speaking countries other than the
United States.
:param highRange: Specifies the ending value for a
        search range. Use lowRange and highRange to append an
inclusive search range of lowRange...highRange to the query.
:param hl: Sets the user interface language. Explicitly
setting this parameter improves the performance and the
        quality of your search results. See the Interface
Languages section of Internationalizing Queries and Results
Presentation for more information, and Supported Interface
Languages for a list of supported languages.
:param hq: Appends the specified query terms to the
query, as if they were combined with a logical AND operator.
:param imgColorType: Returns black and white, grayscale,
or color images: mono, gray, and color. Acceptable values
are: "color": color "gray": gray "mono": mono
:param imgDominantColor: Returns images of a specific
dominant color. Acceptable values are: "black": black
"blue": blue "brown": brown "gray": gray "green": green
"orange": orange "pink": pink "purple": purple "red": red
"teal": teal "white": white "yellow": yellow
:param imgSize: Returns images of a specified size.
Acceptable values are: "huge": huge "icon": icon "large":
large "medium": medium "small": small "xlarge": xlarge
"xxlarge": xxlarge
:param imgType: Returns images of a type. Acceptable
values are: "clipart": clipart "face": face "lineart":
lineart "news": news "photo": photo
:param linkSite: Specifies that all search results
should contain a link to a particular URL
:param lowRange: Specifies the starting value for a
search range. Use lowRange and highRange to append an
inclusive search range of lowRange...highRange to the query.
:param lr: Restricts the search to documents written in
a particular language (e.g., lr=lang_ja). Acceptable values
are: "lang_ar": Arabic "lang_bg": Bulgarian "lang_ca":
Catalan "lang_cs": Czech "lang_da": Danish "lang_de":
German "lang_el": Greek "lang_en": English "lang_es":
Spanish "lang_et": Estonian "lang_fi": Finnish "lang_fr":
French "lang_hr": Croatian "lang_hu": Hungarian
"lang_id": Indonesian "lang_is": Icelandic "lang_it":
Italian "lang_iw": Hebrew "lang_ja": Japanese "lang_ko":
Korean "lang_lt": Lithuanian "lang_lv": Latvian
"lang_nl": Dutch "lang_no": Norwegian "lang_pl": Polish
"lang_pt": Portuguese "lang_ro": Romanian "lang_ru":
Russian "lang_sk": Slovak "lang_sl": Slovenian "lang_sr":
Serbian "lang_sv": Swedish "lang_tr": Turkish "lang_zh-
CN": Chinese (Simplified) "lang_zh-TW": Chinese
(Traditional)
    :param num: Number of search results to return. Valid
values are integers between 1 and 10, inclusive.
:param orTerms: Provides additional search terms to
check for in a document, where each document in the search
results must contain at least one of the additional search
terms.
:param relatedSite: Specifies that all search results
should be pages that are related to the specified URL.
:param rights: Filters based on licensing. Supported
values include: cc_publicdomain, cc_attribute,
cc_sharealike, cc_noncommercial, cc_nonderived, and
combinations of these.
:param safe: Search safety level. Acceptable values
are: "active": Enables SafeSearch filtering. "off":
Disables SafeSearch filtering. (default)
:param searchType: Specifies the search type: image. If
unspecified, results are limited to webpages. Acceptable
values are: "image": custom image search.
:param siteSearch: Specifies all search results should
be pages from a given site.
:param siteSearchFilter: Controls whether to include or
exclude results from the site named in the siteSearch
parameter. Acceptable values are: "e": exclude "i":
include
:param sort: The sort expression to apply to the
results.
:param start: The index of the first result to
        return. Valid values are integers starting at 1 (default); the
        second result is 2, and so forth. For example &start=11 gives
        the second page of results with the default "num" value of
        10 results per page. Note: No more than 100 results will ever
be returned for any query with JSON API, even if more than
100 documents match the query, so setting (start + num) to
more than 100 will produce an error. Note that the maximum
value for num is 10.
The following function call will produce two queries:
"hotel" in the USA, and "hotel" in France
>>> serp_goog(q='hotel', gl=['us', 'fr'], cx='YOUR_CX', key='YOUR_KEY')
    The function call below will produce four queries and make four requests:
    "flights" in the UK
    "flights" in Australia
    "tickets" in the UK
    "tickets" in Australia
'cr' here refers to 'country restrict', which focuses on content
originating from the specified country.
>>> serp_goog(q=['flights', 'tickets'], cr=['countryUK', 'countryAU'],
cx='YOUR_CX', key='YOUR_KEY')
"""
params = locals()
supplied_params = {k: v for k, v in params.items() if params[k] is not None}
for p in supplied_params:
if isinstance(supplied_params[p], (str, int)):
supplied_params[p] = [supplied_params[p]]
for p in supplied_params:
if p in SERP_GOOG_VALID_VALS:
if not set(supplied_params[p]).issubset(SERP_GOOG_VALID_VALS[p]):
raise ValueError('Please make sure you provide a'
' valid value for "{}", valid values:\n'
'{}'.format(p,
sorted(SERP_GOOG_VALID_VALS[p])))
params_list = _dict_product(supplied_params)
base_url = 'https://www.googleapis.com/customsearch/v1?'
specified_cols = ['searchTerms', 'rank', 'title', 'snippet',
'displayLink', 'link', 'queryTime', 'totalResults']
responses = []
for param in params_list:
param_log = ', '.join([k + '=' + str(v) for k, v in param.items()])
logging.info(msg='Requesting: ' + param_log)
resp = requests.get(base_url, params=param)
if resp.status_code >= 400:
raise Exception(resp.json())
responses.append(resp)
result_df = pd.DataFrame()
for i, resp in enumerate(responses):
request_metadata = resp.json()['queries']['request'][0]
del request_metadata['title']
search_info = resp.json()['searchInformation']
if int(search_info['totalResults']) == 0:
df = pd.DataFrame(columns=specified_cols, index=range(1))
df['searchTerms'] = request_metadata['searchTerms']
# These keys don't appear in the response so they have to be
# added manually
for missing in ['lr', 'num', 'start', 'c2coff']:
if missing in params_list[i]:
df[missing] = params_list[i][missing]
else:
df = pd.DataFrame(resp.json()['items'])
df['cseName'] = resp.json()['context']['title']
start_idx = request_metadata['startIndex']
df['rank'] = range(start_idx, start_idx + len(df))
for missing in ['lr', 'num', 'start', 'c2coff']:
if missing in params_list[i]:
df[missing] = params_list[i][missing]
meta_columns = {**request_metadata, **search_info}
df = df.assign(**meta_columns)
df['queryTime'] = datetime.datetime.now(tz=datetime.timezone.utc)
df['queryTime'] = pd.to_datetime(df['queryTime'])
if 'image' in df:
img_df = json_normalize(df['image'])
img_df.columns = ['image.' + c for c in img_df.columns]
df = pd.concat([df, img_df], axis=1)
result_df = result_df.append(df, sort=False, ignore_index=True)
ordered_cols = (list(set(params_list[i]).difference({'q', 'key', 'cx'})) +
specified_cols)
non_ordered = result_df.columns.difference(set(ordered_cols))
final_df = result_df[ordered_cols + list(non_ordered)]
if 'pagemap' in final_df:
pagemap_df = pd.DataFrame()
for p in final_df['pagemap']:
try:
temp_pagemap_df = json_normalize(p)
pagemap_df = pagemap_df.append(temp_pagemap_df, sort=False)
except Exception as e:
temp_pagemap_df = pd.DataFrame({'delete_me': None},
index=range(1))
pagemap_df = pagemap_df.append(temp_pagemap_df, sort=False)
pagemap_df = pagemap_df.reset_index(drop=True)
if 'delete_me' in pagemap_df:
del pagemap_df['delete_me']
for col in pagemap_df:
if col in final_df:
pagemap_df = pagemap_df.rename(columns={col: 'pagemap_' + col})
final_df = pd.concat([final_df, pagemap_df], axis=1)
if 'metatags' in pagemap_df:
metatag_df = pd.DataFrame()
for m in pagemap_df['metatags']:
try:
temp_metatags_df = json_normalize(m)
metatag_df = metatag_df.append(temp_metatags_df,
sort=False)
except Exception as e:
temp_metatags_df = pd.DataFrame({'delete_me': None},
index=range(1))
metatag_df = metatag_df.append(temp_metatags_df,
sort=False)
metatag_df = metatag_df.reset_index(drop=True)
if 'delete_me' in metatag_df:
del metatag_df['delete_me']
for col in metatag_df:
if col in final_df:
metatag_df = metatag_df.rename(columns={col: 'metatag_' + col})
final_df = pd.concat([final_df, metatag_df], axis=1)
return final_df
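# Illustrative usage (a sketch; 'YOUR_CX' and 'YOUR_KEY' are placeholders):
# goog_df = serp_goog(q='hotel', gl=['us', 'fr'], cx='YOUR_CX', key='YOUR_KEY')
# goog_df[['searchTerms', 'gl', 'rank', 'title', 'link']]
# Each row is one search result; 'rank' is its position within its own query,
# and the supplied parameters (here 'gl') appear as leading columns.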
def serp_youtube(key, q=None, channelId=None, channelType=None, eventType=None,
forContentOwner=None, forDeveloper=None, forMine=None,
location=None, locationRadius=None, maxResults=None,
onBehalfOfContentOwner=None, order=None, pageToken=None,
publishedAfter=None, publishedBefore=None, regionCode=None,
relatedToVideoId=None, relevanceLanguage=None,
safeSearch=None, topicId=None, type=None, videoCaption=None,
videoCategoryId=None, videoDefinition=None,
videoDimension=None, videoDuration=None, videoEmbeddable=None,
videoLicense=None, videoSyndicated=None, videoType=None):
"""Query the YouTube API and get search results in a DataFrame.
For each parameter you can supply a single or multiple value(s).
Looping and merging results is handled automatically in case of multiple
values.
:param q: (string) The ``q`` parameter specifies the query term to
search for. Your request can also use the Boolean NOT (-) and OR (|)
operators to exclude videos or to find videos that are associated with
one of several search terms. For example, to search for videos
matching either "boating" or "sailing", set the ``q`` parameter value
to boating|sailing. Similarly, to search for videos matching either
"boating" or "sailing" but not "fishing", set the q parameter value to
        boating|sailing -fishing. Note that the pipe character must be
        URL-escaped when it is sent in your API request. The URL-escaped value for
the pipe character is %7C.
:param channelId: (string) The ``channelId`` parameter
indicates that the API response should only contain resources
created by the channel. Note: Search results are constrained
to a maximum of 500 videos if your request specifies a value
for the ``channelId`` parameter and sets the ``type`` parameter value
to video, but it does not also set one of the ``forContentOwner``,
``forDeveloper``, or ``forMine`` filters.
:param channelType: (string) The ``channelType`` parameter
lets you restrict a search to a particular type of
channel. Acceptable values are:
any – Return all channels.
show – Only retrieve shows.
:param eventType: (string) The ``eventType`` parameter
restricts a search to broadcast events. If you specify a value
for this parameter, you must also set the type parameter's
value to video. Acceptable values are:
completed – Only include completed broadcasts.
live – Only include active broadcasts.
upcoming – Only include upcoming broadcasts.
:param forContentOwner: (boolean) This parameter can
only be used in a properly authorized request, and it is
intended exclusively for YouTube content partners. The
``forContentOwner`` parameter restricts the search to only
retrieve videos owned by the content owner identified by
the ``onBehalfOfContentOwner`` parameter. If ``forContentOwner``
is set to true, the request must also meet these
        requirements: the ``onBehalfOfContentOwner`` parameter is
        required; the user authorizing the request must be using
        an account linked to the specified content owner; the
        ``type`` parameter value must be set to video. None of the
following other parameters can be set: ``videoDefinition``,
``videoDimension``, ``videoDuration``, ``videoLicense``,
``videoEmbeddable``, ``videoSyndicated``, ``videoType``.
:param forDeveloper: (boolean) This parameter can only
be used in a properly authorized request. The ``forDeveloper``
parameter restricts the search to only retrieve videos
uploaded via the developer's application or website. The
API server uses the request's authorization credentials to
identify the developer. The ``forDeveloper`` parameter can be
used in conjunction with optional search parameters like
the ``q`` parameter. For this feature, each uploaded video is
automatically tagged with the project number that is
associated with the developer's application in the Google
Developers Console. When a search request subsequently sets
the ``forDeveloper`` parameter to ``true`` the API server uses the
request's authorization credentials to identify the
developer. Therefore, a developer can restrict results to
videos uploaded through the developer's own app or website
but not to videos uploaded through other apps or sites.
:param forMine: (boolean) This parameter can only be used in
a properly authorized request. The ``forMine`` parameter restricts
the search to only retrieve videos owned by the authenticated
user. If you set this parameter to ``true``, then the ``type``
parameter's value must also be set to ``video``. In addition, none
of the following other parameters can be set in the same
request: ``videoDefinition``, ``videoDimension``, ``videoDuration``,
``videoLicense``, ``videoEmbeddable``, ``videoSyndicated``,
``videoType``.
:param relatedToVideoId: (string) The
``relatedToVideoId`` parameter retrieves a list of videos
that are related to the video that the parameter ``value``
identifies. The parameter ``value`` must be set to a
YouTube video ID and, if you are using this parameter,
the ``type`` parameter must be set to video.Note that if
the ``relatedToVideoId`` parameter is set, the only other
supported parameters are ``part``, ``maxResults``, ``pageToken``,
``regionCode``, ``relevanceLanguage``, ``safeSearch``, ``type`` (which
must be set to video), and ``fields``.
:param location: (string) The ``location`` parameter, in
conjunction with the ``locationRadius`` parameter, defines a
circular geographic area and also restricts a search to videos
that specify, in their metadata, a geographic location that
falls within that area. The parameter value is a string that
specifies latitude/longitude coordinates e.g.
        (37.42307,-122.08427). The location parameter value identifies
the point at the center of the area. The ``locationRadius``
parameter specifies the maximum distance that the location
associated with a video can be from that point for the video to
still be included in the search results. The API returns an
error if your request specifies a value for the ``location``
parameter but does not also specify a value for the
``locationRadius`` parameter.
:param locationRadius: (string) The ``locationRadius``
parameter, in conjunction with the ``location`` parameter,
defines a circular geographic area. The parameter value
must be a floating point number followed by a measurement
unit. Valid measurement units are m, km, ft, and mi. For
example, valid parameter values include 1500m, 5km,
10000ft, and 0.75mi. The API does not support
``locationRadius`` parameter values larger than 1000
kilometers. Note: See the definition of the ``location``
parameter for more information.
:param maxResults: (unsigned integer) The ``maxResults``
parameter specifies the maximum number of items that should
be returned in the result set. Acceptable values are 0 to 50,
inclusive. The default value is 5.
:param onBehalfOfContentOwner: (string) This
parameter can only be used in a properly
authorized request. Note: This parameter is
intended exclusively for YouTube content
        partners. The ``onBehalfOfContentOwner`` parameter
indicates that the request's authorization
credentials identify a YouTube CMS user who is
acting on behalf of the content owner specified
in the parameter value. This parameter is
intended for YouTube content partners that own
and manage many different YouTube channels. It
allows content owners to authenticate once and
get access to all their video and channel data,
without having to provide authentication
credentials for each individual channel. The CMS
account that the user authenticates with must be
linked to the specified YouTube content owner.
:param order: (string) The order parameter specifies the
method that will be used to order resources in the API response.
The default value is relevance. Acceptable values are:
date – Resources are sorted in reverse chronological order based on the
date they were created.
rating – Resources are sorted from highest to lowest rating.
relevance – Resources are sorted based on their relevance to the search
query. This is the default value for this parameter.
title – Resources are sorted alphabetically by title.
videoCount – Channels are sorted in descending order of their number of
uploaded videos.
viewCount – Resources sorted from highest to lowest number of views.
For live broadcasts, videos are sorted by number of concurrent viewers
while the broadcasts are ongoing.
:param pageToken: (string) The ``pageToken`` parameter
identifies a specific page in the result set that should be
returned. In an API response, the ``nextPageToken`` and
``prevPageToken`` properties identify other pages that could be
retrieved.
:param publishedAfter: (datetime) The ``publishedAfter``
parameter indicates that the API response should only
contain resources created at or after the specified time.
The value is an RFC 3339 formatted date-time value
(1970-01-01T00:00:00Z).
:param publishedBefore: (datetime) The
``publishedBefore`` parameter indicates that the API
response should only contain resources created before or
at the specified time. The value is an RFC 3339
formatted date-time value (1970-01-01T00:00:00Z).
:param regionCode: (string) The ``regionCode`` parameter
instructs the API to return search results for videos that
can be viewed in the specified country. The parameter value
is an ISO 3166-1 alpha-2 country code.
:param relevanceLanguage: (string) The
``relevanceLanguage`` parameter instructs the API to
return search results that are most relevant to the
specified language. The parameter value is typically
an ISO 639-1 two-letter language code. However, you
should use the values zh-Hans for simplified Chinese
and zh-Hant for traditional Chinese. Please note that
results in other languages will still be returned if
they are highly relevant to the search query term.
:param safeSearch: (string) The ``safeSearch`` parameter
indicates whether the search results should include
restricted content as well as standard content. Acceptable
values are:
moderate – YouTube will filter some content from search results and,
at the least, will filter content that is restricted in your locale.
Based on their content, search results could be removed from search
results or demoted in search results. This is the default parameter
value.
none – YouTube will not filter the search result set.
strict – YouTube will try to exclude all restricted content from the
search result set.
Based on their content, search results
could be removed from search results or demoted in search
results.
:param topicId: (string) The ``topicId`` parameter indicates
that the API response should only contain resources associated
with the specified topic. The value identifies a Freebase topic
ID.
:param type: (string) The ``type`` parameter restricts a search
query to only retrieve a particular type of resource. The value is
a comma-separated list of resource types. The default value is
video,channel,playlist. Acceptable values are: channel, playlist, and
video
:param videoCaption: (string) The ``videoCaption``
parameter indicates whether the API should filter video
search results based on whether they have captions. If you
specify a value for this parameter, you must also set the
``type`` parameter's value to video. Acceptable values are:
any – Do not filter results based on caption availability.
closedCaption – Only include videos that have captions.
none – Only include videos that do not have captions.
:param videoCategoryId: (string) The ``videoCategoryId``
parameter filters video search results based on their
category. If you specify a value for this parameter, you
must also set the ``type`` parameter's value to video.
:param videoDefinition: (string) The ``videoDefinition``
parameter lets you restrict a search to only include
either high definition (HD) or standard definition (SD)
videos. HD videos are available for playback in at least
720p, though higher resolutions, like 1080p, might also
be available. If you specify a value for this parameter,
you must also set the ``type`` parameter's value to
video. Acceptable values are:
any – Return all videos, regardless of their resolution.
high – Only retrieve HD videos.
standard – Only retrieve videos in standard definition.
:param videoDimension: (string) The ``videoDimension``
parameter lets you restrict a search to only retrieve 2D
or 3D videos. If you specify a value for this parameter,
you must also set the ``type`` parameter's value to
video. Acceptable values are:
2d – Restrict search results to exclude 3D videos.
3d – Restrict search results to only include 3D videos.
any – Include both 3D and non-3D videos in returned results.
This is the default value.
:param videoDuration: (string) The ``videoDuration``
parameter filters video search results based on their
duration. If you specify a value for this parameter, you
must also set the ``type`` parameter's value to
video. Acceptable values are:
any – Do not filter video search results based on their duration.
This is the default value.
long – Only include videos longer than 20 minutes.
medium – Only include videos that are between four and 20 minutes
long (inclusive).
short – Only include videos that are less than four minutes long.
:param videoEmbeddable: (string) The ``videoEmbeddable``
        parameter lets you restrict a search to only videos
that can be embedded into a webpage. If you specify a
value for this parameter, you must also set the ``type``
parameter's value to video. Acceptable values are:
any – Return all videos, embeddable or not.
true – Only retrieve embeddable videos.
:param videoLicense: (string) The ``videoLicense``
parameter filters search results to only include videos
with a particular license. YouTube lets video uploaders
choose to attach either the Creative Commons license or the
standard YouTube license to each of their videos. If you
specify a value for this parameter, you must also set the
``type`` parameter's value to video. Acceptable values are:
any – Return all videos, regardless of which license they have,
that match the query parameters.
creativeCommon – Only return videos that have a Creative Commons
license.
Users can reuse videos with this license in other videos that they
create.
youtube – Only return videos that have the standard YouTube license.
:param videoSyndicated: (string) The ``videoSyndicated``
        parameter lets you restrict a search to only videos
that can be played outside youtube.com. If you specify a
value for this parameter, you must also set the ``type``
parameter's value to video. Acceptable values are:
any – Return all videos, syndicated or not.
true – Only retrieve syndicated videos.
:param videoType: (string) The ``videoType`` parameter lets
you restrict a search to a particular type of videos. If you
specify a value for this parameter, you must also set the ``type``
parameter's value to video. Acceptable values are:
any – Return all videos.
episode – Only retrieve episodes of shows.
movie – Only retrieve movies.
"""
params = locals()
supplied_params = {k: v for k, v in params.items() if params[k]}
type_vid_params = {'eventType', 'relatedToVideoId', 'videoCaption',
'videoCategoryId', 'videoDefinition', 'videoDimension',
'videoDuration', 'videoEmbeddable', 'videoLicense',
'videoSyndicated', 'videoType', 'forMine',
'forContentOwner'}
if (supplied_params.get('type') != 'video' and
type_vid_params.intersection(set(supplied_params.keys()))):
raise ValueError('You need to set type="video" if you want to set'
' any of the following:' + str(type_vid_params))
for p in supplied_params:
if isinstance(supplied_params[p], (str, int)):
supplied_params[p] = [supplied_params[p]]
for p in supplied_params:
if p in SERP_YTUBE_VALID_VALS:
if not set(supplied_params[p]).issubset(SERP_YTUBE_VALID_VALS[p]):
raise ValueError('Please make sure you provide a'
' valid value for "{}", valid values:\n{}'
.format(p,
sorted([str(x) for x in
SERP_YTUBE_VALID_VALS[p]])))
params_list = _dict_product(supplied_params)
base_url = "https://www.googleapis.com/youtube/v3/search?part=snippet"
responses = []
for param in params_list:
param_log = ', '.join([k + '=' + str(v) for k, v in param.items()])
logging.info(msg='Requesting: ' + param_log)
resp = requests.get(base_url, params=param)
if resp.status_code >= 400:
raise Exception(resp.json())
responses.append(resp)
result_df = pd.DataFrame()
for i, resp in enumerate(responses):
snippet_df = pd.DataFrame([x['snippet'] for x in resp.json()['items']])
id_df = pd.DataFrame([x['id'] for x in resp.json()['items']])
if 'channelId' in id_df:
id_df = id_df.drop('channelId', axis=1)
if 'thumbnails' in snippet_df:
thumb_df = json_normalize(snippet_df['thumbnails'])
else:
thumb_df = pd.DataFrame()
page_info = resp.json()['pageInfo']
temp_df = pd.concat([snippet_df, id_df, thumb_df],
axis=1).assign(**page_info)
temp_df['rank'] = range(1, len(temp_df)+1)
if len(temp_df) == 0:
empty_df_cols = ['title', 'description', 'publishedAt',
'channelTitle', 'kind', 'videoId', 'channelId']
temp_df = temp_df.assign(q=[params_list[i]['q']])
temp_df = temp_df.assign(**dict.fromkeys(empty_df_cols))
temp_df = temp_df.assign(**page_info)
del params_list[i]['key']
temp_df = temp_df.assign(**params_list[i])
temp_df['nextPageToken'] = resp.json().get('nextPageToken')
result_df = result_df.append(temp_df, sort=False,
ignore_index=True)
result_df['queryTime'] = datetime.datetime.now(tz=datetime.timezone.utc)
result_df['queryTime'] = | pd.to_datetime(result_df['queryTime']) | pandas.to_datetime |
import logging
import re
import os
import yaml
import pandas as pd
from fddc.regex import parse_regex
from openpyxl import Workbook
from openpyxl.utils.dataframe import dataframe_to_rows
from openpyxl.utils.cell import get_column_letter
logger = logging.getLogger('spreadsheetcleaner')
# Functions to categorise data
def make_category(config):
def categorize(series):
for c in config:
if series == c['code']:
return c['code']
elif c['code'] in str(series):
return c['code']
elif c['name'] in str(series):
return c['code']
for r in c.get('regex',[]):
p = parse_regex(r)
if p.match(str(series)) is not None:
return c['code']
return 'not matched'
return categorize
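# Illustrative example (hypothetical config; assumes parse_regex compiles the
# pattern so that match() anchors at the start of the string):
# config = [{'code': 'A1', 'name': 'Apple', 'regex': ['app.*']},
#           {'code': 'B2', 'name': 'Banana'}]
# categorize = make_category(config)
# categorize('Apple pie')  -> 'A1' (matched on the name)
# categorize('grape')      -> 'not matched'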
def make_map(original, new):
'''
    Creates a dict mapping each original value to its corresponding new value.
'''
values = dict(zip(original, new))
return values
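# Illustrative example: make_map(['a', 'b'], [1, 2]) returns {'a': 1, 'b': 2}.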
# Function to create data validation object
def validation_from_list(validation_range):
'''Creates a data validation object based on validation_range (cell range to point to).
Requires import of DataValidation from openpyxl.worksheet.datavalidation '''
from openpyxl.worksheet.datavalidation import DataValidation
validation = DataValidation(type='list', formula1=validation_range)
validation.error = 'Your entry is not in the list'
validation.errorTitle = 'Invalid Entry'
validation.prompt = 'Please select from the list'
validation.promptTitle = 'List Selection'
return validation
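# Illustrative usage (hypothetical cell range; 'References' matches the sheet
# created in clean() below):
# dv = validation_from_list("'References'!$A$2:$A$10")
# ws.add_data_validation(dv)
# dv.add('B2:B100')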
# Main function to go through spreadsheet and replace data
def clean(input_file, output_file, matching_report_file, data_config, **args):
    '''Replaces values in the spreadsheet with standardized values following the rules in data_config.
    Saves the clean spreadsheet to output_file and the matching report to matching_report_file.'''
    # Set up two workbooks so the clean spreadsheet and the matching report are written to separate files
writer_clean = pd.ExcelWriter(output_file, engine='xlsxwriter') # create object to store clean data
wb = Workbook() # create object to store matching report
ws_ref = wb.active
ws_ref.title = "References" # This sheet will hold the validation references
reference_count = 0 #keep track of the columns filled with validation references
references = pd.DataFrame()
# Run through sheets within spreadsheet (matching items in data_config)
for item in data_config:
data = pd.read_excel(input_file, sheet_name=item)
df = data.copy()
settings = data_config[item]
# Create new sheet in matching report
ws = wb.create_sheet(item)
# Create header
ws['A1'] = 'column'
ws['B1'] = 'former_value'
ws['C1'] = 'new_value'
for cell in ws['A'] + ws[1]:
cell.style = 'Pandas'
# Run through config criteria for that sheet
for col in settings:
if df[df[col].notnull()].shape[0] > 0: # Only look at non-empty columns
# Map current value to match value
categorize = make_category(settings[col])
df[col] = df[col].str.strip()
unique = df[col].unique()
clean_series = | pd.Series(unique) | pandas.Series |
import logging
import requests
import pandas as pd
from openbb_terminal import config_terminal as cfg
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
from openbb_terminal.cryptocurrency.discovery.pycoingecko_model import read_file_data
logger = logging.getLogger(__name__)
def get_slug(coin: str) -> str:
"""
    Load the Santiment slug mapping and return the slug corresponding to the given coin ticker
"""
df = pd.DataFrame(read_file_data("santiment_slugs.json"))
slug = df.loc[df["ticker"] == coin.upper()]["slug"].values[0]
return slug
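# Illustrative example (assumes the ticker is present in santiment_slugs.json):
# get_slug('btc') returns the slug recorded for ticker 'BTC', e.g. 'bitcoin'.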
@log_start_end(log=logger)
def get_github_activity(
coin: str,
dev_activity: bool,
interval: str,
start: str,
end: str,
) -> pd.DataFrame:
"""Returns a list of developer activity for a given coin and time interval.
[Source: https://santiment.net/]
Parameters
----------
coin : str
Crypto symbol to check github activity
dev_activity: bool
Whether to filter only for development activity
    start : str
        Initial date-like string (e.g., 2021-10-01)
    end : str
        End date-like string (e.g., 2021-10-01)
interval : str
Interval frequency (e.g., 1d)
Returns
-------
pd.DataFrame
developer activity over time
"""
activity_type = "dev_activity" if dev_activity else "github_activity"
slug = get_slug(coin)
headers = {
"Content-Type": "application/graphql",
"Authorization": f"Apikey {cfg.API_SANTIMENT_KEY}",
}
# pylint: disable=line-too-long
data = f'\n{{ getMetric(metric: "{activity_type}"){{ timeseriesData( slug: "{slug}" from: "{start}" to: "{end}" interval: "{interval}"){{ datetime value }} }} }}' # noqa: E501
response = requests.post(
"https://api.santiment.net/graphql", headers=headers, data=data
)
df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import datetime
import dateutil.parser
import Utils
#
# given a synthea object, covert it to it's equivalent omop objects
#
class SyntheaToOmop6:
#
# Check the model matches
#
def __init__(self, model_schema, utils):
self.model_schema = model_schema
self.utils = utils
#
# synthea patients to omop
#
def patientsToOmop(self, df, personmap, person_id, location_id):
        # df = df.sort_values('Id')  # optionally sort to better match the original Synthea-to-OMOP conversion, for comparison
        df['persontmp'] = df.index + person_id  # copy the index into a temp column; using the index directly can corrupt the dataframe
        df['locationtmp'] = df.index + location_id  # copy the index into a temp column; using the index directly can corrupt the dataframe
person = pd.DataFrame(columns=self.model_schema['person'].keys())
person['person_id'] = df['persontmp']
person['gender_concept_id'] = df['GENDER'].apply(self.utils.getGenderConceptCode)
person['year_of_birth'] = df['BIRTHDATE'].apply(self.utils.getYearFromSyntheaDate)
person['month_of_birth'] = df['BIRTHDATE'].apply(self.utils.getMonthFromSyntheaDate)
person['day_of_birth'] = df['BIRTHDATE'].apply(self.utils.getDayFromSyntheaDate)
person['race_concept_id'] = df['RACE'].apply(self.utils.getRaceConceptCode)
person['ethnicity_concept_id'] = df['ETHNICITY'].apply(self.utils.getEthnicityConceptCode)
person['birth_datetime'] = df['BIRTHDATE'].apply(self.utils.getDefaultTimestamp)
person['death_datetime'] = df['DEATHDATE'].apply(self.utils.getDefaultTimestamp)
person['location_id'] = df['locationtmp']
person['gender_source_value'] = df['GENDER']
person['person_source_value'] = df['Id']
person['gender_source_concept_id'] = '0'
person['race_source_value'] = df['RACE']
person['race_source_concept_id'] = '0'
person['ethnicity_source_value'] = df['ETHNICITY']
person['ethnicity_source_concept_id'] = '0'
personappend = pd.DataFrame(columns=["person_id","synthea_patient_id"])
personappend["person_id"] = person['person_id']
personappend["synthea_patient_id"] = df['Id']
personmap = personmap.append(personappend)
person = person[person['gender_concept_id'] != 0] # filter out person's with missing or unknown gender
location = pd.DataFrame(columns=self.model_schema['location'].keys())
location['location_id'] = df['locationtmp']
location['address_1'] = df['ADDRESS']
location['city'] = df['CITY']
location['state'] = df['STATE']
location['zip'] = df['ZIP']
location['county'] = df['COUNTY']
location['location_source_value'] = df['Id']
location['latitude'] = df['LAT']
location['longitude'] = df['LON']
# create empty death dataframe
death = pd.DataFrame()
return (person, location, death, personmap, person_id + len(person), location_id + len(location))
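    # Note: personmap accumulates (person_id, synthea_patient_id) pairs so that
    # later conversions (e.g. conditionsToOmop below) can join Synthea PATIENT
    # ids back to OMOP person_ids via pd.merge.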
def conditionsToOmop(self, df, srctostdvm, condition_occurrence_id, drug_exposure_id, observation_id, personmap, visitmap):
df['conditiontmp'] = df.index + condition_occurrence_id # copy index into a temp column.
df['drugexposuretmp'] = df.index + drug_exposure_id # copy index into a temp column.
df['observationtmp'] = df.index + observation_id # copy index into a temp column.
df = pd.merge(df, personmap, left_on='PATIENT', right_on='synthea_patient_id', how='left')
df = pd.merge(df, visitmap, left_on='ENCOUNTER', right_on='synthea_encounter_id', how='left')
condition_occurrence = pd.DataFrame(columns=self.model_schema['condition_occurrence'].keys())
condition_occurrence['condition_occurrence_id'] = df['conditiontmp']
condition_occurrence['person_id'] = df['person_id']
srctostdvm_filtered = srctostdvm[(srctostdvm["target_domain_id"]=='Condition') & (srctostdvm["target_vocabulary_id"]=='SNOMED') & (srctostdvm["target_standard_concept"]=='S') & (srctostdvm["target_invalid_reason"].isnull())]
concept_df = pd.merge(df['CODE'],srctostdvm_filtered[['source_code','target_concept_id']], left_on='CODE', right_on='source_code', how='left')
condition_occurrence['condition_concept_id'] = concept_df['target_concept_id'].fillna('0').astype(int)
condition_occurrence['condition_start_date'] = df['START']
condition_occurrence['condition_start_datetime'] = df['START'].apply(self.utils.getDefaultTimestamp)
condition_occurrence['condition_end_date'] = df['STOP']
condition_occurrence['condition_end_datetime'] = df['STOP'].apply(self.utils.getDefaultTimestamp)
condition_occurrence['condition_type_concept_id'] = '32020'
condition_occurrence['stop_reason'] = '0'
condition_occurrence['visit_occurrence_id'] = df['visit_occurrence_id']
condition_occurrence['visit_detail_id'] = '0'
condition_occurrence['condition_source_value'] = df['CODE']
condition_occurrence['condition_source_concept_id'] = df['CODE']
drug_exposure = pd.DataFrame(columns=self.model_schema['drug_exposure'].keys())
drug_exposure['drug_exposure_id'] = df['drugexposuretmp']
drug_exposure['person_id'] = df['person_id']
srctostdvm_filtered = srctostdvm[(srctostdvm["target_domain_id"]=='Drug') & (srctostdvm["target_vocabulary_id"]=='RxNorm') & (srctostdvm["target_standard_concept"]=='S') & (srctostdvm["target_invalid_reason"].isnull())]
concept_df = pd.merge(df['CODE'],srctostdvm_filtered[['source_code','target_concept_id']], left_on='CODE', right_on='source_code', how='left')
drug_exposure['drug_concept_id'] = concept_df['target_concept_id'].fillna('0').astype(int)
drug_exposure['drug_exposure_start_date'] = df['START']
drug_exposure['drug_exposure_start_datetime'] = df['START'].apply(self.utils.getDefaultTimestamp)
drug_exposure['drug_exposure_end_date'] = df['STOP']
drug_exposure['drug_exposure_end_datetime'] = df['STOP'].apply(self.utils.getDefaultTimestamp)
drug_exposure['verbatim_end_date'] = df['STOP']
drug_exposure['visit_occurrence_id'] = df['visit_occurrence_id']
drug_exposure['drug_source_value'] = df['CODE']
drug_exposure['drug_source_concept_id'] = df['CODE']
drug_exposure['drug_type_concept_id'] = '581452'
drug_exposure['refills'] = '0'
drug_exposure['quantity'] = '0'
drug_exposure['days_supply'] = '0'
drug_exposure['route_concept_id'] = '0'
drug_exposure['lot_number'] = '0'
drug_exposure['visit_detail_id'] = '0'
observation = pd.DataFrame(columns=self.model_schema['observation'].keys())
observation['observation_id'] = df['observationtmp']
observation['person_id'] = df['person_id']
srctostdvm_filtered = srctostdvm[(srctostdvm["target_domain_id"]=='Observation') & (srctostdvm["target_vocabulary_id"]=='SNOMED') & (srctostdvm["target_invalid_reason"].isnull())]
concept_df = pd.merge(df['CODE'],srctostdvm_filtered[['source_code','target_concept_id']], left_on='CODE', right_on='source_code', how='left')
observation['observation_concept_id'] = concept_df['target_concept_id'].fillna('0').astype(int)
observation['observation_date'] = df['START']
observation['observation_datetime'] = df['START'].apply(self.utils.getDefaultTimestamp)
observation['value_as_concept_id'] = '0'
observation['qualifier_concept_id'] = '0'
observation['unit_concept_id'] = '0'
observation['visit_occurrence_id'] = df['visit_occurrence_id']
observation['visit_detail_id'] = '0'
observation['observation_source_value'] = df['CODE']
observation['observation_source_concept_id'] = df['CODE']
observation['observation_type_concept_id'] = '38000280'
return (condition_occurrence, drug_exposure, observation, condition_occurrence_id + len(condition_occurrence) , drug_exposure_id + len(drug_exposure), observation_id + len(observation))
def careplansToOmop(self, df):
pass
def observationsToOmop(self, df, srctostdvm, srctosrcvm, measurement_id, personmap,visitmap):
# filter synthea observations with no encounter (original etl does this)
df['measurementtmp'] = df.index + measurement_id # copy index into a temp column.
df = df[~df.ENCOUNTER.isnull()]
df = pd.merge(df, personmap, left_on='PATIENT', right_on='synthea_patient_id', how='left')
df = pd.merge(df, visitmap, left_on='ENCOUNTER', right_on='synthea_encounter_id', how='left')
measurement = pd.DataFrame(columns=self.model_schema['measurement'].keys())
measurement['measurement_id'] = df['measurementtmp']
measurement['person_id'] = df['person_id']
measurement['measurement_date'] = df['DATE']
measurement['measurement_datetime'] = df['DATE'].apply(self.utils.getDefaultTimestamp)
measurement['measurement_time'] = df['DATE'] # check
measurement['visit_occurrence_id'] = df['visit_occurrence_id']
measurement['visit_detail_id'] = '0'
srctostdvm_filtered = srctostdvm[(srctostdvm["target_domain_id"]=='Measurement') & (srctostdvm["target_standard_concept"]=='S') & (srctostdvm["target_invalid_reason"].isnull())]
concept_df = pd.merge(df[['CODE']],srctostdvm_filtered[['source_code','target_concept_id']], left_on='CODE', right_on='source_code', how='left')
measurement['measurement_concept_id'] = concept_df['target_concept_id'].fillna('0').astype(int)
measurement['measurement_source_value'] = df['CODE']
measurement['measurement_source_concept_id'] = df['CODE']
measurement['measurement_type_concept_id'] = '5001'
measurement['operator_concept_id'] = '0'
measurement['value_as_number'] = df['VALUE']
measurement['value_as_concept_id'] = '0'
measurement['unit_source_value'] = df['UNITS']
measurement['value_source_value'] = df['VALUE']
return (measurement, measurement_id + len(measurement))
def proceduresToOmop(self, df, srctostdvm, procedure_occurrence_id, personmap, visitmap):
df['proceduretmp'] = df.index + procedure_occurrence_id # copy index into a temp column.
df = pd.merge(df, personmap, left_on='PATIENT', right_on='synthea_patient_id', how='left')
df = pd.merge(df, visitmap, left_on='ENCOUNTER', right_on='synthea_encounter_id', how='left')
# do procedures really map to measurements? There is no value and units?
#measurement = pd.DataFrame(columns=self.model_schema['measurement'].keys())
#measurement['person_id'] = df['PATIENT'].apply(self.patienthash)
#measurement['measurement_date'] = df['DATE']
#measurement['measurement_time'] = df['DATE'] # check
#measurement['value_as_number'] = df['VALUE']
#measurement['visit_occurrence_id'] = df['CODE']
#measurement['measurement_concept_id'] = df['CODE']
#measurement['measurement_type_concept_id'] = '5001'
#measurement['measurement_source_value'] = df['CODE']
#measurement['measurement_source_concept_id'] = df['CODE']
#measurement['unit_source_value'] = df['UNITS']
#measurement['value_source_value'] = df['VALUE']
procedure_occurrence = pd.DataFrame(columns=self.model_schema['procedure_occurrence'].keys())
procedure_occurrence['procedure_occurrence_id'] = df['proceduretmp']
procedure_occurrence['person_id'] = df['person_id']
srctostdvm_filtered = srctostdvm[(srctostdvm["target_domain_id"]=='Procedure') & (srctostdvm["target_vocabulary_id"]=='SNOMED') & (srctostdvm["target_standard_concept"]=='S') & (srctostdvm["target_invalid_reason"].isnull())]
concept_df = pd.merge(df[['CODE']],srctostdvm_filtered[['source_code','target_concept_id']], left_on='CODE', right_on='source_code', how='left')
procedure_occurrence['procedure_concept_id'] = concept_df['target_concept_id'].fillna('0').astype(int)
procedure_occurrence['procedure_date'] = df['DATE']
procedure_occurrence['procedure_datetime'] = df['DATE'].apply(self.utils.getDefaultTimestamp)
procedure_occurrence['visit_occurrence_id'] = df['visit_occurrence_id']
procedure_occurrence['visit_detail_id'] = '0'
procedure_occurrence['procedure_type_concept_id'] = '38000275'
procedure_occurrence['modifier_concept_id'] = '0'
procedure_occurrence['procedure_source_value'] = df['CODE']
procedure_occurrence['procedure_source_concept_id'] = df['CODE']
return (procedure_occurrence, procedure_occurrence_id + len(procedure_occurrence))
def immunizationsToOmop(self, df, srctostdvm, drug_exposure_id, personmap, visitmap):
df['drugexposuretmp'] = df.index + drug_exposure_id # copy index into a temp column.
df = pd.merge(df, personmap, left_on='PATIENT', right_on='synthea_patient_id', how='left')
df = pd.merge(df, visitmap, left_on='ENCOUNTER', right_on='synthea_encounter_id', how='left')
drug_exposure = pd.DataFrame(columns=self.model_schema['drug_exposure'].keys())
drug_exposure['drug_exposure_id'] = df['drugexposuretmp']
drug_exposure['person_id'] = df['person_id']
srctostdvm_filtered = srctostdvm[(srctostdvm["target_domain_id"]=='Drug') & (srctostdvm["target_vocabulary_id"]=='CVX') & (srctostdvm["target_standard_concept"]=='S') & (srctostdvm["target_invalid_reason"].isnull())]
concept_df = pd.merge(df['CODE'],srctostdvm_filtered[['source_code','target_concept_id']], left_on='CODE', right_on='source_code', how='left')
drug_exposure['drug_concept_id'] = concept_df['target_concept_id'].fillna('0').astype(int)
drug_exposure['drug_exposure_start_date'] = df['DATE']
drug_exposure['drug_exposure_start_datetime'] = df['DATE'].apply(self.utils.getDefaultTimestamp)
drug_exposure['drug_exposure_end_date'] = df['DATE']
drug_exposure['drug_exposure_end_datetime'] = df['DATE'].apply(self.utils.getDefaultTimestamp)
drug_exposure['verbatim_end_date'] = df['DATE']
drug_exposure['visit_occurrence_id'] = df['visit_occurrence_id']
drug_exposure['drug_source_value'] = df['CODE']
drug_exposure['drug_source_concept_id'] = df['CODE']
drug_exposure['drug_type_concept_id'] = '581452'
drug_exposure['refills'] = '0'
drug_exposure['quantity'] = '0'
drug_exposure['days_supply'] = '0'
drug_exposure['route_concept_id'] = '0'
drug_exposure['lot_number'] = '0'
drug_exposure['visit_detail_id'] = '0'
return (drug_exposure, drug_exposure_id + len(drug_exposure))
def encountersToOmop(self, df, observation_period_id, visit_occurrence_id, personmap, visitmap):
df['visittmp'] = df.index + visit_occurrence_id # copy index into a temp column.
df = pd.merge(df, personmap, left_on='PATIENT', right_on='synthea_patient_id', how='left')
# preprocess df
df['observation_period_start_date'] = df['START'].apply(self.utils.isoTimestampToDate)
df['observation_period_end_date'] = df['STOP'].apply(self.utils.isoTimestampToDate)
start = df.groupby('person_id')['observation_period_start_date'].agg(['first']).reset_index()
stop = df.groupby('person_id')['observation_period_end_date'].agg(['last']).reset_index()
observation_tmp = pd.merge(start, stop, on='person_id', how='inner')
observation_period = pd.DataFrame(columns=self.model_schema['observation_period'].keys())
observation_period['observationtmp'] = observation_tmp.index + observation_period_id
observation_period['observation_period_id'] = observation_period['observationtmp']
observation_period['person_id'] = observation_tmp['person_id']
observation_period['observation_period_start_date'] = observation_tmp['first']
observation_period['observation_period_end_date'] = observation_tmp['last']
observation_period['period_type_concept_id'] = '44814724'
observation_period = observation_period.drop('observationtmp', 1)
observation_period_id = observation_period_id + len(observation_period)
visit_occurrence = pd.DataFrame(columns=self.model_schema['visit_occurrence'].keys())
visit_occurrence['visit_occurrence_id'] = df['visittmp']
visit_occurrence['person_id'] = df['person_id']
visit_occurrence['visit_start_date'] = df['START']
visit_occurrence['visit_end_date'] = df['STOP']
visit_occurrence['visit_concept_id'] = df['ENCOUNTERCLASS'].apply(self.utils.getVisitConcept)
visit_occurrence['visit_source_value'] = df['ENCOUNTERCLASS']
visit_occurrence['visit_type_concept_id'] = '44818517'
visitappend = pd.DataFrame(columns=["visit_occurrence_id","synthea_encounter_id"])
visitappend["visit_occurrence_id"] = visit_occurrence['visit_occurrence_id']
visitappend["synthea_encounter_id"] = df['Id']
visitmap = visitmap.append(visitappend)
return (observation_period, visit_occurrence, visit_occurrence_id + len(visit_occurrence), observation_period_id + len(observation_period), visitmap)
def organizationsToOmop(self, df, care_site_id):
care_site = pd.DataFrame(columns=self.model_schema['care_site'].keys())
return (care_site, care_site_id + len(care_site))
def providersToOmop(self, df, provider_id):
provider = pd.DataFrame(columns=self.model_schema['provider'].keys())
return (provider, provider_id)
def payertransitionToOmop(self, df):
pass
def allergiesToOmop(self, df, srctostdvm, observation_id, personmap, visitmap):
df['observationtmp'] = df.index + observation_id # copy index into a temp column.
df = pd.merge(df, personmap, left_on='PATIENT', right_on='synthea_patient_id', how='left')
df = | pd.merge(df, visitmap, left_on='ENCOUNTER', right_on='synthea_encounter_id', how='left') | pandas.merge |
from datetime import datetime, timedelta
from importlib import reload
import string
import sys
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
Timedelta,
Timestamp,
date_range,
)
import pandas._testing as tm
class TestSeriesDtypes:
def test_dt64_series_astype_object(self):
dt64ser = Series(date_range("20130101", periods=3))
result = dt64ser.astype(object)
assert isinstance(result.iloc[0], datetime)
assert result.dtype == np.object_
def test_td64_series_astype_object(self):
tdser = Series(["59 Days", "59 Days", "NaT"], dtype="timedelta64[ns]")
result = tdser.astype(object)
assert isinstance(result.iloc[0], timedelta)
assert result.dtype == np.object_
@pytest.mark.parametrize("dtype", ["float32", "float64", "int64", "int32"])
def test_astype(self, dtype):
s = Series(np.random.randn(5), name="foo")
as_typed = s.astype(dtype)
assert as_typed.dtype == dtype
assert as_typed.name == s.name
def test_dtype(self, datetime_series):
assert datetime_series.dtype == np.dtype("float64")
assert datetime_series.dtypes == np.dtype("float64")
@pytest.mark.parametrize("value", [np.nan, np.inf])
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
def test_astype_cast_nan_inf_int(self, dtype, value):
# gh-14265: check NaN and inf raise error when converting to int
msg = "Cannot convert non-finite values \\(NA or inf\\) to integer"
s = Series([value])
with pytest.raises(ValueError, match=msg):
s.astype(dtype)
@pytest.mark.parametrize("dtype", [int, np.int8, np.int64])
def test_astype_cast_object_int_fail(self, dtype):
arr = Series(["car", "house", "tree", "1"])
msg = r"invalid literal for int\(\) with base 10: 'car'"
with pytest.raises(ValueError, match=msg):
arr.astype(dtype)
def test_astype_cast_object_int(self):
arr = Series(["1", "2", "3", "4"], dtype=object)
result = arr.astype(int)
tm.assert_series_equal(result, Series(np.arange(1, 5)))
def test_astype_datetime(self):
s = Series(iNaT, dtype="M8[ns]", index=range(5))
s = s.astype("O")
assert s.dtype == np.object_
s = Series([datetime(2001, 1, 2, 0, 0)])
s = s.astype("O")
assert s.dtype == np.object_
s = Series([datetime(2001, 1, 2, 0, 0) for i in range(3)])
s[1] = np.nan
assert s.dtype == "M8[ns]"
s = s.astype("O")
assert s.dtype == np.object_
def test_astype_datetime64tz(self):
s = Series(date_range("20130101", periods=3, tz="US/Eastern"))
# astype
result = s.astype(object)
expected = Series(s.astype(object), dtype=object)
tm.assert_series_equal(result, expected)
result = Series(s.values).dt.tz_localize("UTC").dt.tz_convert(s.dt.tz)
tm.assert_series_equal(result, s)
# astype - object, preserves on construction
result = Series(s.astype(object))
expected = s.astype(object)
tm.assert_series_equal(result, expected)
# astype - datetime64[ns, tz]
result = Series(s.values).astype("datetime64[ns, US/Eastern]")
tm.assert_series_equal(result, s)
result = Series(s.values).astype(s.dtype)
tm.assert_series_equal(result, s)
result = s.astype("datetime64[ns, CET]")
expected = Series(date_range("20130101 06:00:00", periods=3, tz="CET"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", [str, np.str_])
@pytest.mark.parametrize(
"series",
[
Series([string.digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
Series([string.digits * 10, tm.rands(63), tm.rands(64), np.nan, 1.0]),
],
)
def test_astype_str_map(self, dtype, series):
# see gh-4405
result = series.astype(dtype)
expected = series.map(str)
tm.assert_series_equal(result, expected)
def test_astype_str_cast_dt64(self):
# see gh-9757
ts = Series([Timestamp("2010-01-04 00:00:00")])
s = ts.astype(str)
expected = Series([str("2010-01-04")])
tm.assert_series_equal(s, expected)
ts = Series([Timestamp("2010-01-04 00:00:00", tz="US/Eastern")])
s = ts.astype(str)
expected = Series([str("2010-01-04 00:00:00-05:00")])
tm.assert_series_equal(s, expected)
def test_astype_str_cast_td64(self):
# see gh-9757
td = Series([Timedelta(1, unit="d")])
ser = td.astype(str)
expected = Series([str("1 days")])
tm.assert_series_equal(ser, expected)
def test_astype_unicode(self):
# see gh-7758: A bit of magic is required to set
# default encoding to utf-8
digits = string.digits
test_series = [
Series([digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
| Series(["データーサイエンス、お前はもう死んでいる"]) | pandas.Series |
# TensorFlow and tf.keras
import tensorflow as tf #https://adventuresinmachinelearning.com/python-tensorflow-tutorial/
#import keras
from sklearn.model_selection import train_test_split
# Helper libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import time
#****************************************
#**********start of user block***********
filename_list=['./NN_data/0MTM_scan_CORI_2.csv',
'./NN_data/0MTM_scan_PC.csv',
'./NN_data/0MTM_scan_CORI_1.csv',
'./NN_data/0MTM_scan_CORI_3_large_nu.csv']
epochs = 100
batch_size = 100
checkpoint_path='./tmp/checkpoint_gamma'
Read_from_checkpoint=False
#**********end of user block*************
#****************************************
#*********start of creating of model***************
def create_model(checkpoint_path):
#creating the model
model = tf.keras.Sequential([
tf.keras.Input(shape=(7,)),  # shape must be a tuple; (7) is just the int 7
#tf.keras.layers.Dense(units=7, input_shape=[7],activation='relu'),
tf.keras.layers.Dense(units=10240, activation='relu'),
#tf.keras.layers.Dense(units=32, activation='relu'),
#tf.keras.layers.Dense(units=32, activation='relu'),
#tf.keras.layers.Dense(units=256, activation='relu'),
#tf.keras.layers.Dense(units=1024, activation='relu'),
#tf.keras.layers.Dense(units=256, activation='relu'),
#tf.keras.layers.Dense(units=16, activation='relu'),
#tf.keras.layers.Dropout(0.2),
#tf.keras.layers.Dense(units=8, activation='relu'),
tf.keras.layers.Dense(units=1, activation='sigmoid')
])
model.summary()
model.compile(loss='binary_crossentropy',\
optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.001),
#optimizer=tf.keras.optimizers.Adam(learning_rate=0.003),\
metrics=['accuracy'])
#*create callback function (optional)
class myCallback(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs=None):
    # Keras passes the epoch metrics in `logs`; guard against a missing key
    logs = logs or {}
    if logs.get('val_accuracy', 0) > 0.99:
        print('val_accuracy>0.99, stop training!')
        self.model.stop_training = True
callbacks=myCallback()
import os
if not os.path.exists('./tmp'):
os.mkdir('./tmp')
#https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/ModelCheckpoint
cp_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_path,
verbose=1,
save_weights_only=True,
save_freq='epoch')
lr_callback = tf.keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', factor=0.1, patience=10, verbose=0,
mode='auto', min_delta=0.0001, cooldown=0, min_lr=0
)
callback_func=[cp_callback,callbacks,lr_callback]
#*********end of creating of model***************
return model,callback_func
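# Illustrative sketch (not part of the original script): one way the model and
# callbacks returned by create_model() could be wired into model.fit(). The
# random x/y arrays and the 80/20 split below are assumptions for illustration.
def _example_train_run():
    model, callback_func = create_model(checkpoint_path)
    x = np.random.rand(1000, 7)
    y = np.random.randint(0, 2, size=1000)
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
    # validation_data is required so the val_accuracy early-stop callback can fire
    return model.fit(x_train, y_train,
                     validation_data=(x_test, y_test),
                     epochs=epochs, batch_size=batch_size,
                     callbacks=callback_func)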
def load_data(filename_list):
#*******start of loading data*******************
for i in range(len(filename_list)):
filename=filename_list[i]
df=pd.read_csv(filename)
df=df.dropna()
try:
df=df.drop(columns=['change'])
except:
pass
# .copy() avoids SettingWithCopyWarning when the label column is added below
df_unstable=df.query('omega_omega_n!=0 and gamma_omega_n>0').copy()
df_stable=df.query('omega_omega_n==0 or gamma_omega_n<=0').copy()
df_unstable['unstable']=[1]*len(df_unstable)
df_stable['unstable']=[0]*len(df_stable)
df= | pd.concat([df_unstable, df_stable], axis=0) | pandas.concat |
from pandas import DataFrame, read_csv
from pandas import concat
import os
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
"""
Frame a time series as a supervised learning dataset.
Arguments:
data: Sequence of observations as a list or NumPy array.
n_in: Number of lag observations as input (X).
n_out: Number of observations as output (y).
dropnan: Boolean whether or not to drop rows with NaN values.
Returns:
Pandas DataFrame of series framed for supervised learning.
"""
n_vars = 1 if type(data) is list else data.shape[1]
df = | DataFrame(data) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 11 15:36:08 2018
@author: work
"""
import os, glob
import numpy as np
import pandas as pd
from load import load_from_cache, save_to_cache, load_file, load_from_cache_multi
def trans2square(image):
h, w = image.shape[:2]
new_shape = max(h, w)
if (new_shape, new_shape)!=(h, w):
y1, x1 = (new_shape - h)//2, (new_shape - w)//2
y2, x2 = new_shape - h-y1, new_shape - w-x1
image = np.pad(image, ((y1,y2),(x1,x2)), mode='constant',
constant_values = 0)
else:
y1, y2, x1, x2 = 0,0,0,0
return image, y1, y2, x1, x2
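# Illustrative sketch (added for clarity): trans2square pads the shorter axis
# symmetrically with zeros, so a 2x4 array becomes 4x4 with pads (y1, y2, x1, x2)
# = (1, 1, 0, 0). The tiny check below is hypothetical, not part of the pipeline.
def _example_trans2square():
    img = np.ones((2, 4), dtype=np.uint8)
    squared, y1, y2, x1, x2 = trans2square(img)
    assert squared.shape == (4, 4) and (y1, y2, x1, x2) == (1, 1, 0, 0)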
def prob_to_rles(lab_img, threshold=10):
for i in range(1, lab_img.max() + 1):
yield rle_encoding(lab_img == i)
def rle_encoding(x):
dots = np.where(x.T.flatten() == 1)[0]
run_lengths = []
prev = -2
for b in dots:
if (b>prev+1): run_lengths.extend((b + 1, 0))
run_lengths[-1] += 1
prev = b
return run_lengths
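# Illustrative sketch (added for clarity): rle_encoding emits 1-based,
# column-major (start pixel, run length) pairs in the flat list format the
# Kaggle submission expects. Example: a 2x2 mask with only the left column set
# encodes as [1, 2] (start at pixel 1, run of 2). Hypothetical check:
def _example_rle_encoding():
    mask = np.array([[1, 0],
                     [1, 0]])
    assert rle_encoding(mask) == [1, 2]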
def make_submission(preds_df):
df = load_from_cache('stage2_df')
result = []
for ind in preds_df.index:
mask = preds_df.loc[ind, 'pred']
assert len(np.unique(mask))==mask.max()+1
result.append(list(prob_to_rles(mask)))
new_test_ids=[]
rles=[]
for n, id_ in enumerate(df['id']):
rles.extend(result[n])
new_test_ids.extend([id_]*len(result[n]))
sub = pd.DataFrame()
sub['ImageId'] = new_test_ids
sub['EncodedPixels'] = pd.Series(rles).apply(lambda x: ' '.join(str(y) for y in x))
sub.to_csv(os.path.join('..//cache','sub-scale3.csv'), index=False)
def main():
################################################################################
weight_dir = '/media/work/Data/dsb/cache/UnetRCNN_180410-221747'
################################################################################
df_name = 'stage2_df'
df = load_from_cache(df_name)
tags = ['quarter', 'half', None, 'two']
preds = []
for tag in tags:
if tag is None:
fl = os.path.join(weight_dir, '{}.dat'.format(df_name))
pred = load_file(fl)
elif tag == 'two':
fl_names = glob.glob(os.path.join(weight_dir, '{}_{}'.format(df_name, tag),
'{}_{}_[0-9+].dat'.format(df_name, tag)))+\
glob.glob(os.path.join(weight_dir, '{}_{}'.format(df_name, tag),
'{}_{}_[0-9][0-9].dat'.format(df_name, tag)))
pred = load_from_cache_multi(os.path.join(weight_dir,
'{}_{}'.format(df_name, tag),
'{}_{}'.format(df_name, tag)),
nb=len(fl_names))
else:
fl = os.path.join(weight_dir, '{}_{}.dat'.format(df_name,tag))
pred = load_file(fl)
preds.append(pred)
nb_fls = len(tags)
results = []
for ind in df.index:
masks = [pred.loc[ind, 'pred'] for pred in preds]
scores = [pred.loc[ind, 'con'] for pred in preds]
res={}
for key, vals in zip(np.arange(nb_fls),scores):
for nb in range(len(vals)):
res['{}_{}'.format(key, nb)] = vals[nb]
res = | pd.Series(res) | pandas.Series |
import warnings
import numpy as np
import pandas as pd
from scipy.stats import entropy
from medpy.metric import histogram
import config
def jsd(x, y):
'''
Jensen Shannon Divergence
Author: jonathanfriedman
'''
warnings.filterwarnings("ignore", category=RuntimeWarning)
x = np.array(x)
y = np.array(y)
d1 = x*np.log2(2*x/(x+y))
d2 = y*np.log2(2*y/(x+y))
d1[np.isnan(d1)] = 0
d2[np.isnan(d2)] = 0
d = 0.5*np.sum(d1+d2)
return d
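# Illustrative sketch (added for clarity): with base-2 logs the Jensen-Shannon
# divergence is 0 for identical distributions and bounded above by 1. The two
# small distributions below are hypothetical inputs, not project data.
def _example_jsd():
    p = np.array([0.5, 0.5])
    q = np.array([0.9, 0.1])
    assert jsd(p, p) == 0.0
    assert 0.0 < jsd(p, q) < 1.0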
def compute_distances(metric, outfile):
'''
compute a distance metric between a main corpus and seed corpus
and write to a numpy file
'''
scores = np.zeros(seed_doctopics.shape[0])
for doctopic in main_doctopics:
scores = np.vstack((scores, np.apply_along_axis(metric,
1, seed_doctopics, doctopic)))
scores = np.delete(scores, [0], axis=0)
np.save(config.path_output + '{}'.format(outfile), scores)
def chunker(seq, size):
'''
function to chunk dataframe into equal size chunks, except remainder
http://stackoverflow.com/questions/25699439/how-to-iterate-over-consecutive-chunks-of-pandas-dataframe-efficiently
'''
return (seq[pos:pos + size] for pos in range(0, len(seq), size))
# read document composition file (mallet output)
with open(config.path_input + 'mallet_composition_500.txt', 'r') as f:
hs = pd.read_csv(f, sep='\t', header=None, usecols=range(2, 501))
doctopics = | pd.DataFrame.as_matrix(hs) | pandas.DataFrame.as_matrix |
# -*- coding: utf-8 -*-
"""
.. module:: trend
:synopsis: Trend Indicators.
.. moduleauthor:: <NAME> (Bukosabino)
"""
import pandas as pd
import numpy as np
from .utils import *
def macd(close, n_fast=12, n_slow=26, fillna=False):
"""Moving Average Convergence Divergence (MACD)
A trend-following momentum indicator that shows the relationship between
two moving averages of prices.
https://en.wikipedia.org/wiki/MACD
Args:
close(pandas.Series): dataset 'Close' column.
n_fast(int): n period short-term.
n_slow(int): n period long-term.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
emafast = ema(close, n_fast, fillna)
emaslow = ema(close, n_slow, fillna)
macd = emafast - emaslow
if fillna:
macd = macd.replace([np.inf, -np.inf], np.nan).fillna(0)
return pd.Series(macd, name='MACD_%d_%d' % (n_fast, n_slow))
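# Illustrative sketch (added for clarity): minimal use of macd() on a synthetic
# close series. The random-walk prices below are an assumption for demonstration;
# real usage passes a market 'Close' column.
def _example_macd():
    close = pd.Series(np.random.randn(300).cumsum() + 100)
    line = macd(close, n_fast=12, n_slow=26)
    assert line.name == 'MACD_12_26' and len(line) == len(close)
    return line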
def macd_signal(close, n_fast=12, n_slow=26, n_sign=9, fillna=False):
"""Moving Average Convergence Divergence (MACD Signal)
Shows EMA of MACD.
https://en.wikipedia.org/wiki/MACD
Args:
close(pandas.Series): dataset 'Close' column.
n_fast(int): n period short-term.
n_slow(int): n period long-term.
n_sign(int): n period to signal.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
emafast = ema(close, n_fast, fillna)
emaslow = ema(close, n_slow, fillna)
macd = emafast - emaslow
macd_signal = ema(macd, n_sign, fillna)
if fillna:
macd_signal = macd_signal.replace([np.inf, -np.inf], np.nan).fillna(0)
return pd.Series(macd_signal, name='MACD_sign')
def macd_diff(close, n_fast=12, n_slow=26, n_sign=9, fillna=False):
"""Moving Average Convergence Divergence (MACD Diff)
Shows the relationship between MACD and MACD Signal.
https://en.wikipedia.org/wiki/MACD
Args:
close(pandas.Series): dataset 'Close' column.
n_fast(int): n period short-term.
n_slow(int): n period long-term.
n_sign(int): n period to signal.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
emafast = ema(close, n_fast, fillna)
emaslow = ema(close, n_slow, fillna)
macd = emafast - emaslow
macdsign = ema(macd, n_sign, fillna)
macd_diff = macd - macdsign
if fillna:
macd_diff = macd_diff.replace([np.inf, -np.inf], np.nan).fillna(0)
return pd.Series(macd_diff, name='MACD_diff')
def ema_indicator(close, n=12, fillna=False):
"""EMA
Exponential Moving Average via Pandas
Args:
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
ema_ = ema(close, n, fillna)
return pd.Series(ema_, name='ema')
def adx(high, low, close, n=14, fillna=False):
"""Average Directional Movement Index (ADX)
The Plus Directional Indicator (+DI) and Minus Directional Indicator (-DI)
are derived from smoothed averages of these differences, and measure trend
direction over time. These two indicators are often referred to collectively
as the Directional Movement Indicator (DMI).
The Average Directional Index (ADX) is in turn derived from the smoothed
averages of the difference between +DI and -DI, and measures the strength
of the trend (regardless of direction) over time.
Using these three indicators together, chartists can determine both the
direction and strength of the trend.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:average_directional_index_adx
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
cs = close.shift(1)
pdm = high.combine(cs, lambda x1, x2: max(x1, x2) if np.isnan(x1) == False and np.isnan(x2) == False else np.nan)
pdn = low.combine(cs, lambda x1, x2: min(x1, x2) if np.isnan(x1) == False and np.isnan(x2) == False else np.nan)
tr = pdm - pdn
trs_initial = np.zeros(n-1)
trs = np.zeros(len(close) - (n - 1))
trs[0] = tr.dropna()[0:n].sum()
tr = tr.reset_index(drop=True)
for i in range(1, len(trs)-1):
trs[i] = trs[i-1] - (trs[i-1]/float(n)) + tr[n+i]
up = high - high.shift(1)
dn = low.shift(1) - low
pos = abs(((up > dn) & (up > 0)) * up)
neg = abs(((dn > up) & (dn > 0)) * dn)
dip_mio = np.zeros(len(close) - (n - 1))
dip_mio[0] = pos.dropna()[0:n].sum()
pos = pos.reset_index(drop=True)
for i in range(1, len(dip_mio)-1):
dip_mio[i] = dip_mio[i-1] - (dip_mio[i-1]/float(n)) + pos[n+i]
din_mio = np.zeros(len(close) - (n - 1))
din_mio[0] = neg.dropna()[0:n].sum()
neg = neg.reset_index(drop=True)
for i in range(1, len(din_mio)-1):
din_mio[i] = din_mio[i-1] - (din_mio[i-1]/float(n)) + neg[n+i]
dip = np.zeros(len(trs))
for i in range(len(trs)):
dip[i] = 100 * (dip_mio[i]/trs[i])
din = np.zeros(len(trs))
for i in range(len(trs)):
din[i] = 100 * (din_mio[i]/trs[i])
dx = 100 * np.abs((dip - din) / (dip + din))
adx = np.zeros(len(trs))
adx[n] = dx[0:n].mean()
for i in range(n+1, len(adx)):
adx[i] = ((adx[i-1] * (n - 1)) + dx[i-1]) / float(n)
adx = np.concatenate((trs_initial, adx), axis=0)
adx = pd.Series(data=adx, index=close.index)
if fillna:
adx = adx.replace([np.inf, -np.inf], np.nan).fillna(20)
return pd.Series(adx, name='adx')
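# Illustrative sketch (added for clarity): calling adx() on synthetic OHLC data.
# The random series below are assumptions; real usage passes the dataset's
# 'High', 'Low' and 'Close' columns.
def _example_adx():
    close = pd.Series(np.random.randn(200).cumsum() + 100)
    high = close + np.random.rand(200)
    low = close - np.random.rand(200)
    strength = adx(high, low, close, n=14)
    assert strength.name == 'adx' and len(strength) == len(close)
    return strength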
def adx_pos(high, low, close, n=14, fillna=False):
"""Average Directional Movement Index Positive (ADX)
The Plus Directional Indicator (+DI) and Minus Directional Indicator (-DI)
are derived from smoothed averages of these differences, and measure trend
direction over time. These two indicators are often referred to collectively
as the Directional Movement Indicator (DMI).
The Average Directional Index (ADX) is in turn derived from the smoothed
averages of the difference between +DI and -DI, and measures the strength
of the trend (regardless of direction) over time.
Using these three indicators together, chartists can determine both the
direction and strength of the trend.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:average_directional_index_adx
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
cs = close.shift(1)
pdm = high.combine(cs, lambda x1, x2: max(x1, x2) if np.isnan(x1) == False and np.isnan(x2) == False else np.nan)
pdn = low.combine(cs, lambda x1, x2: min(x1, x2) if np.isnan(x1) == False and np.isnan(x2) == False else np.nan)
tr = pdm - pdn
trs_initial = np.zeros(n-1)
trs = np.zeros(len(close) - (n - 1))
trs[0] = tr.dropna()[0:n].sum()
tr = tr.reset_index(drop=True)
for i in range(1, len(trs)-1):
trs[i] = trs[i-1] - (trs[i-1]/float(n)) + tr[n+i]
up = high - high.shift(1)
dn = low.shift(1) - low
pos = abs(((up > dn) & (up > 0)) * up)
neg = abs(((dn > up) & (dn > 0)) * dn)
dip_mio = np.zeros(len(close) - (n - 1))
dip_mio[0] = pos.dropna()[0:n].sum()
pos = pos.reset_index(drop=True)
for i in range(1, len(dip_mio)-1):
dip_mio[i] = dip_mio[i-1] - (dip_mio[i-1]/float(n)) + pos[n+i]
dip = np.zeros(len(close))
for i in range(1, len(trs)-1):
dip[i+n] = 100 * (dip_mio[i]/trs[i])
dip = pd.Series(data=dip, index=close.index)
if fillna:
dip = dip.replace([np.inf, -np.inf], np.nan).fillna(20)
return pd.Series(dip, name='adx_pos')
def adx_neg(high, low, close, n=14, fillna=False):
"""Average Directional Movement Index Negative (ADX)
The Plus Directional Indicator (+DI) and Minus Directional Indicator (-DI)
are derived from smoothed averages of these differences, and measure trend
direction over time. These two indicators are often referred to collectively
as the Directional Movement Indicator (DMI).
The Average Directional Index (ADX) is in turn derived from the smoothed
averages of the difference between +DI and -DI, and measures the strength
of the trend (regardless of direction) over time.
Using these three indicators together, chartists can determine both the
direction and strength of the trend.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:average_directional_index_adx
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
cs = close.shift(1)
pdm = high.combine(cs, lambda x1, x2: max(x1, x2) if np.isnan(x1) == False and np.isnan(x2) == False else np.nan)
pdn = low.combine(cs, lambda x1, x2: min(x1, x2) if np.isnan(x1) == False and np.isnan(x2) == False else np.nan)
tr = pdm - pdn
trs_initial = np.zeros(n-1)
trs = np.zeros(len(close) - (n - 1))
trs[0] = tr.dropna()[0:n].sum()
tr = tr.reset_index(drop=True)
for i in range(1, len(trs)-1):
trs[i] = trs[i-1] - (trs[i-1]/float(n)) + tr[n+i]
up = high - high.shift(1)
dn = low.shift(1) - low
pos = abs(((up > dn) & (up > 0)) * up)
neg = abs(((dn > up) & (dn > 0)) * dn)
din_mio = np.zeros(len(close) - (n - 1))
din_mio[0] = neg.dropna()[0:n].sum()
neg = neg.reset_index(drop=True)
for i in range(1, len(din_mio)-1):
din_mio[i] = din_mio[i-1] - (din_mio[i-1]/float(n)) + neg[n+i]
din = np.zeros(len(close))
for i in range(1, len(trs)-1):
din[i+n] = 100 * (din_mio[i]/float(trs[i]))
din = pd.Series(data=din, index=close.index)
if fillna:
din = din.replace([np.inf, -np.inf], np.nan).fillna(20)
return pd.Series(din, name='adx_neg')
def vortex_indicator_pos(high, low, close, n=14, fillna=False):
"""Vortex Indicator (VI)
It consists of two oscillators that capture positive and negative trend
movement. A bullish signal triggers when the positive trend indicator
crosses above the negative trend indicator or a key level.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:vortex_indicator
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
tr = high.combine(close.shift(1), max) - low.combine(close.shift(1), min)
trn = tr.rolling(n).sum()
vmp = np.abs(high - low.shift(1))
vmm = np.abs(low - high.shift(1))
vip = vmp.rolling(n).sum() / trn
if fillna:
vip = vip.replace([np.inf, -np.inf], np.nan).fillna(1)
return | pd.Series(vip, name='vip') | pandas.Series |
# -*- coding:utf-8 -*-
import sys
import time
import datetime
import pandas as pd
import numpy as np
import logging
# Show floats with 3 decimal places
pd.set_option('display.float_format', lambda x: '%.3f' % x)
# Show all columns
pd.set_option('display.max_columns', 1000)
# Show all rows
pd.set_option('display.max_rows', 1000)
# Set the display width of each value to 100 (default is 50)
pd.set_option('display.max_colwidth', 100)
# Only wrap console output once it exceeds 1000 characters
pd.set_option('display.width', 1000)
def quiet_logs(sc):
# Do not print warning messages to the console
logger = sc._jvm.org.apache.log4j
logger.LogManager.getLogger("org").setLevel(logger.Level.ERROR)
logger.LogManager.getLogger("akka").setLevel(logger.Level.ERROR)
logger_py4j = logging.getLogger('py4j')
logger_py4j.setLevel(logging.ERROR)
def df_head(hc_df, lines=5):
if hc_df:
df = hc_df.toPandas()
return df.head(lines)
else:
return None
class Py4jHdfs:
"""
Python helper for HDFS operations via the Spark JVM gateway
"""
def __init__(self, sc):
self.sc = sc
self.filesystem = self.get_file_system()
def path(self, file_path):
"""
Create a Hadoop Path object
:param file_path: absolute file path
:return: org.apache.hadoop.fs.Path object
"""
path_class = self.sc._gateway.jvm.org.apache.hadoop.fs.Path
return path_class(file_path)
def get_file_system(self):
"""
Create a FileSystem object from the SparkContext's Hadoop configuration
:return: FileSystem object
"""
filesystem_class = self.sc._gateway.jvm.org.apache.hadoop.fs.FileSystem
hadoop_configuration = self.sc._jsc.hadoopConfiguration()
return filesystem_class.get(hadoop_configuration)
def ls(self, path, is_return=False):
"""
List a directory, equivalent to the `hadoop fs -ls` command
:param path: absolute HDFS path
:return: file_list, a list of (name, size, modified time, type) tuples when is_return is True
"""
def file_or_dir(is_file, is_dir):
if is_file:
return 'is_file:True'
elif is_dir:
return 'is_directory:True'
else:
return 'unknown'
filesystem = self.get_file_system()
status = filesystem.listStatus(self.path(path))
try:
file_index = str(status[0].getPath()).index(path) + len(path)
except:
print([])
file_list = [(str(m.getPath())[file_index:],
str(round(m.getLen() / 1024.0 / 1024.0, 2)) + ' MB',
str(datetime.datetime.fromtimestamp(m.getModificationTime() / 1000)),
str(file_or_dir(m.isFile(), m.isDirectory()))) for m in status]
if file_list and not is_return:
for f in file_list:
print(f)
if not file_list:
print([])
if is_return:
return file_list
def exists(self, path):
return self.filesystem.exists(self.path(path))
def mkdir(self, path):
return self.filesystem.mkdirs(self.path(path))
def mkdirs(self, path, mode="755"):
return self.filesystem.mkdirs(self.path(path))
def set_replication(self, path, replication):
return self.filesystem.setReplication(self.path(path), replication)
def mv(self, path1, path2):
return self.filesystem.rename(self.path(path1), self.path(path2))
def rm(self, path, recursive=True, print_info=True):
"""
Delete a file immediately; this cannot be undone!
:param path: file or directory to delete
:param recursive: whether to delete recursively, defaults to True
"""
try:
result = self.filesystem.delete(self.path(path), recursive)
if result:
if print_info:
print('[Info]: Remove File Successful!')
return True
else:
if print_info:
print('[Error]: Remove File Failed!')
return result
except Exception as e:
if print_info:
print('[Error]: %s' % e)
def safe_rm(self, path, trash_path='.Trash/Current'):
"""
Delete a file or directory recoverably by moving it into the trash path
:param path: absolute path of the file to delete
"""
try:
self.filesystem.rename(self.path(path), self.path(trash_path + path))
print('[Info]: Safe Remove File Successful!')
except:
try:
self.rm(self.path(trash_path + path))
self.filesystem.rename(self.path(path), self.path(trash_path + path))
print('[Info]: Safe Remove File Successful!')
except Exception as e:
print('[Error]: %s' % e)
print('[Error]: Remove File Failed!')
return True
def chmod(self, path, mode):
self.filesystem.setPermission(self.path(path), mode)
def chown(self, path, owner, group):
self.filesystem.setOwner(self.path(path), owner, group)
# def get(self, src, dst, del_src=False,use_raw_local_file_system=True):
# self.filesystem.copyToLocalFile(del_src, self.path(src), dst, use_raw_local_file_system)
# def put(self, src, dst, del_src=False, overwrite=True):
# self.filesystem.copyFromLocalFile(del_src, src, dst, overwrite)
def run_time_count(func):
"""
Measure a function's running time
Usage: decorate the function with @run_time_count
"""
def run(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
print("Function [{0}] run time is {1} second(s).".format(func.__name__, round(time.time() - start, 4)))
return result
return run
@run_time_count
def write(self, path, contents, encode='utf-8', overwrite_or_append='overwrite'):
"""
Write contents to a file on HDFS
:param path: absolute HDFS path
:param contents: file contents, either a string or a list of strings, e.g. rdd.collect() such as ['str0,str1,str2','str3,str4,str5']
:param encode: output encoding
:param overwrite_or_append: write mode, 'overwrite' or 'append'
"""
try:
filesystem = self.get_file_system()
if overwrite_or_append == 'overwrite':
out = filesystem.create(self.path(path), True)
elif overwrite_or_append == 'append':
out = filesystem.append(self.path(path))
if isinstance(contents, list):
for content in contents:
out.write(bytearray(content + '\r\n', encode))
elif sys.version_info.major == 3 and isinstance(contents, str):
out.write(bytearray(contents, encode))
elif sys.version_info.major == 2 and (isinstance(contents, str) or isinstance(contents, unicode)):
out.write(bytearray(contents, encode))
else:
print('[Error]: Input data format is not right!')
return False
out.flush()
out.close()
print('[Path]: %s' % path)
print('[Info]: File Saved!')
return True
except Exception as e:
print('[Error]: %s' % e)
return False
@run_time_count
def read(self, path, sep=',', header=None, nrows=None):
"""
Read a single UTF-8 encoded csv/txt file stored on HDFS into a pandas.DataFrame
:param path: HDFS path of the file
:param sep: field separator
:param header: set to 0 to use the first row as column names
:param nrows: number of rows to read
"""
filesystem = self.get_file_system()
file = filesystem.open(self.path(path))
# print(file)
data = []
line = True
nrow = 0
if not nrows:
nrows_ = np.inf
else:
nrows_ = nrows
while line and nrow <= nrows_:
try:
nrow = nrow + 1
line = file.readLine()
data.append(line.encode('raw_unicode_escape').decode('utf-8').split(sep))
except Exception as e:
print('[Info]: %s' % str(e))
break
file.close()
if header == 0:
data = pd.DataFrame(data[1:], columns=data[0])
elif header:
data = | pd.DataFrame(data, columns=header) | pandas.DataFrame |
"""
Generate fake data for households survey raw input data.
"""
from mimesis.schema import Field, Schema
import random
import pandas as pd
from datetime import datetime, timedelta
from pathlib import Path
from helpers import code_mask
from helpers import random_date
from helpers import random_integer
import helpers_weight
_ = Field('en-gb', seed=42, providers=[helpers_weight.Distribution])
yes_no_choice = ["Yes", "No"]
yes_no_none_choice = yes_no_choice + [None]
def generate_survey_v0_data(directory, file_date, records, swab_barcodes, blood_barcodes):
"""
Generate survey v0 data. Depends on lab swabs and lab bloods.
"""
v0_data_description = (
lambda: {
'ONS Household ID': _('random.custom_code', mask='############', digit='#'),
'Visit ID': _('random.custom_code', mask='DVS-##########', digit='#'),
'Type of Visit': _('choice', items=['Follow-up', 'First']),
'Visit Date/Time': _('datetime.formatted_datetime', fmt="%Y-%m-%d %H:%M:%S UTC", start=2018, end=2022),
'Street': _('address.street_name'),
'City': _('address.city'),
'County': _('address.state'),
'Postcode': _('address.postal_code'),
'Full_name': _('person.full_name'),
'Email': _('person.email', domains=['gsnail.ac.uk']),
'Swab Barcode 1': _('choice', items=swab_barcodes),
'Bloods Barcode 1': _('choice', items=blood_barcodes)
}
)
schema = Schema(schema=v0_data_description)
survey_v0 = pd.DataFrame(schema.create(iterations=records))
survey_v0.to_csv(directory / f"survey_v0_{file_date}.csv", index=False)
return survey_v0
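# Illustrative sketch (added for clarity): one way to drive a generator above.
# The output directory, date stamp, record count and barcode lists are
# assumptions for demonstration, not values used by the real pipeline.
def _example_generate_v0():
    out_dir = Path("generated_data")
    out_dir.mkdir(exist_ok=True)
    swabs = [f"ONS{i:08d}" for i in range(100)]
    bloods = [f"ONS{i:08d}" for i in range(100, 200)]
    return generate_survey_v0_data(out_dir, "20220101", 10, swabs, bloods)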
def generate_survey_v1_data(directory, file_date, records, swab_barcodes, blood_barcodes):
"""
Generate survey v1 data. Depends on lab swabs and lab bloods.
"""
v1_data_description = (
lambda: {
'ONS Household ID': _('random.custom_code', mask='############', digit='#'),
'Visit ID': _('random.custom_code', mask='DVS-##########', digit='#'),
'Type of Visit': _('choice', items=['Follow-up', 'First']),
'Visit Date/Time': _('datetime.formatted_datetime', fmt="%Y-%m-%d %H:%M:%S UTC", start=2018, end=2022),
'Street': _('address.street_name'),
'City': _('address.city'),
'County': _('address.state'),
'Postcode': _('address.postal_code'),
'Title': _('person.title'),
'First_name': _('person.first_name'),
'Last_name': _('person.last_name'),
'Email': _('person.email', domains=['gsnail.ac.uk']),
'Swab_Barcode_1': _('choice', items=swab_barcodes),
'bloods_barcode_1': _('choice', items=blood_barcodes)
}
)
schema = Schema(schema=v1_data_description)
survey_v1 = pd.DataFrame(schema.create(iterations=records))
survey_v1.to_csv(directory / f"survey_v1_{file_date}.csv", index=False)
return survey_v1
def generate_survey_v2_data(directory, file_date, records, swab_barcodes, blood_barcodes):
"""
Generate survey v2 data.
"""
start_date_list = datetime(2022, 1, 1)
end_date_list = datetime(2022, 1, 10)
ons_voyager_2_data_description = (
lambda: {
"ons_household_id" : _('random.custom_code', mask='############', digit='#'),
"Visit_ID" : _('choice', items=[_('random.custom_code', mask='DVH-##########', digit='#'),
_('random.custom_code', mask='DHVF-##########', digit='#')]),
"Visit Status" : _('choice', items=["Completed", "Dispatched", "Household did not attend", "Partially Completed", "Withdrawn"]),
"Participant_Visit_status" : _('choice', items=[None, "Cancelled", "Completed", "Patient did not attend", "Re-scheduled", "Scheduled"]),
"Participant_status" : _('choice', items=["Active", "Completed", "Withdrawn"]),
"Withdrawal_reason" : _('choice', items=[None, "Bad experience with tester / survey" ,
"Moving location" ,"No longer convenient" ,"No longer wants to take part" ,
"Participant does not want to self swab" ,"Swab / blood process to distressing" ,
"Too many visits"]),
"Type_of_Visit" : _('choice', items=["First Visit", "Follow-up Visit"]),
"Visit_Order" : _('choice', items=[None, "First Visit" ,"Follow-up 1" ,"Follow-up 2" ,"Follow-up 3" ,
"Follow-up 4" ,"Month 10" ,"Month 11" ,"Month 12" ,"Month 13" ,"Month 14",
"Month 15", "Month 18", "Month 2", "Month 3", "Month 4", "Month 5", "Month 6",
"Month 7", "Month 8", "Month 9"]),
"Work_Type_Picklist" : _('choice', items=[None, "Blood and Swab", "Fingerprick and Swab", "Swab Only"]),
# Should follow YYYY-mm-ddTHH:MM:SS.sssZ
"Visit_Date_Time" : _('choice', items=[random_date(start=start_date_list, end=end_date_list, format="%Y-%m-%dT%H:%M:%S.%f")[:-3]+"Z"]),
"Street" : _('choice', items=[None, _('address.street_name')]),
"City" : _('choice', items=[None, _('address.city')]),
"County" : _('choice', items=[None, _('address.province')]),
"Postcode" : _('choice', items=[None, _('address.postal_code')]),
"Cohort" : _('choice', items=["Blood and Swab", "Swab Only"]),
"Fingerprick_Status" : _('choice', items=[None, "Accepted", "At least one person consented", "Declined", "Invited", "Not invited"]),
"Household_Members_Under_2_Years" : _('choice', items=yes_no_none_choice),
"Infant_1" : random_integer(0, 8, 0.1),
"Infant_2" : random_integer(0, 8, 0.1),
"Infant_3" : random_integer(0, 8, 0.1),
"Infant_4" : random_integer(0, 8, 0.1),
"Infant_5" : random_integer(0, 8, 0.1),
"Infant_6" : random_integer(0, 8, 0.1),
"Infant_7" : random_integer(0, 8, 0.1),
"Infant_8" : random_integer(0, 8, 0.1),
"Household_Members_Over_2_and_Not_Present": _('choice', items=[None, "Yes", "No"]),
"Person_1": random_integer(9, 100, 0.1),
"Person_2": random_integer(9, 100, 0.1),
"Person_3": random_integer(9, 100, 0.1),
"Person_4": random_integer(9, 100, 0.1),
"Person_5": random_integer(9, 100, 0.1),
"Person_6": random_integer(9, 100, 0.1),
"Person_7": random_integer(9, 100, 0.1),
"Person_8": random_integer(9, 110, 0.1),
'Person_1_Not_Consenting_Age': random_integer(9, 110, 0.1),
'Person1_Reason_for_Not_Consenting': _('choice',
items=["0","Don't want to participate",
"N","N/A","N/a","N:a","NA","NONE","Na","No","Not interested",
"an", None]),
'Person_2_Not_Consenting_Age': random_integer(9, 110, 0.2),
'Person2_Reason_for_Not_Consenting': _('choice',
items=["0","Don't want to participate",
"N","N/A","N/a","N:a","NA","NONE","Na","No","Not interested",
"an", None]),
'Person_3_Not_Consenting_Age': random_integer(9, 110, 0.3),
'Person3_Reason_for_Not_Consenting': _('choice',
items=["0","Don't want to participate",
"N","N/A","N/a","N:a","NA","NONE","Na","No","Not interested",
"an", None]),
'Person_4_Not_Consenting_Age': random_integer(9, 110, 0.4),
'Person4_Reason_for_Not_Consenting': _('choice',
items=["0","Don't want to participate",
"N","N/A","N/a","N:a","NA","NONE","Na","No","Not interested",
"an", None]),
'Person_5_Not_Consenting_Age': random_integer(9, 110, 0.5),
'Person5_Reason_for_Not_Consenting': _('choice',
items=["0","Don't want to participate",
"N","N/A","N/a","N:a","NA","NONE","Na","No","Not interested",
"an", None]),
'Person_6_Not_Consenting_Age': random_integer(9, 110, 0.5),
'Person6_Reason_for_Not_Consenting': _('choice',
items=["0","Don't want to participate",
"N","N/A","N/a","N:a","NA","NONE","Na","No","Not interested",
"an", None]),
'Person_7_Not_Consenting_Age': random_integer(9, 110, 0.6),
'Person7_Reason_for_Not_Consenting': _('choice',
items=['0', 'Consenting', "Doesn't want to participate",
'No one elae in house wants to participate',
'Not Available', 'Not interested','na', None]),
'Person_8_Not_Consenting_Age': random_integer(9, 110, 0.7),
'Person8_Reason_for_Not_Consenting': _('choice',
items=["0","Don't want to participate",
"N","N/A","N/a","N:a","NA","NONE","Na","No","Not interested",
"an", None]),
'Person_9_Not_Consenting_Age': random_integer(9, 110, 0.8),
'Person9_Reason_for_Not_Consenting': _('choice',
items=["0","Consenting","Don't want to participate",
"Left To go to uni","N","N/A",
"N/a","NA","NONE","Na","No","Not interested",
"The","na", None]),
'Participant_id': _('random.custom_code', mask='DHR-############', digit='#'),
'Title': _('choice', items=["Dr.","Miss.","Mr.","Mrs.","Ms.","Prof.", None]),
'First_Name': _('person.first_name'),
'Middle_Name': _('person.first_name'),
'Last_Name': _('person.last_name'),
# Format dd/mm/YYY HH:MM
'DoB': _('discrete_distribution', population=[_('datetime.formatted_datetime', fmt="%d/%m/%Y %H:%M", start=1980, end=2021), None], weights=[0.9, 0.1]),
'Email': _('choice', items=[_('person.email', domains=['gsnail.ac.uk']), None]),
'Have_landline_number': _('choice', items=yes_no_none_choice),
'Have_mobile_number': _('choice', items=yes_no_none_choice),
'Have_email_address': _('choice', items=yes_no_none_choice),
'Prefer_receive_vouchers': _('choice', items=["Email","Paper(Post)"]),
'Confirm_receive_vouchers': _('choice', items=["false","true"]),
'No_Email_address': random.randint(0,1),
'Able_to_take_blood': _('choice', items=yes_no_none_choice),
'No_Blood_reason_fingerprick': _('choice',
items=["Bruising or pain after first attempt","Couldn't get enough blood",
"No stock","Other","Participant felt unwell/fainted",
"Participant refused to give blood on this visit",
"Participant time constraints","Two attempts made", None]),
'No_Blood_reason_venous': _('choice',
items=["Bruising or pain after first attempt","No stock",
"Non-contact visit. Household self-isolating","Other",
"Participant dehydrated","Participant felt unwell/fainted",
"Participant refused","Participant time constraints",
"Poor venous access","Two attempts made", None]),
'bloods_barcode_1': _('discrete_distribution', population=blood_barcodes,
weights=[1/len(blood_barcodes)]*len(blood_barcodes),
null_prop=0.2
),
'Swab_Barcode_1': _('discrete_distribution', population=swab_barcodes,
weights=[1/len(swab_barcodes)]*len(swab_barcodes),
null_prop=0.2
),
# Format: YYYY-mm-ddTHH:MM:SS.sssZ
'Date_Time_Samples_Taken': _('discrete_distribution', population=[random_date(start=start_date_list, end = end_date_list, format="%Y-%m-%dT%H:%M:%S.%f")[:-3]+"Z", None], weights=[0.5, 0.5]),
'Sex': _('choice', items=["Female","Male", None]),
'Gender': _('choice', items=['Female', 'Male', 'Prefer not to say', None]),
'Ethnic_group': _('choice', items=['Asian or Asian British', 'Black or African or Caribbean or Black British',
'Mixed/Multiple Ethnic Groups', 'Other Ethnic Group', 'White']),
'Ethnicity': _('choice', items=['Any other Asian background', 'Any other Black background',
'Any other Mixed background', 'Any other ethnic group',
'Any other white background', 'Asian or Asian British-Bangladeshi',
'Asian or Asian British-Chinese', 'Asian or Asian British-Indian',
'Asian or Asian British-Pakistani', 'Black,Caribbean,African-African',
'Black,Caribbean,Afro-Caribbean', 'Mixed-White & Asian',
'Mixed-White & Black African', 'Mixed-White & Black Caribbean',
'Other ethnic group-Arab', 'White-British', 'White-Gypsy or Irish Traveller',
'White-Irish']),
'Ethnicity_Other': _('text.sentence'), #free text field, can be null 1 to 249
'Consent_to_First_Visit': _('choice', items = yes_no_choice),
'Consent_to_Five_Visits': _('choice', items = yes_no_choice),
'Consent_to_April_22': _('choice', items = yes_no_choice),
'Consent_to_Sixteen_Visits': _('choice', items = yes_no_choice),
'Consent_to_Blood_Test': _('choice', items = yes_no_choice),
'Consent_to_Finger_prick_A1_A3': _('choice', items = yes_no_none_choice),
'Consent_to_extend_study_under_16_B1_B3': _('choice', items = yes_no_none_choice),
'Consent_to_be_Contacted_Extra_Research': _('choice', items = yes_no_choice),
'Consent_to_be_Contacted_Extra_ResearchYN': _('choice', items = yes_no_none_choice),
'Consent_to_use_of_Surplus_Blood_Samples': _('choice', items = yes_no_choice),
'Consent_to_use_of_Surplus_Blood_SamplesYN': _('choice', items = yes_no_none_choice),
'Approached_for_blood_samples?': _('choice', items = yes_no_none_choice),
'Consent_to_blood_samples_if_positive': _('choice', items=['False', 'True']),
'Consent_to_blood_samples_if_positiveYN': _('choice', items = yes_no_none_choice),
'Consent_to_fingerprick_blood_samples': _('choice', items=['False', 'True']),
'Accepted_invite_to_fingerprick': _('choice', items = yes_no_none_choice),
'Re_consented_for_blood': _('choice', items=['False', 'True']),
'What_is_the_title_of_your_main_job': _('text.sentence'), #free text field, can be null 1 to 73
'What_do_you_do_in_your_main_job_business': _('text.sentence'), #free text field, can be null 1 to 333
'Occupations_sectors_do_you_work_in': _('choice', items=['Armed forces', 'Art or entertainment or recreation',
'Arts or Entertainment or Recreation', 'Arts or entertainment or recreation',
'Civil Service or Local Government', 'Financial Services (incl. insurance)',
'Financial services (incl. insurance)',
'Food Production and agriculture (incl. farming)',
'Food production and agriculture (incl. farming)', 'Health care',
'Hospitality (e.g. hotel or restaurant or cafe)',
'Information technology and communication', 'Manufacturing or construction',
'Other employment sector (specify)', 'Other occupation sector',
'Other occupation sector (specify)', 'Personal Services (e.g. hairdressers or tattooists)',
'Retail Sector (incl. wholesale)', 'Retail sector (incl. wholesale)', 'Social Care',
'Social care', 'Teaching and education', 'Transport (incl. storage and logistic)',
'Transport (incl. storage and logistics)', 'Transport (incl. storage or logistic)', None]),
'occupation_sector_other': _('text.sentence'), #free text field, can be null 1 to 75
'Work_in_a_nursing_residential_care_home': _('choice', items=yes_no_none_choice),
'Do_you_currently_work_in_healthcare': _('choice', items=[None, 'Primary care (e.g. GP, dentist)', 'Secondary care (e.g. hospital)',
' Other healthcare (e.g. mental health)']),
'Direct_contact_patients_clients_resid': _('choice', items=yes_no_none_choice),
'Have_physical_mental_health_or_illnesses': _('choice', items=yes_no_none_choice),
'physical_mental_health_or_illness_reduces_activity_ability': _('choice', items=[None, 'Not at all', 'Yes, a little', 'Yes, a lot']),
'Have_you_ever_smoked_regularly': _('choice', items=yes_no_none_choice),
'Do_you_currently_smoke_or_vape': _('choice', items=[None, 'Yes, cigarettes', 'Yes, cigar', 'Yes, pipe', 'Yes, vape/e-cigarettes']),
'Do_you_currently_smoke_or_vape_at_all': _('choice', items=[None, 'Cigarettes', 'Cigar', 'Pipe', 'Vape/e-cigarettes', 'Hookah/shisha pipes']),
'Smoke_Yes_cigarettes': _('choice', items=yes_no_none_choice),
'Smoke_Yes_cigar': _('choice', items=yes_no_none_choice),
'Smoke_Yes_pipe': _('choice', items=yes_no_none_choice),
'Smoke_Yes_vape_e_cigarettes': _('choice', items=yes_no_none_choice),
'Smoke_Hookah/shisha pipes': _('choice', items=yes_no_none_choice),
'What_is_your_current_working_status': _('choice', items=[None, '5y and older in full-time education',
'Employed and currently working (including if on leave or sick leave for less than 4 weeks)',
'Attending university (including if temporarily absent)',
'Self-employed and currently working (include if on leave or sick leave for less than 4 weeks)']),
'Paid_employment': _('choice', items=yes_no_none_choice),
'Main_Job_Changed': _('choice', items=yes_no_none_choice),
'Where_are_you_mainly_working_now': _('choice', items=[None, 'Both (work from home and work somewhere else)',
'From home (in the same grounds or building as your home)',
'Somewhere else (not at your home)' , 'Somewhere else (not your home)']),
'How_often_do_you_work_elsewhere': _('choice', items=[None, '0', '1', '2', '3', '4', '5', '6', '7', 'Participant Would Not/Could Not Answer',
'up to 1']),
'How_do_you_get_to_and_from_work_school': _('choice', items=[None, 'Bus', 'Car or Van', 'On foot', 'Bicycle', 'Other method']),
'Can_you_socially_distance_at_work': _('choice', items=[None, 'Difficult to maintain 2 meters - but I can usually be at least 1m from other people',
'Easy to maintain 2m - it is not a problem to stay this far away from other people',
'Very difficult to be more than 1 meter away as my work means I am in close contact with others on a regular basis']),
'Had_symptoms_in_the_last_7_days': _('choice', items=yes_no_none_choice),
'Which_symptoms_in_the_last_7_days': _('choice', items=[None, 'Fever ', 'Muscle ache', 'Weakness/tiredness', 'Sore Throat']),
'Date_of_first_symptom_onset': _('discrete_distribution', population=[random_date(start=start_date_list, end=end_date_list), None], weights=[0.5, 0.5]),
'Symptoms_7_Fever': _('choice', items=yes_no_none_choice),
'Symptoms_7_Muscle_ache_myalgia': _('choice', items=yes_no_none_choice),
'Symptoms_7_Fatigue_weakness': _('choice', items=yes_no_none_choice),
'Symptoms_7_Sore_throat': _('choice', items=yes_no_none_choice),
'Symptoms_7_Cough': _('choice', items=yes_no_none_choice),
'Symptoms_7_Shortness_of_breath': _('choice', items=yes_no_none_choice),
'Symptoms_7_Headache': _('choice', items=yes_no_none_choice),
'Symptoms_7_Nausea_vomiting': _('choice', items=yes_no_none_choice),
'Symptoms_7_Abdominal_pain': _('choice', items=yes_no_none_choice),
'Symptoms_7_Diarrhoea': _('choice', items=yes_no_none_choice),
'Symptoms_7_Loss_of_taste': _('choice', items=yes_no_none_choice),
'Symptoms_7_Loss_of_smell': _('choice', items=yes_no_none_choice),
'Are_you_self_Isolating_S2': _('choice', items=
[
"No",
"Yes because you have/have had symptoms of COVID-19 or a positive test",
"Yes because you live with someone who has/has had symptoms or a positive test but you haven't had symptoms yourself",
"Yes for other reasons related to reducing your risk of getting COVID-19 (e.g. going into hospital or shielding)",
"Yes for other reasons related to you having had an increased risk of getting COVID-19 (e.g. having been in contact with a known case or quarantining after travel abroad)",
] * 4 + [
None
] * 3 + [
"Participant Would Not/Could Not Answer",
"Yes because you have/have had symptoms of COVID-19",
"Yes because you live with someone who has/has had symptoms but you haven't had them yourself",
"Yes for other reasons (e.g. going into hospital or quarantining)"
]
),
'Do_you_think_you_have_Covid_Symptoms': _('choice', items=['Yes', 'Participant Would Not/Could Not Answer','No']*2 + [None]),
'Contact_Known_Positive_COVID19_28_days': _('choice', items=yes_no_none_choice),
'If_Known_last_contact_date': _('discrete_distribution', population=[random_date(start=start_date_list, end = end_date_list), None], weights=[0.5, 0.5]),
'If_Known_type_of_contact_S2': _('choice', items=['Living in your own home','Outside your home', None]),
'Contact_Suspect_Positive_COVID19_28_d': _('choice', items=yes_no_none_choice),
'If_suspect_last_contact_date': _('discrete_distribution', population=[random_date(start=start_date_list, end = end_date_list), None], weights=[0.5, 0.5]),
'If_suspect_type_of_contact_S2': _('choice', items=['Living in your own home','Outside your home', None]),
'You_been_Hospital_last_28_days': _('choice', items=yes_no_none_choice),
'OtherHouse_been_Hospital_last_28_days': _('choice', items=['Yes', 'Participant Would Not/Could Not Answer','No'] + [None]),
'Your_been_in_Care_Home_last_28_days': _('choice', items=['Yes', 'Participant Would Not/Could Not Answer','No']*2 + [None]),
'OtherHouse_been_in_Care_Home_last_28_days': _('choice', items=['Yes', 'Participant Would Not/Could Not Answer','No']*2 + [None]),
'Hours_a_day_with_someone_else': _('choice', items=[_('random.randints', amount=8, a=0, b=24)] + [None]),
'Physical_Contact_18yrs': _('choice', items=['0', '1-5', '11-20', '21 or more', '6-10', 'Participant Would Not/Could Not Answer', None]),
'Physical_Contact_18_to_69_yrs': _('choice', items=['0', '1-5', '11-20', '21 or more', '6-10', 'Participant Would Not/Could Not Answer', None]),
'Physical_Contact_70_yrs': _('choice', items=[None, "0", "1-5", "11-20", "21 or more", "6-10", "Participant Would Not/Could Not Answer"]),
"Social_Distance_Contact_18yrs" : _('choice', items=[None, "0", "1-5", "11-20", "21 or more", "6-10", "Participant Would Not/Could Not Answer"]),
"Social_Distance_Contact_18_to_69_yrs" : _('choice', items=[None, "0", "1-5", "11-20", "21 or more", "6-10", "Participant Would Not/Could Not Answer"]),
"Social_Distance_Contact_70_yrs" : _('choice', items=[None, "0", "1-5", "11-20", "21 or more", "6-10", "Participant Would Not/Could Not Answer"]),
"1Hour_or_Longer_another_person_home" : _('choice', items=[None, "1","2","3","4","5","6","7 times or more","None",
"Participant Would Not/Could Not Answer"]),
"1Hour_or_Longer_another_person_yourhome" : _('choice', items=[None, "1","2","3","4","5","6","7 times or more","None",
"Participant Would Not/Could Not Answer"]),
"Times_Outside_Home_For_Shopping" : _('choice', items=[None, "0", "1","2","3","4","5","6","7 times or more","None",
"Participant Would Not/Could Not Answer"]),
"Shopping_last_7_days" : _('choice', items=[None, "0", "1","2","3","4","5","6","7 times or more","None","Participant Would Not/Could Not Answer"]),
"Socialise_last_7_days" : _('choice', items=[None, "0", "1","2","3","4","5","6","7 times or more","None","Participant Would Not/Could Not Answer"]),
"Regular_testing_COVID" : _('choice', items=yes_no_none_choice),
"Face_Covering_or_Mask_outside_of_home" : _('choice', items=[None, "My face is already covered for other reasons (e.g. religious or cultural reasons)",
"No", "Participant Would Not/Could Not Answer", "Yes at work/school only",
"Yes in other situations only (including public transport or shops)",
"Yes in other situations only (including public transport/shops)",
"Yes usually both at work/school and in other situations"]),
"Face_Mask_Work_Place" : _('choice', items=[None, "My face is already covered for other reasons (e.g. religious or cultural reasons)",
"Never", "Not going to place of work or education", "Participant Would Not/Could Not Answer",
"Yes always", "Yes sometimes"]),
"Face_Mask_Other_Enclosed_Places" : _('choice', items=[None, "My face is already covered for other reasons (e.g. religious or cultural reasons)",
"Never", "Not going to other enclosed public spaces or using public transport",
"Participant Would Not/Could Not Answer", "Yes always", "Yes sometimes"]),
"Do_you_think_you_have_had_Covid_19" : _('choice', items=yes_no_none_choice),
"think_had_covid_19_any_symptoms" : _('choice', items=yes_no_none_choice),
"think_had_covid_19_which_symptoms": _('choice', items=[None, _('text.answer')]), # does this need multiple values concatted?
"Previous_Symptoms_Fever" : _('choice', items=yes_no_none_choice),
"Previous_Symptoms_Muscle_ache_myalgia" : _('choice', items=yes_no_none_choice),
"Previous_Symptoms_Fatigue_weakness" : _('choice', items=yes_no_none_choice),
"Previous_Symptoms_Sore_throat" : _('choice', items=yes_no_none_choice),
"Previous_Symptoms_Cough" : _('choice', items=yes_no_none_choice),
"Previous_Symptoms_Shortness_of_breath" : _('choice', items=yes_no_none_choice),
"Previous_Symptoms_Headache" : _('choice', items=yes_no_none_choice),
"Previous_Symptoms_Nausea_vomiting" : _('choice', items=yes_no_none_choice),
"Previous_Symptoms_Abdominal_pain" : _('choice', items=yes_no_none_choice),
'Previous_Symptoms_Diarrhoea': _('choice', items=yes_no_none_choice),
'Previous_Symptoms_Loss_of_taste': _('choice', items=yes_no_none_choice),
'Previous_Symptoms_Loss_of_smell': _('choice', items=yes_no_none_choice),
'If_yes_Date_of_first_symptoms': _('discrete_distribution', population=[random_date(start=start_date_list, end = end_date_list), None], weights=[0.5, 0.5]),
'Did_you_contact_NHS': _('choice', items=yes_no_none_choice),
'Were_you_admitted_to_hospital': _('choice', items=yes_no_none_choice),
'Have_you_had_a_swab_test': _('choice', items=yes_no_none_choice),
'If_Yes_What_was_result': _('choice', items=["All tests failed", "One or more negative tests but none positive",
"One or more negative tests but none were positive",
"One or more positive test(s)", "Waiting for all results",
None]),
'If_positive_Date_of_1st_ve_test': _('discrete_distribution', population=[random_date(start=start_date_list, end = end_date_list), None], weights=[0.5, 0.5]),
'If_all_negative_Date_last_test': _('discrete_distribution', population=[random_date(start=start_date_list, end = end_date_list), None], weights=[0.5, 0.5]),
'Have_you_had_a_blood_test_for_Covid': _('choice', items=yes_no_none_choice),
'What_was_the_result_of_the_blood_test': _('choice', items=["All tests failed", "One or more negative tests but none positive",
"One or more negative tests but none were positive",
"One or more positive test(s)", "Waiting for all results",
None]),
'Where_was_the_test_done': _('choice', items=["Home Test", "In the NHS (e.g. GP or hospital)",
"Participant Would Not/Could Not Answer", "Private Lab",
None]),
'If_ve_Blood_Date_of_1st_ve_test': _('discrete_distribution', population=[random_date(start=start_date_list, end = end_date_list), None], weights=[0.5, 0.5]),
'If_all_ve_blood_Date_last_ve_test': _('discrete_distribution', population=[random_date(start=start_date_list, end = end_date_list), None], weights=[0.5, 0.5]),
'Have_Long_Covid_Symptoms': _('choice', items=yes_no_none_choice),
'Long_Covid_Reduce_Activities': _('choice', items=["Not at all", "Yes a little", "Yes a lot", None]),
'Long_Covid_Symptoms': _('choice', items=["Fever ", " Headache ", " Muscle ache ", " Weakness/tiredness ",
"Nausea/vomiting", "Abdominal pain", "Diarrhoea", "Sore Throat",
"Cough", "Shortness of breath", "Loss of taste", "Loss of smell",
"Loss of appetite", "Chest pain", "Palpitations", "Vertigo/dizziness",
"Worry/anxiety", "Low mood/not enjoying anything", "Trouble sleeping",
"Memory loss or confusion", "Difficulty concentrating", "ALL No", "Yes",
None]),
'Long_Covid_Fever': _('choice', items=yes_no_none_choice),
'Long_Covid_Weakness_tiredness': _('choice', items=yes_no_none_choice),
'Long_Covid_Diarrhoea': _('choice', items=yes_no_none_choice),
'Long_Covid_Loss_of_smell': _('choice', items=yes_no_none_choice),
'Long_Covid_Shortness_of_breath': _('choice', items=yes_no_none_choice),
'Long_Covid_Vertigo_dizziness': _('choice', items=yes_no_none_choice),
'Long_Covid_Trouble_sleeping': _('choice', items=yes_no_none_choice),
'Long_Covid_Headache': _('choice', items=yes_no_none_choice),
'Long_Covid_Nausea_vomiting': _('choice', items=yes_no_none_choice),
'Long_Covid_Loss_of_appetite': _('choice', items=yes_no_none_choice),
'Long_Covid_Sore_throat': _('choice', items=yes_no_none_choice),
'Long_Covid_Chest_pain': _('choice', items=yes_no_none_choice),
'Long_Covid_Worry_anxiety': _('choice', items=yes_no_none_choice),
'Long_Covid_Memory_loss_or_confusion': _('choice', items=yes_no_none_choice),
'Long_Covid_Muscle_ache': _('choice', items=yes_no_none_choice),
'Long_Covid_Abdominal_pain': _('choice', items=yes_no_none_choice),
'Long_Covid_Loss_of_taste': _('choice', items=yes_no_none_choice),
'Long_Covid_Cough': _('choice', items=yes_no_none_choice),
'Long_Covid_Palpitations': _('choice', items=yes_no_none_choice),
'Long_Covid_Low_mood_not_enjoying_anything': _('choice', items=yes_no_none_choice),
'Long_Covid_Difficulty_concentrating': _('choice', items=yes_no_none_choice),
'Have_you_been_offered_a_vaccination': _('choice', items=yes_no_none_choice),
'Vaccinated_Against_Covid': _('choice', items=yes_no_none_choice),
'Type_Of_Vaccination': _('choice', items=["Don't know type", "From a research study/trial", "Janssen/Johnson&Johnson",
"Moderna", "Novavax", "Other / specify", "Oxford/AstraZeneca", "Pfizer/BioNTech",
"Sinopharm", "Sinovac", "Sputnik", "Valneva", None]),
'Vaccination_Other': _('choice', items=["OtherVaxx", None]), # This is usually freetext, depends on the previous question, may need to change
'Number_Of_Doses': _('choice', items=["1", "2", "3 or more", None]),
'Date_Of_Vaccination': _('discrete_distribution', population=[random_date(start=start_date_list, end = end_date_list), None], weights=[0.5, 0.5]),
'Have_you_been_outside_UK_since_April': _('choice', items=yes_no_none_choice),
'been_outside_uk_last_country': _('choice', items=["Afghanistan", "Bangladesh", "Cambodia", "Denmark", "Ecuador",
"Gabon", "Honduras", "Iceland", "Jamaica", "Kenya", "Latvia",
"Madagascar", "Namibia", "Oman", "Pakistan", "Qatar", "Romania", "Samoa",
"Thailand", "Uganda", "Venezuela", "Zimbabwe", None]),
'been_outside_uk_last_date': _('discrete_distribution', population=[_('datetime.formatted_datetime', fmt="%d/%m/%Y", start=2020, end=2022), None], weights=[0.5, 0.5]),
'Have_you_been_outside_UK_Lastspoke': _('choice', items=yes_no_none_choice)
}
)
schema = Schema(schema=ons_voyager_2_data_description)
survey_responses = pd.DataFrame(schema.create(iterations=records))
survey_responses.to_csv(directory / f"ONSECRF5_Datafile_{file_date}.csv", index=False, sep="|")
return survey_responses
def generate_ons_gl_report_data(directory, file_date, records):
"""
Generate dummy swab test results.
"""
start_date_list = datetime(2022, 1, 1)
end_date_list = datetime(2022, 1, 10)
ons_gl_report_data_description = (
lambda: {
'Sample': _('random.custom_code', mask='ONS########', digit='#'),
'Result': _('choice', items=['Negative', 'Positive', 'Void']),
'Date Tested': _('datetime.formatted_datetime', fmt="%Y-%m-%d %H:%M:%S UTC", start=2018, end=2022),
'Lab ID': _('choice', items=['GLS']),
'testKit': _('choice', items=['rtPCR', None]),
'CH1-Target': _('choice', items=['ORF1ab', None]),
'CH1-Result': _('choice', items=['Inconclusive', 'Negative', 'Positive', 'Rejected']),
'CH1-Cq': _('float_number', start=10.0, end=40.0, precision=12),
'CH2-Target': _('choice', items=['N gene', None]),
'CH2-Result': _('choice', items=['Inconclusive', 'Negative', 'Positive', 'Rejected']),
'CH2-Cq': _('float_number', start=10.0, end=40.0, precision=12),
'CH3-Target': _('choice', items=['S gene', None]),
'CH3-Result': _('choice', items=['Inconclusive', 'Negative', 'Positive', 'Rejected']),
'CH3-Cq': _('float_number', start=10.0, end=40.0, precision=12),
'CH4-Target': _('choice', items=['S gene', None]),
'CH4-Result': _('choice', items=['Positive', 'Rejected']),
'CH4-Cq': _('float_number', start=15.0, end=30.0, precision=12)
}
)
schema = Schema(schema=ons_gl_report_data_description)
survey_ons_gl_report = pd.DataFrame(schema.create(iterations=records))
survey_ons_gl_report.to_csv(directory / f"ONS_GL_Report_{file_date}_0000.csv", index=False)
return survey_ons_gl_report
def generate_unioxf_medtest_data(directory, file_date, records, target):
"""
Generate Unioxf medtest (blood test results) data for the given target.
"""
start_date_list = datetime(2022, 1, 1)
end_date_list = datetime(2022, 1, 10)
unioxf_medtest_data_description = (
lambda: {
'Serum Source ID': _('random.custom_code', mask='ONS########', digit='#'),
'Blood Sample Type': _('choice', items=['Venous', 'Capillary']),
'Plate Barcode': _('random.custom_code', mask=f'ONS_######C{target}-#', digit='#'),
'Well ID': _('random.custom_code', mask='@##', char='@', digit='#'),
'Detection': _('choice', items=['DETECTED', 'NOT detected', 'failed']),
'Monoclonal quantitation (Colourimetric)': _('float_number', start=0.0, end=3251.11, precision=4),
'Monoclonal bounded quantitation (Colourimetric)': _('float_number', start=20, end=400, precision=1),
'Monoclonal undiluted quantitation (Colourimetric)': _('integer_number', start=0, end=20000),
'Date ELISA Result record created': _('datetime.formatted_datetime', fmt="%Y-%m-%d", start=2018, end=2022),
'Date Samples Arrayed Oxford': _('datetime.formatted_datetime', fmt="%Y-%m-%d", start=2018, end=2022),
'Date Samples Received Oxford': _('datetime.formatted_datetime', fmt="%Y-%m-%d", start=2018, end=2022),
'Voyager Date Created': _('datetime.formatted_datetime', fmt="%Y-%m-%d %H:%M:%S", start=2018, end=2022)
}
)
schema = Schema(schema=unioxf_medtest_data_description)
survey_unioxf_medtest = pd.DataFrame(schema.create(iterations=records))
survey_unioxf_medtest.to_csv(directory / f"Unioxf_medtest{target}_{file_date}.csv", index=False)
return survey_unioxf_medtest
def generate_northern_ireland_data(directory, file_date, records):
"""
Generate the Northern Ireland sample file.
"""
northern_ireland_data_description = (
lambda: {
'UIC': _('random.custom_code', mask='############', digit='#'),
'Sample': _('random.custom_code', mask="#&&&", digit='#', char='&'),
'oa11': code_mask(mask="N0000####",min_code=["N00000001",None],max_code=["N00004537",None]),
'laua': code_mask(mask="N090000##",min_code=["N09000001",None],max_code=["N09000011",None]),
'ctry': "N92000002",
'GOR9D': "N99999999",
'lsoa11': code_mask(mask="95&&##S#",min_code=["95AA01S1",None],max_code=["95ZZ16S2",None]),
'msoa11': "N99999999",
"oac11": code_mask(mask="#&#",min_code=["1A1",None],max_code=["8B3",None],use_incremntal_letters=True),
"CIS20CD": code_mask(mask="J06000###",min_code="J06000229",max_code="J06000233"),
"rgn":"N92000002",
"imd": code_mask(mask="00###",min_code=["00001",None],max_code=["00890",None]),
"interim_id": 999
}
)
schema = Schema(schema=northern_ireland_data_description)
northern_ireland_data = pd.DataFrame(schema.create(iterations=records))
northern_ireland_data.to_csv(directory / f"CIS_Direct_NI_{file_date}.csv", index=False)
return northern_ireland_data
def generate_sample_direct_data(directory, file_date, records):
"""
    Generate sample direct England data.
"""
sample_direct_eng_description = (
lambda: {
'UAC': _('random.custom_code', mask='############', digit='#'),
'LA_CODE': _('random.custom_code', mask='&########', digit='#'),
'Bloods': _('choice', items=["Swab only", "Swab and Blood"]),
'oa11': code_mask(mask="X00######",min_code=["E00000001","W00000001","S00088956",None],max_code=["E00176774","W00010265","S00135306",None]),
'laua': code_mask(mask="X########",min_code=["E06000001","E07000008","E08000001","E09000001","W06000001","S12000005",None],max_code=["E06000062","E07000246","E08000037","E09000033","W06000024","S12000050",None]),
'ctry': _('choice', items=["E92000001","W92000004","S92000003"]),
'CUSTODIAN_REGION_CODE': _('choice', items=[code_mask(mask="E########",min_code="E12000001",max_code="E12000009"),"W99999999 ","S99999999",None]),
'lsoa11': code_mask(mask="E########",min_code=["E01000001","W01000001","S01006506",None],max_code=["E01033768","W01001958","S01013481",None]),
'msoa11': code_mask(mask="E########",min_code=["E02000001","W02000001","S02001236",None],max_code=["E02006934","W02000423","S02002514",None]),
'ru11ind': _('choice', items=[code_mask(mask="&#",min_code="A1",max_code="F2"), code_mask(mask="#",min_code="1",max_code="8"),None]),
'oac11': _('choice', items=[code_mask(mask="#&#",min_code="1A1",max_code="8B3"), "9Z9",None]),
'rgn': _('choice', items=[code_mask(mask="E########",min_code="E12000001",max_code="E12000009"), "W92000004", "S92000003",None]),
'imd': _('choice', items=[_('random.randint', a=1, b=32844),None]),
'interim_id': _('choice', items=[_('random.randint', a=1, b=138),None])
}
)
schema = Schema(schema=sample_direct_eng_description)
sample_direct_data = pd.DataFrame(schema.create(iterations=records))
sample_direct_data.to_csv(directory / f"sample_direct_eng_wc{file_date}.csv", index=False)
return sample_direct_data
def generate_historic_bloods_data(directory, file_date, records):
"""
Generate historic bloods file
"""
historic_bloods_description = (
lambda: {
'blood_barcode_OX': code_mask(mask="ONS########",min_code="ONS00000001",max_code="ONS99999999"),
'received_ox_date': _('datetime.formatted_datetime', fmt="%Y-%m-%d", start=2018, end=2022),
'result_tdi': random.choices(["Positive", "Negative", "Could not process", "Insufficient sample", None], weights=(2,2,2,2,1), k=1)[0],
'result_siemens': random.choices(["Positive", "Negative", "Insufficient sample", None], weights=(2,2,2,1), k=1)[0],
'result_tdi_date': _('datetime.formatted_datetime', fmt="%Y-%m-%d", start=2018, end=2022),
'assay_tdi': _('random.randint', a=100000, b=14000000),
'assay_siemens': random.choices([str(_('random.uniform', a=0, b=10, precision=2)), "< 0.05", "> 10.00"], weights=(3,1,1), k=1)[0],
'plate_tdi': code_mask(mask="ONS_######",min_code="ONS_000001",max_code="ONS_999999"),
'well_tdi': code_mask(mask="&##",min_code="A11",max_code="Z99"),
'lims_id': code_mask(mask="ONS########",min_code="ONS00000001",max_code="ONS99999999"),
'blood_sample_type': _('choice', items=["Venous","Capillary"]),
'voyager_blood_dt_time': _('datetime.formatted_datetime', fmt="%Y-%m-%d", start=2018, end=2022),
'arrayed_ox_date': _('datetime.formatted_datetime', fmt="%Y-%m-%d %H:%M:%S UTC", start=2018, end=2022),
'assay_mabs': _('random.uniform', a=0, b=150000, precision=6),
}
)
schema = Schema(schema=historic_bloods_description)
historic_bloods_data = pd.DataFrame(schema.create(iterations=records))
historic_bloods_data.to_csv(directory / f"historic_bloods_{file_date}.csv", index=False)
return historic_bloods_data
def generate_unprocessed_bloods_data(directory, file_date, records):
"""
    Generate unprocessed bloods data.
"""
unprocessed_bloods_description = (
lambda: {
'Date Received':_('datetime.formatted_datetime', fmt="%Y-%m-%d %H:%M:%S UTC", start=2018, end=2022),
'Sample ID': code_mask(mask="[ONS,ons]########",min_code="ONS00000001",max_code="ONS99999999"),
'Rejection Code': _('random.randint', a=1, b=9999),
'Reason for rejection': _('text.sentence'),
'Sample Type V/C': _('choice', items=["V","C"])
}
)
schema = Schema(schema=unprocessed_bloods_description)
unprocessed_bloods_data = pd.DataFrame(schema.create(iterations=records))
unprocessed_bloods_data.to_csv(directory / f"unprocessed_bloods_{file_date}.csv", index=False)
return unprocessed_bloods_data
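# Illustrative sketch (not part of the original file): every generator above follows the same
# pattern -- a lambda "description" built from the module-level field generator `_` and the
# Schema class already used above (a mimesis-style API), materialised into a DataFrame. The
# field names below are assumptions.
def _example_minimal_generator(records=5):
    description = (
        lambda: {
            'Sample': _('random.custom_code', mask='ONS########', digit='#'),
            'Result': _('choice', items=['Negative', 'Positive', 'Void']),
        }
    )
    return pd.DataFrame(Schema(schema=description).create(iterations=records))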
if __name__ == "__main__":
raw_dir = Path(__file__).parent.parent / "generated_data"
swab_dir = raw_dir / "swab"
blood_dir = raw_dir / "blood"
survey_dir = raw_dir / "survey"
northern_ireland_dir = raw_dir / "northern_ireland_sample"
sample_direct_dir = raw_dir / "england_wales_sample"
unprocessed_bloods_dir = raw_dir / "unprocessed_blood"
historic_bloods_dir = raw_dir / "historic_blood"
historic_swabs_dir = raw_dir / "historic_swab"
historic_survey_dir = raw_dir / "historic_survey"
for directory in [swab_dir, blood_dir, survey_dir, northern_ireland_dir, sample_direct_dir, unprocessed_bloods_dir, historic_bloods_dir, historic_swabs_dir, historic_survey_dir]:
directory.mkdir(parents=True, exist_ok=True)
file_date = datetime.now()
lab_date_1 = datetime.strftime(file_date - timedelta(days=1), format="%Y%m%d")
lab_date_2 = datetime.strftime(file_date - timedelta(days=2), format="%Y%m%d")
file_date = datetime.strftime(file_date, format="%Y%m%d")
# Historic files
historic_bloods = generate_historic_bloods_data(historic_bloods_dir, file_date, 30)
historic_swabs = generate_ons_gl_report_data(historic_swabs_dir, file_date, 30)
historic_v2 = generate_survey_v2_data(
directory = historic_survey_dir,
file_date = file_date,
records = 100,
swab_barcodes = historic_swabs["Sample"].unique().tolist(),
blood_barcodes = historic_bloods['blood_barcode_OX'].unique().tolist()
)
# Delta files
lab_swabs_1 = generate_ons_gl_report_data(swab_dir, file_date, 10)
lab_swabs_2 = generate_ons_gl_report_data(swab_dir, lab_date_1, 10)
lab_swabs_3 = generate_ons_gl_report_data(swab_dir, lab_date_2, 10)
lab_swabs = | pd.concat([lab_swabs_1, lab_swabs_2, lab_swabs_3]) | pandas.concat |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
from pandas import Timestamp
def create_dataframe(tuple_data):
"""Create pandas df from tuple data with a header."""
return pd.DataFrame.from_records(tuple_data[1:], columns=tuple_data[0])
### REUSABLE FIXTURES --------------------------------------------------------
@pytest.fixture()
def indices_3years():
"""Three indices over 3 years."""
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0, 100.0, 100.0),
(Timestamp('2012-02-01 00:00:00'), 101.239553643, 96.60525323799999, 97.776838217),
(Timestamp('2012-03-01 00:00:00'), 102.03030533, 101.450821724, 96.59101862),
(Timestamp('2012-04-01 00:00:00'), 104.432402661, 98.000263617, 94.491213369),
(Timestamp('2012-05-01 00:00:00'), 105.122830333, 95.946873831, 93.731891785),
(Timestamp('2012-06-01 00:00:00'), 103.976692567, 97.45914568100001, 90.131064035),
(Timestamp('2012-07-01 00:00:00'), 106.56768678200001, 94.788761174, 94.53487522),
(Timestamp('2012-08-01 00:00:00'), 106.652151036, 98.478217946, 92.56165627700001),
(Timestamp('2012-09-01 00:00:00'), 108.97290730799999, 99.986521241, 89.647230903),
(Timestamp('2012-10-01 00:00:00'), 106.20124385700001, 99.237117891, 92.27819603799999),
(Timestamp('2012-11-01 00:00:00'), 104.11913898700001, 100.993436318, 95.758970985),
(Timestamp('2012-12-01 00:00:00'), 107.76600978, 99.60424011299999, 95.697091336),
(Timestamp('2013-01-01 00:00:00'), 98.74350698299999, 100.357120656, 100.24073830200001),
(Timestamp('2013-02-01 00:00:00'), 100.46305431100001, 99.98213513200001, 99.499007278),
(Timestamp('2013-03-01 00:00:00'), 101.943121499, 102.034291064, 96.043392231),
(Timestamp('2013-04-01 00:00:00'), 99.358987741, 106.513055039, 97.332012817),
(Timestamp('2013-05-01 00:00:00'), 97.128074038, 106.132168479, 96.799806436),
(Timestamp('2013-06-01 00:00:00'), 94.42944162, 106.615734964, 93.72086654600001),
(Timestamp('2013-07-01 00:00:00'), 94.872365481, 103.069773446, 94.490515359),
(Timestamp('2013-08-01 00:00:00'), 98.239415397, 105.458081805, 93.57271149299999),
(Timestamp('2013-09-01 00:00:00'), 100.36774827100001, 106.144579258, 90.314524375),
(Timestamp('2013-10-01 00:00:00'), 100.660205114, 101.844838294, 88.35136848399999),
(Timestamp('2013-11-01 00:00:00'), 101.33948384799999, 100.592230114, 93.02874928899999),
(Timestamp('2013-12-01 00:00:00'), 101.74876982299999, 102.709038791, 93.38277933200001),
(Timestamp('2014-01-01 00:00:00'), 101.73439491, 99.579700011, 104.755837919),
(Timestamp('2014-02-01 00:00:00'), 100.247760523, 100.76732961, 100.197855834),
(Timestamp('2014-03-01 00:00:00'), 102.82080245600001, 99.763171909, 100.252537549),
(Timestamp('2014-04-01 00:00:00'), 104.469889684, 96.207920184, 98.719797067),
(Timestamp('2014-05-01 00:00:00'), 105.268899775, 99.357641836, 99.99786671),
(Timestamp('2014-06-01 00:00:00'), 107.41649204299999, 100.844974811, 96.463821506),
(Timestamp('2014-07-01 00:00:00'), 110.146087435, 102.01075029799999, 94.332755083),
(Timestamp('2014-08-01 00:00:00'), 109.17068484100001, 101.562418115, 91.15410351700001),
(Timestamp('2014-09-01 00:00:00'), 109.872892919, 101.471759564, 90.502291475),
(Timestamp('2014-10-01 00:00:00'), 108.508436998, 98.801947543, 93.97423224399999),
(Timestamp('2014-11-01 00:00:00'), 109.91248118, 97.730489099, 90.50638234200001),
(Timestamp('2014-12-01 00:00:00'), 111.19756703600001, 99.734704555, 90.470418612),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2013-01-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2014-01-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_3years_start_feb(weights_3years):
return weights_3years.shift(1, freq='MS')
@pytest.fixture()
def weight_shares_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 0.489537029, 0.21362007800000002, 0.29684289199999997),
(Timestamp('2013-01-01 00:00:00'), 0.535477885, 0.147572705, 0.31694941),
(Timestamp('2014-01-01 00:00:00'), 0.512055362, 0.1940439, 0.293900738),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_shares_start_feb(weight_shares_3years):
return weight_shares_3years.shift(1, freq='MS')
@pytest.fixture()
def indices_1year(indices_3years):
return indices_3years.loc['2012', :]
@pytest.fixture()
def weights_1year(weights_3years):
return weights_3years.loc['2012', :]
@pytest.fixture()
def indices_6months(indices_3years):
return indices_3years.loc['2012-Jan':'2012-Jun', :]
@pytest.fixture()
def weights_6months(weights_3years):
return weights_3years.loc['2012', :]
@pytest.fixture()
def indices_transposed(indices_3years):
return indices_3years.T
@pytest.fixture()
def weights_transposed(weights_3years):
return weights_3years.T
@pytest.fixture()
def indices_missing(indices_3years):
indices_missing = indices_3years.copy()
change_to_nans = [
('2012-06', 2),
('2012-12', 3),
('2013-10', 2),
('2014-07', 1),
]
for sl in change_to_nans:
indices_missing.loc[sl] = np.nan
return indices_missing
@pytest.fixture()
def indices_missing_transposed(indices_missing):
return indices_missing.T
### AGGREGATION FIXTURES -----------------------------------------------------
@pytest.fixture()
def aggregate_outcome_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0),
(Timestamp('2012-02-01 00:00:00'), 99.22169156),
(Timestamp('2012-03-01 00:00:00'), 100.29190240000001),
(Timestamp('2012-04-01 00:00:00'), 100.10739720000001),
(Timestamp('2012-05-01 00:00:00'), 99.78134264),
(Timestamp('2012-06-01 00:00:00'), 98.47443727),
(Timestamp('2012-07-01 00:00:00'), 100.4796172),
(Timestamp('2012-08-01 00:00:00'), 100.7233716),
(Timestamp('2012-09-01 00:00:00'), 101.31654509999998),
(Timestamp('2012-10-01 00:00:00'), 100.5806089),
(Timestamp('2012-11-01 00:00:00'), 100.9697697),
(Timestamp('2012-12-01 00:00:00'), 102.4399192),
(Timestamp('2013-01-01 00:00:00'), 99.45617890000001),
(Timestamp('2013-02-01 00:00:00'), 100.08652959999999),
(Timestamp('2013-03-01 00:00:00'), 100.0866599),
(Timestamp('2013-04-01 00:00:00'), 99.7722843),
(Timestamp('2013-05-01 00:00:00'), 98.35278839),
(Timestamp('2013-06-01 00:00:00'), 96.00322344),
(Timestamp('2013-07-01 00:00:00'), 95.96105198),
(Timestamp('2013-08-01 00:00:00'), 97.82558448),
(Timestamp('2013-09-01 00:00:00'), 98.03388747),
(Timestamp('2013-10-01 00:00:00'), 96.93374613),
(Timestamp('2013-11-01 00:00:00'), 98.59512718),
(Timestamp('2013-12-01 00:00:00'), 99.23888357),
(Timestamp('2014-01-01 00:00:00'), 102.2042938),
(Timestamp('2014-02-01 00:00:00'), 100.3339127),
(Timestamp('2014-03-01 00:00:00'), 101.4726729),
(Timestamp('2014-04-01 00:00:00'), 101.17674840000001),
(Timestamp('2014-05-01 00:00:00'), 102.57269570000001),
(Timestamp('2014-06-01 00:00:00'), 102.9223313),
(Timestamp('2014-07-01 00:00:00'), 103.9199248),
(Timestamp('2014-08-01 00:00:00'), 102.3992605),
(Timestamp('2014-09-01 00:00:00'), 102.54967020000001),
(Timestamp('2014-10-01 00:00:00'), 102.35333840000001),
(Timestamp('2014-11-01 00:00:00'), 101.8451732),
(Timestamp('2014-12-01 00:00:00'), 102.8815443),
],
).set_index(0, drop=True).squeeze()
@pytest.fixture()
def aggregate_outcome_1year(aggregate_outcome_3years):
return aggregate_outcome_3years.loc['2012']
@pytest.fixture()
def aggregate_outcome_6months(aggregate_outcome_3years):
return aggregate_outcome_3years.loc['2012-Jan':'2012-Jun']
@pytest.fixture()
def aggregate_outcome_missing():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0),
(Timestamp('2012-02-01 00:00:00'), 99.22169156),
(Timestamp('2012-03-01 00:00:00'), 100.29190240000001),
(Timestamp('2012-04-01 00:00:00'), 100.10739720000001),
(Timestamp('2012-05-01 00:00:00'), 99.78134264),
(Timestamp('2012-06-01 00:00:00'), 98.75024119),
(Timestamp('2012-07-01 00:00:00'), 100.4796172),
(Timestamp('2012-08-01 00:00:00'), 100.7233716),
(Timestamp('2012-09-01 00:00:00'), 101.31654509999998),
(Timestamp('2012-10-01 00:00:00'), 100.5806089),
(Timestamp('2012-11-01 00:00:00'), 100.9697697),
(Timestamp('2012-12-01 00:00:00'), 105.2864531),
(Timestamp('2013-01-01 00:00:00'), 99.45617890000001),
(Timestamp('2013-02-01 00:00:00'), 100.08652959999999),
(Timestamp('2013-03-01 00:00:00'), 100.0866599),
(Timestamp('2013-04-01 00:00:00'), 99.7722843),
(Timestamp('2013-05-01 00:00:00'), 98.35278839),
(Timestamp('2013-06-01 00:00:00'), 96.00322344),
(Timestamp('2013-07-01 00:00:00'), 95.96105198),
(Timestamp('2013-08-01 00:00:00'), 97.82558448),
(Timestamp('2013-09-01 00:00:00'), 98.03388747),
(Timestamp('2013-10-01 00:00:00'), 96.08353503),
(Timestamp('2013-11-01 00:00:00'), 98.59512718),
(Timestamp('2013-12-01 00:00:00'), 99.23888357),
(Timestamp('2014-01-01 00:00:00'), 102.2042938),
(Timestamp('2014-02-01 00:00:00'), 100.3339127),
(Timestamp('2014-03-01 00:00:00'), 101.4726729),
(Timestamp('2014-04-01 00:00:00'), 101.17674840000001),
(Timestamp('2014-05-01 00:00:00'), 102.57269570000001),
(Timestamp('2014-06-01 00:00:00'), 102.9223313),
(Timestamp('2014-07-01 00:00:00'), 97.38610996),
(Timestamp('2014-08-01 00:00:00'), 102.3992605),
(Timestamp('2014-09-01 00:00:00'), 102.54967020000001),
(Timestamp('2014-10-01 00:00:00'), 102.35333840000001),
(Timestamp('2014-11-01 00:00:00'), 101.8451732),
(Timestamp('2014-12-01 00:00:00'), 102.8815443),
],
).set_index(0, drop=True).squeeze()
### WEIGHTS FIXTURES ------------------------------------------------------
@pytest.fixture()
def reindex_weights_to_indices_outcome_start_jan():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-02-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-03-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-04-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-05-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-06-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-07-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-08-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-09-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-10-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-11-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-12-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2013-01-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-02-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-03-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-04-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-05-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-06-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-07-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-08-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-09-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-10-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-11-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-12-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2014-01-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-02-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-03-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-04-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-05-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-06-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-07-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-08-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-09-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-10-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-11-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-12-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
],
).set_index(0, drop=True)
@pytest.fixture()
def reindex_weights_to_indices_outcome_start_feb():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-02-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-03-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-04-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-05-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-06-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-07-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-08-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-09-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-10-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-11-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-12-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2013-01-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2013-02-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-03-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-04-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-05-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-06-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-07-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-08-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-09-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-10-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-11-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-12-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2014-01-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2014-02-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-03-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-04-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-05-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-06-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-07-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
( | Timestamp('2014-08-01 00:00:00') | pandas.Timestamp |
import os
import numpy as np
import pandas as pd
import xml.etree.ElementTree as etree
from lxml.etree import iterparse
def read_xml(fname, ObsClass, translate_dic={'locationId': 'locatie'},
to_mnap=False, remove_nan=False, verbose=False):
"""read a FEWS XML-file with measurements, return list of ObsClass objects
Parameters
----------
fname : str
full path to file
ObsClass : type
class of the observations, e.g. GroundwaterObs or WaterlvlObs
to_mnap : boolean, optional
if True a column with 'stand_m_tov_nap' is added to the dataframe
remove_nan : boolean, optional
remove nan values from measurements, flag information about the
nan values is also lost
verbose : boolean, optional
print additional information to the screen (default is False).
Returns
-------
list of ObsClass objects
list of timeseries stored in ObsClass objects
"""
tree = etree.parse(fname)
root = tree.getroot()
obs_list = []
for i in range(len(root)):
if root[i].tag.endswith('series'):
series = {}
date = []
time = []
events = []
for j in range(len(root[i])):
if root[i][j].tag.endswith('header'):
for k in range(len(root[i][j])):
prop = root[i][j][k].tag.split('}')[-1]
val = root[i][j][k].text
if prop == 'x' or prop == 'y' or prop == 'lat' or prop == 'lon':
val = float(val)
series[prop] = val
if verbose:
if prop == 'locationId':
print('read {}'.format(val))
elif root[i][j].tag.endswith('event'):
date.append(root[i][j].attrib.pop('date'))
time.append(root[i][j].attrib.pop('time'))
events.append({**root[i][j].attrib})
# combine events in a dataframe
index = pd.to_datetime(
[d + ' ' + t for d, t in zip(date, time)],
errors="coerce")
ts = | pd.DataFrame(events, index=index, dtype=float) | pandas.DataFrame |
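# Illustrative usage sketch (not part of the original module); the observation class and file
# name below are assumptions -- any ObsClass accepted by the function (e.g. GroundwaterObs or
# WaterlvlObs, per the docstring) should work:
#
#   obs_list = read_xml('fews_export.xml', ObsClass=GroundwaterObs,
#                       to_mnap=True, remove_nan=True, verbose=True)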
"""
Created on Tue May 26 21:58:04 2020
Author: <NAME>
"""
import pandas as pd
import dash
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output
import plotly.figure_factory as ff
import plotly.graph_objects as go
from datetime import datetime as dt
import numpy as np
from random import shuffle
import os
import math
import requests
import datetime
MAPBOX_TOKEN=os.environ.get('MAPBOX_TOKEN', None)
BE_KEY=os.environ.get('BE_KEY', None)
valid_colors=["#CF5C60","#717ECD","#4AB471","#F3AE4E","#D96383","#4EB1CB"]
shuffle(valid_colors)
##Databases
####
df = pd.read_csv('data/bunkering_ops_mediterranean.csv', parse_dates=True)
df["start_of_service"]=pd.to_datetime(df["start_of_service"])
df["vessel_inside_port"]= | pd.to_datetime(df["vessel_inside_port"]) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
These the test the public routines exposed in types/common.py
related to inference and not otherwise tested in types/test_common.py
"""
from warnings import catch_warnings, simplefilter
import collections
import re
from datetime import datetime, date, timedelta, time
from decimal import Decimal
from numbers import Number
from fractions import Fraction
import numpy as np
import pytz
import pytest
import pandas as pd
from pandas._libs import lib, iNaT, missing as libmissing
from pandas import (Series, Index, DataFrame, Timedelta,
DatetimeIndex, TimedeltaIndex, Timestamp,
Panel, Period, Categorical, isna, Interval,
DateOffset)
from pandas import compat
from pandas.compat import u, PY2, StringIO, lrange
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_number,
is_integer,
is_float,
is_bool,
is_scalar,
is_scipy_sparse,
ensure_int32,
ensure_categorical)
from pandas.util import testing as tm
import pandas.util._test_decorators as td
@pytest.fixture(params=[True, False], ids=str)
def coerce(request):
return request.param
# collect all objects to be tested for list-like-ness; use tuples of objects,
# whether they are list-like or not (special casing for sets), and their ID
ll_params = [
([1], True, 'list'), # noqa: E241
([], True, 'list-empty'), # noqa: E241
((1, ), True, 'tuple'), # noqa: E241
(tuple(), True, 'tuple-empty'), # noqa: E241
({'a': 1}, True, 'dict'), # noqa: E241
(dict(), True, 'dict-empty'), # noqa: E241
({'a', 1}, 'set', 'set'), # noqa: E241
(set(), 'set', 'set-empty'), # noqa: E241
(frozenset({'a', 1}), 'set', 'frozenset'), # noqa: E241
(frozenset(), 'set', 'frozenset-empty'), # noqa: E241
(iter([1, 2]), True, 'iterator'), # noqa: E241
(iter([]), True, 'iterator-empty'), # noqa: E241
((x for x in [1, 2]), True, 'generator'), # noqa: E241
((x for x in []), True, 'generator-empty'), # noqa: E241
(Series([1]), True, 'Series'), # noqa: E241
(Series([]), True, 'Series-empty'), # noqa: E241
(Series(['a']).str, True, 'StringMethods'), # noqa: E241
(Series([], dtype='O').str, True, 'StringMethods-empty'), # noqa: E241
(Index([1]), True, 'Index'), # noqa: E241
(Index([]), True, 'Index-empty'), # noqa: E241
(DataFrame([[1]]), True, 'DataFrame'), # noqa: E241
(DataFrame(), True, 'DataFrame-empty'), # noqa: E241
(np.ndarray((2,) * 1), True, 'ndarray-1d'), # noqa: E241
(np.array([]), True, 'ndarray-1d-empty'), # noqa: E241
(np.ndarray((2,) * 2), True, 'ndarray-2d'), # noqa: E241
(np.array([[]]), True, 'ndarray-2d-empty'), # noqa: E241
(np.ndarray((2,) * 3), True, 'ndarray-3d'), # noqa: E241
(np.array([[[]]]), True, 'ndarray-3d-empty'), # noqa: E241
(np.ndarray((2,) * 4), True, 'ndarray-4d'), # noqa: E241
(np.array([[[[]]]]), True, 'ndarray-4d-empty'), # noqa: E241
(np.array(2), False, 'ndarray-0d'), # noqa: E241
(1, False, 'int'), # noqa: E241
(b'123', False, 'bytes'), # noqa: E241
(b'', False, 'bytes-empty'), # noqa: E241
('123', False, 'string'), # noqa: E241
('', False, 'string-empty'), # noqa: E241
(str, False, 'string-type'), # noqa: E241
(object(), False, 'object'), # noqa: E241
(np.nan, False, 'NaN'), # noqa: E241
(None, False, 'None') # noqa: E241
]
objs, expected, ids = zip(*ll_params)
@pytest.fixture(params=zip(objs, expected), ids=ids)
def maybe_list_like(request):
return request.param
def test_is_list_like(maybe_list_like):
obj, expected = maybe_list_like
expected = True if expected == 'set' else expected
assert inference.is_list_like(obj) == expected
def test_is_list_like_disallow_sets(maybe_list_like):
obj, expected = maybe_list_like
expected = False if expected == 'set' else expected
assert inference.is_list_like(obj, allow_sets=False) == expected
def test_is_sequence():
is_seq = inference.is_sequence
assert (is_seq((1, 2)))
assert (is_seq([1, 2]))
assert (not is_seq("abcd"))
assert (not is_seq(u("abcd")))
assert (not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert (not is_seq(A()))
def test_is_array_like():
assert inference.is_array_like(Series([]))
assert inference.is_array_like(Series([1, 2]))
assert inference.is_array_like(np.array(["a", "b"]))
assert inference.is_array_like(Index(["2016-01-01"]))
class DtypeList(list):
dtype = "special"
assert inference.is_array_like(DtypeList())
assert not inference.is_array_like([1, 2, 3])
assert not inference.is_array_like(tuple())
assert not inference.is_array_like("foo")
assert not inference.is_array_like(123)
@pytest.mark.parametrize('inner', [
[], [1], (1, ), (1, 2), {'a': 1}, {1, 'a'}, Series([1]),
Series([]), Series(['a']).str, (x for x in range(5))
])
@pytest.mark.parametrize('outer', [
list, Series, np.array, tuple
])
def test_is_nested_list_like_passes(inner, outer):
result = outer([inner for _ in range(5)])
assert inference.is_list_like(result)
@pytest.mark.parametrize('obj', [
'abc', [], [1], (1,), ['a'], 'a', {'a'},
[1, 2, 3], Series([1]), DataFrame({"A": [1]}),
([1, 2] for _ in range(5)),
])
def test_is_nested_list_like_fails(obj):
assert not inference.is_nested_list_like(obj)
@pytest.mark.parametrize(
"ll", [{}, {'A': 1}, Series([1])])
def test_is_dict_like_passes(ll):
assert inference.is_dict_like(ll)
@pytest.mark.parametrize(
"ll", ['1', 1, [1, 2], (1, 2), range(2), Index([1])])
def test_is_dict_like_fails(ll):
assert not inference.is_dict_like(ll)
@pytest.mark.parametrize("has_keys", [True, False])
@pytest.mark.parametrize("has_getitem", [True, False])
@pytest.mark.parametrize("has_contains", [True, False])
def test_is_dict_like_duck_type(has_keys, has_getitem, has_contains):
class DictLike(object):
def __init__(self, d):
self.d = d
if has_keys:
def keys(self):
return self.d.keys()
if has_getitem:
def __getitem__(self, key):
return self.d.__getitem__(key)
if has_contains:
def __contains__(self, key):
return self.d.__contains__(key)
d = DictLike({1: 2})
result = inference.is_dict_like(d)
expected = has_keys and has_getitem and has_contains
assert result is expected
def test_is_file_like(mock):
class MockFile(object):
pass
is_file = inference.is_file_like
data = StringIO("data")
assert is_file(data)
# No read / write attributes
# No iterator attributes
m = MockFile()
assert not is_file(m)
MockFile.write = lambda self: 0
# Write attribute but not an iterator
m = MockFile()
assert not is_file(m)
# gh-16530: Valid iterator just means we have the
# __iter__ attribute for our purposes.
MockFile.__iter__ = lambda self: self
# Valid write-only file
m = MockFile()
assert is_file(m)
del MockFile.write
MockFile.read = lambda self: 0
# Valid read-only file
m = MockFile()
assert is_file(m)
# Iterator but no read / write attributes
data = [1, 2, 3]
assert not is_file(data)
assert not is_file(mock.Mock())
@pytest.mark.parametrize(
"ll", [collections.namedtuple('Test', list('abc'))(1, 2, 3)])
def test_is_names_tuple_passes(ll):
assert inference.is_named_tuple(ll)
@pytest.mark.parametrize(
"ll", [(1, 2, 3), 'a', Series({'pi': 3.14})])
def test_is_names_tuple_fails(ll):
assert not inference.is_named_tuple(ll)
def test_is_hashable():
# all new-style classes are hashable by default
class HashableClass(object):
pass
class UnhashableClass1(object):
__hash__ = None
class UnhashableClass2(object):
def __hash__(self):
raise TypeError("Not hashable")
hashable = (1,
3.14,
np.float64(3.14),
'a',
tuple(),
(1, ),
HashableClass(), )
not_hashable = ([], UnhashableClass1(), )
abc_hashable_not_really_hashable = (([], ), UnhashableClass2(), )
for i in hashable:
assert inference.is_hashable(i)
for i in not_hashable:
assert not inference.is_hashable(i)
for i in abc_hashable_not_really_hashable:
assert not inference.is_hashable(i)
# numpy.array is no longer collections.Hashable as of
# https://github.com/numpy/numpy/pull/5326, just test
# is_hashable()
assert not inference.is_hashable(np.array([]))
# old-style classes in Python 2 don't appear hashable to
# collections.Hashable but also seem to support hash() by default
if PY2:
class OldStyleClass():
pass
c = OldStyleClass()
assert not isinstance(c, compat.Hashable)
assert inference.is_hashable(c)
hash(c) # this will not raise
@pytest.mark.parametrize(
"ll", [re.compile('ad')])
def test_is_re_passes(ll):
assert inference.is_re(ll)
@pytest.mark.parametrize(
"ll", ['x', 2, 3, object()])
def test_is_re_fails(ll):
assert not inference.is_re(ll)
@pytest.mark.parametrize(
"ll", [r'a', u('x'),
r'asdf',
re.compile('adsf'),
u(r'\u2233\s*'),
re.compile(r'')])
def test_is_recompilable_passes(ll):
assert inference.is_re_compilable(ll)
@pytest.mark.parametrize(
"ll", [1, [], object()])
def test_is_recompilable_fails(ll):
assert not inference.is_re_compilable(ll)
class TestInference(object):
def test_infer_dtype_bytes(self):
compare = 'string' if PY2 else 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
assert lib.infer_dtype(arr) == compare
# object array of bytes
arr = arr.astype(object)
assert lib.infer_dtype(arr) == compare
# object array of bytes with missing values
assert lib.infer_dtype([b'a', np.nan, b'c'], skipna=True) == compare
def test_isinf_scalar(self):
# GH 11352
assert libmissing.isposinf_scalar(float('inf'))
assert libmissing.isposinf_scalar(np.inf)
assert not libmissing.isposinf_scalar(-np.inf)
assert not libmissing.isposinf_scalar(1)
assert not libmissing.isposinf_scalar('a')
assert libmissing.isneginf_scalar(float('-inf'))
assert libmissing.isneginf_scalar(-np.inf)
assert not libmissing.isneginf_scalar(np.inf)
assert not libmissing.isneginf_scalar(1)
assert not libmissing.isneginf_scalar('a')
def test_maybe_convert_numeric_infinities(self):
# see gh-13274
infinities = ['inf', 'inF', 'iNf', 'Inf',
'iNF', 'InF', 'INf', 'INF']
na_values = {'', 'NULL', 'nan'}
pos = np.array(['inf'], dtype=np.float64)
neg = np.array(['-inf'], dtype=np.float64)
msg = "Unable to parse string"
for infinity in infinities:
for maybe_int in (True, False):
out = lib.maybe_convert_numeric(
np.array([infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['-' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, neg)
out = lib.maybe_convert_numeric(
np.array([u(infinity)], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['+' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
# too many characters
with pytest.raises(ValueError, match=msg):
lib.maybe_convert_numeric(
np.array(['foo_' + infinity], dtype=object),
na_values, maybe_int)
def test_maybe_convert_numeric_post_floatify_nan(self, coerce):
# see gh-13314
data = np.array(['1.200', '-999.000', '4.500'], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = {-999, -999.0}
out = lib.maybe_convert_numeric(data, nan_values, coerce)
tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(['inf', 'inf', 'inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False, True)
assert np.all(np.isnan(result))
def test_convert_non_hashable(self):
# GH13324
# make sure that we are handing non-hashables
arr = np.array([[10.0, 2], 1.0, 'apple'])
result = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
def test_convert_numeric_uint64(self):
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([str(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
@pytest.mark.parametrize("arr", [
np.array([2**63, np.nan], dtype=object),
np.array([str(2**63), np.nan], dtype=object),
np.array([np.nan, 2**63], dtype=object),
np.array([np.nan, str(2**63)], dtype=object)])
def test_convert_numeric_uint64_nan(self, coerce, arr):
expected = arr.astype(float) if coerce else arr.copy()
result = lib.maybe_convert_numeric(arr, set(),
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
def test_convert_numeric_uint64_nan_values(self, coerce):
arr = np.array([2**63, 2**63 + 1], dtype=object)
na_values = {2**63}
expected = (np.array([np.nan, 2**63 + 1], dtype=float)
if coerce else arr.copy())
result = lib.maybe_convert_numeric(arr, na_values,
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("case", [
np.array([2**63, -1], dtype=object),
np.array([str(2**63), -1], dtype=object),
np.array([str(2**63), str(-1)], dtype=object),
np.array([-1, 2**63], dtype=object),
np.array([-1, str(2**63)], dtype=object),
np.array([str(-1), str(2**63)], dtype=object)])
def test_convert_numeric_int64_uint64(self, case, coerce):
expected = case.astype(float) if coerce else case.copy()
result = lib.maybe_convert_numeric(case, set(), coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("value", [-2**63 - 1, 2**64])
def test_convert_int_overflow(self, value):
# see gh-18584
arr = np.array([value], dtype=object)
result = lib.maybe_convert_objects(arr)
tm.assert_numpy_array_equal(arr, result)
def test_maybe_convert_objects_uint64(self):
# see gh-4471
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
# NumPy bug: can't compare uint64 to int64, as that
# results in both casting to float64, so we should
# make sure that this function is robust against it
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2, -1], dtype=object)
exp = np.array([2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2**63, -1], dtype=object)
exp = np.array([2**63, -1], dtype=object)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
def test_mixed_dtypes_remain_object_array(self):
# GH14956
array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1],
dtype=object)
result = lib.maybe_convert_objects(array, convert_datetime=1)
tm.assert_numpy_array_equal(result, array)
class TestTypeInference(object):
# Dummy class used for testing with Python objects
class Dummy():
pass
def test_inferred_dtype_fixture(self, any_skipna_inferred_dtype):
# see pandas/conftest.py
inferred_dtype, values = any_skipna_inferred_dtype
# make sure the inferred dtype of the fixture is as requested
assert inferred_dtype == lib.infer_dtype(values, skipna=True)
def test_length_zero(self):
result = lib.infer_dtype(np.array([], dtype='i4'))
assert result == 'integer'
result = lib.infer_dtype([])
assert result == 'empty'
# GH 18004
arr = np.array([np.array([], dtype=object),
np.array([], dtype=object)])
result = lib.infer_dtype(arr)
assert result == 'empty'
def test_integers(self):
arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'integer'
arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='i4')
result = lib.infer_dtype(arr)
assert result == 'integer'
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, False, True, 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, np.nan, False], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'boolean'
def test_floats(self):
arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],
dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='f4')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, 4, 5], dtype='f8')
result = lib.infer_dtype(arr)
assert result == 'floating'
def test_decimals(self):
# GH15690
arr = np.array([Decimal(1), Decimal(2), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([1.0, 2.0, Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([Decimal(1), Decimal('NaN'), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([Decimal(1), np.nan, Decimal(3)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'decimal'
def test_string(self):
pass
def test_unicode(self):
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr, skipna=True)
expected = 'unicode' if PY2 else 'string'
assert result == expected
@pytest.mark.parametrize('dtype, missing, skipna, expected', [
(float, np.nan, False, 'floating'),
(float, np.nan, True, 'floating'),
(object, np.nan, False, 'floating'),
(object, np.nan, True, 'empty'),
(object, None, False, 'mixed'),
(object, None, True, 'empty')
])
@pytest.mark.parametrize('box', [pd.Series, np.array])
def test_object_empty(self, box, missing, dtype, skipna, expected):
# GH 23421
arr = box([missing, missing], dtype=dtype)
result = lib.infer_dtype(arr, skipna=skipna)
assert result == expected
def test_datetime(self):
dates = [datetime(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'datetime64'
def test_infer_dtype_datetime(self):
arr = np.array([Timestamp('2011-01-01'),
Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.datetime64('2011-01-01'),
np.datetime64('2011-01-01')], dtype=object)
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)])
assert lib.infer_dtype(arr) == 'datetime'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Timestamp('2011-01-02')])
assert | lib.infer_dtype(arr) | pandas._libs.lib.infer_dtype |
""" Python module to match and merge (synchronise) data points from measurement time series """
__author__ = '<NAME>'
import pandas as pd
"""
Matches and merges measurement data points from different measurement series that were recorded with slightly different time values; the maximum allowed lag between matched points can be specified.
Input: a dictionary of pandas measurement DataFrames, e.g. {'SMU': df1, 'Throughput Measures': df2}
Output: a pandas DataFrame of matched and merged data points
Parameters:
    dataframes      dictionary that contains pandas DataFrames of the different measurement series as specified above
    max_lag         the time window in which matching data points have to lie, 1 second ('1s') by default
    merge           with merge=True, a merged DataFrame is returned, otherwise separate DataFrames are returned
"""
def synchronize(dataframes, max_lag='1s', merge=False):
merged_data = | pd.DataFrame() | pandas.DataFrame |
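# Illustrative usage sketch (not part of the original module); the column names, timestamps and
# the assumption that each DataFrame is indexed by timestamp are mine. Two series sampled a few
# hundred milliseconds apart are matched within the default 1-second window:
#
#   df1 = pd.DataFrame({'power_w': [1.0, 2.0]},
#                      index=pd.to_datetime(['2020-01-01 10:00:00.1', '2020-01-01 10:00:01.2']))
#   df2 = pd.DataFrame({'throughput': [10, 20]},
#                      index=pd.to_datetime(['2020-01-01 10:00:00.4', '2020-01-01 10:00:01.0']))
#   merged = synchronize({'SMU': df1, 'Throughput Measures': df2}, max_lag='1s', merge=True)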
import cProfile
from enum import Enum
import enum
import io
from math import cos, sin, degrees, radians
from pathlib import Path
import pstats
from typing import List, Tuple
from numba import njit
import numpy as np
from numpy import ndarray
import pandas as pd
from pyquaternion import Quaternion
from scipy.spatial.transform import Rotation
class DFKeys(Enum):
TIME = "Time"
POSITION_X = "position_x"
POSITION_Y = "position_y"
POSITION_Z = "position_Z"
ORIENTATION_W = "orientation_w"
ORIENTATION_X = "orientation_x"
ORIENTATION_Y = "orientation_y"
ORIENTATION_Z = "orientation_z"
ROLL = "roll"
PITCH = "pitch"
YAW = "yaw"
SURGE = "surge_vel"
SWAY = "sway_vel"
HEAVE = "heave_vel"
ROLL_VEL = "roll_vel"
PITCH_VEL = "pitch_vel"
YAW_VEL = "yaw_vel"
FORCE_X = "force_x"
FORCE_Y = "force_y"
FORCE_Z = "force_z"
TORQUE_X = "torque_x"
TORQUE_Y = "torque_y"
TORQUE_Z = "torque_z"
ORIENTATIONS_QUAT = [
DFKeys.ORIENTATION_W.value,
DFKeys.ORIENTATION_X.value,
DFKeys.ORIENTATION_Y.value,
DFKeys.ORIENTATION_Z.value,
]
ORIENTATIONS_EULER = [
DFKeys.ROLL.value,
DFKeys.PITCH.value,
DFKeys.YAW.value,
]
POSITIONS = [
DFKeys.POSITION_X.value,
DFKeys.POSITION_Y.value,
DFKeys.POSITION_Z.value,
]
LINEAR_VELOCITIES = [
DFKeys.SURGE.value,
DFKeys.SWAY.value,
DFKeys.HEAVE.value,
]
ANGULAR_VELOCITIES = [
DFKeys.ROLL_VEL.value,
DFKeys.PITCH_VEL.value,
DFKeys.YAW_VEL.value,
]
TAU_DOFS = [
DFKeys.FORCE_X.value,
DFKeys.FORCE_Y.value,
DFKeys.FORCE_Z.value,
DFKeys.TORQUE_X.value,
DFKeys.TORQUE_Y.value,
DFKeys.TORQUE_Z.value,
]
ETA_DOFS = POSITIONS + ORIENTATIONS_QUAT
ETA_EULER_DOFS = POSITIONS + ORIENTATIONS_EULER
NU_DOFS = LINEAR_VELOCITIES + ANGULAR_VELOCITIES
PREPROCESSED_DIR = Path("data/preprocessed")
SYNTHETIC_DIR = Path("data/synthetic")
PARAM_EST_DIR = Path("results/parameter_estimation")
PARAM_EST_SIM_DIR = PARAM_EST_DIR / "simulations"
@njit
def R(quat: np.ndarray) -> np.ndarray:
"""Compute rotation matrix from BODY to NED given
    a quaternion of the form [eta, eps1, eps2, eps3].
    Based on eq. 2.72 in Fossen (2021 draft).
    Args:
        quat (np.ndarray): quaternion of the form [eta, eps1, eps2, eps3]
Returns:
np.ndarray: linear velocity rotation matrix
"""
eta: float = quat[0]
eps1: float = quat[1]
eps2: float = quat[2]
eps3: float = quat[3]
return np.array(
[
[
1 - 2 * (eps2 ** 2 + eps3 ** 2),
2 * (eps1 * eps2 - eps3 * eta),
2 * (eps1 * eps3 + eps2 * eta),
],
[
2 * (eps1 * eps2 + eps3 * eta),
1 - 2 * (eps1 ** 2 + eps3 ** 2),
2 * (eps2 * eps3 - eps1 * eta),
],
[
2 * (eps1 * eps3 - eps2 * eta),
2 * (eps2 * eps3 + eps1 * eta),
1 - 2 * (eps1 ** 2 + eps2 ** 2),
],
]
)
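# Quick illustrative check (not part of the original source): the identity quaternion
# [1, 0, 0, 0] represents no rotation, so R should return the 3x3 identity matrix.
def _example_R_identity():
    assert np.allclose(R(np.array([1.0, 0.0, 0.0, 0.0])), np.eye(3))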
@njit
def T(quat: np.ndarray) -> np.ndarray:
"""Computes angular velocity rotation matrix from BODY to NED.
    Based on eq. 2.78 in Fossen (2021 draft).
    Args:
        quat (np.ndarray): quaternion of the form [eta, eps1, eps2, eps3]
Returns:
np.ndarray: angular velocity rotation matrix
"""
eta: float = quat[0]
eps1: float = quat[1]
eps2: float = quat[2]
eps3: float = quat[3]
return 0.5 * np.array(
[
[-eps1, -eps2, -eps3],
[eta, -eps3, eps2],
[eps3, eta, -eps1],
[-eps2, eps1, eta],
]
)
@njit
def Jq(eta: np.ndarray) -> np.ndarray:
"""Combined R and T rotation matrix for transform of nu.
    Based on eq. 2.83 in Fossen (2021 draft).
Args:
eta (np.ndarray): position and orientation (quat) in NED
Returns:
np.ndarray: rotation matrix from BODY to NED
"""
orientation = eta[3:7]
J = np.zeros((7, 6))
J[0:3, 0:3] = R(orientation)
J[3:7, 3:6] = T(orientation)
return J.astype(np.float64)
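# Illustrative kinematics sketch (not part of the original source): with eta = [x, y, z, q_w,
# q_x, q_y, q_z] and nu = [u, v, w, p, q, r], the NED-frame rate of change is
# eta_dot = Jq(eta) @ nu. The numbers below are arbitrary.
def _example_eta_dot():
    eta = np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])  # at the origin, identity attitude
    nu = np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.1])        # 1 m/s surge, 0.1 rad/s yaw rate
    return Jq(eta) @ nu                                   # 7-element eta_dot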
@njit
def normalize(v: np.ndarray) -> np.ndarray:
norm = np.linalg.norm(v)
if norm == 0:
return v
return (v / norm).astype(np.float64)
@njit
def mean_squared_error(y_true: np.ndarray, y_pred: np.ndarray) -> np.ndarray:
"""Mean squared error between two 2D arrays
Args:
y_true (np.ndarray): measurements
y_pred (np.ndarray): predicted values
Returns:
np.ndarray: array of squared errors
"""
return (
np.square(np.subtract(y_true, y_pred)).sum(axis=0) / y_pred.shape[0]
).astype(np.float64)
@njit
def mean_absolute_error(y_true: np.ndarray, y_pred: np.ndarray) -> np.ndarray:
"""Mean absolute error between two 2D arrays
Args:
y_true (np.ndarray): measurements
y_pred (np.ndarray): predicted values
Returns:
np.ndarray: array of squared errors
"""
return np.absolute(
(np.subtract(y_true, y_pred)).sum(axis=0) / y_pred.shape[0]
).astype(np.float64)
@njit
def mean_absolute_error_with_log(y_true: np.ndarray, y_pred: np.ndarray) -> np.ndarray:
"""Mean absolute error between two 2D arrays
Args:
y_true (np.ndarray): measurements
y_pred (np.ndarray): predicted values
Returns:
        np.ndarray: absolute value of the mean error for each column (DOF), log-scaled where greater than 1
"""
errors_abs = np.absolute(
(np.subtract(y_true, y_pred)).sum(axis=0) / y_pred.shape[0]
).astype(np.float64)
for i, error in enumerate(errors_abs):
if error > 1:
errors_abs[i] = np.log(error)
return errors_abs
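# Illustrative check (not part of the original source): the metrics reduce along axis 0 and
# return one value per column (per DOF). Note that mean_absolute_error, as implemented, is the
# absolute value of the mean signed error rather than the mean of the absolute errors.
def _example_error_metrics():
    y_true = np.array([[0.0, 0.0], [1.0, 1.0]])
    y_pred = np.array([[0.0, 1.0], [1.0, 3.0]])
    mse = mean_squared_error(y_true, y_pred)   # array([0. , 2.5])
    mae = mean_absolute_error(y_true, y_pred)  # array([0. , 1.5])
    return mse, mae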
@njit
def is_poistive_def(A: np.ndarray) -> bool:
if is_symmetric(A):
return (np.linalg.eigvals(A) > 0).all()
else:
return False
@njit
def is_symmetric(A: np.ndarray) -> bool:
tol = 1e-8
return (np.abs(A - A.T) < tol).all()
@njit
def normalizer(x: np.ndarray, normalize_quaternions: bool) -> np.ndarray:
"""Normalize quaternions in eta of a full state [eta, nu]
Args:
x (np.ndarray): full state [eta, nu]
Returns:
np.ndarray: [eta, nu] with normalized quaternions
"""
if normalize_quaternions:
x[3:7] = normalize(x[3:7])
return x
def quat_to_degrees(q_w: float, q_x: float, q_y: float, q_z: float) -> np.ndarray:
"""Convert a quaternion to degrees using the zyx convetion
Args:
q_w (float): [description]
q_x (float): [description]
q_y (float): [description]
q_z (float): [description]
Returns:
List[float]: list with roll, pitch, yaw in degrees
"""
yaw, pitch, roll = Quaternion(
w=q_w,
x=q_x,
y=q_y,
z=q_z,
).yaw_pitch_roll
return np.array([degrees(x) for x in [roll, pitch, yaw]])
def degrees_to_quat_rotation(roll: float, pitch: float, yaw: float) -> np.ndarray:
r = Rotation.from_euler("zyx", [yaw, pitch, roll], degrees=True)
q_x, q_y, q_z, q_w = r.as_quat()
return np.array([q_w, q_x, q_y, q_z])
def radians_to_quat_rotation(roll: float, pitch: float, yaw: float) -> np.ndarray:
r = Rotation.from_euler("zyx", [yaw, pitch, roll])
q_x, q_y, q_z, q_w = r.as_quat()
return np.array([q_w, q_x, q_y, q_z])
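# Illustrative check (not part of the original source): a zero roll/pitch/yaw attitude maps to
# the identity quaternion [1, 0, 0, 0], and the identity quaternion maps back to zero angles.
def _example_identity_attitude():
    assert np.allclose(degrees_to_quat_rotation(0.0, 0.0, 0.0),
                       np.array([1.0, 0.0, 0.0, 0.0]))
    assert np.allclose(quat_to_degrees(1.0, 0.0, 0.0, 0.0), np.zeros(3))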
def get_nu(df: pd.DataFrame) -> np.ndarray:
return df[NU_DOFS].to_numpy()
def get_eta(df: pd.DataFrame) -> np.ndarray:
"""Retrieve eta from dataframe
Args:
df (pd.DataFrame): dataframe containing eta
Returns:
np.ndarray: [x, y, z, q_w, q_x, q_y, q_z]
"""
return df[ETA_DOFS].to_numpy()
def get_tau(df: pd.DataFrame) -> np.ndarray:
return df[TAU_DOFS].to_numpy()
def make_df(
time: np.ndarray,
eta: np.ndarray = None,
nu: np.ndarray = None,
tau: np.ndarray = None,
) -> pd.DataFrame:
data = {DFKeys.TIME.value: time}
if type(eta) is np.ndarray:
for dof, values in zip(ETA_DOFS, eta.T):
data[dof] = values
if type(nu) is np.ndarray:
for dof, values in zip(NU_DOFS, nu.T):
data[dof] = values
if type(tau) is np.ndarray:
for dof, values in zip(TAU_DOFS, tau.T):
data[dof] = values
return pd.DataFrame(data)
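# Illustrative usage (not part of the original source): assemble a results DataFrame from a
# time vector and a (here trivial) trajectory; the array shapes follow ETA_DOFS and NU_DOFS.
def _example_make_df(n=3):
    time = np.linspace(0.0, 0.2, n)
    eta = np.tile(np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]), (n, 1))  # (n, 7)
    nu = np.zeros((n, 6))                                                 # (n, 6)
    return make_df(time, eta=eta, nu=nu)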
def profile(func):
def wrapper(*args, **kwargs):
prof = cProfile.Profile()
retval = prof.runcall(func, *args, **kwargs)
save_file = Path("profiling") / (func.__name__ + ".profile")
Path.mkdir(save_file.parent, parents=True, exist_ok=True)
s = io.StringIO()
ps = pstats.Stats(prof, stream=s).sort_stats(pstats.SortKey.CUMULATIVE)
ps.print_stats()
with open(save_file, "w") as perf_file:
perf_file.write(s.getvalue())
return retval
return wrapper
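# Illustrative usage (not part of the original source): decorating a function profiles each
# call with cProfile and writes the stats to profiling/<function name>.profile.
def _example_profiled_call():
    @profile
    def _waste_time(n=10000):
        return sum(i * i for i in range(n))
    return _waste_time()  # stats written to profiling/_waste_time.profile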
def load_tau(csv_path: Path):
return pd.read_csv(csv_path)[TAU_DOFS].to_numpy(dtype=np.float64)
def load_data(
csv_path: Path, head_num=None, dtype=np.float64
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
if head_num:
df = | pd.read_csv(csv_path) | pandas.read_csv |
import sqlite3
import csv
import gc
import numpy as np
import pandas as pd
from dateutil import parser
from scipy import sparse
import lightgbm as lgb
from helpers import *
evl_path = '/infer/'
db_path = 'dream_challenge_3.sqlite3'
measurement_list = [3024561, 3025313, 3002069, 3036955, 3018910, 3020990,
3015632, 3021447, 3027946, 3007696, 3021513, 3003932,
3028193, 3024128, 3027597, 4154790, 4152194, 3004295,
3010156, 3035569, 3018405, 3024929, 3021706, 3027801,
3000483, 3010300, 3004501, 3034962, 3011424, 3030260,
3028653, 3000963, 3002173, 3022493, 3004119, 3045807,
3000185, 3002400, 3021044, 3023103, 3000593, 3037556,
                    3007144, 3008342, 3013650, 3014502, 40765040, 3016502,
3020630, 3029872, 3037121, 3005029, 4239408, 3019550,
3009596, 3009966, 3027114, 3007070, 3007352, 3015232,
3006906, 3013784, 3021119, 3034244, 3000637, 3022192,
3005770, 3016723, 3017250, 3016662, 3051825, 3004239,
44783982]
class OmopParser_predict(object):
def __init__(self):
self.name = 'omop_assembler'
def build_database(self):
# connect to database
con = sqlite3.connect(db_path)
# build table person
query = """
CREATE TABLE person_evl
(
person_id INT,
gender_concept_id INT,
year_of_birth INT,
ethnicity_concept_id INT
)
"""
c = con.cursor()
c.execute(query)
# load data from csv files
with open(evl_path + 'person.csv') as f:
reader = csv.reader(f)
name = next(reader, None)
idx = [name.index(c) for c in ['person_id', 'gender_concept_id', 'year_of_birth', 'ethnicity_concept_id']]
question_marks = str((query.count('\n')-4)*'?,')[:-1]
for field in reader:
c.execute("INSERT INTO person_evl VALUES"+'('+question_marks+')', [field[i] for i in idx])
con.commit()
con.close()
print('Table person built')
# Table condition
con = sqlite3.connect(db_path)
query = """
CREATE TABLE condition_evl
(
condition_occurrence_id INT NOT NULL,
person_id INT,
condition_concept_id INT,
condition_start_date CHAR(14),
condition_end_date CHAR(14),
condition_type_concept_id INT,
visit_occurrence_id INT,
condition_source_concept_id INT
)
"""
c = con.cursor()
c.execute(query)
# load data from csv files
with open(evl_path + 'condition_occurrence.csv') as f:
reader = csv.reader(f)
name = next(reader, None)
idx = [name.index(c) for c in ['condition_occurrence_id', 'person_id', 'condition_concept_id', \
'condition_start_date', 'condition_end_date', 'condition_type_concept_id',\
'visit_occurrence_id', 'condition_source_concept_id']]
question_marks = str((query.count('\n')-4)*'?,')[:-1]
for field in reader:
c.execute("INSERT INTO condition_evl VALUES"+'('+question_marks+')', [field[i] for i in idx])
con.commit()
con.close()
print('Table condition built')
# TABLE visit
con = sqlite3.connect(db_path)
query = """
CREATE TABLE visit_evl
(
visit_occurrence_id INT,
person_id INT,
visit_concept_id INT,
visit_start_date VARCHAR(14),
visit_end_date VARCHAR(14),
visit_type_concept_id INT,
provider_id INT,
care_site_id INT,
visit_source_concept_id INT
)
"""
c = con.cursor()
c.execute(query)
# load data from csv files
with open(evl_path + 'visit_occurrence.csv') as f:
reader = csv.reader(f)
name = next(reader, None)
idx = [name.index(c) for c in ['visit_occurrence_id', 'person_id', 'visit_concept_id', \
'visit_start_date', 'visit_end_date', 'visit_type_concept_id',\
'provider_id', 'care_site_id', 'visit_source_concept_id']]
question_marks = str((query.count('\n')-4)*'?,')[:-1]
for field in reader:
c.execute("INSERT INTO visit_evl VALUES"+'('+question_marks+')', [field[i] for i in idx])
con.commit()
con.close()
print('Table visit built')
# Table drug
con = sqlite3.connect(db_path)
query = """
CREATE TABLE drug_evl
(
drug_exposure_id INT,
person_id INT,
drug_concept_id INT,
drug_exposure_start_date VARCHAR(14),
drug_exposure_end_date VARCHAR(14),
quantity INT
)
"""
c = con.cursor()
c.execute(query)
# load data from csv files
with open(evl_path + 'drug_exposure.csv') as f:
reader = csv.reader(f)
name = next(reader, None)
idx = [name.index(c) for c in ['drug_exposure_id', 'person_id', 'drug_concept_id', \
'drug_exposure_start_date', 'drug_exposure_end_date', 'quantity']]
question_marks = str((query.count('\n')-4)*'?,')[:-1]
for field in reader:
c.execute("INSERT INTO drug_evl VALUES"+'('+question_marks+')', [field[i] for i in idx])
con.commit()
con.close()
print('Table drug built')
# TABLE measurement
con = sqlite3.connect(db_path)
query = """
CREATE TABLE measurement_evl
(
measurement_date CHAR(10),
person_id INT,
value_as_number DECIMAL(11,4),
measurement_concept_id INT
)
"""
c = con.cursor()
c.execute(query)
# load data from csv files
with open(evl_path + 'measurement.csv') as f:
reader = csv.reader(f)
name = next(reader, None)
idx = [name.index(c) for c in ['measurement_date', 'person_id',
'value_as_number', 'measurement_concept_id']]
question_marks = str((query.count('\n')-4)*'?,')[:-1]
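            # only rows whose measurement_concept_id parses as a number and appears in
            # measurement_list (assumed to be defined earlier in this module) are inserted;
            # anything else is silently skipped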
for field in reader:
try:
msm_id = float(field[name.index('measurement_concept_id')])
if msm_id in measurement_list:
c.execute("INSERT INTO measurement_evl VALUES"+'('+question_marks+')', [field[i] for i in idx])
except:
pass
con.commit()
con.close()
# TABLE procedure
conn = sqlite3.connect(db_path)
query = """
CREATE TABLE procedure_evl
(
person_id INT,
procedure_concept_id INT,
procedure_date VARCHAR(14)
)
"""
c = conn.cursor()
c.execute(query)
# load data from csv files
with open(evl_path + 'procedure_occurrence.csv') as f:
reader = csv.reader(f)
name = next(reader, None)
idx = [name.index(c) for c in ['person_id', 'procedure_concept_id', 'procedure_date']]
question_marks = str((query.count('\n')-4)*'?,')[:-1]
for field in reader:
c.execute("INSERT INTO procedure_evl VALUES"+'('+question_marks+')', [field[i] for i in idx])
conn.commit()
conn.close()
# build table observation
con = sqlite3.connect(db_path)
query = """
CREATE TABLE observation_evl
(
person_id INT,
observation_date CHAR(14),
observation_concept_id INT
)
"""
c = con.cursor()
c.execute(query)
# load data from csv files
with open(evl_path + 'observation.csv') as f:
reader = csv.reader(f)
name = next(reader, None)
idx = [name.index(c) for c in ['person_id', 'observation_date', 'observation_concept_id']]
question_marks = str((query.count('\n')-4)*'?,')[:-1]
for field in reader:
c.execute("INSERT INTO observation_evl VALUES"+'('+question_marks+')', [field[i] for i in idx])
con.commit()
con.close()
# TABLE last_visit
con = sqlite3.connect(db_path)
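        # last_date_evl holds the most recent recorded activity date per person, taken
        # across the condition, drug, measurement, visit and procedure tables; SQLite's
        # two-argument max() returns the later of the two visit dates on each row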
query = """
create table last_date_evl as
with t1 as (
select person_id, condition_start_date as date from condition_evl
union
select person_id, drug_exposure_start_date as date from drug_evl
union
select person_id, measurement_date as date from measurement_evl
union
select person_id, max(visit_start_date, visit_end_date) as date from visit_evl
union
select person_id, procedure_date as date from procedure_evl
)
select person_id, max(t1.date) as last_visit
from t1
group by person_id
"""
c = con.cursor()
c.execute(query)
con.commit()
con.close()
print('Table last_date built')
def predict(self):
# patient dict
con = sqlite3.connect(db_path)
query = """
select distinct person_id
from person_evl
"""
        df_person = pd.read_sql_query(query, con)
# standard imports
import os
import glob
import inspect
from pprint import pprint
import pickle as pkl
import copy
import pandas as pd
import numpy as np
from tqdm import tqdm
import logging
import subprocess
import warnings
import itertools
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord, match_coordinates_sky
from astropy.visualization import ZScaleInterval
from astropy import units as u
from astropy.utils.exceptions import AstropyWarning
warnings.simplefilter('ignore', AstropyWarning)
try:
from p_tqdm import p_map
_parallel = True
except ModuleNotFoundError:
print('package "p_tqdm" not installed, cannot do parallel processing')
_parallel = False
# internal imports
import LOSSPhotPypeline
import LOSSPhotPypeline.utils as LPPu
from LOSSPhotPypeline.image import Phot, FitsInfo, FileNames
# setup tqdm for pandas
tqdm.pandas()
class LPP(object):
'''Lick Observatory Supernova Search Photometry Reduction Pipeline'''
def __init__(self, targetname, interactive = True, parallel = True, cal_diff_tol = 0.05, force_color_term = False, max_display_phase = 120,
wdir = '.', cal_use_common_ref_stars = False, sep_tol = 8, pct_increment = 0.05, in_pct_floor = 0.8, autoloadsave = False):
'''Instantiation instructions'''
# basics from instantiation
self.targetname = targetname.replace(' ', '')
self.config_file = targetname + '.conf'
self.interactive = interactive
self.wdir = os.path.abspath(wdir) # working directory for running (particularly idl code)
        if (parallel is True) and (_parallel is True):
self.parallel = True
else:
self.parallel = False
self.cal_diff_tol = cal_diff_tol # starting calibration difference tolerance
self.abs_cal_tol = 0.2 # do not proceed with the pipeline if in non-interactive mode and cal tol exceeds this
self.min_ref_num = 2 # minimum number of ref stars
self.pct_increment = pct_increment # amount to increment percentage requirement down by if doing ref check
self.in_pct_floor = in_pct_floor # minimum percentage of images ref stars must be in if doing ref check
self.checks = ['filter', 'date'] # default checks to perform on image list
self.phase_limits = (-60, 2*365) # phase bounds in days relative to disc. date to keep if "date" check performed
self.cal_use_common_ref_stars = cal_use_common_ref_stars # override requirement that each image have all ref stars
self.sep_tol = sep_tol # radius around target in arcseconds to exclude candidate reference stars from
# log file
self.logfile = self.targetname.replace(' ', '') + '.log'
self.build_log()
# sourced from configuration file
self.targetra = None
self.targetdec = None
self.photsub = False
self.photmethod = 'all'
self.refname = 'TBD'
self.photlistfile = 'TBD'
# discovery date (mjd)
self.disc_date_mjd = None
# check if config file exists -- if not then generate template
if not os.path.exists(self.config_file):
self.log.warn('No configuration file detected, complete template ({}) before proceeding.'.format(self.config_file + '.template'))
LPPu.genconf(targetname = self.targetname, config_file = self.config_file + '.template')
return
# general variables
self.filter_set_ref = ['B', 'V', 'R', 'I', 'CLEAR']
self.first_obs = None
self.phot_cols = {'3.5p': 3, '5p': 5, '7p': 7, '9p': 9, '1fh': 11, '1.5fh': 13, '2fh': 15, 'psf': 17}
self.calmethod = 'psf' # can be set to any key in phot_cols, but recommended is 'psf'
self.image_list = [] # list of image file names
self.phot_instances = [] # Phot instance for each image
self.aIndex = [] # indices of all images in phot_instances
self.wIndex = [] # subset of aIndex to work on
self.bfIndex = [] # indices of images with unsupported filters
self.ucIndex = [] # indices of WCS fail images, even though _c
self.bdIndex = [] # indices of images with dates outside of phase boundaries
self.pfIndex = [] # indices of photometry failures
self.psfIndex = [] # indices of photometry (sub) failures
self.cfIndex = [] # indices of calibration failures
self.csfIndex = [] # indices of calibration (sub) failures
self.noIndex = []
self.nosIndex = []
self.mrIndex = pd.Index([]) # keep track of indices to remove manually
self.run_success = False # track run success
# calibration variables
self.cal_source = 'auto'
self.calfile = 'TBD'
self.calfile_use = 'TBD'
self.force_color_term = force_color_term
self.calibration_dir = 'calibration'
if not os.path.isdir(self.calibration_dir):
os.makedirs(self.calibration_dir)
self.radecfile = os.path.join(self.calibration_dir, self.targetname + '_radec.txt')
self.radec = None
self.cal_IDs = 'all'
self.cal_arrays = None
self.cal_force_clear = False
self.max_display_phase = max_display_phase # num days to show rel to disc for interactive calibration
# keep track of counts of color terms
self.color_terms = {'kait1': 0, 'kait2': 0, 'kait3': 0, 'kait4': 0,
'nickel1': 0, 'nickel2': 0,
'Landolt': 0}
self.color_terms_used = None
# load configuration file
loaded = False
while not loaded:
try:
self.loadconf()
loaded = True
except FileNotFoundError:
LPPu.genconf(targetname = self.targetname, config_file = self.config_file + '.template')
print('Configuration could not be loaded. Template generated: {}'.format(self.config_file + '.template'))
response = input('Specify configuration file (*****.conf) or q to quit > ')
if 'q' == response.lower():
return
else:
self.config_file = response
# lightcurve variables
self.lc_dir = 'lightcurve'
self.lc_base = os.path.join(self.lc_dir, 'lightcurve_{}_'.format(self.targetname))
self.lc_ext = {'raw': '_natural_raw.dat',
'bin': '_natural_bin.dat',
'group': '_natural_group.dat',
'standard': '_standard.dat',
'ul': '_natural_ul.dat'}
# galaxy subtraction variables
self.template_images = None
self.templates_dir = 'templates'
# data directories
self.data_dir = os.path.dirname(self.refname)
self.error_dir = self.data_dir + '_sim'
# steps in standard reduction procedure
self.current_step = 0
self.steps = [self.load_images,
self.check_images,
self.find_ref_stars,
self.match_refcal_stars,
self.do_galaxy_subtraction_all_image,
self.do_photometry_all_image,
self.get_sky_all_image,
self.do_calibration,
self.get_zeromag_all_image,
self.get_limmag_all_image,
self.generate_lc,
self.write_summary]
# save file
self.savefile = self.targetname.replace(' ', '') + '.sav'
if os.path.exists(self.savefile):
if self.interactive:
load = input('Load saved state from {}? ([y]/n) > '.format(self.savefile))
else:
load = 'n' # run fresh if in non-interactive mode
            if autoloadsave:
                load = 'y' # if this keyword is set, load the saved state even in non-interactive mode
if 'n' not in load.lower():
self.load()
# make sure that the selected calmethod is one of the photmethods
if self.calmethod not in self.photmethod:
self.log.warn('Calibration method must be one of the photometry methods. Exiting.')
return
###################################################################################################
# Configuration File Methods
###################################################################################################
def loadconf(self):
'''
reads config file and sets class attributes accordingly
the most accurate accounting of system state is stored in the binary savefile
'''
# load config file and try to standardize keys
conf = pd.read_csv(self.config_file, header = None, delim_whitespace = True, comment = '#',
index_col = 0, squeeze = True).replace(np.nan, '')
conf.index = conf.index.str.lower()
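        # the configuration file is whitespace-delimited key/value pairs, one per line,
        # with '#' starting a comment; keys are lower-cased above so the matching below is
        # case-insensitive. Illustrative (not real) values:
        #   targetra     123.45678
        #   targetdec    -12.34567
        #   photsub      no
        #   photmethod   all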
# read and set values (including the type)
self.targetra = float(conf['targetra'])
self.targetdec = float(conf['targetdec'])
if conf['photsub'].lower() == 'yes': # defaults to False in all other cases
self.photsub = True
if conf['calsource'].lower() in ['psf','sdss','apass']: # only set if a known source is specified
self.cal_source = conf['calsource'].lower()
if conf['photmethod'].lower() == 'all':
self.photmethod = list(self.phot_cols.keys())
elif ',' not in conf['photmethod'].lower():
if conf['photmethod'].lower().strip() in self.phot_cols.keys():
self.photmethod = [conf['photmethod'].lower().strip()]
else:
print('{} is not a valid photometry method. Available options are:'.format(conf['photmethod'].strip()))
                print(', '.join(self.phot_cols.keys()))
self.photmethod = input('Enter selection(s) > ').strip().replace(' ', '').split(',')
else:
proposed = conf['photmethod'].strip().split(',')
if set(proposed).issubset(set(self.phot_cols.keys())):
self.photmethod = proposed
else:
print('At least one of {} is not a valid photometry method. Available options are:'.format(conf['photmethod'].strip()))
print(', '.join(self.phot_cols.keys()))
self.photmethod = input('Enter selection(s) > ').strip().replace(' ', '').split(',')
self.refname = conf['refname']
self.photlistfile = conf['photlistfile']
if conf['forcecolorterm'].strip() in self.color_terms.keys():
self.force_color_term = conf['forcecolorterm'].strip()
self.log.info('{} loaded'.format(self.config_file))
###################################################################################################
# Logging
###################################################################################################
def build_log(self):
'''starts and sets up log'''
self.log = logging.getLogger('LOSSPhotPypeline')
self.log.setLevel(logging.DEBUG)
# don't duplicate entries
if self.log.hasHandlers():
self.log.handlers.clear()
# internal logging
fh = logging.FileHandler(self.logfile)
fh.setFormatter(logging.Formatter('%(asctime)s in %(funcName)s with level %(levelname)s ::: %(message)s'))
self.log.addHandler(fh)
# if in interactive mode, print log at or above INFO on screen
if self.interactive:
sh = logging.StreamHandler()
sh.setLevel(logging.INFO)
sh.setFormatter(logging.Formatter('\n'+'*'*60+'\n%(message)s\n'+'*'*60))
self.log.addHandler(sh)
# used by contextlib to log all idl and bash outputs, while hiding from screen
self.log.write = lambda msg: self.log.debug('[external] ' + msg) if msg != '\n' else None
self.log.info('Welcome to the LOSS Photometry Pypeline (LPP)')
###################################################################################################
# UI / Automation Methods
###################################################################################################
def __iter__(self):
return self
def next(self, *args, **kwargs):
'''performs next reduction step (arguments for that step can be passed through)'''
if self.current_step < len(self.steps):
self.steps[self.current_step](*args, **kwargs)
self.current_step += 1
self.save()
self.summary()
else:
raise StopIteration
def skip(self):
'''skip current step'''
self.log.info('skipping step: {}'.format(self.steps[self.current_step].__name__))
self.go_to(self.current_step + 1)
self.summary()
def go_to(self, step = None):
'''go to specified step, or choose interactively'''
if type(step) == int:
self.current_step = step
self.summary()
else:
self.summary()
print('\nChoose an option:\n')
print('primary reduction steps:')
for i, step in enumerate(self.steps):
if i == self.current_step:
print('{} --- {} (current step)'.format(i, step.__name__))
else:
print('{} --- {}'.format(i, step.__name__))
print('\nadditional options:')
print('n --- add new image(s) by filename(s)')
print('nf --- add new images from file of names')
print('p --- plot light curve from file')
print('c --- cut points from specific light curve')
print('cr --- cut points from specific raw light curve and regenerate subsequent light curves')
print('q --- quit\n')
resp = input('selection > ').lower()
if 'n' == resp:
new_images = input('enter name(s) or new images (comma separated) > ')
if ',' not in new_images:
new_image_list = [new_images]
else:
new_image_list = [fl.strip() for fl in new_images.split(',')]
self.process_new_images(new_image_list = new_image_list)
elif 'nf' == resp:
new_image_file = input('enter name of new image file > ')
self.process_new_images(new_image_file = new_image_file)
elif 'p' == resp:
lc_file = input('enter light curve file (including relative path) to plot > ')
self.plot_lc([lc_file])
elif (resp == 'c') or (resp == 'cr'):
lc_file = input('enter light curve file (including relative path) to cut points from > ')
regenerate = False
if resp == 'cr':
regenerate = True
                self.cut_lc_points(lc_file, regenerate = regenerate)
else:
try:
self.current_step = int(resp)
except ValueError:
return
self.summary()
def save(self):
'''saves current state of pipeline'''
vs = vars(self).copy()
vs.pop('steps')
vs.pop('log')
with open(self.savefile, 'wb') as f:
pkl.dump(vs, f)
self.log.info('{} written'.format(self.savefile))
def load(self, savefile = None, summary = True):
'''re-initializes pipeline from saved state in file'''
if savefile is None:
savefile = self.savefile
with open(savefile, 'rb') as f:
vs = pkl.load(f)
for v in vs.keys():
s = 'self.{} = vs["{}"]'.format(v, v)
exec(s)
self.log.info('{} loaded'.format(savefile))
if summary:
self.summary()
def summary(self):
'''print summary of pipeline status'''
print('\n' + '*'*60)
print('Reduction status for {}'.format(self.targetname))
print('Interactive: {}'.format(self.interactive))
print('Photsub Mode: {}'.format(self.photsub))
print('*'*60 + '\n')
if self.current_step == 0:
print('Beginning of reduction pipeline.\n')
else:
print('Previous step: {}'.format(self.steps[self.current_step - 1].__name__))
print(self.steps[self.current_step - 1].__doc__ + '\n')
try:
print('--> Next step: {}'.format(self.steps[self.current_step].__name__))
print(self.steps[self.current_step].__doc__ + '\n')
except IndexError:
print('End of reduction pipeline.')
self.save()
return
try:
print('----> Subsequent step: {}'.format(self.steps[self.current_step + 1].__name__))
print(self.steps[self.current_step + 1].__doc__ + '\n')
except IndexError:
print('End of reduction pipeline.')
def run(self, skips = []):
'''run through reduction steps'''
while True:
if self.current_step in skips:
self.skip()
else:
try:
self.next()
except StopIteration:
break
def show_variables(self):
'''prints instance variables'''
pprint(vars(self))
def show_methods(self):
'''show available methods'''
print('method: docstring')
for name in LPP.__dict__.keys():
if name[:2] != '__' and name != 'show_methods':
print('{}: {}'.format(name, LPP.__dict__[name].__doc__))
###################################################################################################
# Reduction Pipeline Methods
###################################################################################################
def load_images(self):
'''reads image list file to generate lists of image names and Phot instances'''
self.image_list = pd.read_csv(self.photlistfile, header = None, delim_whitespace = True,
comment = '#', squeeze = True)
if self.interactive:
print('\nSelected image files')
print('*'*60 + '\n')
print(self.image_list)
print('\n')
self.log.info('image list loaded from {}'.format(self.photlistfile))
self.log.info('generating list of Phot instances from image list')
self.phot_instances = self._im2inst(self.image_list) # radec is None if running in order
# set indices
self.aIndex = self.image_list.index
self.wIndex = self.aIndex
def check_images(self):
'''only keep images that are in a supported filter and without file format issues'''
# filter check
if 'filter' in self.checks:
filter_check = lambda img: True if img.filter.upper() in self.filter_set_ref else False
self.log.info('checking filters')
bool_idx = self.phot_instances.loc[self.wIndex].progress_apply(filter_check)
self.bfIndex = self.wIndex[~pd.Series(bool_idx)]
self.log.info('dropping {} images due to unsupported filter'.format(len(self.bfIndex)))
self.wIndex = self.wIndex.drop(self.bfIndex)
# uncal check
if 'uncal' in self.checks:
cal_check = lambda img: True if ('RADECSYS' not in img.header) else (False if (img.header['RADECSYS'] == '-999') else True)
self.log.info('checking images for WCS')
bool_idx = self.phot_instances.loc[self.wIndex].progress_apply(cal_check)
self.ucIndex = self.wIndex[~pd.Series(bool_idx)]
self.log.info('dropping {} images for failed WCS'.format(len(self.ucIndex)))
self.wIndex = self.wIndex.drop(self.ucIndex)
if 'date' in self.checks:
if self.disc_date_mjd is None:
self.log.warn('discovery date not set, cannot do date check')
return
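            # keep only images whose MJD lies within phase_limits days of the discovery date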
date_check = lambda img: True if ((img.mjd >= (self.disc_date_mjd + self.phase_limits[0])) and
(img.mjd <= (self.disc_date_mjd + self.phase_limits[1]))) else False
self.log.info('checking phases')
bool_idx = self.phot_instances.loc[self.wIndex].progress_apply(date_check)
self.bdIndex = self.wIndex[~pd.Series(bool_idx)]
self.log.info('dropping {} images that are outside of phase bounds'.format(len(self.bdIndex)))
self.wIndex = self.wIndex.drop(self.bdIndex)
# if there are none left, end pipeline
if len(self.wIndex) == 0:
self.log.warn('all images removed by checks --- cannot proceed')
self.run_success = False
self.current_step = self.steps.index(self.write_summary) - 1
return
def find_ref_stars(self):
'''identify all suitable stars in ref image, compute ra & dec, write radecfile, store in instance'''
# if radecfile already exists, no need to do it
if os.path.exists(self.radecfile):
self.log.info('radecfile already exists, loading only')
self.radec = pd.read_csv(self.radecfile, delim_whitespace=True, skiprows = (0,1,3,4,5), names = ['RA','DEC'])
# set radec in Phot instances
for img in self.phot_instances.loc[self.wIndex]:
img.radec = self.radec
return
if self.refname == '' :
self.log.warn('refname has not been assigned, please do it first!')
return
# instantiate object to manage names
ref = Phot(self.refname, calmethod = self.calmethod)
# use sextractor to extract all stars to be used as refstars
sxcp = os.path.join(os.path.dirname(inspect.getfile(LOSSPhotPypeline)), 'conf', 'sextractor_config')
config = os.path.join(sxcp, 'kait.sex')
filt = os.path.join(sxcp, 'gauss_2.0_5x5.conv')
par = os.path.join(sxcp, 'kait.par')
star = os.path.join(sxcp, 'default.nnw')
cmd_list = ['sex', self.refname,
'-c', config,
'-PARAMETERS_NAME', par,
'-FILTER_NAME', filt,
'-STARNNW_NAME', star,
'-CATALOG_NAME', ref.sobj,
'-CHECKIMAGE_NAME', ref.skyfit]
p = subprocess.Popen(cmd_list, stdout = subprocess.PIPE, stderr = subprocess.PIPE, universal_newlines = True)
stdout, stderr = p.communicate()
self.log.debug(stdout)
self.log.debug(stderr)
# make sure process succeeded
if not os.path.exists(ref.sobj):
self.log.warn('SExtractor failed --- no sobj file generated, check!')
return
# read sobj file of X_IMAGE and Y_IMAGE columns, as well as MAG_APER for sort
with fits.open(ref.sobj) as hdul:
data = hdul[1].data
        # sort according to magnitude, from small/bright to large/faint
data.sort(order = 'MAG_APER')
imagex = data.X_IMAGE
imagey = data.Y_IMAGE
# transform to RA and DEC using ref image header information
cs = WCS(header = ref.header)
imagera, imagedec = cs.all_pix2world(imagex, imagey, 0)
# remove any identified "stars" that are too close to target
coords = SkyCoord(imagera, imagedec, unit = (u.deg, u.deg))
target_coords = SkyCoord(self.targetra, self.targetdec, unit = (u.deg, u.deg))
offsets = coords.separation(target_coords).arcsecond
imagera = imagera[offsets > self.sep_tol]
imagedec = imagedec[offsets > self.sep_tol]
# write radec file
with open(self.radecfile, 'w') as f:
f.write('TARGET\n')
f.write(' RA DEC\n')
f.write(' {:.7f} {:.7f}\n'.format(self.targetra, self.targetdec))
f.write('\nREFSTARS\n')
f.write(' RA DEC\n')
for i in range(len(imagera)):
f.write(' {:.7f} {:.7f}\n'.format(imagera[i], imagedec[i]))
self.log.info('{} written'.format(self.radecfile))
        self.radec = pd.read_csv(self.radecfile, delim_whitespace=True, skiprows = (0,1,3,4,5), names = ['RA','DEC'])
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import re
from unittest import TestCase
import numpy as np
import pandas as pd
import statsmodels
from kats.consts import TimeSeriesData
from kats.detectors.cusum_model import (
CUSUMDetectorModel,
CusumScoreFunction,
)
statsmodels_ver = float(
re.findall("([0-9]+\\.[0-9]+)\\..*", statsmodels.__version__)[0]
)
class TestCUSUMDetectorModel(TestCase):
def test_increase(self) -> None:
np.random.seed(100)
scan_window = 24 * 60 * 60 # in seconds
historical_window = 3 * 24 * 60 * 60 # in seconds
test_data_window = 16 # in hours
df_increase = pd.DataFrame(
{
"ts_value": np.concatenate(
[np.random.normal(1, 0.2, 156), np.random.normal(1.5, 0.2, 12)]
),
"time": | pd.date_range("2020-01-01", periods=168, freq="H") | pandas.date_range |
import requests
import json
import pandas as pd
import numpy as np
import datetime as dt
import psycopg2 as pg
import pymongo as pm
import os
import eod_api
api = eod_api.api
e_date = (dt.datetime.now() - dt.timedelta(1)).strftime('%Y-%m-%d')
error_log = []
def get_db_exchanges():
con = pg.connect(database = 'securities_master', user = 'postgres')
cur = con.cursor()
command = 'SELECT * FROM exchange'
cur.execute(command)
data = cur.fetchall()
db_exchanges_df = pd.DataFrame(
data,
columns = [
'id',
'code',
'name',
'short_name',
'country',
'currency',
'created_date',
'last_updated_date',
'last_price_update_date',
'last_fundamental_update_date'
]
)
db_exchanges_df.set_index('id', inplace = True)
cur.close()
con.close()
return db_exchanges_df
def get_db_fundamentals(instrument_id):
pass
def get_db_fund_watchlist():
con = pg.connect(database = 'securities_master', user = 'postgres')
cur = con.cursor()
command = 'SELECT * FROM fund_watchlist'
cur.execute(command)
data = cur.fetchall()
db_fund_watchlist_df = pd.DataFrame(
data,
columns = ['id', 'instrument_id', 'created_date', 'last_updated_date']
)
db_fund_watchlist_df.set_index('id', inplace = True)
cur.close()
con.close()
return db_fund_watchlist_df
def get_db_indices():
con = pg.connect(database = 'securities_master', user = 'postgres')
cur = con.cursor()
command = 'SELECT * FROM benchmark_index'
cur.execute(command)
data = cur.fetchall()
db_indices_df = pd.DataFrame(
data,
columns = [
'id',
'short_name',
'name',
'city',
'country',
'timezone_offset',
'created_date',
'last_updated_date'
]
)
db_indices_df.set_index('id', inplace = True)
cur.close()
con.close()
return db_indices_df
def get_db_instruments(exchange_id = None):
con = pg.connect(database = 'securities_master', user = 'postgres')
cur = con.cursor()
if exchange_id == None:
command = 'SELECT * FROM instrument'
else:
command = f'SELECT * FROM instrument WHERE exchange_id = {exchange_id}'
# command = ('SELECT sym.id, sym.index_id, sym.ticker, bm.id, bm.short_name FROM symbol AS sym'
# 'JOIN benchmark_index AS bm ON (sym.index_id = bm.id')
cur.execute(command)
data = cur.fetchall()
cols = [
'id',
'exchange_id',
'ticker',
'instrument_type',
'name',
'currency',
'created_date',
'last_updated_date'
]
db_instruments_df = pd.DataFrame(
data,
columns = cols
)
db_instruments_df.set_index('id', inplace = True)
cur.close()
con.close()
return db_instruments_df
def get_db_price(instrument_id = None, price_date = None, include_ticker = False):
con = pg.connect(database = 'securities_master', user = 'postgres')
cur = con.cursor()
cols = [
'id',
'data_vendor_id',
'instrument_id',
'price_date',
'created_date',
'last_updated_date',
'open_price',
'high_price',
'low_price',
'close_price',
'adj_close_price',
'volume'
]
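    # when include_ticker is set, join the instrument table so each price row also carries
    # its ticker; all other joined instrument columns are dropped again further down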
if include_ticker:
ticker_join = ' JOIN instrument ON (daily_price.instrument_id = instrument.id)'
instr_cols = [
'inst_id',
'exchange_id',
'ticker',
'instrument_type',
'name',
'currency',
'inst_created_date',
'inst_last_update_date'
]
cols.extend(instr_cols)
else:
ticker_join = ''
if (instrument_id == None) & (price_date == None):
        command = f'SELECT * FROM daily_price{ticker_join}'
elif (instrument_id != None) & (price_date == None):
command = f'SELECT * FROM daily_price{ticker_join} WHERE instrument_id = {instrument_id}'
elif (instrument_id == None) & (price_date != None):
command = f'SELECT * FROM daily_price{ticker_join} WHERE price_date = \'{price_date}\''
else:
command = (f'SELECT * FROM daily_price{ticker_join} '
f'WHERE instrument_id = {instrument_id} AND price_date = \'{price_date}\'')
cur.execute(command)
data = cur.fetchall()
db_prices_df = pd.DataFrame(
data,
columns = cols
)
db_prices_df.set_index('id', inplace = True)
if include_ticker:
drop_cols = [
'inst_id',
'exchange_id',
# 'ticker',
'instrument_type',
'name',
'currency',
'inst_created_date',
'inst_last_update_date'
]
db_prices_df.drop(drop_cols, axis = 1, inplace = True)
cur.close()
con.close()
return db_prices_df
def get_eod_bulk_price(ex, e_date = e_date):
'''
Parameters
----------
ex : string : exchange (eg. US)
Returns
-------
df : pandas dataframe
'''
url = (
f'http://eodhistoricaldata.com/api/eod-bulk-last-day/{ex}'
f'?api_token={api}&fmt=json&date={e_date}'
)
response = requests.get(url)
data = response.text
bulk_data = pd.read_json(data)
# bulk_data = json.loads(data)
return bulk_data
def get_eod_constituents(index, s_date = '1990-01-01'):
url = (f'https://eodhistoricaldata.com/api/fundamentals/{index}.INDX?'
f'api_token={api}&historical=1&from={s_date}&to={e_date}')
response = requests.get(url)
data = response.text
df = pd.read_json(data)
general_info = df['General'].dropna()
constituents = df['Components'].dropna()
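    # 'Components' holds one dict per constituent; flatten the dicts into a DataFrame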
if constituents.shape[0] > 0:
constituent_keys = list(constituents[0].keys())
constituent_values = [list(i.values()) for i in constituents]
constituents = pd.DataFrame.from_records(constituent_values, columns = constituent_keys)
return constituents, general_info
def get_eod_corp_act(sec, ex, corp_act_type, s_date = '1900-01-01'):
'''
Parameters
----------
sec : string : security (eg. AAPL)
ex : string : exchange (eg. US)
corp_act_type: type of corporate action ('div', 'splits', 'shorts')
s_date : string : 'yyyy-mm-dd' format
Returns
-------
df : pandas dataframe
'''
valid_types = ['div', 'splits', 'shorts']
if corp_act_type in valid_types:
url = (f'https://eodhistoricaldata.com/api/{corp_act_type}/'
f'{sec}.{ex}?api_token={api}&from={s_date}&fmt=json')
response = requests.get(url)
data = response.text
df = pd.read_json(data).T
df.set_index('date', inplace = True)
return df
else:
print('Not a valid corporate action type.')
def get_eod_etf(sec, ex, s_date = '1900-01-01'):
'''
Parameters
----------
sec : string : security (eg. AAPL)
ex : string : exchange (eg. US)
s_date : string : 'yyyy-mm-dd' format
Returns
-------
df : pandas dataframe
'''
url = (f'https://eodhistoricaldata.com/api/fundamentals/{sec}.{ex}?'
f'api_token={api}&historical=1&from={s_date}&to={e_date}')
response = requests.get(url)
data = response.text
df = pd.read_json(data)
return df
def get_eod_exchanges(format = 'df'):
valid_formats = ['json', 'df']
if format in valid_formats:
url = f'https://eodhistoricaldata.com/api/exchanges-list/?api_token={api}&fmt=json'
response = requests.get(url)
data = response.text
if format == 'json':
exchanges = json.loads(data)
elif format == 'df':
exchanges = pd.read_json(data)
return exchanges
def get_eod_fundamentals(sec, ex, s_date = '1900-01-01'):
'''
Parameters
----------
sec : string : security (eg. AAPL)
ex : string : exchange (eg. US)
s_date : string : 'yyyy-mm-dd' format
Returns
-------
fundamentals : dictionary object
'''
url = (f'https://eodhistoricaldata.com/api/fundamentals/{sec}.{ex}?from={s_date}&to={e_date}'
f'&api_token={api}&period=d&fmt=json')
response = requests.get(url)
data = response.text
fundamentals = json.loads(data)
return fundamentals
def get_eod_instruments(exchange = 'INDX', format = 'df'):
valid_formats = ['json', 'df']
if format in valid_formats:
url = (
f'https://eodhistoricaldata.com/api/exchange-symbol-list/{exchange}'
f'?api_token={api}&fmt=json'
)
response = requests.get(url)
data = response.text
if format == 'json':
instruments = json.loads(data)
elif format == 'df':
instruments = pd.read_json(data)
return instruments
def get_eod_price(sec, ex, s_date = '1900-01-01'):
'''
Parameters
----------
sec : string : security (eg. AAPL)
ex : string : exchange (eg. US)
s_date : string : 'yyyy-mm-dd' format
Returns
-------
df : pandas dataframe
'''
url = (f'https://eodhistoricaldata.com/api/eod/{sec}.{ex}?from={s_date}&to={e_date}'
f'&api_token={api}&period=d&fmt=json')
try:
response = requests.get(url)
data = response.text
df = pd.read_json(data)
if df.shape[0] > 0:
df.set_index('date', inplace = True)
return df
except:
error_log.append(['Error: get_eod_price', url])
return pd.DataFrame()
def eod_bulk_prices_to_db(eod_bulk_prices_df, exch_id, data_vendor_id):
con = pg.connect(database = 'securities_master', user = 'postgres')
cur = con.cursor()
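    # map each ticker on this exchange to its SMDB instrument id so the bulk EOD rows
    # (keyed by ticker) can be written against instrument_id in daily_price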
command = (f'select instrument.id, instrument.ticker, exchange.code '
f'from instrument join exchange on (instrument.exchange_id = exchange.id) '
f'where exchange.id = {exch_id}')
cur.execute(command)
data = cur.fetchall()
map_df = pd.DataFrame(
data,
columns = ['id', 'ticker', 'exchange_code']
)
map_df.set_index('id', inplace = True)
now = dt.datetime.now().strftime('%Y-%m-%d')
cols = ('data_vendor_id, instrument_id, price_date, created_date, last_updated_date, '
'open_price, high_price, low_price, close_price, adj_close_price, volume')
for ind, row in eod_bulk_prices_df.iterrows():
try:
ticker = str(row['code'])
# exchange = str(row['exchange_short_name'])
instrument_id = map_df[map_df['ticker'] == ticker].index[0]
price_date = row['date'].strftime('%Y-%m-%d')
open_price = row['open']
high_price = row['high']
low_price = row['low']
close_price = row['close']
adj_close_price = row['adjusted_close']
volume = row['volume']
vals = (
f"'{data_vendor_id}', '{instrument_id}', '{price_date}', "
f"'{now}', '{now}', '{open_price}', "
f"'{high_price}', '{low_price}', '{close_price}', "
f"'{adj_close_price}', '{volume}'"
)
command = f'INSERT INTO daily_price ({cols}) VALUES ({vals})'
cur.execute(command)
except:
error_log.append(row)
con.commit()
cur.close()
con.close()
def eod_constituents_to_db(eod_constituents_df, index_id):
con = pg.connect(database = 'securities_master', user = 'postgres')
cur = con.cursor()
now = dt.datetime.now().strftime('%Y-%m-%d')
command = (
f'SELECT date FROM benchmark_index_member WHERE index_id = {index_id} '
f'ORDER BY date DESC LIMIT 1'
)
cur.execute(command)
data = cur.fetchall()
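    # find the date of the most recent membership snapshot for this index (default
    # 1990-01-01 if none exists) and only write a new snapshot if it is older than today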
if len(data) > 0:
last_date = data[0][0]
else:
last_date = dt.date(1990, 1, 1)
if last_date < dt.date.today():
cols = 'index_id, ticker, exchange_code, date, last_update_date'
for ind, row in eod_constituents_df.iterrows():
ticker = str(row['Code'])
exchange_code = row['Exchange']
vals = f"'{index_id}', '{ticker}', '{exchange_code}', '{now}', '{now}'"
command = f'INSERT INTO benchmark_index_member ({cols}) VALUES ({vals})'
cur.execute(command)
con.commit()
cur.close()
con.close()
def eod_exchanges_to_db(eod_exchanges_df, db_exchanges_df):
con = pg.connect(database = 'securities_master', user = 'postgres')
cur = con.cursor()
now = dt.datetime.now().strftime('%Y-%m-%d')
cols = 'code, name, short_name, country, currency, created_date, last_update_date'
new_exchanges = [ex for ex in eod_exchanges_df['Code'] if ex not in db_exchanges_df['code'].values]
exchanges_df = eod_exchanges_df[eod_exchanges_df['Code'].isin(new_exchanges)]
# upload new exchanges to 'exchange' table in SMDB
for ind, row in exchanges_df.iterrows():
code = str(row['Code'])
name = str(row['Name']).replace("'", "")
short_name = row['OperatingMIC']
country = str(row['Country'])
currency = str(row['Currency'])
vals = f"'{code}', '{name}', '{short_name}', '{country}', '{currency}', '{now}', '{now}'"
command = f'INSERT INTO exchange ({cols}) VALUES ({vals})'
cur.execute(command)
con.commit()
cur.close()
con.close()
def eod_fundamentals_to_db(eod_fundamentals):
# client = pm.MongoClient()
# db = client['test']
# result = db.fundamentals.insert_one(eod_fundamentals)
# print('One post: {0}'.format(result.inserted_id)) ## confirmed in shell that this has worked
print('Done')
def eod_index_to_db(info_df):
con = pg.connect(database = 'securities_master', user = 'postgres')
cur = con.cursor()
code, name, country = str(info_df['Code']), str(info_df['Name']), str(info_df['CountryName'])
now = dt.datetime.now().strftime('%Y-%m-%d')
cols = 'short_name, name, country, created_date, last_updated_date'
vals = f"'{code}', '{name}', '{country}', '{now}', '{now}'"
command = f'INSERT INTO benchmark_index ({cols}) VALUES ({vals})'
cur.execute(command)
con.commit()
cur.close()
con.close()
def eod_instruments_to_db(eod_instruments_df, db_instruments_df, db_exchange_id):
if eod_instruments_df.shape[0] > 0:
con = pg.connect(database = 'securities_master', user = 'postgres')
cur = con.cursor()
now = dt.datetime.now().strftime('%Y-%m-%d')
cols = ('exchange_id, ticker, instrument_type, name, currency, created_date, last_update_date')
missing_tickers = [
t for t in eod_instruments_df['Code'] if t not in db_instruments_df['ticker'].values
]
instruments_df = eod_instruments_df[eod_instruments_df['Code'].isin(missing_tickers)]
# upload new instruments to 'symbols' table in SMDB
for ind, row in instruments_df.iterrows():
ticker = str(row['Code']).replace("'", "\'\'")
name = str(row['Name']).replace("'", "\'\'")
currency = str(row['Currency'])
instrument_type = str(row['Type'])
vals = (
f"'{db_exchange_id}', '{ticker}', '{instrument_type}', "
f"'{name}', '{currency}', '{now}', '{now}'"
)
command = f'INSERT INTO instrument ({cols}) VALUES ({vals})'
cur.execute(command)
con.commit()
cur.close()
con.close()
def eod_prices_to_db(eod_prices_df, db_prices_df, instrument_id, data_vendor_id):
con = pg.connect(database = 'securities_master', user = 'postgres')
cur = con.cursor()
now = dt.datetime.now().strftime('%Y-%m-%d')
cols = ('data_vendor_id, instrument_id, price_date, created_date, last_updated_date, '
'open_price, high_price, low_price, close_price, adj_close_price, volume')
# new_prices = [
# p.strftime('%Y-%m-%d') for p in eod_prices_df.index
# if p not in db_prices_df['price_date'].values
# ]
prices_df = eod_prices_df#[eod_prices_df.index.isin(new_prices)]
# upload new exchanges to 'exchange' table in SMDB
for ind, row in prices_df.iterrows():
try:
price_date = ind.strftime('%Y-%m-%d')
open_price = row['open']
high_price = row['high']
low_price = row['low']
close_price = row['close']
adj_close_price = row['adjusted_close']
volume = row['volume']
vals = (
f"'{data_vendor_id}', '{instrument_id}', '{price_date}', "
f"'{now}', '{now}', '{open_price}', "
f"'{high_price}', '{low_price}', '{close_price}', "
f"'{adj_close_price}', '{volume}'"
)
command = f'INSERT INTO daily_price ({cols}) VALUES ({vals})'
cur.execute(command)
except:
error_log.append(['Error: eod_prices_to_db', row])
con.commit()
cur.close()
con.close()
def db_update_instruments():
# adds new exchanges if missing from SMDB
eod_exchanges_df = get_eod_exchanges()
db_exchanges_df = get_db_exchanges()
eod_exchanges_to_db(eod_exchanges_df, db_exchanges_df)
# loop through every exchange in SMDB
db_exchanges_df = get_db_exchanges() # get updated list
x = 0
for exch_id, exch_data in db_exchanges_df.iterrows():
percent_done = round((x / db_exchanges_df.shape[0]) * 100, 2)
print(f'Part 1: {percent_done}% complete. Working on Exchange: {exch_data["code"]}.')
eod_instruments_df = get_eod_instruments(exch_data['code'])
db_instruments_df = get_db_instruments(exch_id)
eod_instruments_to_db(eod_instruments_df, db_instruments_df, exch_id)
x += 1
print('Part 1: 100% complete.')
def db_update_index_constituents():
eod_indices_df = get_eod_instruments('INDX')
db_indices_df = get_db_indices()
for x, ind in enumerate(eod_indices_df['Code']):
percent_done = round((x / len(eod_indices_df['Code'])) * 100, 2)
print(f'Part 2: {percent_done}% complete. Working on Index: {ind}.')
eod_constituents_df, info = get_eod_constituents(ind)
# adds new indices if missing from SMDB
if ind not in db_indices_df['short_name'].values:
eod_index_to_db(info)
db_indices_df = get_db_indices() # gets updated list
# adds new symbols if missing from SMDB and records constituents per index
if eod_constituents_df.shape[0] > 0:
db_index_id = db_indices_df[db_indices_df['short_name'] == ind].index[0]
eod_constituents_to_db(eod_constituents_df, db_index_id)
    print('Part 2: 100% complete.')
def db_update_prices():
'''
This is the function for importing price data that is more than just one day. For single
date prices, see the `db_update_bulk_prices` function
The first loop iterates through the exchanges in the `exchange` tables.
    The second loop iterates through the instruments in the `instrument` table for that exchange.
    There is no initial reference to the SMDB with this function to check if price data
already exists.
'''
db_exchanges_df = get_db_exchanges()
# TEMP SOLUTION
# db_exchanges_df.drop(1, inplace = True)
# db_exchanges_df = db_exchanges_df.loc[8, :].to_frame().T # Testing data for XETRA Exchange
# db_exchanges_df = db_exchanges_df.loc[2, :].to_frame().T # Same again for LSE listed companies
# db_exchanges_df = db_exchanges_df.loc[68, :].to_frame().T # Same again for Indices
# db_exchanges_df = db_exchanges_df.loc[75, :].to_frame().T # Same again for TSE listed companies
# db_exchanges_df = db_exchanges_df.loc[64, :].to_frame().T # Same again for EUFUND
db_exchanges_df = db_exchanges_df.loc[1, :].to_frame().T # Same again for US listed companies
y = 0
for exch_id, exch_data in db_exchanges_df.iterrows():
db_instruments_df = get_db_instruments(exch_id)
        if exch_id == 64: # EUFUND
db_fund_watchlist_df = get_db_fund_watchlist()
db_instruments_df = db_instruments_df.loc[db_fund_watchlist_df['instrument_id'].values, :]
percent_done = round(((y)/ db_exchanges_df.shape[0]) * 100, 2)
        print(f'Part 3: {percent_done}% complete. Working on prices for Exchange: {exch_data["code"]}.')
last_price_list = []
x = 0
for instrument_id, instrument_data in db_instruments_df.iterrows():
# db_prices_df = get_db_price(instrument_id)
sub_percent_done = round((x / db_instruments_df.shape[0]) * 100, 2)
print(f'\t{sub_percent_done}% complete. {instrument_data["ticker"]} uploading.')
## This is where using the SMDB index will be useful
# if db_prices_df.shape[0] > 0:
# last_db_date = db_prices_df['price_date'].sort_values(ascending = False).values[0]
# start_date = (last_db_date + dt.timedelta(1))
# else:
# start_date = dt.date(1900, 1, 1)
start_date = dt.date(1900, 1, 1) ## TEMP SOLUTION
if start_date < (dt.date.today() - dt.timedelta(1)):
ticker = instrument_data['ticker']
exch_code = exch_data['code']
eod_prices_df = get_eod_price(ticker, exch_code, start_date.strftime('%Y-%m-%d'))
if eod_prices_df.shape[0] > 0:
                    eod_prices_to_db(eod_prices_df, pd.DataFrame()
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
from collections import namedtuple
import math
import geopy.distance
pd.set_option('display.max_rows', 10000)
DRONE_HEIGHT = 100 - 1.5
def generate_dataset_gps():
tx_coord = (63.40742, 10.47752) #ole sine koordinater
Measurement = namedtuple("Measurement", ['bin_dist','true_dist','measured_dist','error','mean_clk_ticks','var_clk_ticks','num_pkt_tx','num_pkt_rx', 'pdr'])
measurements = []
#convert degrees decimal minutes to decimal degrees
def dmm2dd(d, dm):
m = math.floor(dm)
s = (dm - m) * 60
dd = float(d) + float(m)/60 + float(s)/(60*60)
return dd
#convert logged coord to decimal degrees
def convert_coord(n_coord, e_coord):
n_coord = n_coord.replace("N","")
d_n,dm_n = int(n_coord[:2]), float(n_coord[2:])
e_coord = e_coord.replace("E","")
d_e,dm_e = int(e_coord[:3]), float(e_coord[3:])
return (dmm2dd(d_n, dm_n), dmm2dd(d_e, dm_e))
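    # pre-compute slant-range bins: ground offsets of 0-750 m in 50 m steps, each converted
    # to the line-of-sight distance at DRONE_HEIGHT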
distance_bins = []
for d in range(0,800,50):
distance_bins.append(round(math.sqrt(d*d + DRONE_HEIGHT*DRONE_HEIGHT)))
with open('data/raw-combined/Combined.csv') as file:
skip_header = True
for line in file.readlines():
line = line.strip()
values = line.split(",")
if(skip_header) :
skip_header = False
continue
measured_dist = int(values[4])
#remove bad measurements
if measured_dist < 80:
continue
gps_coords = convert_coord(values[9], values[10])
true_dist = math.floor(math.sqrt((geopy.distance.distance(gps_coords, tx_coord).m)**2 + DRONE_HEIGHT**2))
error = measured_dist - true_dist
#sort measurements into bins since true dist varies a lot
bin_dist = 0
for d in distance_bins:
if(true_dist + 5 + d/50 >= d):
bin_dist = d
else:
break
mean_clk_ticks = int(values[5])
var_clk_ticks = int(values[6])
num_pkt_tx = int(values[8])
num_pkt_rx = int(values[7])
pdr = num_pkt_rx/num_pkt_tx
#drop measurements with low pdr
if(pdr < 0.5):
continue
measurement = Measurement(bin_dist,true_dist, measured_dist, error, mean_clk_ticks, var_clk_ticks, num_pkt_tx, num_pkt_rx, pdr)
measurements.append(measurement)
    df = pd.DataFrame(measurements)
#!/usr/bin/env python3
# author : <NAME>
import argparse
import os
import shutil
import numpy as np
import pandas as pd
from pmapper.pharmacophore import Pharmacophore as P
from scipy.spatial.distance import pdist
def create_parser():
parser = argparse.ArgumentParser(description='Search for xyz files in a directory, calc 3D pharmacophore hashes '
'and save models with distinct hashes to an output directory.')
parser.add_argument('-i', '--input', metavar='DIR_NAME', required=True, type=str,
help='directory with xyz file of pharmacophore models.')
parser.add_argument('-s', '--bin_step', metavar='NUMERIC', required=False, default=1, type=float,
help='bin step. Default: 1.')
parser.add_argument('-o', '--output', metavar='DIR_NAME', required=True, type=str,
help='xyz files having distinct 3D pharmacophore hashes will be stored to the specified '
'directory.')
parser.add_argument('-d', '--output_hashes', metavar='hashes.txt', required=False, type=str, default=None,
help='text file with 3D pharmacophore hashes and auxiliary information for all input '
'pharmacophores. If not specified the file named hashes.txt will be created in the '
'input directory.')
return parser
def entry_point():
parser = create_parser()
args = parser.parse_args()
if args.output_hashes is None:
args.output_hashes = os.path.join(os.path.dirname(args.input), 'hashes.txt')
files = [f for f in os.listdir(args.input) if f.endswith(".xyz")]
data = []
for fname in sorted(files):
p = P(bin_step=args.bin_step)
p.load_from_xyz(os.path.join(args.input, fname))
h = p.get_signature_md5()
features = p.get_feature_coords()
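        # summarize each pharmacophore: sorted concatenation of feature labels, total and
        # spatially distinct feature counts, and the largest pairwise feature distance
        # (0 when fewer than two features are present)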
fstr = ''.join(sorted(item[0] for item in features))
fcount = len(features)
fcount_uniq = len(set(item[1] for item in features))
dist = pdist(np.array(list(item[1] for item in features)), metric='euclidean')
try:
max_dist = round(max(dist), 1)
except ValueError:
max_dist = 0
data.append([fname.replace('.xyz', ''), h, fcount, fcount_uniq, fstr, max_dist])
    df = pd.DataFrame(data, columns=['filename', 'hash', 'count', 'ucount', 'features', 'max_dist'])
import statsmodels.formula.api as smf
import numpy as np
import torch
from torch import nn
import pandas as pd
import scipy as sp
from tqdm.auto import tqdm
from boardlaw import sql, elos
import aljpy
from pavlov import stats, runs
import pandas as pd
from boardlaw import arena
# All Elos internally go as e^d; Elos in public are in base 10^(d/400)
ELO = 400/np.log(10)
GLOBAL_GAMES = 1024
@aljpy.autocache()
def _trial_elos(boardsize, counter):
# So in the paper we have two evaluation schemes: one where 1024 games are played between all agents,
# and another where >>64k games are played against the best agent. Both of the these evaluation schemes
# are saved in the same database, so to stop the 64k-results skewing everything, we grab the first 1000
# games played by each pair.
trials = (sql.trial_query(boardsize, 'bee/%')
.query('black_wins + white_wins >= 512')
.groupby(['black_agent', 'white_agent'])
.first().reset_index())
ws, gs = elos.symmetrize(trials)
return elos.solve(ws, gs)
def trial_elos(boardsize):
counter = sql.file_change_counter()
return _trial_elos(boardsize, counter)
def load():
ags = sql.agent_query().query('test_c == 1/16')
es = []
for b in tqdm(ags.boardsize.unique()):
es.append(trial_elos(b))
    es = pd.concat(es)
import pandas as pd
import psycopg2
from Results import *
# Connect to database
conn = psycopg2.connect(host='localhost', port=5432, database='postgres')
# Obtain stock price and predicted stock price of top 20 stock components
query = """select l.ticker as ticker, l.tradedate as tradedate,
l.closeprice as price, r.closeprice as predictedprice
from stock.stockprice as l join stock.pred_fbp_price_sp500_base as r
on l.ticker = r.ticker and l.tradedate = r.tradedate
where l.ticker in (
select ticker from stock.stockshareoutstanding
order by shareoutstanding desc limit 20)
and date_part('year', l.tradedate) between 2019 and 2020
order by 1, 2; """
stockprice = pd.io.sql.read_sql(query, conn)
import numpy as np
import pandas as pd
from numba import njit, typeof
from numba.typed import List
from datetime import datetime, timedelta
import pytest
from copy import deepcopy
import vectorbt as vbt
from vectorbt.portfolio.enums import *
from vectorbt.generic.enums import drawdown_dt
from vectorbt.utils.random_ import set_seed
from vectorbt.portfolio import nb
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
price = pd.Series([1., 2., 3., 4., 5.], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]))
price_wide = price.vbt.tile(3, keys=['a', 'b', 'c'])
big_price = pd.DataFrame(np.random.uniform(size=(1000,)))
big_price.index = [datetime(2018, 1, 1) + timedelta(days=i) for i in range(1000)]
big_price_wide = big_price.vbt.tile(1000)
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.portfolio['attach_call_seq'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# nb ############# #
def assert_same_tuple(tup1, tup2):
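    # element-wise comparison that also treats a pair of NaNs as equal (NaN != NaN under ==)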
for i in range(len(tup1)):
assert tup1[i] == tup2[i] or np.isnan(tup1[i]) and np.isnan(tup2[i])
def test_execute_order_nb():
# Errors, ignored and rejected orders
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(-100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.nan, 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.inf, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.nan, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., np.nan, 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., -10., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., np.nan, 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., -100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, -10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=0))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=-10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=np.nan))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., np.nan, 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=3))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., -10., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=4))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(0, 10))
assert exec_state == ExecuteOrderState(cash=100.0, position=10.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=5))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(15, 10, max_size=10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=9))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=1.))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=10))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
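    # Infinite cash combined with an infinite order size cannot be resolved and raises.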
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100, 0., np.inf, np.nan, 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
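    # Fees that exceed the available cash, sizes below min_size, and partial fills with allow_partial=False are rejected as well.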
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(100, 10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-200, 10, direction=Direction.LongOnly, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, fixed_fees=1000))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
    # Calculations: verify the cash, position, debt, free-cash and fee arithmetic of filled orders
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=180.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=909.0, position=-100.0, debt=900.0, free_cash=-891.0)
assert_same_tuple(order_result, OrderResult(
size=100.0, price=9.0, fees=91.0, side=1, status=0, status_info=-1))
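    # Size type conversions: TargetAmount, Value, TargetValue and TargetPercent all resolve to a 10-share trade from this flat state.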
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
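    # SizeType.Percent uses a fraction of the available buying (or shorting) power, starting here from long, flat and short positions.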
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=7.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=125.0, position=-2.5, debt=25.0, free_cash=75.0)
assert_same_tuple(order_result, OrderResult(
size=7.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-2.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=-2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-7.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-10.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
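    # Infinite sizes trade as much as the current cash allows, covering or extending existing positions.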
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(150., -5., 0., 150., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=300.0, position=-20.0, debt=150.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
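    # lock_cash=True shrinks or rejects orders that would drive free cash below zero.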
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=50.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(1000., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 17.5, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=850.0, position=3.571428571428571, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.571428571428571, price=17.5, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 100, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=37.5, position=-4.375, debt=43.75, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=0.625, price=100.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 10., 0., -50., 10., 100., 0, 0),
nb.order_nb(-20, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=150.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 1., 0., -50., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=10.0, position=0.0, debt=0.0, free_cash=-40.0)
assert_same_tuple(order_result, OrderResult(
size=1.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., -100., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=-100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-20, 10, fees=0.1, slippage=0.1, fixed_fees=1., lock_cash=True))
assert exec_state == ExecuteOrderState(cash=80.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
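# The Numba-compiled and pure-Python call sequence builders must return identical arrays for every CallSeqType.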
def test_build_call_seq_nb():
group_lens = np.array([1, 2, 3, 4])
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Default),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Default)
)
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Reversed),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Reversed)
)
set_seed(seed)
out1 = nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Random)
set_seed(seed)
out2 = nb.build_call_seq((10, 10), group_lens, CallSeqType.Random)
np.testing.assert_array_equal(out1, out2)
# ############# from_orders ############# #
order_size = pd.Series([np.inf, -np.inf, np.nan, np.inf, -np.inf], index=price.index)
order_size_wide = order_size.vbt.tile(3, keys=['a', 'b', 'c'])
order_size_one = pd.Series([1, -1, np.nan, 1, -1], index=price.index)
def from_orders_both(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='both', **kwargs)
def from_orders_longonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='longonly', **kwargs)
def from_orders_shortonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='shortonly', **kwargs)
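# The helpers above fix the trade direction so the tests below can replay the same size series long-only, short-only and in both directions.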
class TestFromOrders:
def test_one_column(self):
record_arrays_close(
from_orders_both().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_multiple_columns(self):
record_arrays_close(
from_orders_both(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1), (8, 2, 0, 100.0, 1.0, 0.0, 0),
(9, 2, 1, 100.0, 2.0, 0.0, 1), (10, 2, 3, 50.0, 4.0, 0.0, 0), (11, 2, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0), (4, 2, 0, 100.0, 1.0, 0.0, 1), (5, 2, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both(close=price_wide)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_size_inf(self):
record_arrays_close(
from_orders_both(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_orders_both(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 198.01980198019803, 2.02, 0.0, 1),
(2, 0, 3, 99.00990099009901, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 1),
(2, 0, 3, 49.504950495049506, 4.04, 0.0, 0), (3, 0, 4, 49.504950495049506, 5.05, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1),
(2, 0, 3, 50.0, 4.0, 0.0, 0), (3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 1), (1, 0, 3, 66.66666666666667, 3.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 0), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 1), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 0)
], dtype=order_dt)
)
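    # val_price=np.inf resolves to the current valuation price and -np.inf to the previous one; ffill_val_price controls forward-filling of NaN prices.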
def test_val_price(self):
price_nan = pd.Series([1, 2, np.nan, 4, 5], index=price.index)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
shift_price = price_nan.ffill().shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
shift_price_nan = price_nan.shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
def test_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.2, 0),
(6, 1, 3, 1.0, 4.0, 0.4, 1), (7, 1, 4, 1.0, 5.0, 0.5, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 2.0, 0), (10, 2, 3, 1.0, 4.0, 4.0, 1), (11, 2, 4, 1.0, 5.0, 5.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.1, 0),
(6, 1, 3, 1.0, 4.0, 0.1, 1), (7, 1, 4, 1.0, 5.0, 0.1, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 1.0, 0), (10, 2, 3, 1.0, 4.0, 1.0, 1), (11, 2, 4, 1.0, 5.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_orders_both(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 0.9, 0.0, 1), (5, 1, 1, 1.0, 2.2, 0.0, 0),
(6, 1, 3, 1.0, 3.6, 0.0, 1), (7, 1, 4, 1.0, 5.5, 0.0, 0), (8, 2, 0, 1.0, 0.0, 0.0, 1),
(9, 2, 1, 1.0, 4.0, 0.0, 0), (10, 2, 3, 1.0, 0.0, 0.0, 1), (11, 2, 4, 1.0, 10.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0), (2, 0, 3, 0.5, 4.0, 0.0, 1),
(3, 0, 4, 0.5, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0), (8, 2, 0, 1.0, 1.0, 0.0, 1),
(9, 2, 1, 1.0, 2.0, 0.0, 0), (10, 2, 3, 1.0, 4.0, 0.0, 1), (11, 2, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_orders_both(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 1, 1.0, 2.0, 0.0, 1), (5, 1, 3, 1.0, 4.0, 0.0, 0),
(6, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 3, 1.0, 4.0, 0.0, 0), (5, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 3, 1.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
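    # lock_cash=True keeps (shared) free cash non-negative by shrinking short orders that would otherwise overdraw it.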
def test_lock_cash(self):
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[143.12812469365747, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-49.5, -49.5]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=True, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[94.6034702480149, 47.54435839623566]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[49.5, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[0.0, 0.0]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 100]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[1.4312812469365748, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-96.16606313106556, -96.16606313106556]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 100]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=True, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[0.4699090272918124, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[98.06958012596222, 98.06958012596222]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[0.0, 0.0]
])
)
pf = from_orders_both(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 1000., 2., 0., 1),
(2, 0, 3, 500., 4., 0., 0), (3, 0, 4, 1000., 5., 0., 1),
(4, 1, 0, 100., 1., 0., 0), (5, 1, 1, 200., 2., 0., 1),
(6, 1, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[0.0, 0.0],
[-1600.0, 0.0],
[-1600.0, 0.0],
[-1600.0, 0.0],
[-6600.0, 0.0]
])
)
pf = from_orders_longonly(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 100., 2., 0., 1),
(2, 0, 3, 50., 4., 0., 0), (3, 0, 4, 50., 5., 0., 1),
(4, 1, 0, 100., 1., 0., 0), (5, 1, 1, 100., 2., 0., 1),
(6, 1, 3, 50., 4., 0., 0), (7, 1, 4, 50., 5., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[0.0, 0.0],
[200.0, 200.0],
[200.0, 200.0],
[0.0, 0.0],
[250.0, 250.0]
])
)
pf = from_orders_shortonly(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 1000., 1., 0., 1), (1, 0, 1, 550., 2., 0., 0),
(2, 0, 3, 1000., 4., 0., 1), (3, 0, 4, 800., 5., 0., 0),
(4, 1, 0, 100., 1., 0., 1), (5, 1, 1, 100., 2., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[-900.0, 0.0],
[-900.0, 0.0],
[-900.0, 0.0],
[-4900.0, 0.0],
[-3989.6551724137926, 0.0]
])
)
def test_allow_partial(self):
record_arrays_close(
from_orders_both(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 1000.0, 2.0, 0.0, 1), (2, 0, 3, 500.0, 4.0, 0.0, 0),
(3, 0, 4, 1000.0, 5.0, 0.0, 1), (4, 1, 1, 1000.0, 2.0, 0.0, 1), (5, 1, 4, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 1, 550.0, 2.0, 0.0, 0), (2, 0, 3, 1000.0, 4.0, 0.0, 1),
(3, 0, 4, 800.0, 5.0, 0.0, 0), (4, 1, 0, 1000.0, 1.0, 0.0, 1), (5, 1, 3, 1000.0, 4.0, 0.0, 1),
(6, 1, 4, 1000.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_orders_both(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 1000.0, 2.0, 0.0, 1), (2, 0, 3, 500.0, 4.0, 0.0, 0),
(3, 0, 4, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 1, 550.0, 2.0, 0.0, 0), (2, 0, 3, 1000.0, 4.0, 0.0, 1),
(3, 0, 4, 800.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_orders_both(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_orders_longonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_orders_shortonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
def test_log(self):
record_arrays_close(
from_orders_both(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, np.inf, 1.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0,
100.0, 0.0, 0.0, 1.0, 100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 0, 0, 1, 0.0, 100.0, 0.0, 0.0, 2.0, 200.0, -np.inf, 2.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
-100.0, 200.0, 0.0, 2.0, 200.0, 200.0, 2.0, 0.0, 1, 0, -1, 1),
(2, 0, 0, 2, 400.0, -100.0, 200.0, 0.0, 3.0, 100.0, np.nan, 3.0, 0,
2, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
-100.0, 200.0, 0.0, 3.0, 100.0, np.nan, np.nan, np.nan, -1, 1, 0, -1),
(3, 0, 0, 3, 400.0, -100.0, 200.0, 0.0, 4.0, 0.0, np.inf, 4.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
0.0, 0.0, 4.0, 0.0, 100.0, 4.0, 0.0, 0, 0, -1, 2),
(4, 0, 0, 4, 0.0, 0.0, 0.0, 0.0, 5.0, 0.0, -np.inf, 5.0, 0, 2, 0.0,
0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
0.0, 0.0, 5.0, 0.0, np.nan, np.nan, np.nan, -1, 2, 6, -1)
], dtype=log_dt)
)
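    # group_by merges columns into groups; init_cash is then reported per group, and cash is not shared unless requested.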
def test_group_by(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
def test_cash_sharing(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
with pytest.raises(Exception):
_ = pf.regroup(group_by=False)
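    # call_seq sets the per-row order in which grouped, cash-sharing columns are processed; 'auto' picks an order that executes cash-freeing sells first.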
def test_call_seq(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = from_orders_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = from_orders_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
close=1.,
size=pd.DataFrame([
[0., 0., np.inf],
[0., np.inf, -np.inf],
[np.inf, -np.inf, 0.],
[-np.inf, 0., np.inf],
[0., np.inf, -np.inf],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
pf = from_orders_both(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 200., 1., 0., 1),
(2, 1, 1, 200., 1., 0., 0), (3, 1, 2, 200., 1., 0., 1),
(4, 0, 2, 200., 1., 0., 0), (5, 0, 3, 200., 1., 0., 1),
(6, 2, 3, 200., 1., 0., 0), (7, 2, 4, 200., 1., 0., 1),
(8, 1, 4, 200., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_orders_longonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 100., 1., 0., 1),
(2, 1, 1, 100., 1., 0., 0), (3, 1, 2, 100., 1., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 0, 3, 100., 1., 0., 1),
(6, 2, 3, 100., 1., 0., 0), (7, 2, 4, 100., 1., 0., 1),
(8, 1, 4, 100., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_orders_shortonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 1), (1, 2, 1, 100., 1., 0., 0),
(2, 0, 2, 100., 1., 0., 1), (3, 0, 3, 100., 1., 0., 0),
(4, 1, 4, 100., 1., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 2, 1],
[2, 1, 0],
[1, 0, 2]
])
)
def test_value(self):
record_arrays_close(
from_orders_both(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1),
(2, 0, 3, 0.25, 4.0, 0.0, 0), (3, 0, 4, 0.2, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1),
(2, 0, 3, 0.25, 4.0, 0.0, 0), (3, 0, 4, 0.2, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0),
(2, 0, 3, 0.25, 4.0, 0.0, 1), (3, 0, 4, 0.2, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_target_amount(self):
record_arrays_close(
from_orders_both(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 1, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=75., size_type='targetamount',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 1, 0, 25.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_target_value(self):
record_arrays_close(
from_orders_both(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 2.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 1),
(4, 0, 4, 2.5, 5.0, 0.0, 1), (5, 1, 0, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 25.0, 2.0, 0.0, 0), (7, 1, 2, 8.333333333333332, 3.0, 0.0, 0),
(8, 1, 3, 4.166666666666668, 4.0, 0.0, 0), (9, 1, 4, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 2.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 1),
(4, 0, 4, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 0, 1, 25.0, 2.0, 0.0, 0),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 0), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 0),
(4, 0, 4, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=50., size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 50.0, 1.0, 0.0, 0),
(2, 0, 1, 25.0, 2.0, 0.0, 1), (3, 1, 1, 25.0, 2.0, 0.0, 1),
(4, 2, 1, 25.0, 2.0, 0.0, 0), (5, 0, 2, 8.333333333333332, 3.0, 0.0, 1),
(6, 1, 2, 8.333333333333332, 3.0, 0.0, 1), (7, 2, 2, 8.333333333333332, 3.0, 0.0, 1),
(8, 0, 3, 4.166666666666668, 4.0, 0.0, 1), (9, 1, 3, 4.166666666666668, 4.0, 0.0, 1),
(10, 2, 3, 4.166666666666668, 4.0, 0.0, 1), (11, 0, 4, 2.5, 5.0, 0.0, 1),
(12, 1, 4, 2.5, 5.0, 0.0, 1), (13, 2, 4, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
def test_target_percent(self):
record_arrays_close(
from_orders_both(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 1), (2, 0, 2, 6.25, 3.0, 0.0, 1),
(3, 0, 3, 3.90625, 4.0, 0.0, 1), (4, 0, 4, 2.734375, 5.0, 0.0, 1), (5, 1, 0, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 37.5, 2.0, 0.0, 0), (7, 1, 2, 6.25, 3.0, 0.0, 0), (8, 1, 3, 2.34375, 4.0, 0.0, 0),
(9, 1, 4, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 1), (2, 0, 2, 6.25, 3.0, 0.0, 1),
(3, 0, 3, 3.90625, 4.0, 0.0, 1), (4, 0, 4, 2.734375, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 0, 1, 37.5, 2.0, 0.0, 0), (2, 0, 2, 6.25, 3.0, 0.0, 0),
(3, 0, 3, 2.34375, 4.0, 0.0, 0), (4, 0, 4, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 50.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
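    # update_value=True revalues the group after every filled order; here it only changes the result when columns share cash.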
def test_update_value(self):
record_arrays_close(
from_orders_both(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
update_value=False).order_records,
from_orders_both(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
update_value=True).order_records
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=False).order_records,
np.array([
(0, 0, 0, 50.0, 1.01, 0.505, 0),
(1, 1, 0, 48.02960494069208, 1.01, 0.485099009900992, 0),
(2, 0, 1, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
(3, 1, 1, 0.9465661198057499, 2.02, 0.019120635620076154, 0),
(4, 0, 2, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
(5, 1, 2, 0.018558300554959377, 3.0300000000000002, 0.0005623165068152705, 0),
(6, 0, 3, 0.00037870218456959037, 3.96, 1.4996606508955778e-05, 1),
(7, 1, 3, 0.0003638525743521767, 4.04, 1.4699644003827875e-05, 0),
(8, 0, 4, 7.424805112066224e-06, 4.95, 3.675278530472781e-07, 1),
(9, 1, 4, 7.133664827307231e-06, 5.05, 3.6025007377901643e-07, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.01, 0.505, 0),
(1, 1, 0, 48.02960494069208, 1.01, 0.485099009900992, 0),
(2, 0, 1, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
(3, 1, 1, 0.7303208018821721, 2.02, 0.014752480198019875, 0),
(4, 2, 1, 0.21624531792357785, 2.02, 0.0043681554220562635, 0),
(5, 0, 2, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
(6, 1, 2, 0.009608602243410758, 2.9699999999999998, 0.00028537548662929945, 1),
(7, 2, 2, 0.02779013180558861, 3.0300000000000002, 0.0008420409937093393, 0),
(8, 0, 3, 0.0005670876809631409, 3.96, 2.2456672166140378e-05, 1),
(9, 1, 3, 0.00037770350099464167, 3.96, 1.4957058639387809e-05, 1),
(10, 2, 3, 0.0009077441794302741, 4.04, 3.6672864848982974e-05, 0),
(11, 0, 4, 1.8523501267964093e-05, 4.95, 9.169133127642227e-07, 1),
(12, 1, 4, 1.2972670177191503e-05, 4.95, 6.421471737709794e-07, 1),
(13, 2, 4, 3.0261148547590434e-05, 5.05, 1.5281880016533242e-06, 0)
], dtype=order_dt)
)
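    # size_type='percent' orders the given fraction of the available buying (or shorting) power at each bar.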
def test_percent(self):
record_arrays_close(
from_orders_both(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 12.5, 2., 0., 0),
(2, 0, 2, 4.16666667, 3., 0., 0), (3, 0, 3, 1.5625, 4., 0., 0),
(4, 0, 4, 0.625, 5., 0., 0), (5, 1, 0, 50., 1., 0., 1),
(6, 1, 1, 12.5, 2., 0., 1), (7, 1, 2, 4.16666667, 3., 0., 1),
(8, 1, 3, 1.5625, 4., 0., 1), (9, 1, 4, 0.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 12.5, 2., 0., 0),
(2, 0, 2, 4.16666667, 3., 0., 0), (3, 0, 3, 1.5625, 4., 0., 0),
(4, 0, 4, 0.625, 5., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 1), (1, 0, 1, 12.5, 2., 0., 1),
(2, 0, 2, 4.16666667, 3., 0., 1), (3, 0, 3, 1.5625, 4., 0., 1),
(4, 0, 4, 0.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 5.00000000e+01, 1., 0., 0), (1, 1, 0, 2.50000000e+01, 1., 0., 0),
(2, 2, 0, 1.25000000e+01, 1., 0., 0), (3, 0, 1, 3.12500000e+00, 2., 0., 0),
(4, 1, 1, 1.56250000e+00, 2., 0., 0), (5, 2, 1, 7.81250000e-01, 2., 0., 0),
(6, 0, 2, 2.60416667e-01, 3., 0., 0), (7, 1, 2, 1.30208333e-01, 3., 0., 0),
(8, 2, 2, 6.51041667e-02, 3., 0., 0), (9, 0, 3, 2.44140625e-02, 4., 0., 0),
(10, 1, 3, 1.22070312e-02, 4., 0., 0), (11, 2, 3, 6.10351562e-03, 4., 0., 0),
(12, 0, 4, 2.44140625e-03, 5., 0., 0), (13, 1, 4, 1.22070312e-03, 5., 0., 0),
(14, 2, 4, 6.10351562e-04, 5., 0., 0)
], dtype=order_dt)
)
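    # With call_seq='auto', rebalancing towards target values or percentages reaches the targets exactly despite cash sharing.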
def test_auto_seq(self):
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
pd.testing.assert_frame_equal(
from_orders_both(
close=1., size=target_hold_value, size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
pd.testing.assert_frame_equal(
from_orders_both(
close=1., size=target_hold_value / 100, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
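    # Record arrays are pre-allocated: max_orders or max_logs below the number of records actually generated raises.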
def test_max_orders(self):
_ = from_orders_both(close=price_wide)
_ = from_orders_both(close=price_wide, max_orders=9)
with pytest.raises(Exception):
_ = from_orders_both(close=price_wide, max_orders=8)
def test_max_logs(self):
_ = from_orders_both(close=price_wide, log=True)
_ = from_orders_both(close=price_wide, log=True, max_logs=15)
with pytest.raises(Exception):
_ = from_orders_both(close=price_wide, log=True, max_logs=14)
# ############# from_signals ############# #
entries = pd.Series([True, True, True, False, False], index=price.index)
entries_wide = entries.vbt.tile(3, keys=['a', 'b', 'c'])
exits = pd.Series([False, False, True, True, True], index=price.index)
exits_wide = exits.vbt.tile(3, keys=['a', 'b', 'c'])
def from_signals_both(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='both', **kwargs)
def from_signals_longonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='longonly', **kwargs)
def from_signals_shortonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='shortonly', **kwargs)
def from_ls_signals_both(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, False, exits, False, **kwargs)
def from_ls_signals_longonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, False, False, **kwargs)
def from_ls_signals_shortonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, False, False, entries, exits, **kwargs)
class TestFromSignals:
@pytest.mark.parametrize(
"test_ls",
[False, True],
)
def test_one_column(self, test_ls):
_from_signals_both = from_ls_signals_both if test_ls else from_signals_both
_from_signals_longonly = from_ls_signals_longonly if test_ls else from_signals_longonly
_from_signals_shortonly = from_ls_signals_shortonly if test_ls else from_signals_shortonly
record_arrays_close(
_from_signals_both().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_longonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_shortonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 0, 3, 50., 4., 0., 0)
], dtype=order_dt)
)
pf = _from_signals_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
@pytest.mark.parametrize(
"test_ls",
[False, True],
)
def test_multiple_columns(self, test_ls):
_from_signals_both = from_ls_signals_both if test_ls else from_signals_both
_from_signals_longonly = from_ls_signals_longonly if test_ls else from_signals_longonly
_from_signals_shortonly = from_ls_signals_shortonly if test_ls else from_signals_shortonly
record_arrays_close(
_from_signals_both(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 1, 0, 100., 1., 0., 0), (3, 1, 3, 200., 4., 0., 1),
(4, 2, 0, 100., 1., 0., 0), (5, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_longonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 100., 4., 0., 1),
(2, 1, 0, 100., 1., 0., 0), (3, 1, 3, 100., 4., 0., 1),
(4, 2, 0, 100., 1., 0., 0), (5, 2, 3, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_shortonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 0, 3, 50., 4., 0., 0),
(2, 1, 0, 100., 1., 0., 1), (3, 1, 3, 50., 4., 0., 0),
(4, 2, 0, 100., 1., 0., 1), (5, 2, 3, 50., 4., 0., 0)
], dtype=order_dt)
)
pf = _from_signals_both(close=price_wide)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_custom_signal_func(self):
@njit
def signal_func_nb(c, long_num_arr, short_num_arr):
long_num = nb.get_elem_nb(c, long_num_arr)
short_num = nb.get_elem_nb(c, short_num_arr)
is_long_entry = long_num > 0
is_long_exit = long_num < 0
is_short_entry = short_num > 0
is_short_exit = short_num < 0
return is_long_entry, is_long_exit, is_short_entry, is_short_exit
pf_base = vbt.Portfolio.from_signals(
pd.Series([1, 2, 3, 4, 5]),
entries=pd.Series([True, False, False, False, False]),
exits=pd.Series([False, False, True, False, False]),
short_entries=pd.Series([False, True, False, True, False]),
short_exits=pd.Series([False, False, False, False, True]),
size=1,
upon_opposite_entry='ignore'
)
pf = vbt.Portfolio.from_signals(
pd.Series([1, 2, 3, 4, 5]),
signal_func_nb=signal_func_nb,
signal_args=(vbt.Rep('long_num_arr'), vbt.Rep('short_num_arr')),
broadcast_named_args=dict(
long_num_arr=pd.Series([1, 0, -1, 0, 0]),
short_num_arr=pd.Series([0, 1, 0, 1, -1])
),
size=1,
upon_opposite_entry='ignore'
)
record_arrays_close(
pf_base.order_records,
pf.order_records
)
def test_amount(self):
record_arrays_close(
from_signals_both(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 2.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 1), (1, 1, 3, 1.0, 4.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 1), (3, 2, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_value(self):
record_arrays_close(
from_signals_both(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 0.3125, 4.0, 0.0, 1),
(2, 1, 4, 0.1775, 5.0, 0.0, 1), (3, 2, 0, 100.0, 1.0, 0.0, 0),
(4, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 1), (1, 1, 3, 1.0, 4.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 1), (3, 2, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_percent(self):
with pytest.raises(Exception):
_ = from_signals_both(size=0.5, size_type='percent')
record_arrays_close(
from_signals_both(size=0.5, size_type='percent', upon_opposite_entry='close').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 3, 50., 4., 0., 1), (2, 0, 4, 25., 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(size=0.5, size_type='percent', upon_opposite_entry='close',
accumulate=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 0),
(2, 0, 3, 62.5, 4.0, 0.0, 1), (3, 0, 4, 27.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 3, 50., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 1), (1, 0, 3, 37.5, 4., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 25., 1., 0., 0),
(2, 2, 0, 12.5, 1., 0., 0), (3, 0, 3, 50., 4., 0., 1),
(4, 1, 3, 25., 4., 0., 1), (5, 2, 3, 12.5, 4., 0., 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_signals_both(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 3, 198.01980198019803, 4.04, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099, 1.01, 0., 0), (1, 0, 3, 99.00990099, 4.04, 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 0, 3, 49.504950495049506, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 1), (1, 0, 3, 66.66666666666667, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_val_price(self):
price_nan = pd.Series([1, 2, np.nan, 4, 5], index=price.index)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_both(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_longonly(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
shift_price = price_nan.ffill().shift(1)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_both(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_longonly(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_both(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_longonly(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
shift_price_nan = price_nan.shift(1)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_both(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_longonly(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
def test_fees(self):
record_arrays_close(
from_signals_both(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 2.0, 4.0, 0.8, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 2.0, 4.0, 8.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 1.0, 4.0, 0.4, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 1.0, 4.0, 4.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.1, 1),
(3, 1, 3, 1.0, 4.0, 0.4, 0), (4, 2, 0, 1.0, 1.0, 1.0, 1), (5, 2, 3, 1.0, 4.0, 4.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_signals_both(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 2.0, 4.0, 0.1, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 2.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 1.0, 4.0, 0.1, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 1.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.1, 1),
(3, 1, 3, 1.0, 4.0, 0.1, 0), (4, 2, 0, 1.0, 1.0, 1.0, 1), (5, 2, 3, 1.0, 4.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_signals_both(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.1, 0.0, 0),
(3, 1, 3, 2.0, 3.6, 0.0, 1), (4, 2, 0, 1.0, 2.0, 0.0, 0), (5, 2, 3, 2.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.1, 0.0, 0),
(3, 1, 3, 1.0, 3.6, 0.0, 1), (4, 2, 0, 1.0, 2.0, 0.0, 0), (5, 2, 3, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 0.9, 0.0, 1),
(3, 1, 3, 1.0, 4.4, 0.0, 0), (4, 2, 0, 1.0, 0.0, 0.0, 1), (5, 2, 3, 1.0, 8.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_signals_both(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_signals_both(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 3, 0.5, 4.0, 0.0, 1), (2, 0, 4, 0.5, 5.0, 0.0, 1),
(3, 1, 0, 1.0, 1.0, 0.0, 0), (4, 1, 3, 1.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 1),
(6, 2, 0, 1.0, 1.0, 0.0, 0), (7, 2, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 3, 0.5, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1), (4, 2, 0, 1.0, 1.0, 0.0, 0), (5, 2, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 0, 3, 0.5, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0), (4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_signals_both(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 1, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_allow_partial(self):
record_arrays_close(
from_signals_both(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 1100.0, 4.0, 0.0, 1), (2, 1, 3, 1000.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 3, 275.0, 4.0, 0.0, 0), (2, 1, 0, 1000.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 3, 50.0, 4.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_signals_both(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 1100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_signals_shortonly(size=1000, allow_partial=True, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_both(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_longonly(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_shortonly(size=1000, allow_partial=False, raise_reject=True).order_records
def test_log(self):
record_arrays_close(
from_signals_both(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, np.inf, 1.0, 0, 2, 0.0, 0.0,
0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 100.0, 0.0, 0.0, 1.0,
100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 0, 0, 3, 0.0, 100.0, 0.0, 0.0, 4.0, 400.0, -np.inf, 4.0, 0, 2, 0.0,
0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 800.0, -100.0,
400.0, 0.0, 4.0, 400.0, 200.0, 4.0, 0.0, 1, 0, -1, 1)
], dtype=log_dt)
)
def test_accumulate(self):
record_arrays_close(
from_signals_both(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 1, 1.0, 2.0, 0.0, 0), (4, 1, 3, 3.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 1),
(6, 2, 0, 1.0, 1.0, 0.0, 0), (7, 2, 3, 1.0, 4.0, 0.0, 1), (8, 2, 4, 1.0, 5.0, 0.0, 1),
(9, 3, 0, 1.0, 1.0, 0.0, 0), (10, 3, 1, 1.0, 2.0, 0.0, 0), (11, 3, 3, 1.0, 4.0, 0.0, 1),
(12, 3, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 1, 1.0, 2.0, 0.0, 0), (4, 1, 3, 2.0, 4.0, 0.0, 1), (5, 2, 0, 1.0, 1.0, 0.0, 0),
(6, 2, 3, 1.0, 4.0, 0.0, 1), (7, 3, 0, 1.0, 1.0, 0.0, 0), (8, 3, 1, 1.0, 2.0, 0.0, 0),
(9, 3, 3, 1.0, 4.0, 0.0, 1), (10, 3, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 1, 1.0, 2.0, 0.0, 1), (4, 1, 3, 2.0, 4.0, 0.0, 0), (5, 2, 0, 1.0, 1.0, 0.0, 1),
(6, 2, 3, 1.0, 4.0, 0.0, 0), (7, 3, 0, 1.0, 1.0, 0.0, 1), (8, 3, 1, 1.0, 2.0, 0.0, 1),
(9, 3, 3, 1.0, 4.0, 0.0, 0), (10, 3, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_long_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_long_conflict=[[
'ignore',
'entry',
'exit',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_longonly(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 0), (3, 1, 2, 1.0, 3.0, 0.0, 0),
(4, 2, 1, 1.0, 2.0, 0.0, 0), (5, 2, 2, 1.0, 3.0, 0.0, 1),
(6, 3, 1, 1.0, 2.0, 0.0, 0), (7, 3, 2, 1.0, 3.0, 0.0, 0),
(8, 5, 1, 1.0, 2.0, 0.0, 0), (9, 5, 2, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_upon_short_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_short_conflict=[[
'ignore',
'entry',
'exit',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_shortonly(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 1),
(1, 1, 0, 1.0, 1.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 1), (3, 1, 2, 1.0, 3.0, 0.0, 1),
(4, 2, 1, 1.0, 2.0, 0.0, 1), (5, 2, 2, 1.0, 3.0, 0.0, 0),
(6, 3, 1, 1.0, 2.0, 0.0, 1), (7, 3, 2, 1.0, 3.0, 0.0, 1),
(8, 5, 1, 1.0, 2.0, 0.0, 1), (9, 5, 2, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_dir_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_dir_conflict=[[
'ignore',
'long',
'short',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_both(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 0), (3, 1, 2, 1.0, 3.0, 0.0, 0),
(4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 1, 1.0, 2.0, 0.0, 0), (6, 2, 2, 1.0, 3.0, 0.0, 1),
(7, 3, 1, 1.0, 2.0, 0.0, 0), (8, 3, 2, 1.0, 3.0, 0.0, 0),
(9, 4, 1, 1.0, 2.0, 0.0, 1), (10, 4, 2, 1.0, 3.0, 0.0, 1),
(11, 5, 1, 1.0, 2.0, 0.0, 0), (12, 5, 2, 1.0, 3.0, 0.0, 1),
(13, 6, 1, 1.0, 2.0, 0.0, 1), (14, 6, 2, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_opposite_entry(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, False, True, False, True, False, True, False, True, False],
[False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, False, True, False, True, False]
]),
exits=pd.DataFrame([
[False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, False, True, False, True, False],
[False, True, False, True, False, True, False, True, False, True]
]),
size=1.,
upon_opposite_entry=[[
'ignore',
'ignore',
'close',
'close',
'closereduce',
'closereduce',
'reverse',
'reverse',
'reversereduce',
'reversereduce'
]]
)
record_arrays_close(
from_signals_both(**kwargs).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 1),
(2, 2, 0, 1.0, 1.0, 0.0, 0), (3, 2, 1, 1.0, 2.0, 0.0, 1), (4, 2, 2, 1.0, 3.0, 0.0, 0),
(5, 3, 0, 1.0, 1.0, 0.0, 1), (6, 3, 1, 1.0, 2.0, 0.0, 0), (7, 3, 2, 1.0, 3.0, 0.0, 1),
(8, 4, 0, 1.0, 1.0, 0.0, 0), (9, 4, 1, 1.0, 2.0, 0.0, 1), (10, 4, 2, 1.0, 3.0, 0.0, 0),
(11, 5, 0, 1.0, 1.0, 0.0, 1), (12, 5, 1, 1.0, 2.0, 0.0, 0), (13, 5, 2, 1.0, 3.0, 0.0, 1),
(14, 6, 0, 1.0, 1.0, 0.0, 0), (15, 6, 1, 2.0, 2.0, 0.0, 1), (16, 6, 2, 2.0, 3.0, 0.0, 0),
(17, 7, 0, 1.0, 1.0, 0.0, 1), (18, 7, 1, 2.0, 2.0, 0.0, 0), (19, 7, 2, 2.0, 3.0, 0.0, 1),
(20, 8, 0, 1.0, 1.0, 0.0, 0), (21, 8, 1, 2.0, 2.0, 0.0, 1), (22, 8, 2, 2.0, 3.0, 0.0, 0),
(23, 9, 0, 1.0, 1.0, 0.0, 1), (24, 9, 1, 2.0, 2.0, 0.0, 0), (25, 9, 2, 2.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(**kwargs, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 2, 1.0, 3.0, 0.0, 0),
(2, 1, 0, 1.0, 1.0, 0.0, 1), (3, 1, 2, 1.0, 3.0, 0.0, 1),
(4, 2, 0, 1.0, 1.0, 0.0, 0), (5, 2, 1, 1.0, 2.0, 0.0, 1), (6, 2, 2, 1.0, 3.0, 0.0, 0),
(7, 3, 0, 1.0, 1.0, 0.0, 1), (8, 3, 1, 1.0, 2.0, 0.0, 0), (9, 3, 2, 1.0, 3.0, 0.0, 1),
(10, 4, 0, 1.0, 1.0, 0.0, 0), (11, 4, 1, 1.0, 2.0, 0.0, 1), (12, 4, 2, 1.0, 3.0, 0.0, 0),
(13, 5, 0, 1.0, 1.0, 0.0, 1), (14, 5, 1, 1.0, 2.0, 0.0, 0), (15, 5, 2, 1.0, 3.0, 0.0, 1),
(16, 6, 0, 1.0, 1.0, 0.0, 0), (17, 6, 1, 2.0, 2.0, 0.0, 1), (18, 6, 2, 2.0, 3.0, 0.0, 0),
(19, 7, 0, 1.0, 1.0, 0.0, 1), (20, 7, 1, 2.0, 2.0, 0.0, 0), (21, 7, 2, 2.0, 3.0, 0.0, 1),
(22, 8, 0, 1.0, 1.0, 0.0, 0), (23, 8, 1, 1.0, 2.0, 0.0, 1), (24, 8, 2, 1.0, 3.0, 0.0, 0),
(25, 9, 0, 1.0, 1.0, 0.0, 1), (26, 9, 1, 1.0, 2.0, 0.0, 0), (27, 9, 2, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_init_cash(self):
record_arrays_close(
from_signals_both(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 3, 1.0, 4.0, 0.0, 1), (1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 3, 2.0, 4.0, 0.0, 1),
(3, 2, 0, 1.0, 1.0, 0.0, 0), (4, 2, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1), (2, 2, 0, 1.0, 1.0, 0.0, 0),
(3, 2, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 0.25, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 0.5, 4.0, 0.0, 0), (4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_signals_both(init_cash=np.inf).order_records
with pytest.raises(Exception):
_ = from_signals_longonly(init_cash=np.inf).order_records
with pytest.raises(Exception):
_ = from_signals_shortonly(init_cash=np.inf).order_records
def test_group_by(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 200.0, 4.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
def test_cash_sharing(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
with pytest.raises(Exception):
_ = pf.regroup(group_by=False)
def test_call_seq(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = from_signals_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = from_signals_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
close=1.,
entries=pd.DataFrame([
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
[False, True, False],
]),
exits=pd.DataFrame([
[False, False, False],
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
pf = from_signals_both(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 200., 1., 0., 1),
(2, 1, 1, 200., 1., 0., 0), (3, 1, 2, 200., 1., 0., 1),
(4, 0, 2, 200., 1., 0., 0), (5, 0, 3, 200., 1., 0., 1),
(6, 2, 3, 200., 1., 0., 0), (7, 2, 4, 200., 1., 0., 1),
(8, 1, 4, 200., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_signals_longonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 100., 1., 0., 1),
(2, 1, 1, 100., 1., 0., 0), (3, 1, 2, 100., 1., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 0, 3, 100., 1., 0., 1),
(6, 2, 3, 100., 1., 0., 0), (7, 2, 4, 100., 1., 0., 1),
(8, 1, 4, 100., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_signals_shortonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 1), (1, 2, 1, 100., 1., 0., 0),
(2, 0, 2, 100., 1., 0., 1), (3, 0, 3, 100., 1., 0., 0),
(4, 1, 4, 100., 1., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 1, 2],
[2, 1, 0],
[1, 0, 2]
])
)
pf = from_signals_longonly(**kwargs, size=1., size_type='percent')
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100.0, 1.0, 0.0, 0), (1, 2, 1, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
(3, 1, 2, 100.0, 1.0, 0.0, 1), (4, 0, 2, 100.0, 1.0, 0.0, 0), (5, 0, 3, 100.0, 1.0, 0.0, 1),
(6, 2, 3, 100.0, 1.0, 0.0, 0), (7, 2, 4, 100.0, 1.0, 0.0, 1), (8, 1, 4, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 0, 2],
[0, 1, 2],
[2, 0, 1]
])
)
def test_sl_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(sl_stop=-0.1)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0), (2, 1, 1, 20.0, 4.0, 0.0, 1),
(3, 2, 0, 20.0, 5.0, 0.0, 0), (4, 2, 3, 20.0, 2.0, 0.0, 1),
(5, 3, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1),
(2, 2, 0, 20.0, 5.0, 0.0, 1),
(3, 3, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0), (2, 1, 1, 20.0, 4.25, 0.0, 1),
(3, 2, 0, 20.0, 5.0, 0.0, 0), (4, 2, 1, 20.0, 4.25, 0.0, 1),
(5, 3, 0, 20.0, 5.0, 0.0, 0), (6, 3, 1, 20.0, 4.0, 0.0, 1),
(7, 4, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1),
(2, 2, 0, 20.0, 5.0, 0.0, 1),
(3, 3, 0, 20.0, 5.0, 0.0, 1),
(4, 4, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([1., 2., 3., 4., 5.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0),
(3, 3, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 2.0, 0.0, 0),
(3, 2, 0, 100.0, 1.0, 0.0, 1), (4, 2, 3, 50.0, 4.0, 0.0, 0),
(5, 3, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0),
(3, 3, 0, 100.0, 1.0, 0.0, 0),
(4, 4, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.75, 0.0, 0),
(3, 2, 0, 100.0, 1.0, 0.0, 1), (4, 2, 1, 100.0, 1.75, 0.0, 0),
(5, 3, 0, 100.0, 1.0, 0.0, 1), (6, 3, 1, 100.0, 2.0, 0.0, 0),
(7, 4, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_ts_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(ts_stop=-0.1)
close = pd.Series([4., 5., 4., 3., 2.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 0),
(1, 1, 0, 25.0, 4.0, 0.0, 0), (2, 1, 2, 25.0, 4.0, 0.0, 1),
(3, 2, 0, 25.0, 4.0, 0.0, 0), (4, 2, 4, 25.0, 2.0, 0.0, 1),
(5, 3, 0, 25.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 1),
(1, 1, 0, 25.0, 4.0, 0.0, 1), (2, 1, 1, 25.0, 5.0, 0.0, 0),
(3, 2, 0, 25.0, 4.0, 0.0, 1),
(4, 3, 0, 25.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.15, 0.2, 0.25, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 0),
(1, 1, 0, 25.0, 4.0, 0.0, 0), (2, 1, 2, 25.0, 4.25, 0.0, 1),
(3, 2, 0, 25.0, 4.0, 0.0, 0), (4, 2, 2, 25.0, 4.25, 0.0, 1),
(5, 3, 0, 25.0, 4.0, 0.0, 0), (6, 3, 2, 25.0, 4.125, 0.0, 1),
(7, 4, 0, 25.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.15, 0.2, 0.25, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 1),
(1, 1, 0, 25.0, 4.0, 0.0, 1), (2, 1, 1, 25.0, 5.25, 0.0, 0),
(3, 2, 0, 25.0, 4.0, 0.0, 1), (4, 2, 1, 25.0, 5.25, 0.0, 0),
(5, 3, 0, 25.0, 4.0, 0.0, 1), (6, 3, 1, 25.0, 5.25, 0.0, 0),
(7, 4, 0, 25.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([2., 1., 2., 3., 4.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0),
(1, 1, 0, 50.0, 2.0, 0.0, 0), (2, 1, 1, 50.0, 1.0, 0.0, 1),
(3, 2, 0, 50.0, 2.0, 0.0, 0),
(4, 3, 0, 50.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 1),
(1, 1, 0, 50.0, 2.0, 0.0, 1), (2, 1, 2, 50.0, 2.0, 0.0, 0),
(3, 2, 0, 50.0, 2.0, 0.0, 1), (4, 2, 4, 50.0, 4.0, 0.0, 0),
(5, 3, 0, 50.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0),
(1, 1, 0, 50.0, 2.0, 0.0, 0), (2, 1, 1, 50.0, 0.75, 0.0, 1),
(3, 2, 0, 50.0, 2.0, 0.0, 0), (4, 2, 1, 50.0, 0.5, 0.0, 1),
(5, 3, 0, 50.0, 2.0, 0.0, 0),
(6, 4, 0, 50.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 1),
(1, 1, 0, 50.0, 2.0, 0.0, 1), (2, 1, 2, 50.0, 1.75, 0.0, 0),
(3, 2, 0, 50.0, 2.0, 0.0, 1), (4, 2, 2, 50.0, 1.75, 0.0, 0),
(5, 3, 0, 50.0, 2.0, 0.0, 1), (6, 3, 2, 50.0, 1.75, 0.0, 0),
(7, 4, 0, 50.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
def test_tp_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(sl_stop=-0.1)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0),
(2, 2, 0, 20.0, 5.0, 0.0, 0),
(3, 3, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1), (2, 1, 1, 20.0, 4.0, 0.0, 0),
(3, 2, 0, 20.0, 5.0, 0.0, 1), (4, 2, 3, 20.0, 2.0, 0.0, 0),
(5, 3, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0),
(2, 2, 0, 20.0, 5.0, 0.0, 0),
(3, 3, 0, 20.0, 5.0, 0.0, 0),
(4, 4, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1), (2, 1, 1, 20.0, 4.25, 0.0, 0),
(3, 2, 0, 20.0, 5.0, 0.0, 1), (4, 2, 1, 20.0, 4.25, 0.0, 0),
(5, 3, 0, 20.0, 5.0, 0.0, 1), (6, 3, 1, 20.0, 4.0, 0.0, 0),
(7, 4, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([1., 2., 3., 4., 5.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0), (2, 1, 1, 100.0, 2.0, 0.0, 1),
(3, 2, 0, 100.0, 1.0, 0.0, 0), (4, 2, 3, 100.0, 4.0, 0.0, 1),
(5, 3, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 1),
(3, 3, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0), (2, 1, 1, 100.0, 1.75, 0.0, 1),
(3, 2, 0, 100.0, 1.0, 0.0, 0), (4, 2, 1, 100.0, 1.75, 0.0, 1),
(5, 3, 0, 100.0, 1.0, 0.0, 0), (6, 3, 1, 100.0, 2.0, 0.0, 1),
(7, 4, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 1),
(3, 3, 0, 100.0, 1.0, 0.0, 1),
(4, 4, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_stop_entry_price(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='val_price',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.625, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='price',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.75, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='fillprice',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 3.0250000000000004, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 3, 16.52892561983471, 1.5125000000000002, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='close',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.5, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
def test_stop_exit_price(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 4.25, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.5, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='stopmarket', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.825, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.25, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 1.125, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='close', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.6, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.7, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 0.9, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='price', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.9600000000000004, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.97, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 0.9900000000000001, 0.0, 1)
], dtype=order_dt)
)
def test_upon_stop_exit(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits, size=1,
sl_stop=0.1, upon_stop_exit=[['close', 'closereduce', 'reverse', 'reversereduce']],
accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 1),
(2, 1, 0, 1.0, 5.0, 0.0, 0), (3, 1, 1, 1.0, 4.0, 0.0, 1),
(4, 2, 0, 1.0, 5.0, 0.0, 0), (5, 2, 1, 2.0, 4.0, 0.0, 1),
(6, 3, 0, 1.0, 5.0, 0.0, 0), (7, 3, 1, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits, size=1,
sl_stop=0.1, upon_stop_exit=[['close', 'closereduce', 'reverse', 'reversereduce']]).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 1),
(2, 1, 0, 1.0, 5.0, 0.0, 0), (3, 1, 1, 1.0, 4.0, 0.0, 1),
(4, 2, 0, 1.0, 5.0, 0.0, 0), (5, 2, 1, 2.0, 4.0, 0.0, 1),
(6, 3, 0, 1.0, 5.0, 0.0, 0), (7, 3, 1, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
def test_upon_stop_update(self):
entries = pd.Series([True, True, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
sl_stop = pd.Series([0.4, np.nan, np.nan, np.nan, np.nan])
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits, accumulate=True, size=1.,
sl_stop=sl_stop, upon_stop_update=[['keep', 'override', 'overridenan']]).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 0), (2, 0, 2, 2.0, 3.0, 0.0, 1),
(3, 1, 0, 1.0, 5.0, 0.0, 0), (4, 1, 1, 1.0, 4.0, 0.0, 0), (5, 1, 2, 2.0, 3.0, 0.0, 1),
(6, 2, 0, 1.0, 5.0, 0.0, 0), (7, 2, 1, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
sl_stop = pd.Series([0.4, 0.4, np.nan, np.nan, np.nan])
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits, accumulate=True, size=1.,
sl_stop=sl_stop, upon_stop_update=[['keep', 'override']]).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 0), (2, 0, 2, 2.0, 3.0, 0.0, 1),
(3, 1, 0, 1.0, 5.0, 0.0, 0), (4, 1, 1, 1.0, 4.0, 0.0, 0), (5, 1, 3, 2.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
def test_adjust_sl_func(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
@njit
def adjust_sl_func_nb(c, dur):
return 0. if c.i - c.init_i >= dur else c.curr_stop, c.curr_trail
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=np.inf, adjust_sl_func_nb=adjust_sl_func_nb, adjust_sl_args=(2,)).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0), (1, 0, 2, 20.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_adjust_ts_func(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([10., 11., 12., 11., 10.], index=price.index)
@njit
def adjust_sl_func_nb(c, dur):
return 0. if c.i - c.curr_i >= dur else c.curr_stop, c.curr_trail
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=np.inf, adjust_sl_func_nb=adjust_sl_func_nb, adjust_sl_args=(2,)).order_records,
np.array([
(0, 0, 0, 10.0, 10.0, 0.0, 0), (1, 0, 4, 10.0, 10.0, 0.0, 1)
], dtype=order_dt)
)
def test_adjust_tp_func(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([1., 2., 3., 4., 5.], index=price.index)
@njit
def adjust_tp_func_nb(c, dur):
return 0. if c.i - c.init_i >= dur else c.curr_stop
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=np.inf, adjust_tp_func_nb=adjust_tp_func_nb, adjust_tp_args=(2,)).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 2, 100.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_max_orders(self):
_ = from_signals_both(close=price_wide)
_ = from_signals_both(close=price_wide, max_orders=6)
with pytest.raises(Exception):
_ = from_signals_both(close=price_wide, max_orders=5)
def test_max_logs(self):
_ = from_signals_both(close=price_wide, log=True)
_ = from_signals_both(close=price_wide, log=True, max_logs=6)
with pytest.raises(Exception):
_ = from_signals_both(close=price_wide, log=True, max_logs=5)
# ############# from_holding ############# #
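# Portfolio.from_holding simply buys on the first bar and holds, so it must match
# from_signals with an always-on entry, no exits and accumulation disabled.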
class TestFromHolding:
def test_from_holding(self):
record_arrays_close(
vbt.Portfolio.from_holding(price).order_records,
vbt.Portfolio.from_signals(price, True, False, accumulate=False).order_records
)
# ############# from_random_signals ############# #
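# Portfolio.from_random_signals draws entry/exit signals either as a fixed count per column (n)
# or from per-bar probabilities (prob); with a fixed seed, the result is compared against
# from_signals called with the equivalent explicit signal arrays.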
class TestFromRandomSignals:
def test_from_random_n(self):
result = vbt.Portfolio.from_random_signals(price, n=2, seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[True, False, True, False, False],
[False, True, False, False, True]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
price.vbt.wrapper.index
)
pd.testing.assert_index_equal(
result.wrapper.columns,
price.vbt.wrapper.columns
)
result = vbt.Portfolio.from_random_signals(price, n=[1, 2], seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[[False, True], [True, False], [False, True], [False, False], [False, False]],
[[False, False], [False, True], [False, False], [False, True], [True, False]]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
pd.DatetimeIndex([
'2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
], dtype='datetime64[ns]', freq=None)
)
pd.testing.assert_index_equal(
result.wrapper.columns,
pd.Int64Index([1, 2], dtype='int64', name='randnx_n')
)
def test_from_random_prob(self):
result = vbt.Portfolio.from_random_signals(price, prob=0.5, seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[True, False, False, False, False],
[False, False, False, False, True]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
price.vbt.wrapper.index
)
pd.testing.assert_index_equal(
result.wrapper.columns,
price.vbt.wrapper.columns
)
result = vbt.Portfolio.from_random_signals(price, prob=[0.25, 0.5], seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[[False, True], [False, False], [False, False], [False, False], [True, False]],
[[False, False], [False, True], [False, False], [False, False], [False, False]]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
pd.DatetimeIndex([
'2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
], dtype='datetime64[ns]', freq=None)
)
pd.testing.assert_index_equal(
result.wrapper.columns,
pd.MultiIndex.from_tuples(
[(0.25, 0.25), (0.5, 0.5)],
names=['rprobnx_entry_prob', 'rprobnx_exit_prob'])
)
# ############# from_order_func ############# #
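# Shared order functions for the tests below: order_func_nb goes all-in on even bars and
# fully exits on odd bars; the log_* variants additionally emit log records (log=True);
# the flex_* variants return a (col, order) pair per call and end the segment by
# returning (-1, order_nothing_nb()).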
@njit
def order_func_nb(c, size):
_size = nb.get_elem_nb(c, size)
return nb.order_nb(_size if c.i % 2 == 0 else -_size)
@njit
def log_order_func_nb(c, size):
_size = nb.get_elem_nb(c, size)
return nb.order_nb(_size if c.i % 2 == 0 else -_size, log=True)
@njit
def flex_order_func_nb(c, size):
if c.call_idx < c.group_len:
_size = nb.get_col_elem_nb(c, c.from_col + c.call_idx, size)
return c.from_col + c.call_idx, nb.order_nb(_size if c.i % 2 == 0 else -_size)
return -1, nb.order_nothing_nb()
@njit
def log_flex_order_func_nb(c, size):
if c.call_idx < c.group_len:
_size = nb.get_col_elem_nb(c, c.from_col + c.call_idx, size)
return c.from_col + c.call_idx, nb.order_nb(_size if c.i % 2 == 0 else -_size, log=True)
return -1, nb.order_nothing_nb()
class TestFromOrderFunc:
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_one_column(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price.tolist(), order_func, np.asarray(np.inf), row_wise=test_row_wise, flexible=test_flexible)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1),
(2, 0, 2, 133.33333333333334, 3.0, 0.0, 0), (3, 0, 3, 66.66666666666669, 4.0, 0.0, 1),
(4, 0, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pf = vbt.Portfolio.from_order_func(
price, order_func, np.asarray(np.inf), row_wise=test_row_wise, flexible=test_flexible)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1),
(2, 0, 2, 133.33333333333334, 3.0, 0.0, 0), (3, 0, 3, 66.66666666666669, 4.0, 0.0, 1),
(4, 0, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
@pytest.mark.parametrize("test_use_numba", [False, True])
def test_multiple_columns(self, test_row_wise, test_flexible, test_use_numba):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, vbt.Rep('size'), broadcast_named_args=dict(size=[0, 1, np.inf]),
row_wise=test_row_wise, flexible=test_flexible, use_numba=test_use_numba)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 2, 0, 100.0, 1.0, 0.0, 0),
(2, 1, 1, 1.0, 2.0, 0.0, 1), (3, 2, 1, 200.0, 2.0, 0.0, 1),
(4, 1, 2, 1.0, 3.0, 0.0, 0), (5, 2, 2, 133.33333333333334, 3.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 2, 3, 66.66666666666669, 4.0, 0.0, 1),
(8, 1, 4, 1.0, 5.0, 0.0, 0), (9, 2, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 1, 1.0, 2.0, 0.0, 1),
(2, 1, 2, 1.0, 3.0, 0.0, 0), (3, 1, 3, 1.0, 4.0, 0.0, 1),
(4, 1, 4, 1.0, 5.0, 0.0, 0), (5, 2, 0, 100.0, 1.0, 0.0, 0),
(6, 2, 1, 200.0, 2.0, 0.0, 1), (7, 2, 2, 133.33333333333334, 3.0, 0.0, 0),
(8, 2, 3, 66.66666666666669, 4.0, 0.0, 1), (9, 2, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_group_by(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
group_by=np.array([0, 0, 1]), row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 0, 1, 200.0, 2.0, 0.0, 1),
(4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 2, 1, 200.0, 2.0, 0.0, 1),
(6, 0, 2, 133.33333333333334, 3.0, 0.0, 0), (7, 1, 2, 133.33333333333334, 3.0, 0.0, 0),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 0, 3, 66.66666666666669, 4.0, 0.0, 1),
(10, 1, 3, 66.66666666666669, 4.0, 0.0, 1), (11, 2, 3, 66.66666666666669, 4.0, 0.0, 1),
(12, 0, 4, 53.33333333333335, 5.0, 0.0, 0), (13, 1, 4, 53.33333333333335, 5.0, 0.0, 0),
(14, 2, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 0, 1, 200.0, 2.0, 0.0, 1), (3, 1, 1, 200.0, 2.0, 0.0, 1),
(4, 0, 2, 133.33333333333334, 3.0, 0.0, 0), (5, 1, 2, 133.33333333333334, 3.0, 0.0, 0),
(6, 0, 3, 66.66666666666669, 4.0, 0.0, 1), (7, 1, 3, 66.66666666666669, 4.0, 0.0, 1),
(8, 0, 4, 53.33333333333335, 5.0, 0.0, 0), (9, 1, 4, 53.33333333333335, 5.0, 0.0, 0),
(10, 2, 0, 100.0, 1.0, 0.0, 0), (11, 2, 1, 200.0, 2.0, 0.0, 1),
(12, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (13, 2, 3, 66.66666666666669, 4.0, 0.0, 1),
(14, 2, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_cash_sharing(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
group_by=np.array([0, 0, 1]), cash_sharing=True, row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 2, 0, 100., 1., 0., 0),
(2, 0, 1, 200., 2., 0., 1), (3, 2, 1, 200., 2., 0., 1),
(4, 0, 2, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
(6, 0, 3, 66.66666667, 4., 0., 1), (7, 2, 3, 66.66666667, 4., 0., 1),
(8, 0, 4, 53.33333333, 5., 0., 0), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 2, 133.33333333, 3., 0., 0), (3, 0, 3, 66.66666667, 4., 0., 1),
(4, 0, 4, 53.33333333, 5., 0., 0), (5, 2, 0, 100., 1., 0., 0),
(6, 2, 1, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
(8, 2, 3, 66.66666667, 4., 0., 1), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_call_seq(self, test_row_wise):
pf = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.asarray(np.inf), group_by=np.array([0, 0, 1]),
cash_sharing=True, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 2, 0, 100., 1., 0., 0),
(2, 0, 1, 200., 2., 0., 1), (3, 2, 1, 200., 2., 0., 1),
(4, 0, 2, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
(6, 0, 3, 66.66666667, 4., 0., 1), (7, 2, 3, 66.66666667, 4., 0., 1),
(8, 0, 4, 53.33333333, 5., 0., 0), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 2, 133.33333333, 3., 0., 0), (3, 0, 3, 66.66666667, 4., 0., 1),
(4, 0, 4, 53.33333333, 5., 0., 0), (5, 2, 0, 100., 1., 0., 0),
(6, 2, 1, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
(8, 2, 3, 66.66666667, 4., 0., 1), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.asarray(np.inf), group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed', row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 2, 0, 100., 1., 0., 0),
(2, 1, 1, 200., 2., 0., 1), (3, 2, 1, 200., 2., 0., 1),
(4, 1, 2, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
(6, 1, 3, 66.66666667, 4., 0., 1), (7, 2, 3, 66.66666667, 4., 0., 1),
(8, 1, 4, 53.33333333, 5., 0., 0), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 2, 133.33333333, 3., 0., 0), (3, 1, 3, 66.66666667, 4., 0., 1),
(4, 1, 4, 53.33333333, 5., 0., 0), (5, 2, 0, 100., 1., 0., 0),
(6, 2, 1, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
(8, 2, 3, 66.66666667, 4., 0., 1), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.asarray(np.inf), group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 2, 0, 100., 1., 0., 0),
(2, 1, 1, 200., 2., 0., 1), (3, 2, 1, 200., 2., 0., 1),
(4, 1, 2, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
(6, 1, 3, 66.66666667, 4., 0., 1), (7, 2, 3, 66.66666667, 4., 0., 1),
(8, 1, 4, 53.33333333, 5., 0., 0), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 2, 133.33333333, 3., 0., 0), (3, 1, 3, 66.66666667, 4., 0., 1),
(4, 1, 4, 53.33333333, 5., 0., 0), (5, 2, 0, 100., 1., 0., 0),
(6, 2, 1, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
(8, 2, 3, 66.66666667, 4., 0., 1), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
with pytest.raises(Exception):
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.asarray(np.inf), group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='auto', row_wise=test_row_wise
)
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
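        # call_seq='auto' is rejected above, so the automatic ordering is emulated here by
        # sorting the call sequence inside a pre-segment callback via nb.sort_call_seq_nb.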
@njit
def pre_segment_func_nb(c, target_hold_value):
order_size = np.copy(target_hold_value[c.i, c.from_col:c.to_col])
order_size_type = np.full(c.group_len, SizeType.TargetValue)
direction = np.full(c.group_len, Direction.Both)
order_value_out = np.empty(c.group_len, dtype=np.float_)
c.last_val_price[c.from_col:c.to_col] = c.close[c.i, c.from_col:c.to_col]
nb.sort_call_seq_nb(c, order_size, order_size_type, direction, order_value_out)
return order_size, order_size_type, direction
@njit
def pct_order_func_nb(c, order_size, order_size_type, direction):
col_i = c.call_seq_now[c.call_idx]
return nb.order_nb(
order_size[col_i],
c.close[c.i, col_i],
size_type=order_size_type[col_i],
direction=direction[col_i]
)
pf = vbt.Portfolio.from_order_func(
price_wide * 0 + 1, pct_order_func_nb, group_by=np.array([0, 0, 0]),
cash_sharing=True, pre_segment_func_nb=pre_segment_func_nb,
pre_segment_args=(target_hold_value.values,), row_wise=test_row_wise)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 1, 0],
[0, 2, 1],
[1, 0, 2],
[2, 1, 0]
])
)
pd.testing.assert_frame_equal(
pf.asset_value(group_by=False),
target_hold_value
)
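    # Target-value sizing: without a pre-segment callback the order is valued at the current
    # close; feeding the previous bar's price through last_val_price shifts the valuation
    # and yields an additional order at the first bar.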
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_target_value(self, test_row_wise, test_flexible):
@njit
def target_val_pre_segment_func_nb(c, val_price):
c.last_val_price[c.from_col:c.to_col] = val_price[c.i]
return ()
if test_flexible:
@njit
def target_val_order_func_nb(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(50., nb.get_col_elem_nb(c, col, c.close), size_type=SizeType.TargetValue)
return -1, nb.order_nothing_nb()
else:
@njit
def target_val_order_func_nb(c):
return nb.order_nb(50., nb.get_elem_nb(c, c.close), size_type=SizeType.TargetValue)
pf = vbt.Portfolio.from_order_func(
price.iloc[1:], target_val_order_func_nb, row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 1, 25.0, 3.0, 0.0, 0), (1, 0, 2, 8.333333333333332, 4.0, 0.0, 1),
(2, 0, 3, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 1, 25.0, 3.0, 0.0, 0), (1, 0, 2, 8.333333333333332, 4.0, 0.0, 1),
(2, 0, 3, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
pf = vbt.Portfolio.from_order_func(
price.iloc[1:], target_val_order_func_nb,
pre_segment_func_nb=target_val_pre_segment_func_nb,
pre_segment_args=(price.iloc[:-1].values,), row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 0, 1, 25.0, 3.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 4.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 0, 1, 25.0, 3.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 4.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
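    # Same setup with TargetPercent sizing (target 50% of the portfolio value).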
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_target_percent(self, test_row_wise, test_flexible):
@njit
def target_pct_pre_segment_func_nb(c, val_price):
c.last_val_price[c.from_col:c.to_col] = val_price[c.i]
return ()
if test_flexible:
@njit
def target_pct_order_func_nb(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(0.5, nb.get_col_elem_nb(c, col, c.close), size_type=SizeType.TargetPercent)
return -1, nb.order_nothing_nb()
else:
@njit
def target_pct_order_func_nb(c):
return nb.order_nb(0.5, nb.get_elem_nb(c, c.close), size_type=SizeType.TargetPercent)
pf = vbt.Portfolio.from_order_func(
price.iloc[1:], target_pct_order_func_nb, row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 1, 25.0, 3.0, 0.0, 0), (1, 0, 2, 8.333333333333332, 4.0, 0.0, 1),
(2, 0, 3, 1.0416666666666679, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 1, 25.0, 3.0, 0.0, 0), (1, 0, 2, 8.333333333333332, 4.0, 0.0, 1),
(2, 0, 3, 1.0416666666666679, 5.0, 0.0, 1)
], dtype=order_dt)
)
pf = vbt.Portfolio.from_order_func(
price.iloc[1:], target_pct_order_func_nb,
pre_segment_func_nb=target_pct_pre_segment_func_nb,
pre_segment_args=(price.iloc[:-1].values,), row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 0, 1, 25.0, 3.0, 0.0, 1),
(2, 0, 3, 3.125, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 0, 1, 25.0, 3.0, 0.0, 1),
(2, 0, 3, 3.125, 5.0, 0.0, 1)
], dtype=order_dt)
)
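    # With update_value=False the value seen in post_order equals the pre-order value;
    # with update_value=True it is recomputed right after each fill.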
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_update_value(self, test_row_wise, test_flexible):
if test_flexible:
@njit
def order_func_nb(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(
np.inf if c.i % 2 == 0 else -np.inf,
nb.get_col_elem_nb(c, col, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01
)
return -1, nb.order_nothing_nb()
else:
@njit
def order_func_nb(c):
return nb.order_nb(
np.inf if c.i % 2 == 0 else -np.inf,
nb.get_elem_nb(c, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01
)
@njit
def post_order_func_nb(c, value_before, value_now):
value_before[c.i, c.col] = c.value_before
value_now[c.i, c.col] = c.value_now
value_before = np.empty_like(price.values[:, None])
value_now = np.empty_like(price.values[:, None])
_ = vbt.Portfolio.from_order_func(
price,
order_func_nb,
post_order_func_nb=post_order_func_nb,
post_order_args=(value_before, value_now),
row_wise=test_row_wise,
update_value=False,
flexible=test_flexible)
np.testing.assert_array_equal(
value_before,
value_now
)
_ = vbt.Portfolio.from_order_func(
price,
order_func_nb,
post_order_func_nb=post_order_func_nb,
post_order_args=(value_before, value_now),
row_wise=test_row_wise,
update_value=True,
flexible=test_flexible)
np.testing.assert_array_equal(
value_before,
np.array([
[100.0],
[97.04930889128518],
[185.46988117104038],
[82.47853456223025],
[104.65775576218027]
])
)
np.testing.assert_array_equal(
value_now,
np.array([
[98.01980198019803],
[187.36243097890815],
[83.30331990785257],
[105.72569204546781],
[73.54075125567473]
])
)
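    # Snapshots the running state (last value, last return, last position record) in the
    # pre-segment, order, and post-order callbacks and cross-checks it against hard-coded
    # arrays and, below, against the Portfolio accessors.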
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_states(self, test_row_wise, test_flexible):
close = np.array([
[1, 1, 1],
[np.nan, 2, 2],
[3, np.nan, 3],
[4, 4, np.nan],
[5, 5, 5]
])
size = np.array([
[1, 1, 1],
[-1, -1, -1],
[1, 1, 1],
[-1, -1, -1],
[1, 1, 1]
])
value_arr1 = np.empty((size.shape[0], 2), dtype=np.float_)
value_arr2 = np.empty(size.shape, dtype=np.float_)
value_arr3 = np.empty(size.shape, dtype=np.float_)
return_arr1 = np.empty((size.shape[0], 2), dtype=np.float_)
return_arr2 = np.empty(size.shape, dtype=np.float_)
return_arr3 = np.empty(size.shape, dtype=np.float_)
pos_record_arr1 = np.empty(size.shape, dtype=trade_dt)
pos_record_arr2 = np.empty(size.shape, dtype=trade_dt)
pos_record_arr3 = np.empty(size.shape, dtype=trade_dt)
def pre_segment_func_nb(c):
value_arr1[c.i, c.group] = c.last_value[c.group]
return_arr1[c.i, c.group] = c.last_return[c.group]
for col in range(c.from_col, c.to_col):
pos_record_arr1[c.i, col] = c.last_pos_record[col]
if c.i > 0:
c.last_val_price[c.from_col:c.to_col] = c.last_val_price[c.from_col:c.to_col] + 0.5
return ()
if test_flexible:
def order_func_nb(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
value_arr2[c.i, col] = c.last_value[c.group]
return_arr2[c.i, col] = c.last_return[c.group]
pos_record_arr2[c.i, col] = c.last_pos_record[col]
return col, nb.order_nb(size[c.i, col], fixed_fees=1.)
return -1, nb.order_nothing_nb()
else:
def order_func_nb(c):
value_arr2[c.i, c.col] = c.value_now
return_arr2[c.i, c.col] = c.return_now
pos_record_arr2[c.i, c.col] = c.pos_record_now
return nb.order_nb(size[c.i, c.col], fixed_fees=1.)
def post_order_func_nb(c):
value_arr3[c.i, c.col] = c.value_now
return_arr3[c.i, c.col] = c.return_now
pos_record_arr3[c.i, c.col] = c.pos_record_now
_ = vbt.Portfolio.from_order_func(
close,
order_func_nb,
pre_segment_func_nb=pre_segment_func_nb,
post_order_func_nb=post_order_func_nb,
use_numba=False,
row_wise=test_row_wise,
update_value=True,
ffill_val_price=True,
group_by=[0, 0, 1],
cash_sharing=True,
flexible=test_flexible
)
np.testing.assert_array_equal(
value_arr1,
np.array([
[100.0, 100.0],
[98.0, 99.0],
[98.5, 99.0],
[99.0, 98.0],
[99.0, 98.5]
])
)
np.testing.assert_array_equal(
value_arr2,
np.array([
[100.0, 99.0, 100.0],
[99.0, 99.0, 99.5],
[99.0, 99.0, 99.0],
[100.0, 100.0, 98.5],
[99.0, 98.5, 99.0]
])
)
np.testing.assert_array_equal(
value_arr3,
np.array([
[99.0, 98.0, 99.0],
[99.0, 98.5, 99.0],
[99.0, 99.0, 98.0],
[100.0, 99.0, 98.5],
[98.5, 97.0, 99.0]
])
)
np.testing.assert_array_equal(
return_arr1,
np.array([
[np.nan, np.nan],
[-0.02, -0.01],
[0.00510204081632653, 0.0],
[0.005076142131979695, -0.010101010101010102],
[0.0, 0.00510204081632653]
])
)
np.testing.assert_array_equal(
return_arr2,
np.array([
[0.0, -0.01, 0.0],
[-0.01, -0.01, -0.005],
[0.01020408163265306, 0.01020408163265306, 0.0],
[0.015228426395939087, 0.015228426395939087, -0.005050505050505051],
[0.0, -0.005050505050505051, 0.01020408163265306]
])
)
np.testing.assert_array_equal(
return_arr3,
np.array([
[-0.01, -0.02, -0.01],
[-0.01, -0.015, -0.01],
[0.01020408163265306, 0.01020408163265306, -0.010101010101010102],
[0.015228426395939087, 0.005076142131979695, -0.005050505050505051],
[-0.005050505050505051, -0.020202020202020204, 0.01020408163265306]
])
)
record_arrays_close(
pos_record_arr1.flatten()[3:],
np.array([
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 2, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 2, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 0, 2.0, 0, 2.0, 2.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -1.0, -0.3333333333333333, 0, 0, 1),
(0, 0, 2.0, 0, 2.0, 2.0, -1, 4.0, 1.0, 1.0, 0.25, 0, 0, 0),
(1, 1, 1.0, 3, 4.0, 1.0, -1, np.nan, 0.0, -1.0, -0.25, 1, 0, 1),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -0.5, -0.16666666666666666, 0, 0, 1)
], dtype=trade_dt)
)
record_arrays_close(
pos_record_arr2.flatten()[3:],
np.array([
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 2, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 2, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 0, 2.0, 0, 2.0, 2.0, -1, np.nan, 0.0, 1.0, 0.25, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -0.5, -0.16666666666666666, 0, 0, 1),
(0, 0, 2.0, 0, 2.0, 2.0, -1, 4.0, 1.0, 1.5, 0.375, 0, 0, 0),
(1, 1, 1.0, 3, 4.0, 1.0, -1, np.nan, 0.0, -1.5, -0.375, 1, 0, 1),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 1)
], dtype=trade_dt)
)
record_arrays_close(
pos_record_arr3.flatten(),
np.array([
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 2, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 2, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 0, 2.0, 0, 2.0, 2.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -1.0, -0.3333333333333333, 0, 0, 1),
(0, 0, 2.0, 0, 2.0, 2.0, -1, 4.0, 1.0, 1.0, 0.25, 0, 0, 0),
(1, 1, 1.0, 3, 4.0, 1.0, -1, np.nan, 0.0, -1.0, -0.25, 1, 0, 1),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -0.5, -0.16666666666666666, 0, 0, 1),
(0, 0, 3.0, 0, 3.0, 3.0, -1, 4.0, 1.0, 1.0, 0.1111111111111111, 0, 0, 0),
(1, 1, 1.0, 3, 4.0, 1.0, 4, 5.0, 1.0, -3.0, -0.75, 1, 1, 1),
(1, 2, 2.0, 2, 4.0, 2.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 1)
], dtype=trade_dt)
)
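        # Second pass: capture cash, positions, valuation price, value and returns in
        # post-order/post-segment callbacks and compare them with the Portfolio accessors,
        # including the in-sim-order views.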
cash_arr = np.empty((size.shape[0], 2), dtype=np.float_)
position_arr = np.empty(size.shape, dtype=np.float_)
val_price_arr = np.empty(size.shape, dtype=np.float_)
value_arr = np.empty((size.shape[0], 2), dtype=np.float_)
return_arr = np.empty((size.shape[0], 2), dtype=np.float_)
sim_order_cash_arr = np.empty(size.shape, dtype=np.float_)
sim_order_value_arr = np.empty(size.shape, dtype=np.float_)
sim_order_return_arr = np.empty(size.shape, dtype=np.float_)
def post_order_func_nb(c):
sim_order_cash_arr[c.i, c.col] = c.cash_now
sim_order_value_arr[c.i, c.col] = c.value_now
sim_order_return_arr[c.i, c.col] = c.value_now
if c.i == 0 and c.call_idx == 0:
sim_order_return_arr[c.i, c.col] -= c.init_cash[c.group]
sim_order_return_arr[c.i, c.col] /= c.init_cash[c.group]
else:
if c.call_idx == 0:
prev_i = c.i - 1
prev_col = c.to_col - 1
else:
prev_i = c.i
prev_col = c.from_col + c.call_idx - 1
sim_order_return_arr[c.i, c.col] -= sim_order_value_arr[prev_i, prev_col]
sim_order_return_arr[c.i, c.col] /= sim_order_value_arr[prev_i, prev_col]
def post_segment_func_nb(c):
cash_arr[c.i, c.group] = c.last_cash[c.group]
for col in range(c.from_col, c.to_col):
position_arr[c.i, col] = c.last_position[col]
val_price_arr[c.i, col] = c.last_val_price[col]
value_arr[c.i, c.group] = c.last_value[c.group]
return_arr[c.i, c.group] = c.last_return[c.group]
pf = vbt.Portfolio.from_order_func(
close,
order_func_nb,
post_order_func_nb=post_order_func_nb,
post_segment_func_nb=post_segment_func_nb,
use_numba=False,
row_wise=test_row_wise,
update_value=True,
ffill_val_price=True,
group_by=[0, 0, 1],
cash_sharing=True,
flexible=test_flexible
)
np.testing.assert_array_equal(
cash_arr,
pf.cash().values
)
np.testing.assert_array_equal(
position_arr,
pf.assets().values
)
np.testing.assert_array_equal(
val_price_arr,
pf.get_filled_close().values
)
np.testing.assert_array_equal(
value_arr,
pf.value().values
)
np.testing.assert_array_equal(
return_arr,
pf.returns().values
)
if test_flexible:
with pytest.raises(Exception):
pf.cash(in_sim_order=True, group_by=False)
with pytest.raises(Exception):
pf.value(in_sim_order=True, group_by=False)
with pytest.raises(Exception):
pf.returns(in_sim_order=True, group_by=False)
else:
np.testing.assert_array_equal(
sim_order_cash_arr,
pf.cash(in_sim_order=True, group_by=False).values
)
np.testing.assert_array_equal(
sim_order_value_arr,
pf.value(in_sim_order=True, group_by=False).values
)
np.testing.assert_array_equal(
sim_order_return_arr,
pf.returns(in_sim_order=True, group_by=False).values
)
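    # The post-simulation callback receives the final simulation context; its fields
    # (close, group lens, call sequence, records and last_* state arrays) are verified here.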
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_post_sim_ctx(self, test_row_wise, test_flexible):
if test_flexible:
def order_func(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(
1.,
nb.get_col_elem_nb(c, col, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01,
log=True
)
return -1, nb.order_nothing_nb()
else:
def order_func(c):
return nb.order_nb(
1.,
nb.get_elem_nb(c, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01,
log=True
)
def post_sim_func(c, lst):
lst.append(deepcopy(c))
lst = []
_ = vbt.Portfolio.from_order_func(
price_wide,
order_func,
post_sim_func_nb=post_sim_func,
post_sim_args=(lst,),
row_wise=test_row_wise,
update_value=True,
max_logs=price_wide.shape[0] * price_wide.shape[1],
use_numba=False,
group_by=[0, 0, 1],
cash_sharing=True,
flexible=test_flexible
)
c = lst[-1]
assert c.target_shape == price_wide.shape
np.testing.assert_array_equal(
c.close,
price_wide.values
)
np.testing.assert_array_equal(
c.group_lens,
np.array([2, 1])
)
np.testing.assert_array_equal(
c.init_cash,
np.array([100., 100.])
)
assert c.cash_sharing
if test_flexible:
assert c.call_seq is None
else:
np.testing.assert_array_equal(
c.call_seq,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
np.testing.assert_array_equal(
c.segment_mask,
np.array([
[True, True],
[True, True],
[True, True],
[True, True],
[True, True]
])
)
assert c.ffill_val_price
assert c.update_value
if test_row_wise:
record_arrays_close(
c.order_records,
np.array([
(0, 0, 0, 1.0, 1.01, 1.0101, 0), (1, 1, 0, 1.0, 1.01, 1.0101, 0),
(2, 2, 0, 1.0, 1.01, 1.0101, 0), (3, 0, 1, 1.0, 2.02, 1.0202, 0),
(4, 1, 1, 1.0, 2.02, 1.0202, 0), (5, 2, 1, 1.0, 2.02, 1.0202, 0),
(6, 0, 2, 1.0, 3.0300000000000002, 1.0303, 0), (7, 1, 2, 1.0, 3.0300000000000002, 1.0303, 0),
(8, 2, 2, 1.0, 3.0300000000000002, 1.0303, 0), (9, 0, 3, 1.0, 4.04, 1.0404, 0),
(10, 1, 3, 1.0, 4.04, 1.0404, 0), (11, 2, 3, 1.0, 4.04, 1.0404, 0),
(12, 0, 4, 1.0, 5.05, 1.0505, 0), (13, 1, 4, 1.0, 5.05, 1.0505, 0),
(14, 2, 4, 1.0, 5.05, 1.0505, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
c.order_records,
np.array([
(0, 0, 0, 1.0, 1.01, 1.0101, 0), (1, 1, 0, 1.0, 1.01, 1.0101, 0),
(2, 0, 1, 1.0, 2.02, 1.0202, 0), (3, 1, 1, 1.0, 2.02, 1.0202, 0),
(4, 0, 2, 1.0, 3.0300000000000002, 1.0303, 0), (5, 1, 2, 1.0, 3.0300000000000002, 1.0303, 0),
(6, 0, 3, 1.0, 4.04, 1.0404, 0), (7, 1, 3, 1.0, 4.04, 1.0404, 0),
(8, 0, 4, 1.0, 5.05, 1.0505, 0), (9, 1, 4, 1.0, 5.05, 1.0505, 0),
(10, 2, 0, 1.0, 1.01, 1.0101, 0), (11, 2, 1, 1.0, 2.02, 1.0202, 0),
(12, 2, 2, 1.0, 3.0300000000000002, 1.0303, 0), (13, 2, 3, 1.0, 4.04, 1.0404, 0),
(14, 2, 4, 1.0, 5.05, 1.0505, 0)
], dtype=order_dt)
)
if test_row_wise:
record_arrays_close(
c.log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2, 0.01, 1.0,
0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799, 1.0, 0.0, 97.9799,
1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 0),
(1, 0, 1, 0, 97.9799, 0.0, 0.0, 97.9799, np.nan, 98.9899, 1.0, 1.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 95.9598, 1.0,
0.0, 95.9598, 1.01, 97.97980000000001, 1.0, 1.01, 1.0101, 0, 0, -1, 1),
(2, 1, 2, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2, 0.01,
1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799, 1.0, 0.0,
97.9799, 1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 2),
(3, 0, 0, 1, 95.9598, 1.0, 0.0, 95.9598, 1.0, 97.9598, 1.0, 2.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 92.9196,
2.0, 0.0, 92.9196, 2.02, 97.95960000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 3),
(4, 0, 1, 1, 92.9196, 1.0, 0.0, 92.9196, 1.0, 97.95960000000001, 1.0, 2.0,
0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 89.8794,
2.0, 0.0, 89.8794, 2.02, 97.95940000000002, 1.0, 2.02, 1.0202, 0, 0, -1, 4),
(5, 1, 2, 1, 97.9799, 1.0, 0.0, 97.9799, 1.0, 98.9799, 1.0, 2.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 94.9397, 2.0,
0.0, 94.9397, 2.02, 98.97970000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 5),
(6, 0, 0, 2, 89.8794, 2.0, 0.0, 89.8794, 2.0, 97.8794, 1.0, 3.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.8191, 3.0,
0.0, 85.8191, 3.0300000000000002, 98.90910000000001, 1.0,
3.0300000000000002, 1.0303, 0, 0, -1, 6),
(7, 0, 1, 2, 85.8191, 2.0, 0.0, 85.8191, 2.0, 98.90910000000001,
1.0, 3.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True,
81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0300000000000002,
99.93880000000001, 1.0, 3.0300000000000002, 1.0303, 0, 0, -1, 7),
(8, 1, 2, 2, 94.9397, 2.0, 0.0, 94.9397, 2.0, 98.9397, 1.0, 3.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 90.8794,
3.0, 0.0, 90.8794, 3.0300000000000002, 99.96940000000001, 1.0,
3.0300000000000002, 1.0303, 0, 0, -1, 8),
(9, 0, 0, 3, 81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0, 99.75880000000001,
1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True,
76.67840000000001, 4.0, 0.0, 76.67840000000001, 4.04, 101.83840000000001,
1.0, 4.04, 1.0404, 0, 0, -1, 9),
(10, 0, 1, 3, 76.67840000000001, 3.0, 0.0, 76.67840000000001, 3.0,
101.83840000000001, 1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 71.59800000000001, 4.0, 0.0, 71.59800000000001,
4.04, 103.918, 1.0, 4.04, 1.0404, 0, 0, -1, 10),
(11, 1, 2, 3, 90.8794, 3.0, 0.0, 90.8794, 3.0, 99.8794, 1.0, 4.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.799, 4.0,
0.0, 85.799, 4.04, 101.959, 1.0, 4.04, 1.0404, 0, 0, -1, 11),
(12, 0, 0, 4, 71.59800000000001, 4.0, 0.0, 71.59800000000001, 4.0,
103.59800000000001, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 65.49750000000002, 5.0, 0.0, 65.49750000000002,
5.05, 106.74750000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 12),
(13, 0, 1, 4, 65.49750000000002, 4.0, 0.0, 65.49750000000002, 4.0,
106.74750000000002, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 59.39700000000002, 5.0, 0.0, 59.39700000000002,
5.05, 109.89700000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 13),
(14, 1, 2, 4, 85.799, 4.0, 0.0, 85.799, 4.0, 101.799, 1.0, 5.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 79.69850000000001,
5.0, 0.0, 79.69850000000001, 5.05, 104.94850000000001, 1.0, 5.05, 1.0505, 0, 0, -1, 14)
], dtype=log_dt)
)
else:
record_arrays_close(
c.log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799,
1.0, 0.0, 97.9799, 1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 0),
(1, 0, 1, 0, 97.9799, 0.0, 0.0, 97.9799, np.nan, 98.9899, 1.0, 1.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 95.9598,
1.0, 0.0, 95.9598, 1.01, 97.97980000000001, 1.0, 1.01, 1.0101, 0, 0, -1, 1),
(2, 0, 0, 1, 95.9598, 1.0, 0.0, 95.9598, 1.0, 97.9598, 1.0, 2.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 92.9196, 2.0,
0.0, 92.9196, 2.02, 97.95960000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 2),
(3, 0, 1, 1, 92.9196, 1.0, 0.0, 92.9196, 1.0, 97.95960000000001, 1.0,
2.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 89.8794,
2.0, 0.0, 89.8794, 2.02, 97.95940000000002, 1.0, 2.02, 1.0202, 0, 0, -1, 3),
(4, 0, 0, 2, 89.8794, 2.0, 0.0, 89.8794, 2.0, 97.8794, 1.0, 3.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.8191,
3.0, 0.0, 85.8191, 3.0300000000000002, 98.90910000000001, 1.0,
3.0300000000000002, 1.0303, 0, 0, -1, 4),
(5, 0, 1, 2, 85.8191, 2.0, 0.0, 85.8191, 2.0, 98.90910000000001, 1.0,
3.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True,
81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0300000000000002,
99.93880000000001, 1.0, 3.0300000000000002, 1.0303, 0, 0, -1, 5),
(6, 0, 0, 3, 81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0,
99.75880000000001, 1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 76.67840000000001, 4.0, 0.0, 76.67840000000001,
4.04, 101.83840000000001, 1.0, 4.04, 1.0404, 0, 0, -1, 6),
(7, 0, 1, 3, 76.67840000000001, 3.0, 0.0, 76.67840000000001, 3.0,
101.83840000000001, 1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 71.59800000000001, 4.0, 0.0, 71.59800000000001,
4.04, 103.918, 1.0, 4.04, 1.0404, 0, 0, -1, 7),
(8, 0, 0, 4, 71.59800000000001, 4.0, 0.0, 71.59800000000001, 4.0,
103.59800000000001, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 65.49750000000002, 5.0, 0.0, 65.49750000000002,
5.05, 106.74750000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 8),
(9, 0, 1, 4, 65.49750000000002, 4.0, 0.0, 65.49750000000002, 4.0,
106.74750000000002, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 59.39700000000002, 5.0, 0.0, 59.39700000000002,
5.05, 109.89700000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 9),
(10, 1, 2, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2, 0.01,
1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799, 1.0, 0.0,
97.9799, 1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 10),
(11, 1, 2, 1, 97.9799, 1.0, 0.0, 97.9799, 1.0, 98.9799, 1.0, 2.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 94.9397,
2.0, 0.0, 94.9397, 2.02, 98.97970000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 11),
(12, 1, 2, 2, 94.9397, 2.0, 0.0, 94.9397, 2.0, 98.9397, 1.0, 3.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 90.8794,
3.0, 0.0, 90.8794, 3.0300000000000002, 99.96940000000001, 1.0,
3.0300000000000002, 1.0303, 0, 0, -1, 12),
(13, 1, 2, 3, 90.8794, 3.0, 0.0, 90.8794, 3.0, 99.8794, 1.0, 4.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.799, 4.0,
0.0, 85.799, 4.04, 101.959, 1.0, 4.04, 1.0404, 0, 0, -1, 13),
(14, 1, 2, 4, 85.799, 4.0, 0.0, 85.799, 4.0, 101.799, 1.0, 5.0, 0, 2, 0.01,
1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 79.69850000000001,
5.0, 0.0, 79.69850000000001, 5.05, 104.94850000000001, 1.0, 5.05, 1.0505, 0, 0, -1, 14)
], dtype=log_dt)
)
np.testing.assert_array_equal(
c.last_cash,
np.array([59.39700000000002, 79.69850000000001])
)
np.testing.assert_array_equal(
c.last_position,
np.array([5., 5., 5.])
)
np.testing.assert_array_equal(
c.last_val_price,
np.array([5.0, 5.0, 5.0])
)
np.testing.assert_array_equal(
c.last_value,
np.array([109.39700000000002, 104.69850000000001])
)
np.testing.assert_array_equal(
c.second_last_value,
np.array([103.59800000000001, 101.799])
)
np.testing.assert_array_equal(
c.last_return,
np.array([0.05597598409235705, 0.028482598060884715])
)
np.testing.assert_array_equal(
c.last_debt,
np.array([0., 0., 0.])
)
np.testing.assert_array_equal(
c.last_free_cash,
np.array([59.39700000000002, 79.69850000000001])
)
if test_row_wise:
np.testing.assert_array_equal(
c.last_oidx,
np.array([12, 13, 14])
)
np.testing.assert_array_equal(
c.last_lidx,
np.array([12, 13, 14])
)
else:
np.testing.assert_array_equal(
c.last_oidx,
np.array([8, 9, 14])
)
np.testing.assert_array_equal(
c.last_lidx,
np.array([8, 9, 14])
)
assert c.order_records[c.last_oidx[0]]['col'] == 0
assert c.order_records[c.last_oidx[1]]['col'] == 1
assert c.order_records[c.last_oidx[2]]['col'] == 2
assert c.log_records[c.last_lidx[0]]['col'] == 0
assert c.log_records[c.last_lidx[1]]['col'] == 1
assert c.log_records[c.last_lidx[2]]['col'] == 2
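    # post_order records c.debt_now and c.free_cash_now; free cash (pf.cash(free=True))
    # diverges from plain cash whenever short positions put debt on the books.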
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_free_cash(self, test_row_wise, test_flexible):
if test_flexible:
def order_func(c, size):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(
size[c.i, col],
nb.get_col_elem_nb(c, col, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01
)
return -1, nb.order_nothing_nb()
else:
def order_func(c, size):
return nb.order_nb(
size[c.i, c.col],
nb.get_elem_nb(c, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01
)
def post_order_func(c, debt, free_cash):
debt[c.i, c.col] = c.debt_now
if c.cash_sharing:
free_cash[c.i, c.group] = c.free_cash_now
else:
free_cash[c.i, c.col] = c.free_cash_now
size = np.array([
[5, -5, 5],
[5, -5, -10],
[-5, 5, 10],
[-5, 5, -10],
[-5, 5, 10]
])
debt = np.empty(price_wide.shape, dtype=np.float_)
free_cash = np.empty(price_wide.shape, dtype=np.float_)
pf = vbt.Portfolio.from_order_func(
price_wide,
order_func, size,
post_order_func_nb=post_order_func,
post_order_args=(debt, free_cash,),
row_wise=test_row_wise,
use_numba=False,
flexible=test_flexible
)
np.testing.assert_array_equal(
debt,
np.array([
[0.0, 4.95, 0.0],
[0.0, 14.850000000000001, 9.9],
[0.0, 7.425000000000001, 0.0],
[0.0, 0.0, 19.8],
[24.75, 0.0, 0.0]
])
)
np.testing.assert_array_equal(
free_cash,
np.array([
[93.8995, 94.0005, 93.8995],
[82.6985, 83.00150000000001, 92.70150000000001],
[96.39999999999999, 81.55000000000001, 80.8985],
[115.002, 74.998, 79.5025],
[89.0045, 48.49550000000001, 67.0975]
])
)
np.testing.assert_almost_equal(
free_cash,
pf.cash(free=True).values
)
debt = np.empty(price_wide.shape, dtype=np.float_)
free_cash = np.empty(price_wide.shape, dtype=np.float_)
pf = vbt.Portfolio.from_order_func(
price_wide.vbt.wrapper.wrap(price_wide.values[::-1]),
order_func, size,
post_order_func_nb=post_order_func,
post_order_args=(debt, free_cash,),
row_wise=test_row_wise,
use_numba=False,
flexible=test_flexible
)
np.testing.assert_array_equal(
debt,
np.array([
[0.0, 24.75, 0.0],
[0.0, 44.55, 19.8],
[0.0, 22.275, 0.0],
[0.0, 0.0, 9.9],
[4.95, 0.0, 0.0]
])
)
np.testing.assert_array_equal(
free_cash,
np.array([
[73.4975, 74.0025, 73.4975],
[52.0955, 53.00449999999999, 72.1015],
[65.797, 81.25299999999999, 80.0985],
[74.598, 114.60199999999998, 78.9005],
[68.5985, 108.50149999999998, 87.49949999999998]
])
)
np.testing.assert_almost_equal(
free_cash,
pf.cash(free=True).values
)
debt = np.empty(price_wide.shape, dtype=np.float_)
free_cash = np.empty((price_wide.shape[0], 2), dtype=np.float_)
pf = vbt.Portfolio.from_order_func(
price_wide,
order_func, size,
post_order_func_nb=post_order_func,
post_order_args=(debt, free_cash,),
row_wise=test_row_wise,
use_numba=False,
group_by=[0, 0, 1],
cash_sharing=True,
flexible=test_flexible
)
np.testing.assert_array_equal(
debt,
np.array([
[0.0, 4.95, 0.0],
[0.0, 14.850000000000001, 9.9],
[0.0, 7.425000000000001, 0.0],
[0.0, 0.0, 19.8],
[24.75, 0.0, 0.0]
])
)
np.testing.assert_array_equal(
free_cash,
np.array([
[87.9, 93.8995],
[65.70000000000002, 92.70150000000001],
[77.95000000000002, 80.8985],
[90.00000000000001, 79.5025],
[37.500000000000014, 67.0975]
])
)
np.testing.assert_almost_equal(
free_cash,
pf.cash(free=True).values
)
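    # init_cash accepts per-column values, np.inf, or an InitCashMode; Auto and AutoAlign
    # must reproduce the orders of an unlimited-cash run.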
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_init_cash(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(10.), row_wise=test_row_wise,
init_cash=[1., 10., np.inf], flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 10.0, 1.0, 0.0, 0),
(2, 2, 0, 10.0, 1.0, 0.0, 0), (3, 0, 1, 10.0, 2.0, 0.0, 1),
(4, 1, 1, 10.0, 2.0, 0.0, 1), (5, 2, 1, 10.0, 2.0, 0.0, 1),
(6, 0, 2, 6.666666666666667, 3.0, 0.0, 0), (7, 1, 2, 6.666666666666667, 3.0, 0.0, 0),
(8, 2, 2, 10.0, 3.0, 0.0, 0), (9, 0, 3, 10.0, 4.0, 0.0, 1),
(10, 1, 3, 10.0, 4.0, 0.0, 1), (11, 2, 3, 10.0, 4.0, 0.0, 1),
(12, 0, 4, 8.0, 5.0, 0.0, 0), (13, 1, 4, 8.0, 5.0, 0.0, 0),
(14, 2, 4, 10.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 10.0, 2.0, 0.0, 1),
(2, 0, 2, 6.666666666666667, 3.0, 0.0, 0), (3, 0, 3, 10.0, 4.0, 0.0, 1),
(4, 0, 4, 8.0, 5.0, 0.0, 0), (5, 1, 0, 10.0, 1.0, 0.0, 0),
(6, 1, 1, 10.0, 2.0, 0.0, 1), (7, 1, 2, 6.666666666666667, 3.0, 0.0, 0),
(8, 1, 3, 10.0, 4.0, 0.0, 1), (9, 1, 4, 8.0, 5.0, 0.0, 0),
(10, 2, 0, 10.0, 1.0, 0.0, 0), (11, 2, 1, 10.0, 2.0, 0.0, 1),
(12, 2, 2, 10.0, 3.0, 0.0, 0), (13, 2, 3, 10.0, 4.0, 0.0, 1),
(14, 2, 4, 10.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
assert type(pf._init_cash) == np.ndarray
base_pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(10.), row_wise=test_row_wise,
init_cash=np.inf, flexible=test_flexible)
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(10.), row_wise=test_row_wise,
init_cash=InitCashMode.Auto, flexible=test_flexible)
record_arrays_close(
pf.order_records,
base_pf.orders.values
)
assert pf._init_cash == InitCashMode.Auto
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(10.), row_wise=test_row_wise,
init_cash=InitCashMode.AutoAlign, flexible=test_flexible)
record_arrays_close(
pf.order_records,
base_pf.orders.values
)
assert pf._init_cash == InitCashMode.AutoAlign
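    # Verifies the exact ordering of the simulation callbacks by having each one append a
    # shared counter; sub_arg is a RepEval template that resolves to
    # target_shape[0] * target_shape[1] = 15 for the 5x3 input.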
def test_func_calls(self):
@njit
def pre_sim_func_nb(c, call_i, pre_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_sim_lst.append(call_i[0])
return (call_i,)
@njit
def post_sim_func_nb(c, call_i, post_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_sim_lst.append(call_i[0])
return (call_i,)
@njit
def pre_group_func_nb(c, call_i, pre_group_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_group_lst.append(call_i[0])
return (call_i,)
@njit
def post_group_func_nb(c, call_i, post_group_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_group_lst.append(call_i[0])
return (call_i,)
@njit
def pre_segment_func_nb(c, call_i, pre_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_segment_lst.append(call_i[0])
return (call_i,)
@njit
def post_segment_func_nb(c, call_i, post_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_segment_lst.append(call_i[0])
return (call_i,)
@njit
def order_func_nb(c, call_i, order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
order_lst.append(call_i[0])
return NoOrder
@njit
def post_order_func_nb(c, call_i, post_order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_order_lst.append(call_i[0])
sub_arg = vbt.RepEval('np.prod([target_shape[0], target_shape[1]])')
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
row_wise=False, template_mapping=dict(np=np)
)
assert call_i[0] == 56
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [56]
assert list(pre_group_lst) == [2, 34]
assert list(post_group_lst) == [33, 55]
assert list(pre_segment_lst) == [3, 9, 15, 21, 27, 35, 39, 43, 47, 51]
assert list(post_segment_lst) == [8, 14, 20, 26, 32, 38, 42, 46, 50, 54]
assert list(order_lst) == [4, 6, 10, 12, 16, 18, 22, 24, 28, 30, 36, 40, 44, 48, 52]
assert list(post_order_lst) == [5, 7, 11, 13, 17, 19, 23, 25, 29, 31, 37, 41, 45, 49, 53]
segment_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=True, call_post_segment=True,
row_wise=False, template_mapping=dict(np=np)
)
assert call_i[0] == 38
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [38]
assert list(pre_group_lst) == [2, 22]
assert list(post_group_lst) == [21, 37]
assert list(pre_segment_lst) == [3, 5, 7, 13, 19, 23, 25, 29, 31, 35]
assert list(post_segment_lst) == [4, 6, 12, 18, 20, 24, 28, 30, 34, 36]
assert list(order_lst) == [8, 10, 14, 16, 26, 32]
assert list(post_order_lst) == [9, 11, 15, 17, 27, 33]
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=False, call_post_segment=False,
row_wise=False, template_mapping=dict(np=np)
)
assert call_i[0] == 26
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [26]
assert list(pre_group_lst) == [2, 16]
assert list(post_group_lst) == [15, 25]
assert list(pre_segment_lst) == [3, 9, 17, 21]
assert list(post_segment_lst) == [8, 14, 20, 24]
assert list(order_lst) == [4, 6, 10, 12, 18, 22]
assert list(post_order_lst) == [5, 7, 11, 13, 19, 23]
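    # Same call-order checks with a flexible order function, which is called once more per
    # active segment (the extra call returns column -1 to terminate the loop).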
def test_func_calls_flexible(self):
@njit
def pre_sim_func_nb(c, call_i, pre_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_sim_lst.append(call_i[0])
return (call_i,)
@njit
def post_sim_func_nb(c, call_i, post_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_sim_lst.append(call_i[0])
return (call_i,)
@njit
def pre_group_func_nb(c, call_i, pre_group_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_group_lst.append(call_i[0])
return (call_i,)
@njit
def post_group_func_nb(c, call_i, post_group_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_group_lst.append(call_i[0])
return (call_i,)
@njit
def pre_segment_func_nb(c, call_i, pre_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_segment_lst.append(call_i[0])
return (call_i,)
@njit
def post_segment_func_nb(c, call_i, post_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_segment_lst.append(call_i[0])
return (call_i,)
@njit
def flex_order_func_nb(c, call_i, order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
order_lst.append(call_i[0])
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, NoOrder
return -1, NoOrder
@njit
def post_order_func_nb(c, call_i, post_order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_order_lst.append(call_i[0])
sub_arg = vbt.RepEval('np.prod([target_shape[0], target_shape[1]])')
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
row_wise=False, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 66
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [66]
assert list(pre_group_lst) == [2, 39]
assert list(post_group_lst) == [38, 65]
assert list(pre_segment_lst) == [3, 10, 17, 24, 31, 40, 45, 50, 55, 60]
assert list(post_segment_lst) == [9, 16, 23, 30, 37, 44, 49, 54, 59, 64]
assert list(order_lst) == [
4, 6, 8, 11, 13, 15, 18, 20, 22, 25, 27, 29, 32, 34,
36, 41, 43, 46, 48, 51, 53, 56, 58, 61, 63
]
assert list(post_order_lst) == [5, 7, 12, 14, 19, 21, 26, 28, 33, 35, 42, 47, 52, 57, 62]
segment_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=True, call_post_segment=True,
row_wise=False, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 42
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [42]
assert list(pre_group_lst) == [2, 24]
assert list(post_group_lst) == [23, 41]
assert list(pre_segment_lst) == [3, 5, 7, 14, 21, 25, 27, 32, 34, 39]
assert list(post_segment_lst) == [4, 6, 13, 20, 22, 26, 31, 33, 38, 40]
assert list(order_lst) == [8, 10, 12, 15, 17, 19, 28, 30, 35, 37]
assert list(post_order_lst) == [9, 11, 16, 18, 29, 36]
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=False, call_post_segment=False,
row_wise=False, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 30
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [30]
assert list(pre_group_lst) == [2, 18]
assert list(post_group_lst) == [17, 29]
assert list(pre_segment_lst) == [3, 10, 19, 24]
assert list(post_segment_lst) == [9, 16, 23, 28]
assert list(order_lst) == [4, 6, 8, 11, 13, 15, 20, 22, 25, 27]
assert list(post_order_lst) == [5, 7, 12, 14, 21, 26]
def test_func_calls_row_wise(self):
@njit
def pre_sim_func_nb(c, call_i, pre_sim_lst):
call_i[0] += 1
pre_sim_lst.append(call_i[0])
return (call_i,)
@njit
def post_sim_func_nb(c, call_i, post_sim_lst):
call_i[0] += 1
post_sim_lst.append(call_i[0])
return (call_i,)
@njit
def pre_row_func_nb(c, call_i, pre_row_lst):
call_i[0] += 1
pre_row_lst.append(call_i[0])
return (call_i,)
@njit
def post_row_func_nb(c, call_i, post_row_lst):
call_i[0] += 1
post_row_lst.append(call_i[0])
return (call_i,)
@njit
def pre_segment_func_nb(c, call_i, pre_segment_lst):
call_i[0] += 1
pre_segment_lst.append(call_i[0])
return (call_i,)
@njit
def post_segment_func_nb(c, call_i, post_segment_lst):
call_i[0] += 1
post_segment_lst.append(call_i[0])
return (call_i,)
@njit
def order_func_nb(c, call_i, order_lst):
call_i[0] += 1
order_lst.append(call_i[0])
return NoOrder
@njit
def post_order_func_nb(c, call_i, post_order_lst):
call_i[0] += 1
post_order_lst.append(call_i[0])
sub_arg = vbt.RepEval('np.prod([target_shape[0], target_shape[1]])')
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst,),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst,),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst,),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst,),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst,),
row_wise=True, template_mapping=dict(np=np)
)
assert call_i[0] == 62
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [62]
assert list(pre_row_lst) == [2, 14, 26, 38, 50]
assert list(post_row_lst) == [13, 25, 37, 49, 61]
assert list(pre_segment_lst) == [3, 9, 15, 21, 27, 33, 39, 45, 51, 57]
assert list(post_segment_lst) == [8, 12, 20, 24, 32, 36, 44, 48, 56, 60]
assert list(order_lst) == [4, 6, 10, 16, 18, 22, 28, 30, 34, 40, 42, 46, 52, 54, 58]
assert list(post_order_lst) == [5, 7, 11, 17, 19, 23, 29, 31, 35, 41, 43, 47, 53, 55, 59]
segment_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst,),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst,),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst,),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst,),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst,),
segment_mask=segment_mask, call_pre_segment=True, call_post_segment=True,
row_wise=True, template_mapping=dict(np=np)
)
assert call_i[0] == 44
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [44]
assert list(pre_row_lst) == [2, 8, 16, 26, 38]
assert list(post_row_lst) == [7, 15, 25, 37, 43]
assert list(pre_segment_lst) == [3, 5, 9, 11, 17, 23, 27, 33, 39, 41]
assert list(post_segment_lst) == [4, 6, 10, 14, 22, 24, 32, 36, 40, 42]
assert list(order_lst) == [12, 18, 20, 28, 30, 34]
assert list(post_order_lst) == [13, 19, 21, 29, 31, 35]
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst,),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst,),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst,),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst,),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst,),
segment_mask=segment_mask, call_pre_segment=False, call_post_segment=False,
row_wise=True, template_mapping=dict(np=np)
)
assert call_i[0] == 32
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [32]
assert list(pre_row_lst) == [2, 4, 10, 18, 30]
assert list(post_row_lst) == [3, 9, 17, 29, 31]
assert list(pre_segment_lst) == [5, 11, 19, 25]
assert list(post_segment_lst) == [8, 16, 24, 28]
assert list(order_lst) == [6, 12, 14, 20, 22, 26]
assert list(post_order_lst) == [7, 13, 15, 21, 23, 27]
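    # Row-wise flexible simulation: same call-order checks, with a templated sub_arg
    # (vbt.RepEval) forwarded to and validated by every callback.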
def test_func_calls_row_wise_flexible(self):
@njit
def pre_sim_func_nb(c, call_i, pre_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_sim_lst.append(call_i[0])
return (call_i,)
@njit
def post_sim_func_nb(c, call_i, post_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_sim_lst.append(call_i[0])
return (call_i,)
@njit
def pre_row_func_nb(c, call_i, pre_row_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_row_lst.append(call_i[0])
return (call_i,)
@njit
def post_row_func_nb(c, call_i, post_row_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_row_lst.append(call_i[0])
return (call_i,)
@njit
def pre_segment_func_nb(c, call_i, pre_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_segment_lst.append(call_i[0])
return (call_i,)
@njit
def post_segment_func_nb(c, call_i, post_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_segment_lst.append(call_i[0])
return (call_i,)
@njit
def flex_order_func_nb(c, call_i, order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
order_lst.append(call_i[0])
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, NoOrder
return -1, NoOrder
@njit
def post_order_func_nb(c, call_i, post_order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_order_lst.append(call_i[0])
sub_arg = vbt.RepEval('np.prod([target_shape[0], target_shape[1]])')
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst, sub_arg),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
row_wise=True, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 72
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [72]
assert list(pre_row_lst) == [2, 16, 30, 44, 58]
assert list(post_row_lst) == [15, 29, 43, 57, 71]
assert list(pre_segment_lst) == [3, 10, 17, 24, 31, 38, 45, 52, 59, 66]
assert list(post_segment_lst) == [9, 14, 23, 28, 37, 42, 51, 56, 65, 70]
assert list(order_lst) == [
4, 6, 8, 11, 13, 18, 20, 22, 25, 27, 32, 34, 36,
39, 41, 46, 48, 50, 53, 55, 60, 62, 64, 67, 69
]
assert list(post_order_lst) == [5, 7, 12, 19, 21, 26, 33, 35, 40, 47, 49, 54, 61, 63, 68]
segment_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst, sub_arg),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=True, call_post_segment=True,
row_wise=True, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 48
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [48]
assert list(pre_row_lst) == [2, 8, 17, 28, 42]
assert list(post_row_lst) == [7, 16, 27, 41, 47]
assert list(pre_segment_lst) == [3, 5, 9, 11, 18, 25, 29, 36, 43, 45]
assert list(post_segment_lst) == [4, 6, 10, 15, 24, 26, 35, 40, 44, 46]
assert list(order_lst) == [12, 14, 19, 21, 23, 30, 32, 34, 37, 39]
assert list(post_order_lst) == [13, 20, 22, 31, 33, 38]
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst, sub_arg),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=False, call_post_segment=False,
row_wise=True, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 36
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [36]
assert list(pre_row_lst) == [2, 4, 11, 20, 34]
assert list(post_row_lst) == [3, 10, 19, 33, 35]
assert list(pre_segment_lst) == [5, 12, 21, 28]
assert list(post_segment_lst) == [9, 18, 27, 32]
assert list(order_lst) == [6, 8, 13, 15, 17, 22, 24, 26, 29, 31]
assert list(post_order_lst) == [7, 14, 16, 23, 25, 30]
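    # The order record array is sized via max_orders; a capacity below the number
    # of generated orders must raise.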
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_max_orders(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
_ = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
row_wise=test_row_wise, flexible=test_flexible)
_ = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
row_wise=test_row_wise, max_orders=15, flexible=test_flexible)
with pytest.raises(Exception):
_ = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
row_wise=test_row_wise, max_orders=14, flexible=test_flexible)
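    # Same capacity check for log records via max_logs.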
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_max_logs(self, test_row_wise, test_flexible):
log_order_func = log_flex_order_func_nb if test_flexible else log_order_func_nb
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func, np.asarray(np.inf),
row_wise=test_row_wise, flexible=test_flexible)
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func, np.asarray(np.inf),
row_wise=test_row_wise, max_logs=15, flexible=test_flexible)
with pytest.raises(Exception):
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func, np.asarray(np.inf),
row_wise=test_row_wise, max_logs=14, flexible=test_flexible)
# ############# Portfolio ############# #
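# Shared fixtures: a close-price frame containing NaNs and three portfolio variants
# built from the same orders (see the trailing comments on each variant).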
price_na = pd.DataFrame({
'a': [np.nan, 2., 3., 4., 5.],
'b': [1., 2., np.nan, 4., 5.],
'c': [1., 2., 3., 4., np.nan]
}, index=price.index)
order_size_new = pd.Series([1., 0.1, -1., -0.1, 1.])
directions = ['longonly', 'shortonly', 'both']
group_by = pd.Index(['first', 'first', 'second'], name='group')
pf = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='amount', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=None,
init_cash=[100., 100., 100.], freq='1D', attach_call_seq=True
) # ungrouped: each column simulated independently
pf_grouped = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='amount', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=group_by, cash_sharing=False,
init_cash=[100., 100., 100.], freq='1D', attach_call_seq=True
) # grouped by 'group', without cash sharing
pf_shared = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='amount', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=group_by, cash_sharing=True,
init_cash=[200., 100.], freq='1D', attach_call_seq=True
) # grouped by 'group', with cash shared within each group
class TestPortfolio:
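    # Portfolios, including modified metrics/subplots, must round-trip through
    # dumps/loads and save/load.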
def test_config(self, tmp_path):
pf2 = pf.copy()
pf2._metrics = pf2._metrics.copy()
pf2.metrics['hello'] = 'world'
pf2._subplots = pf2.subplots.copy()
pf2.subplots['hello'] = 'world'
assert vbt.Portfolio.loads(pf2['a'].dumps()) == pf2['a']
assert vbt.Portfolio.loads(pf2.dumps()) == pf2
pf2.save(tmp_path / 'pf')
assert vbt.Portfolio.load(tmp_path / 'pf') == pf2
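    # The array wrapper must expose the original index/columns and the expected
    # grouping flags for each portfolio variant.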
def test_wrapper(self):
pd.testing.assert_index_equal(
pf.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
price_na.columns
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.grouper.group_by is None
assert pf.wrapper.grouper.allow_enable
assert pf.wrapper.grouper.allow_disable
assert pf.wrapper.grouper.allow_modify
pd.testing.assert_index_equal(
pf_grouped.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
pf_grouped.wrapper.columns,
price_na.columns
)
assert pf_grouped.wrapper.ndim == 2
pd.testing.assert_index_equal(
pf_grouped.wrapper.grouper.group_by,
group_by
)
assert pf_grouped.wrapper.grouper.allow_enable
assert pf_grouped.wrapper.grouper.allow_disable
assert pf_grouped.wrapper.grouper.allow_modify
pd.testing.assert_index_equal(
pf_shared.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
pf_shared.wrapper.columns,
price_na.columns
)
assert pf_shared.wrapper.ndim == 2
pd.testing.assert_index_equal(
pf_shared.wrapper.grouper.group_by,
group_by
)
assert not pf_shared.wrapper.grouper.allow_enable
assert pf_shared.wrapper.grouper.allow_disable
assert not pf_shared.wrapper.grouper.allow_modify
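    # Selecting a column or group on a portfolio must be equivalent to selecting it
    # on the wrapper, orders, logs, init_cash and call_seq individually.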
def test_indexing(self):
assert pf['a'].wrapper == pf.wrapper['a']
assert pf['a'].orders == pf.orders['a']
assert pf['a'].logs == pf.logs['a']
assert pf['a'].init_cash == pf.init_cash['a']
pd.testing.assert_series_equal(pf['a'].call_seq, pf.call_seq['a'])
assert pf['c'].wrapper == pf.wrapper['c']
assert pf['c'].orders == pf.orders['c']
assert pf['c'].logs == pf.logs['c']
assert pf['c'].init_cash == pf.init_cash['c']
pd.testing.assert_series_equal(pf['c'].call_seq, pf.call_seq['c'])
assert pf[['c']].wrapper == pf.wrapper[['c']]
assert pf[['c']].orders == pf.orders[['c']]
assert pf[['c']].logs == pf.logs[['c']]
pd.testing.assert_series_equal(pf[['c']].init_cash, pf.init_cash[['c']])
pd.testing.assert_frame_equal(pf[['c']].call_seq, pf.call_seq[['c']])
assert pf_grouped['first'].wrapper == pf_grouped.wrapper['first']
assert pf_grouped['first'].orders == pf_grouped.orders['first']
assert pf_grouped['first'].logs == pf_grouped.logs['first']
assert pf_grouped['first'].init_cash == pf_grouped.init_cash['first']
pd.testing.assert_frame_equal(pf_grouped['first'].call_seq, pf_grouped.call_seq[['a', 'b']])
assert pf_grouped[['first']].wrapper == pf_grouped.wrapper[['first']]
assert pf_grouped[['first']].orders == pf_grouped.orders[['first']]
assert pf_grouped[['first']].logs == pf_grouped.logs[['first']]
pd.testing.assert_series_equal(
pf_grouped[['first']].init_cash,
pf_grouped.init_cash[['first']])
pd.testing.assert_frame_equal(pf_grouped[['first']].call_seq, pf_grouped.call_seq[['a', 'b']])
assert pf_grouped['second'].wrapper == pf_grouped.wrapper['second']
assert pf_grouped['second'].orders == pf_grouped.orders['second']
assert pf_grouped['second'].logs == pf_grouped.logs['second']
assert pf_grouped['second'].init_cash == pf_grouped.init_cash['second']
pd.testing.assert_series_equal(pf_grouped['second'].call_seq, pf_grouped.call_seq['c'])
assert pf_grouped[['second']].wrapper == pf_grouped.wrapper[['second']]
assert pf_grouped[['second']].orders == pf_grouped.orders[['second']]
assert pf_grouped[['second']].logs == pf_grouped.logs[['second']]
pd.testing.assert_series_equal(
pf_grouped[['second']].init_cash,
pf_grouped.init_cash[['second']])
pd.testing.assert_frame_equal(pf_grouped[['second']].call_seq, pf_grouped.call_seq[['c']])
assert pf_shared['first'].wrapper == pf_shared.wrapper['first']
assert pf_shared['first'].orders == pf_shared.orders['first']
assert pf_shared['first'].logs == pf_shared.logs['first']
assert pf_shared['first'].init_cash == pf_shared.init_cash['first']
pd.testing.assert_frame_equal(pf_shared['first'].call_seq, pf_shared.call_seq[['a', 'b']])
assert pf_shared[['first']].wrapper == pf_shared.wrapper[['first']]
assert pf_shared[['first']].orders == pf_shared.orders[['first']]
assert pf_shared[['first']].logs == pf_shared.logs[['first']]
pd.testing.assert_series_equal(
pf_shared[['first']].init_cash,
pf_shared.init_cash[['first']])
pd.testing.assert_frame_equal(pf_shared[['first']].call_seq, pf_shared.call_seq[['a', 'b']])
assert pf_shared['second'].wrapper == pf_shared.wrapper['second']
assert pf_shared['second'].orders == pf_shared.orders['second']
assert pf_shared['second'].logs == pf_shared.logs['second']
assert pf_shared['second'].init_cash == pf_shared.init_cash['second']
pd.testing.assert_series_equal(pf_shared['second'].call_seq, pf_shared.call_seq['c'])
assert pf_shared[['second']].wrapper == pf_shared.wrapper[['second']]
assert pf_shared[['second']].orders == pf_shared.orders[['second']]
assert pf_shared[['second']].logs == pf_shared.logs[['second']]
pd.testing.assert_series_equal(
pf_shared[['second']].init_cash,
pf_shared.init_cash[['second']])
pd.testing.assert_frame_equal(pf_shared[['second']].call_seq, pf_shared.call_seq[['c']])
def test_regroup(self):
assert pf.regroup(None) == pf
assert pf.regroup(False) == pf
assert pf.regroup(group_by) != pf
pd.testing.assert_index_equal(pf.regroup(group_by).wrapper.grouper.group_by, group_by)
assert pf_grouped.regroup(None) == pf_grouped
assert pf_grouped.regroup(False) != pf_grouped
assert pf_grouped.regroup(False).wrapper.grouper.group_by is None
assert pf_grouped.regroup(group_by) == pf_grouped
assert pf_shared.regroup(None) == pf_shared
with pytest.raises(Exception):
_ = pf_shared.regroup(False)
assert pf_shared.regroup(group_by) == pf_shared
def test_cash_sharing(self):
assert not pf.cash_sharing
assert not pf_grouped.cash_sharing
assert pf_shared.cash_sharing
def test_call_seq(self):
pd.testing.assert_frame_equal(
pf.call_seq,
pd.DataFrame(
np.array([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_grouped.call_seq,
pd.DataFrame(
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_shared.call_seq,
pd.DataFrame(
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
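    # Order records and their counts, per column and per group.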
def test_orders(self):
record_arrays_close(
pf.orders.values,
np.array([
(0, 0, 1, 0.1, 2.02, 0.10202, 0), (1, 0, 2, 0.1, 2.9699999999999998, 0.10297, 1),
(2, 0, 4, 1.0, 5.05, 0.1505, 0), (3, 1, 0, 1.0, 0.99, 0.10990000000000001, 1),
(4, 1, 1, 0.1, 1.98, 0.10198, 1), (5, 1, 3, 0.1, 4.04, 0.10404000000000001, 0),
(6, 1, 4, 1.0, 4.95, 0.14950000000000002, 1), (7, 2, 0, 1.0, 1.01, 0.1101, 0),
(8, 2, 1, 0.1, 2.02, 0.10202, 0), (9, 2, 2, 1.0, 2.9699999999999998, 0.1297, 1),
(10, 2, 3, 0.1, 3.96, 0.10396000000000001, 1)
], dtype=order_dt)
)
result = pd.Series(
np.array([3, 4, 4]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.orders.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_orders(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_orders(group_by=False).count(),
result
)
result = pd.Series(
np.array([7, 4]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_orders(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.orders.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.orders.count(),
result
)
def test_logs(self):
record_arrays_close(
pf.logs.values,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, np.nan, 0, 0, 0.01,
0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.0, 0.0, 0.0,
100.0, np.nan, 100.0, np.nan, np.nan, np.nan, -1, 1, 1, -1),
(1, 0, 0, 1, 100.0, 0.0, 0.0, 100.0, 2.0, 100.0, 0.1, 2.0, 0, 0, 0.01,
0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 99.69598, 0.1,
0.0, 99.69598, 2.0, 100.0, 0.1, 2.02, 0.10202, 0, 0, -1, 0),
(2, 0, 0, 2, 99.69598, 0.1, 0.0, 99.69598, 3.0, 99.99598, -1.0, 3.0,
0, 0, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 99.89001,
0.0, 0.0, 99.89001, 3.0, 99.99598, 0.1, 2.9699999999999998, 0.10297, 1, 0, -1, 1),
(3, 0, 0, 3, 99.89001, 0.0, 0.0, 99.89001, 4.0, 99.89001, -0.1, 4.0,
0, 0, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
99.89001, 0.0, 0.0, 99.89001, 4.0, 99.89001, np.nan, np.nan, np.nan, -1, 2, 8, -1),
(4, 0, 0, 4, 99.89001, 0.0, 0.0, 99.89001, 5.0, 99.89001, 1.0, 5.0, 0,
0, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 94.68951,
1.0, 0.0, 94.68951, 5.0, 99.89001, 1.0, 5.05, 0.1505, 0, 0, -1, 2),
(5, 1, 1, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, 1.0, 1.0, 0, 1, 0.01,
0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.8801, -1.0,
0.99, 98.9001, 1.0, 100.0, 1.0, 0.99, 0.10990000000000001, 1, 0, -1, 3),
(6, 1, 1, 1, 100.8801, -1.0, 0.99, 98.9001, 2.0, 98.8801, 0.1, 2.0, 0, 1,
0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.97612,
-1.1, 1.188, 98.60011999999999, 2.0, 98.8801, 0.1, 1.98, 0.10198, 1, 0, -1, 4),
(7, 1, 1, 2, 100.97612, -1.1, 1.188, 98.60011999999999, 2.0, 98.77611999999999,
-1.0, np.nan, 0, 1, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.97612,
-1.1, 1.188, 98.60011999999999, 2.0, 98.77611999999999, np.nan, np.nan, np.nan, -1, 1, 1, -1),
(8, 1, 1, 3, 100.97612, -1.1, 1.188, 98.60011999999999, 4.0, 96.57611999999999,
-0.1, 4.0, 0, 1, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
100.46808, -1.0, 1.08, 98.30807999999999, 4.0, 96.57611999999999, 0.1, 4.04,
0.10404000000000001, 0, 0, -1, 5),
(9, 1, 1, 4, 100.46808, -1.0, 1.08, 98.30807999999999, 5.0, 95.46808, 1.0, 5.0, 0, 1,
0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 105.26858, -2.0, 6.03,
93.20857999999998, 5.0, 95.46808, 1.0, 4.95, 0.14950000000000002, 1, 0, -1, 6),
(10, 2, 2, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, 1.0, 1.0, 0, 2, 0.01, 0.1,
0.01, 1e-08, np.inf, 0.0, False, True, False, True, 98.8799, 1.0, 0.0, 98.8799,
1.0, 100.0, 1.0, 1.01, 0.1101, 0, 0, -1, 7),
(11, 2, 2, 1, 98.8799, 1.0, 0.0, 98.8799, 2.0, 100.8799, 0.1, 2.0, 0, 2, 0.01,
0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 98.57588000000001, 1.1,
0.0, 98.57588000000001, 2.0, 100.8799, 0.1, 2.02, 0.10202, 0, 0, -1, 8),
(12, 2, 2, 2, 98.57588000000001, 1.1, 0.0, 98.57588000000001, 3.0, 101.87588000000001,
-1.0, 3.0, 0, 2, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
101.41618000000001, 0.10000000000000009, 0.0, 101.41618000000001, 3.0,
101.87588000000001, 1.0, 2.9699999999999998, 0.1297, 1, 0, -1, 9),
(13, 2, 2, 3, 101.41618000000001, 0.10000000000000009, 0.0, 101.41618000000001,
4.0, 101.81618000000002, -0.1, 4.0, 0, 2, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0,
False, True, False, True, 101.70822000000001, 0.0, 0.0, 101.70822000000001,
4.0, 101.81618000000002, 0.1, 3.96, 0.10396000000000001, 1, 0, -1, 10),
(14, 2, 2, 4, 101.70822000000001, 0.0, 0.0, 101.70822000000001, 4.0, 101.70822000000001,
1.0, np.nan, 0, 2, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
101.70822000000001, 0.0, 0.0, 101.70822000000001, 4.0, 101.70822000000001,
np.nan, np.nan, np.nan, -1, 1, 1, -1)
], dtype=log_dt)
)
result = pd.Series(
np.array([5, 5, 5]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.logs.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_logs(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_logs(group_by=False).count(),
result
)
result = pd.Series(
np.array([10, 5]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_logs(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.logs.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.logs.count(),
result
)
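    # Entry trade records and their counts, per column and per group.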
def test_entry_trades(self):
record_arrays_close(
pf.entry_trades.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998, 0.10297,
-0.10999000000000003, -0.5445049504950497, 0, 1, 0),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0, -0.20049999999999982, -0.03970297029702967, 0, 0, 1),
(2, 1, 1.0, 0, 0.99, 0.10990000000000001, 4, 4.954285714285714,
0.049542857142857145, -4.12372857142857, -4.165382395382394, 1, 0, 2),
(3, 1, 0.1, 1, 1.98, 0.10198, 4, 4.954285714285714, 0.004954285714285714,
-0.4043628571428571, -2.0422366522366517, 1, 0, 2),
(4, 1, 1.0, 4, 4.95, 0.14950000000000002, 4, 4.954285714285714,
0.049542857142857145, -0.20332857142857072, -0.04107647907647893, 1, 0, 2),
(5, 2, 1.0, 0, 1.01, 0.1101, 3, 3.0599999999999996, 0.21241818181818184,
1.727481818181818, 1.71037803780378, 0, 1, 3),
(6, 2, 0.1, 1, 2.02, 0.10202, 3, 3.0599999999999996, 0.021241818181818185,
-0.019261818181818203, -0.09535553555355546, 0, 1, 3)
], dtype=trade_dt)
)
result = pd.Series(
np.array([2, 3, 2]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.entry_trades.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_entry_trades(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_entry_trades(group_by=False).count(),
result
)
result = pd.Series(
np.array([5, 2]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_entry_trades(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.entry_trades.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.entry_trades.count(),
result
)
def test_exit_trades(self):
record_arrays_close(
pf.exit_trades.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998, 0.10297,
-0.10999000000000003, -0.5445049504950497, 0, 1, 0),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0,
-0.20049999999999982, -0.03970297029702967, 0, 0, 1),
(2, 1, 0.1, 0, 1.0799999999999998, 0.019261818181818182,
3, 4.04, 0.10404000000000001, -0.4193018181818182, -3.882424242424243, 1, 1, 2),
(3, 1, 2.0, 0, 3.015, 0.3421181818181819, 4, 5.0, 0.0,
-4.312118181818182, -0.7151108095884214, 1, 0, 2),
(4, 2, 1.0, 0, 1.1018181818181818, 0.19283636363636364, 2,
2.9699999999999998, 0.1297, 1.5456454545454543, 1.4028135313531351, 0, 1, 3),
(5, 2, 0.10000000000000009, 0, 1.1018181818181818, 0.019283636363636378,
3, 3.96, 0.10396000000000001, 0.1625745454545457, 1.4755115511551162, 0, 1, 3)
], dtype=trade_dt)
)
result = pd.Series(
np.array([2, 2, 2]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.exit_trades.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_exit_trades(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_exit_trades(group_by=False).count(),
result
)
result = pd.Series(
np.array([4, 2]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_exit_trades(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.exit_trades.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.exit_trades.count(),
result
)
def test_positions(self):
record_arrays_close(
pf.positions.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998,
0.10297, -0.10999000000000003, -0.5445049504950497, 0, 1, 0),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0,
-0.20049999999999982, -0.03970297029702967, 0, 0, 1),
(2, 1, 2.1, 0, 2.9228571428571426, 0.36138000000000003, 4, 4.954285714285714,
0.10404000000000001, -4.731420000000001, -0.7708406647116326, 1, 0, 2),
(3, 2, 1.1, 0, 1.1018181818181818, 0.21212000000000003, 3,
3.06, 0.23366000000000003, 1.7082200000000003, 1.4094224422442245, 0, 1, 3)
], dtype=trade_dt)
)
result = pd.Series(
np.array([2, 1, 1]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.positions.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_positions(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_positions(group_by=False).count(),
result
)
result = pd.Series(
np.array([3, 1]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_positions(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.positions.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.positions.count(),
result
)
def test_drawdowns(self):
record_arrays_close(
pf.drawdowns.values,
np.array([
(0, 0, 0, 1, 4, 4, 100.0, 99.68951, 99.68951, 0),
(1, 1, 0, 1, 4, 4, 99.8801, 95.26858, 95.26858, 0),
(2, 2, 2, 3, 3, 4, 101.71618000000001, 101.70822000000001, 101.70822000000001, 0)
], dtype=drawdown_dt)
)
result = pd.Series(
np.array([1, 1, 1]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.drawdowns.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_drawdowns(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_drawdowns(group_by=False).count(),
result
)
result = pd.Series(
np.array([1, 1]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_drawdowns(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.drawdowns.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.drawdowns.count(),
result
)
def test_close(self):
pd.testing.assert_frame_equal(pf.close, price_na)
pd.testing.assert_frame_equal(pf_grouped.close, price_na)
pd.testing.assert_frame_equal(pf_shared.close, price_na)
def test_get_filled_close(self):
pd.testing.assert_frame_equal(
pf.get_filled_close(),
price_na.ffill().bfill()
)
def test_asset_flow(self):
pd.testing.assert_frame_equal(
pf.asset_flow(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.1, 0., 0.1],
[-0.1, 0., -1.],
[0., 0., -0.1],
[1., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.asset_flow(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 0.1, 0.],
[0., 0., 0.],
[0., -0.1, 0.],
[0., 1., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.1, -0.1, 0.1],
[-0.1, 0., -1.],
[0., 0.1, -0.1],
[1., -1., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.asset_flow(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_flow(),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_flow(),
result
)
def test_assets(self):
pd.testing.assert_frame_equal(
pf.assets(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.1, 0., 1.1],
[0., 0., 0.1],
[0., 0., 0.],
[1., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.assets(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 1.1, 0.],
[0., 1.1, 0.],
[0., 1., 0.],
[0., 2., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.1, -1.1, 1.1],
[0., -1.1, 0.1],
[0., -1., 0.],
[1., -2., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.assets(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.assets(),
result
)
pd.testing.assert_frame_equal(
pf_shared.assets(),
result
)
def test_position_mask(self):
pd.testing.assert_frame_equal(
pf.position_mask(direction='longonly'),
pd.DataFrame(
np.array([
[False, False, True],
[True, False, True],
[False, False, True],
[False, False, False],
[True, False, False]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.position_mask(direction='shortonly'),
pd.DataFrame(
np.array([
[False, True, False],
[False, True, False],
[False, True, False],
[False, True, False],
[False, True, False]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[False, True, True],
[True, True, True],
[False, True, True],
[False, True, False],
[True, True, False]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.position_mask(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.position_mask(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.position_mask(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[True, True],
[True, True],
[True, True],
[True, False],
[True, False]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.position_mask(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.position_mask(),
result
)
pd.testing.assert_frame_equal(
pf_shared.position_mask(),
result
)
def test_position_coverage(self):
pd.testing.assert_series_equal(
pf.position_coverage(direction='longonly'),
pd.Series(np.array([0.4, 0., 0.6]), index=price_na.columns).rename('position_coverage')
)
pd.testing.assert_series_equal(
pf.position_coverage(direction='shortonly'),
pd.Series(np.array([0., 1., 0.]), index=price_na.columns).rename('position_coverage')
)
result = pd.Series(np.array([0.4, 1., 0.6]), index=price_na.columns).rename('position_coverage')
pd.testing.assert_series_equal(
pf.position_coverage(),
result
)
pd.testing.assert_series_equal(
pf_grouped.position_coverage(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.position_coverage(group_by=False),
result
)
result = pd.Series(
np.array([0.7, 0.6]),
pd.Index(['first', 'second'], dtype='object', name='group')
).rename('position_coverage')
pd.testing.assert_series_equal(
pf.position_coverage(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.position_coverage(),
result
)
pd.testing.assert_series_equal(
pf_shared.position_coverage(),
result
)
def test_cash_flow(self):
pd.testing.assert_frame_equal(
pf.cash_flow(free=True),
pd.DataFrame(
np.array([
[0.0, -1.0998999999999999, -1.1201],
[-0.30402, -0.2999800000000002, -0.3040200000000002],
[0.19402999999999998, 0.0, 2.8402999999999996],
[0.0, -0.2920400000000002, 0.29204000000000035],
[-5.2005, -5.0995, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., 0.8801, -1.1201],
[-0.30402, 0.09602, -0.30402],
[0.19403, 0., 2.8403],
[0., -0.50804, 0.29204],
[-5.2005, 4.8005, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.cash_flow(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.cash_flow(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.cash_flow(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[0.8801, -1.1201],
[-0.208, -0.30402],
[0.19403, 2.8403],
[-0.50804, 0.29204],
[-0.4, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.cash_flow(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.cash_flow(),
result
)
pd.testing.assert_frame_equal(
pf_shared.cash_flow(),
result
)
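    # Initial cash per column and per group, plus inference via InitCashMode.Auto
    # and InitCashMode.AutoAlign.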
def test_init_cash(self):
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series(np.array([100., 100., 100.]), index=price_na.columns).rename('init_cash')
)
pd.testing.assert_series_equal(
pf_grouped.get_init_cash(group_by=False),
pd.Series(np.array([100., 100., 100.]), index=price_na.columns).rename('init_cash')
)
pd.testing.assert_series_equal(
pf_shared.get_init_cash(group_by=False),
pd.Series(np.array([200., 200., 100.]), index=price_na.columns).rename('init_cash')
)
result = pd.Series(
np.array([200., 100.]),
pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
pd.testing.assert_series_equal(
pf.get_init_cash(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.init_cash,
result
)
pd.testing.assert_series_equal(
pf_shared.init_cash,
result
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=None).init_cash,
pd.Series(
np.array([14000., 12000., 10000.]),
index=price_na.columns
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=group_by).init_cash,
pd.Series(
np.array([26000.0, 10000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=group_by, cash_sharing=True).init_cash,
pd.Series(
np.array([26000.0, 10000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=None).init_cash,
pd.Series(
np.array([14000., 14000., 14000.]),
index=price_na.columns
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=group_by).init_cash,
pd.Series(
np.array([26000.0, 26000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=group_by, cash_sharing=True).init_cash,
pd.Series(
np.array([26000.0, 26000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
def test_cash(self):
pd.testing.assert_frame_equal(
pf.cash(free=True),
pd.DataFrame(
np.array([
[100.0, 98.9001, 98.8799],
[99.69598, 98.60011999999999, 98.57588000000001],
[99.89001, 98.60011999999999, 101.41618000000001],
[99.89001, 98.30807999999999, 101.70822000000001],
[94.68951, 93.20857999999998, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[100., 100.8801, 98.8799],
[99.69598, 100.97612, 98.57588],
[99.89001, 100.97612, 101.41618],
[99.89001, 100.46808, 101.70822],
[94.68951, 105.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.cash(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.cash(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.cash(group_by=False),
pd.DataFrame(
np.array([
[200., 200.8801, 98.8799],
[199.69598, 200.97612, 98.57588],
[199.89001, 200.97612, 101.41618],
[199.89001, 200.46808, 101.70822],
[194.68951, 205.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_shared.cash(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[200.8801, 200.8801, 98.8799],
[200.6721, 200.97612, 98.57588000000001],
[200.86613, 200.6721, 101.41618000000001],
[200.35809, 200.35809, 101.70822000000001],
[199.95809, 205.15859, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[200.8801, 98.8799],
[200.6721, 98.57588],
[200.86613, 101.41618],
[200.35809, 101.70822],
[199.95809, 101.70822]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.cash(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.cash(),
result
)
pd.testing.assert_frame_equal(
pf_shared.cash(),
result
)
def test_asset_value(self):
pd.testing.assert_frame_equal(
pf.asset_value(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.2, 0., 2.2],
[0., 0., 0.3],
[0., 0., 0.],
[5., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.asset_value(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 2.2, 0.],
[0., 2.2, 0.],
[0., 4., 0.],
[0., 10., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.2, -2.2, 2.2],
[0., -2.2, 0.3],
[0., -4., 0.],
[5., -10., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.asset_value(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_value(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_value(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[-1., 1.],
[-2., 2.2],
[-2.2, 0.3],
[-4., 0.],
[-5., 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.asset_value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_value(),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_value(),
result
)
def test_gross_exposure(self):
pd.testing.assert_frame_equal(
pf.gross_exposure(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 0.01001202],
[0.00200208, 0., 0.02183062],
[0., 0., 0.00294938],
[0., 0., 0.],
[0.05015573, 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.gross_exposure(direction='shortonly'),
pd.DataFrame(
np.array([
[0.0, 0.01000999998999, 0.0],
[0.0, 0.021825370842812494, 0.0],
[0.0, 0.021825370842812494, 0.0],
[0.0, 0.03909759620159034, 0.0],
[0.0, 0.09689116931945001, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0.0, -0.010214494162927312, 0.010012024441354066],
[0.00200208256628545, -0.022821548354919067, 0.021830620581035857],
[0.0, -0.022821548354919067, 0.002949383274126105],
[0.0, -0.04241418126633477, 0.0],
[0.050155728521486365, -0.12017991413866216, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.gross_exposure(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.gross_exposure(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.gross_exposure(group_by=False),
pd.DataFrame(
np.array([
[0.0, -0.00505305454620791, 0.010012024441354066],
[0.0010005203706447724, -0.011201622483733716, 0.021830620581035857],
[0.0, -0.011201622483733716, 0.002949383274126105],
[0.0, -0.020585865497718882, 0.0],
[0.025038871596209537, -0.0545825965137659, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-0.00505305454620791, 0.010012024441354066],
[-0.010188689433972452, 0.021830620581035857],
[-0.0112078992458765, 0.002949383274126105],
[-0.02059752492931316, 0.0],
[-0.027337628293439265, 0.0]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.gross_exposure(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.gross_exposure(),
result
)
pd.testing.assert_frame_equal(
pf_shared.gross_exposure(),
result
)
def test_net_exposure(self):
result = pd.DataFrame(
np.array([
[0.0, -0.01000999998999, 0.010012024441354066],
[0.00200208256628545, -0.021825370842812494, 0.021830620581035857],
[0.0, -0.021825370842812494, 0.002949383274126105],
[0.0, -0.03909759620159034, 0.0],
[0.050155728521486365, -0.09689116931945001, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.net_exposure(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.net_exposure(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.net_exposure(group_by=False),
pd.DataFrame(
np.array([
[0.0, -0.005002498748124688, 0.010012024441354066],
[0.0010005203706447724, -0.010956168751293576, 0.021830620581035857],
[0.0, -0.010956168751293576, 0.002949383274126105],
[0.0, -0.019771825228137207, 0.0],
[0.025038871596209537, -0.049210520540028384, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-0.005002498748124688, 0.010012024441354066],
[-0.009965205542937988, 0.021830620581035857],
[-0.010962173376438594, 0.002949383274126105],
[-0.019782580537729116, 0.0],
[-0.0246106361476199, 0.0]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.net_exposure(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.net_exposure(),
result
)
pd.testing.assert_frame_equal(
pf_shared.net_exposure(),
result
)
def test_value(self):
result = pd.DataFrame(
np.array([
[100., 99.8801, 99.8799],
[99.89598, 98.77612, 100.77588],
[99.89001, 98.77612, 101.71618],
[99.89001, 96.46808, 101.70822],
[99.68951, 95.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.value(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.value(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.value(group_by=False),
pd.DataFrame(
np.array([
[200., 199.8801, 99.8799],
[199.89598, 198.77612, 100.77588],
[199.89001, 198.77612, 101.71618],
[199.89001, 196.46808, 101.70822],
[199.68951, 195.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_shared.value(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[199.8801, 199.8801, 99.8799],
[198.6721, 198.77612000000002, 100.77588000000002],
[198.66613, 198.6721, 101.71618000000001],
[196.35809, 196.35809, 101.70822000000001],
[194.95809, 195.15859, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[199.8801, 99.8799],
[198.6721, 100.77588],
[198.66613, 101.71618],
[196.35809, 101.70822],
[194.95809, 101.70822]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.value(),
result
)
pd.testing.assert_frame_equal(
pf_shared.value(),
result
)
def test_total_profit(self):
result = pd.Series(
np.array([-0.31049, -4.73142, 1.70822]),
index=price_na.columns
).rename('total_profit')
pd.testing.assert_series_equal(
pf.total_profit(),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_profit(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.total_profit(group_by=False),
result
)
result = pd.Series(
np.array([-5.04191, 1.70822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_profit')
pd.testing.assert_series_equal(
pf.total_profit(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_profit(),
result
)
pd.testing.assert_series_equal(
pf_shared.total_profit(),
result
)
def test_final_value(self):
result = pd.Series(
np.array([99.68951, 95.26858, 101.70822]),
index=price_na.columns
).rename('final_value')
pd.testing.assert_series_equal(
pf.final_value(),
result
)
pd.testing.assert_series_equal(
pf_grouped.final_value(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.final_value(group_by=False),
pd.Series(
np.array([199.68951, 195.26858, 101.70822]),
index=price_na.columns
).rename('final_value')
)
result = pd.Series(
np.array([194.95809, 101.70822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('final_value')
pd.testing.assert_series_equal(
pf.final_value(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.final_value(),
result
)
pd.testing.assert_series_equal(
pf_shared.final_value(),
result
)
def test_total_return(self):
result = pd.Series(
np.array([-0.0031049, -0.0473142, 0.0170822]),
index=price_na.columns
).rename('total_return')
pd.testing.assert_series_equal(
pf.total_return(),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_return(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.total_return(group_by=False),
pd.Series(
np.array([-0.00155245, -0.0236571, 0.0170822]),
index=price_na.columns
).rename('total_return')
)
result = pd.Series(
np.array([-0.02520955, 0.0170822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_return')
pd.testing.assert_series_equal(
pf.total_return(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_return(),
result
)
pd.testing.assert_series_equal(
pf_shared.total_return(),
result
)
def test_returns(self):
result = pd.DataFrame(
np.array([
[0.00000000e+00, -1.19900000e-03, -1.20100000e-03],
[-1.04020000e-03, -1.10530526e-02, 8.97057366e-03],
[-5.97621646e-05, 0.0, 9.33060570e-03],
[0.00000000e+00, -0.023366376407576966, -7.82569695e-05],
[-2.00720773e-03, -1.24341648e-02, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.returns(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.returns(group_by=False),
pd.DataFrame(
np.array([
[0.00000000e+00, -5.99500000e-04, -1.20100000e-03],
[-5.20100000e-04, -5.52321117e-03, 8.97057366e-03],
[-2.98655331e-05, 0.0, 9.33060570e-03],
[0.00000000e+00, -0.011611253907159497, -7.82569695e-05],
[-1.00305163e-03, -6.10531746e-03, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_shared.returns(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[0.0, -0.0005995000000000062, -1.20100000e-03],
[-0.0005233022960706736, -0.005523211165093367, 8.97057366e-03],
[-3.0049513746473233e-05, 0.0, 9.33060570e-03],
[0.0, -0.011617682390048093, -7.82569695e-05],
[-0.0010273695869600474, -0.0061087373583639994, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-5.99500000e-04, -1.20100000e-03],
[-6.04362315e-03, 8.97057366e-03],
[-3.0049513746473233e-05, 9.33060570e-03],
[-0.011617682390048093, -7.82569695e-05],
[-7.12983101e-03, 0.00000000e+00]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.returns(),
result
)
pd.testing.assert_frame_equal(
pf_shared.returns(),
result
)
def test_asset_returns(self):
result = pd.DataFrame(
np.array([
[0., -np.inf, -np.inf],
[-np.inf, -1.10398, 0.89598],
[-0.02985, 0.0, 0.42740909],
[0., -1.0491090909090908, -0.02653333],
[-np.inf, -0.299875, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.asset_returns(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_returns(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[-np.inf, -np.inf],
[-1.208, 0.89598],
[-0.0029850000000000154, 0.42740909],
[-1.0491090909090908, -0.02653333],
[-0.35, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.asset_returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_returns(),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_returns(),
result
)
def test_benchmark_value(self):
result = pd.DataFrame(
np.array([
[100., 100., 100.],
[100., 200., 200.],
[150., 200., 300.],
[200., 400., 400.],
[250., 500., 400.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.benchmark_value(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.benchmark_value(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.benchmark_value(group_by=False),
pd.DataFrame(
np.array([
[200., 200., 100.],
[200., 400., 200.],
[300., 400., 300.],
[400., 800., 400.],
[500., 1000., 400.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[200., 100.],
[300., 200.],
[350., 300.],
[600., 400.],
[750., 400.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.benchmark_value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.benchmark_value(),
result
)
pd.testing.assert_frame_equal(
pf_shared.benchmark_value(),
result
)
def test_benchmark_returns(self):
result = pd.DataFrame(
np.array([
[0., 0., 0.],
[0., 1., 1.],
[0.5, 0., 0.5],
[0.33333333, 1., 0.33333333],
[0.25, 0.25, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.benchmark_returns(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.benchmark_returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.benchmark_returns(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[0., 0.],
[0.5, 1.],
[0.16666667, 0.5],
[0.71428571, 0.33333333],
[0.25, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.benchmark_returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.benchmark_returns(),
result
)
pd.testing.assert_frame_equal(
pf_shared.benchmark_returns(),
result
)
def test_total_benchmark_return(self):
result = pd.Series(
np.array([1.5, 4., 3.]),
index=price_na.columns
).rename('total_benchmark_return')
pd.testing.assert_series_equal(
pf.total_benchmark_return(),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_benchmark_return(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.total_benchmark_return(group_by=False),
result
)
result = pd.Series(
np.array([2.75, 3.]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_benchmark_return')
pd.testing.assert_series_equal(
pf.total_benchmark_return(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_benchmark_return(),
result
)
pd.testing.assert_series_equal(
pf_shared.total_benchmark_return(),
result
)
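    # Returns-based metrics on the portfolio: cumulative returns, Sharpe ratio
    # (including risk_free and year_freq overrides) and information ratio.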
def test_return_method(self):
pd.testing.assert_frame_equal(
pf_shared.cumulative_returns(),
pd.DataFrame(
np.array([
[-0.000599499999999975, -0.0012009999999998966],
[-0.006639499999999909, 0.007758800000000177],
[-0.006669349999999907, 0.017161800000000005],
[-0.01820955000000002, 0.017082199999999936],
[-0.025209550000000136, 0.017082199999999936]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
)
pd.testing.assert_frame_equal(
pf_shared.cumulative_returns(group_by=False),
pd.DataFrame(
np.array([
[0.0, -0.000599499999999975, -0.0012009999999998966],
[-0.0005201000000001343, -0.006119399999999886, 0.007758800000000177],
[-0.0005499500000001323, -0.006119399999999886, 0.017161800000000005],
[-0.0005499500000001323, -0.017659599999999886, 0.017082199999999936],
[-0.0015524500000001495, -0.023657099999999875, 0.017082199999999936]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_series_equal(
pf_shared.sharpe_ratio(),
pd.Series(
np.array([-20.095906945591288, 12.345065267401496]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
pf_shared.sharpe_ratio(risk_free=0.01),
pd.Series(
np.array([-59.62258787402645, -23.91718815937344]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
pf_shared.sharpe_ratio(year_freq='365D'),
pd.Series(
np.array([-20.095906945591288, 12.345065267401496]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
pf_shared.sharpe_ratio(group_by=False),
pd.Series(
np.array([-13.30950646054953, -19.278625117344564, 12.345065267401496]),
index=price_na.columns
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
pf_shared.information_ratio(group_by=False),
pd.Series(
np.array([-0.9988561334618041, -0.8809478746008806, -0.884780642352239]),
index=price_na.columns
).rename('information_ratio')
)
with pytest.raises(Exception):
_ = pf_shared.information_ratio(pf_shared.benchmark_returns(group_by=False) * 2)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Start Value', 'End Value',
'Total Return [%]', 'Benchmark Return [%]', 'Max Gross Exposure [%]',
'Total Fees Paid', 'Max Drawdown [%]', 'Max Drawdown Duration',
'Total Trades', 'Total Closed Trades', 'Total Open Trades',
'Open Trade PnL', 'Win Rate [%]', 'Best Trade [%]', 'Worst Trade [%]',
'Avg Winning Trade [%]', 'Avg Losing Trade [%]',
'Avg Winning Trade Duration', 'Avg Losing Trade Duration',
'Profit Factor', 'Expectancy', 'Sharpe Ratio', 'Calmar Ratio',
'Omega Ratio', 'Sortino Ratio'
], dtype='object')
pd.testing.assert_series_equal(
pf.stats(),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, 98.88877000000001, -1.11123, 283.3333333333333,
2.05906183131983, 0.42223000000000005, 1.6451238489727062, pd.Timedelta('3 days 08:00:00'),
2.0, 1.3333333333333333, 0.6666666666666666, -1.5042060606060605, 33.333333333333336,
-98.38058805880588, -100.8038553855386, 143.91625412541256, -221.34645964596464,
pd.Timedelta('2 days 12:00:00'), pd.Timedelta('2 days 00:00:00'), np.inf, 0.10827272727272726,
-6.751008013903537, 10378.930331014584, 4.768700318817701, 31.599760994679134
]),
index=stats_index,
name='agg_func_mean')
)
pd.testing.assert_series_equal(
pf.stats(column='a'),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, 99.68951, -0.3104899999999997, 150.0,
5.015572852148637, 0.35549, 0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
2, 1, 1, -0.20049999999999982, 0.0, -54.450495049504966, -54.450495049504966,
np.nan, -54.450495049504966, pd.NaT, pd.Timedelta('1 days 00:00:00'), 0.0,
-0.10999000000000003, -13.30804491478906, -65.40868619923044, 0.0, -11.738864633265454
]),
index=stats_index,
name='a')
)
pd.testing.assert_series_equal(
pf.stats(column='a', settings=dict(freq='10 days', year_freq='200 days')),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('50 days 00:00:00'), 100.0, 99.68951, -0.3104899999999997, 150.0,
5.015572852148637, 0.35549, 0.3104900000000015, pd.Timedelta('40 days 00:00:00'),
2, 1, 1, -0.20049999999999982, 0.0, -54.450495049504966, -54.450495049504966,
np.nan, -54.450495049504966, pd.NaT, pd.Timedelta('10 days 00:00:00'), 0.0, -0.10999000000000003,
-3.1151776875290866, -3.981409131683691, 0.0, -2.7478603669149457
]),
index=stats_index,
name='a')
)
pd.testing.assert_series_equal(
pf.stats(column='a', settings=dict(trade_type='positions')),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, 99.68951, -0.3104899999999997, 150.0,
5.015572852148637, 0.35549, 0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
2, 1, 1, -0.20049999999999982, 0.0, -54.450495049504966, -54.450495049504966,
np.nan, -54.450495049504966, pd.NaT, pd.Timedelta('1 days 00:00:00'), 0.0,
-0.10999000000000003, -13.30804491478906, -65.40868619923044, 0.0, -11.738864633265454
]),
index=pd.Index([
'Start', 'End', 'Period', 'Start Value', 'End Value',
'Total Return [%]', 'Benchmark Return [%]', 'Max Gross Exposure [%]',
'Total Fees Paid', 'Max Drawdown [%]', 'Max Drawdown Duration',
'Total Trades', 'Total Closed Trades', 'Total Open Trades',
'Open Trade PnL', 'Win Rate [%]', 'Best Trade [%]',
'Worst Trade [%]', 'Avg Winning Trade [%]',
'Avg Losing Trade [%]', 'Avg Winning Trade Duration',
'Avg Losing Trade Duration', 'Profit Factor', 'Expectancy',
'Sharpe Ratio', 'Calmar Ratio', 'Omega Ratio', 'Sortino Ratio'
], dtype='object'),
name='a')
)
pd.testing.assert_series_equal(
pf.stats(column='a', settings=dict(required_return=0.1, risk_free=0.01)),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, 99.68951, -0.3104899999999997, 150.0,
5.015572852148637, 0.35549, 0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
2, 1, 1, -0.20049999999999982, 0.0, -54.450495049504966, -54.450495049504966,
np.nan, -54.450495049504966, pd.NaT, | pd.Timedelta('1 days 00:00:00') | pandas.Timedelta |
"""
Tests for the pandas.io.common functionalities
"""
import mmap
import os
import re
import pytest
from pandas.compat import FileNotFoundError, StringIO, is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
import pandas.util.testing as tm
import pandas.io.common as icom
class CustomFSPath(object):
"""For testing fspath on unknown objects"""
def __init__(self, path):
self.path = path
def __fspath__(self):
return self.path
# Functions that consume a string path and return a string or path-like object
path_types = [str, CustomFSPath]
try:
from pathlib import Path
path_types.append(Path)
except ImportError:
pass
try:
from py.path import local as LocalPath
path_types.append(LocalPath)
except ImportError:
pass
HERE = os.path.abspath(os.path.dirname(__file__))
# https://github.com/cython/cython/issues/1720
@pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning")
class TestCommonIOCapabilities(object):
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_expand_user(self):
filename = '~/sometest'
expanded_name = icom._expand_user(filename)
assert expanded_name != filename
assert os.path.isabs(expanded_name)
assert os.path.expanduser(filename) == expanded_name
def test_expand_user_normal_path(self):
filename = '/somefolder/sometest'
expanded_name = icom._expand_user(filename)
assert expanded_name == filename
assert os.path.expanduser(filename) == expanded_name
@td.skip_if_no('pathlib')
def test_stringify_path_pathlib(self):
rel_path = icom._stringify_path(Path('.'))
assert rel_path == '.'
redundant_path = icom._stringify_path(Path('foo//bar'))
assert redundant_path == os.path.join('foo', 'bar')
@td.skip_if_no('py.path')
def test_stringify_path_localpath(self):
path = os.path.join('foo', 'bar')
abs_path = os.path.abspath(path)
lpath = LocalPath(path)
assert icom._stringify_path(lpath) == abs_path
def test_stringify_path_fspath(self):
p = CustomFSPath('foo/bar.csv')
result = | icom._stringify_path(p) | pandas.io.common._stringify_path |
import pandas as pd
import numpy as np
# TODO: fix 'skips', add remaining rows once scrape completes
df_list = []
colnames = ['one', 'two']
# 87 turned out weird, figure out what happened here
# skips = [87, 101, 144, 215, 347, 350, 360,374]
for i in range(7):
# if i in skips:
# print('skipping {}'.format(i))
# pass
# else:
df1 = pd.read_csv('coffee_{}_table_0.csv'.format(i),names=colnames)
df2 = pd.read_csv('coffee_{}_table_1.csv'.format(i))
df3 = pd.read_csv('coffee_{}_table_2.csv'.format(i))
df4 = pd.read_csv('coffee_{}_table_3.csv'.format(i))
df5 = pd.read_csv('coffee_{}_table_4.csv'.format(i))
# df1
"""
Unnamed: 0 0
0 0 90.58
1 1 Q Arabica Certificate
2 2 Embeddable Image
3 3 Cupping Protocol and Descriptors
4 4 View Green Analysis Details
5 5 Request a Sample
6 6 Species Arabica
7 7 Owner 郭亮志 GuoLiangZhi
"""
df1=df1.drop(df1.index[2:-1]).reset_index()
owner = df1.iloc[2, 2]
owner_new=owner.replace('Owner ', '')
df1.iloc[2, 2]=owner_new
data_0=df1['two'].tolist()
df1_processed = pd.DataFrame([data_0],columns=['Score','Certificate','Owner'])
# df2
"""
Unnamed: 0 0 1 \
0 0 Country of Origin Ethiopia
1 1 Farm Name METAD PLC
2 2 Lot Number NaN
3 3 Mill METAD PLC
4 4 ICO Number 2014/2015
5 5 Company METAD Agricultural Developmet plc
6 6 Altitude 1950-2200
7 7 Region GUJI-HAMBELA/GOYO
8 8 Producer METAD PLC
2 3
0 Number of Bags 300
1 Bag Weight 60 kg
2 In-Country Partner METAD Agricultural Development plc
3 Harvest Year 2014
4 Grading Date April 4th, 2015
5 Owner metad plc
6 Variety NaN
7 Status Completed
8 Processing Method Washed / Wet
"""
df2.columns = ['one','two','three','four','five']
colnames1 = df2['two'].tolist()
colnames2 = df2['four'].tolist()
data1 = df2['three'].tolist()
data2 = df2['five'].tolist()
df2_processed = pd.DataFrame([(data1+data2)],columns=(colnames1+colnames2))
# df3
"""
Unnamed: 0 0 1 2 3
0 0 NaN Sample NaN Sample
1 1 Aroma 8.67 Uniformity 10.00
2 2 Flavor 8.83 Clean Cup 10.00
3 3 Aftertaste 8.67 Sweetness 10.00
4 4 Acidity 8.75 Cupper Points 8.75
5 5 Body 8.50 Total Cup Points Sample 90.58
6 6 Balance 8.42 NaN NaN
"""
df3.columns = ['one','two','three','four','five']
colnames1 = df3['two'].tolist()
colnames2 = df3['four'].tolist()
data1 = df3['three'].tolist()
data2 = df3['five'].tolist()
df3_processed = pd.DataFrame([(data1+data2)],columns=(colnames1+colnames2))
# df4
"""
Unnamed: 0 0 1 2 \
0 0 Moisture 12 % Color
1 1 Category One Defects 0 full defects Category Two Defects
2 2 Quakers 0 NaN
3
0 Green
1 0 full defects
2 NaN
"""
df4.columns = ['one','two','three','four','five']
colnames1 = df4['two'].tolist()
colnames2 = df4['four'].tolist()
data1 = df4['three'].tolist()
data2 = df4['five'].tolist()
df4_processed = | pd.DataFrame([(data1+data2)],columns=(colnames1+colnames2)) | pandas.DataFrame |
'''
At the time of writing this code I'm learning BeautifulSoup, so a lot of comments are just to help me understand what bs4 functions are doing.
'''
from urllib.request import urlopen
from bs4 import BeautifulSoup
import time
import pandas as pd
import re
def scrape_first_table(url,
headers_limit=2,
headers_index=0,
row_start=1,
headers_start=0):
'''
Takes in a url string and soup slicing parameters, returns a df of the first table on the page.
'''
soup = url_to_soup(url)
headers = get_table_headers(soup, headers_limit, headers_index)
rows_data = get_row_data(soup, row_start)
return pd.DataFrame(rows_data, columns = headers[headers_start:])
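# Usage sketch (hypothetical helper; the URL is an assumed awards page in the same
# format the scraper below targets):
def _example_scrape_first_table():
    url = 'https://www.basketball-reference.com/awards/awards_2020.html'
    return scrape_first_table(url).head()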
def scrape_mvp_vote_results_by_year(years):
'''Takes in an iterable of years as ints, returns a pandas df of MVP voting results for the season ending in each given year.
E.g.: passing range(2020, 2017, -1) returns results from 2020 descending to 2018.
Args:
years ([int]): list of years
Returns:
pandas.DataFrame: dataframe of voting results
'''
df = pd.DataFrame()
for year in years:
t0 = time.time() #crawl delay initializer
url = f'https://www.basketball-reference.com/awards/awards_{year}.html#mvp'
html = urlopen(url)
soup = BeautifulSoup(html, 'lxml')
season = get_seasons(soup.find('h1').getText())[0]
headers = get_table_headers(soup, 2, 1)
headers[0] = 'Season'
rows = soup.findAll('tr')[2:]
rows_data = [
[td.getText() for td in row.findAll('td')]
for row in rows
]
rows_data = remove_results_after_first_table(rows_data)
for row in rows_data:
row.insert(0, season)
df = df.append(pd.DataFrame(rows_data, columns = headers), ignore_index=True)
time.sleep(3-(t0-time.time())) #BR requests a crawl-delay of 3 seconds
return df
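# Usage sketch (hypothetical helper; mirrors the range() example in the docstring):
# MVP voting for the seasons ending 2018 through 2020, respecting the 3-second
# crawl delay on every request.
def _example_scrape_mvp_votes():
    return scrape_mvp_vote_results_by_year(range(2020, 2017, -1))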
def scrape_team_index_pages(teams):
'''
Takes in an iterable of 3-letter team initials (e.g. 'GSW') and returns the season results of those teams combined into a single dataframe.
Args:
teams ([str])
Returns:
pandas.DataFrame: dataframe of season results for each team's season
'''
df = | pd.DataFrame() | pandas.DataFrame |
from datetime import datetime
import re
import unittest
import nose
from nose.tools import assert_equal
import numpy as np
from pandas.tslib import iNaT
from pandas import Series, DataFrame, date_range, DatetimeIndex, Timestamp
from pandas import compat
from pandas.compat import range, long, lrange, lmap, u
from pandas.core.common import notnull, isnull
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.core.config as cf
_multiprocess_can_split_ = True
def test_mut_exclusive():
msg = "mutually exclusive arguments: '[ab]' and '[ab]'"
with tm.assertRaisesRegexp(TypeError, msg):
com._mut_exclusive(a=1, b=2)
assert com._mut_exclusive(a=1, b=None) == 1
assert com._mut_exclusive(major=None, major_axis=None) is None
def test_is_sequence():
is_seq = com._is_sequence
assert(is_seq((1, 2)))
assert(is_seq([1, 2]))
assert(not is_seq("abcd"))
assert(not is_seq(u("abcd")))
assert(not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert(not is_seq(A()))
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
float_series = Series(np.random.randn(5))
obj_series = Series(np.random.randn(5), dtype=object)
assert(isinstance(notnull(float_series), Series))
assert(isinstance(notnull(obj_series), Series))
def test_isnull():
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
assert not isnull(np.inf)
assert not isnull(-np.inf)
float_series = Series(np.random.randn(5))
obj_series = Series(np.random.randn(5), dtype=object)
assert(isinstance(isnull(float_series), Series))
assert(isinstance(isnull(obj_series), Series))
# call on DataFrame
df = DataFrame(np.random.randn(10, 5))
df['foo'] = 'bar'
result = isnull(df)
expected = result.apply(isnull)
tm.assert_frame_equal(result, expected)
def test_isnull_tuples():
result = isnull((1, 2))
exp = np.array([False, False])
assert(np.array_equal(result, exp))
result = isnull([(False,)])
exp = np.array([[False]])
assert(np.array_equal(result, exp))
result = isnull([(1,), (2,)])
exp = np.array([[False], [False]])
assert(np.array_equal(result, exp))
# list of strings / unicode
result = isnull(('foo', 'bar'))
assert(not result.any())
result = isnull((u('foo'), u('bar')))
assert(not result.any())
def test_isnull_lists():
result = isnull([[False]])
exp = np.array([[False]])
assert(np.array_equal(result, exp))
result = isnull([[1], [2]])
exp = np.array([[False], [False]])
assert(np.array_equal(result, exp))
# list of strings / unicode
result = isnull(['foo', 'bar'])
assert(not result.any())
result = isnull([u('foo'), u('bar')])
assert(not result.any())
def test_isnull_datetime():
assert (not isnull(datetime.now()))
assert notnull(datetime.now())
idx = date_range('1/1/1990', periods=20)
assert(notnull(idx).all())
idx = np.asarray(idx)
idx[0] = iNaT
idx = DatetimeIndex(idx)
mask = isnull(idx)
assert(mask[0])
assert(not mask[1:].any())
def test_datetimeindex_from_empty_datetime64_array():
for unit in [ 'ms', 'us', 'ns' ]:
idx = DatetimeIndex(np.array([], dtype='datetime64[%s]' % unit))
assert(len(idx) == 0)
def test_nan_to_nat_conversions():
df = DataFrame(dict({
'A' : np.asarray(lrange(10),dtype='float64'),
'B' : Timestamp('20010101') }))
df.iloc[3:6,:] = np.nan
result = df.loc[4,'B'].value
assert(result == iNaT)
s = df['B'].copy()
s._data = s._data.setitem(tuple([slice(8,9)]),np.nan)
assert(isnull(s[8]))
# numpy < 1.7.0 is wrong
from distutils.version import LooseVersion
if LooseVersion(np.__version__) >= '1.7.0':
assert(s[8].value == np.datetime64('NaT').astype(np.int64))
def test_any_none():
assert(com._any_none(1, 2, 3, None))
assert(not com._any_none(1, 2, 3, 4))
def test_all_not_none():
assert(com._all_not_none(1, 2, 3, 4))
assert(not com._all_not_none(1, 2, 3, None))
assert(not com._all_not_none(None, None, None, None))
def test_repr_binary_type():
import string
letters = string.ascii_letters
btype = compat.binary_type
try:
raw = btype(letters, encoding=cf.get_option('display.encoding'))
except TypeError:
raw = btype(letters)
b = compat.text_type(compat.bytes_to_str(raw))
res = com.pprint_thing(b, quote_strings=True)
assert_equal(res, repr(b))
res = com.pprint_thing(b, quote_strings=False)
assert_equal(res, b)
def test_rands():
r = com.rands(10)
assert(len(r) == 10)
def test_adjoin():
data = [['a', 'b', 'c'],
['dd', 'ee', 'ff'],
['ggg', 'hhh', 'iii']]
expected = 'a dd ggg\nb ee hhh\nc ff iii'
adjoined = com.adjoin(2, *data)
assert(adjoined == expected)
def test_iterpairs():
data = [1, 2, 3, 4]
expected = [(1, 2),
(2, 3),
(3, 4)]
result = list(com.iterpairs(data))
assert(result == expected)
def test_split_ranges():
def _bin(x, width):
"return int(x) as a base2 string of given width"
return ''.join(str((x >> i) & 1) for i in range(width - 1, -1, -1))
def test_locs(mask):
nfalse = sum(np.array(mask) == 0)
remaining = 0
for s, e in com.split_ranges(mask):
remaining += e - s
assert 0 not in mask[s:e]
# make sure the total items covered by the ranges are a complete cover
assert remaining + nfalse == len(mask)
# exhaustively test all possible mask sequences of length 8
ncols = 8
for i in range(2 ** ncols):
cols = lmap(int, list(_bin(i, ncols))) # count up in base2
mask = [cols[i] == 1 for i in range(len(cols))]
test_locs(mask)
# base cases
test_locs([])
test_locs([0])
test_locs([1])
def test_indent():
s = 'a b c\nd e f'
result = com.indent(s, spaces=6)
assert(result == ' a b c\n d e f')
def test_banner():
ban = com.banner('hi')
assert(ban == ('%s\nhi\n%s' % ('=' * 80, '=' * 80)))
def test_map_indices_py():
data = [4, 3, 2, 1]
expected = {4: 0, 3: 1, 2: 2, 1: 3}
result = com.map_indices_py(data)
assert(result == expected)
def test_union():
a = [1, 2, 3]
b = [4, 5, 6]
union = sorted(com.union(a, b))
assert((a + b) == union)
def test_difference():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(com.difference(b, a))
assert([4, 5, 6] == inter)
def test_intersection():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(com.intersection(a, b))
assert(a == inter)
def test_groupby():
values = ['foo', 'bar', 'baz', 'baz2', 'qux', 'foo3']
expected = {'f': ['foo', 'foo3'],
'b': ['bar', 'baz', 'baz2'],
'q': ['qux']}
grouped = com.groupby(values, lambda x: x[0])
for k, v in grouped:
assert v == expected[k]
def test_is_list_like():
passes = ([], [1], (1,), (1, 2), {'a': 1}, set([1, 'a']), Series([1]),
Series([]), Series(['a']).str)
fails = (1, '2', object())
for p in passes:
assert com.is_list_like(p)
for f in fails:
assert not com.is_list_like(f)
def test_ensure_int32():
values = np.arange(10, dtype=np.int32)
result = com._ensure_int32(values)
assert(result.dtype == np.int32)
values = np.arange(10, dtype=np.int64)
result = com._ensure_int32(values)
assert(result.dtype == np.int32)
def test_ensure_platform_int():
# verify that when we create certain types of indices
# they remain the correct type under platform conversions
from pandas.core.index import Int64Index
# int64
x = Int64Index([1, 2, 3], dtype='int64')
assert(x.dtype == np.int64)
pi = com._ensure_platform_int(x)
assert(pi.dtype == np.int_)
# int32
x = Int64Index([1, 2, 3], dtype='int32')
assert(x.dtype == np.int32)
pi = com._ensure_platform_int(x)
assert(pi.dtype == np.int_)
# TODO: fix this broken test
# def test_console_encode():
# """
# On Python 2, if sys.stdin.encoding is None (IPython with zmq frontend)
# common.console_encode should encode things as utf-8.
# """
# if compat.PY3:
# raise nose.SkipTest
# with tm.stdin_encoding(encoding=None):
# result = com.console_encode(u"\u05d0")
# expected = u"\u05d0".encode('utf-8')
# assert (result == expected)
def test_is_re():
passes = re.compile('ad'),
fails = 'x', 2, 3, object()
for p in passes:
assert com.is_re(p)
for f in fails:
assert not com.is_re(f)
def test_is_recompilable():
passes = (r'a', u('x'), r'asdf', re.compile('adsf'),
u(r'\u2233\s*'), re.compile(r''))
fails = 1, [], object()
for p in passes:
assert com.is_re_compilable(p)
for f in fails:
assert not com.is_re_compilable(f)
class TestTake(unittest.TestCase):
# standard incompatible fill error
fill_error = re.compile("Incompatible type for fill_value")
_multiprocess_can_split_ = True
def test_1d_with_out(self):
def _test_dtype(dtype, can_hold_na):
data = np.random.randint(0, 2, 4).astype(dtype)
indexer = [2, 1, 0, 1]
out = np.empty(4, dtype=dtype)
com.take_1d(data, indexer, out=out)
expected = data.take(indexer)
tm.assert_almost_equal(out, expected)
indexer = [2, 1, 0, -1]
out = np.empty(4, dtype=dtype)
if can_hold_na:
com.take_1d(data, indexer, out=out)
expected = data.take(indexer)
expected[3] = np.nan
tm.assert_almost_equal(out, expected)
else:
with tm.assertRaisesRegexp(TypeError, self.fill_error):
com.take_1d(data, indexer, out=out)
# no exception o/w
data.take(indexer, out=out)
_test_dtype(np.float64, True)
_test_dtype(np.float32, True)
_test_dtype(np.uint64, False)
_test_dtype(np.uint32, False)
_test_dtype(np.uint16, False)
_test_dtype(np.uint8, False)
_test_dtype(np.int64, False)
_test_dtype(np.int32, False)
_test_dtype(np.int16, False)
_test_dtype(np.int8, False)
_test_dtype(np.object_, True)
_test_dtype(np.bool, False)
def test_1d_fill_nonna(self):
def _test_dtype(dtype, fill_value, out_dtype):
data = np.random.randint(0, 2, 4).astype(dtype)
indexer = [2, 1, 0, -1]
result = com.take_1d(data, indexer, fill_value=fill_value)
assert((result[[0, 1, 2]] == data[[2, 1, 0]]).all())
assert(result[3] == fill_value)
assert(result.dtype == out_dtype)
indexer = [2, 1, 0, 1]
result = com.take_1d(data, indexer, fill_value=fill_value)
assert((result[[0, 1, 2, 3]] == data[indexer]).all())
assert(result.dtype == dtype)
_test_dtype(np.int8, np.int16(127), np.int8)
_test_dtype(np.int8, np.int16(128), np.int16)
_test_dtype(np.int32, 1, np.int32)
_test_dtype(np.int32, 2.0, np.float64)
_test_dtype(np.int32, 3.0 + 4.0j, np.complex128)
_test_dtype(np.int32, True, np.object_)
_test_dtype(np.int32, '', np.object_)
_test_dtype(np.float64, 1, np.float64)
_test_dtype(np.float64, 2.0, np.float64)
_test_dtype(np.float64, 3.0 + 4.0j, np.complex128)
_test_dtype(np.float64, True, np.object_)
_test_dtype(np.float64, '', np.object_)
_test_dtype(np.complex128, 1, np.complex128)
_test_dtype(np.complex128, 2.0, np.complex128)
_test_dtype(np.complex128, 3.0 + 4.0j, np.complex128)
_test_dtype(np.complex128, True, np.object_)
_test_dtype(np.complex128, '', np.object_)
_test_dtype(np.bool_, 1, np.object_)
_test_dtype(np.bool_, 2.0, np.object_)
_test_dtype(np.bool_, 3.0 + 4.0j, np.object_)
_test_dtype(np.bool_, True, np.bool_)
_test_dtype(np.bool_, '', np.object_)
def test_2d_with_out(self):
def _test_dtype(dtype, can_hold_na):
data = np.random.randint(0, 2, (5, 3)).astype(dtype)
indexer = [2, 1, 0, 1]
out0 = np.empty((4, 3), dtype=dtype)
out1 = np.empty((5, 4), dtype=dtype)
com.take_nd(data, indexer, out=out0, axis=0)
com.take_nd(data, indexer, out=out1, axis=1)
expected0 = data.take(indexer, axis=0)
expected1 = data.take(indexer, axis=1)
tm.assert_almost_equal(out0, expected0)
tm.assert_almost_equal(out1, expected1)
indexer = [2, 1, 0, -1]
out0 = np.empty((4, 3), dtype=dtype)
out1 = np.empty((5, 4), dtype=dtype)
if can_hold_na:
com.take_nd(data, indexer, out=out0, axis=0)
com.take_nd(data, indexer, out=out1, axis=1)
expected0 = data.take(indexer, axis=0)
expected1 = data.take(indexer, axis=1)
expected0[3, :] = np.nan
expected1[:, 3] = np.nan
tm.assert_almost_equal(out0, expected0)
tm.assert_almost_equal(out1, expected1)
else:
for i, out in enumerate([out0, out1]):
with tm.assertRaisesRegexp(TypeError, self.fill_error):
com.take_nd(data, indexer, out=out, axis=i)
# no exception o/w
data.take(indexer, out=out, axis=i)
_test_dtype(np.float64, True)
_test_dtype(np.float32, True)
_test_dtype(np.uint64, False)
_test_dtype(np.uint32, False)
_test_dtype(np.uint16, False)
_test_dtype(np.uint8, False)
_test_dtype(np.int64, False)
_test_dtype(np.int32, False)
_test_dtype(np.int16, False)
_test_dtype(np.int8, False)
_test_dtype(np.object_, True)
_test_dtype(np.bool, False)
def test_2d_fill_nonna(self):
def _test_dtype(dtype, fill_value, out_dtype):
data = np.random.randint(0, 2, (5, 3)).astype(dtype)
indexer = [2, 1, 0, -1]
result = | com.take_nd(data, indexer, axis=0, fill_value=fill_value) | pandas.core.common.take_nd |
def autoNewDirs():
import os, shutil
from fup.helpers.files import originalFilesPaths, getfileSizeMtime
from fup.utils.commun import generateID, current_date
from fup.utils.jsoninfo import configInfo
config = configInfo()
bindir = os.path.abspath(config["path_to_bin"])
filesinfodict = originalFilesPaths(infoDict={}, auto=True)
newdirsNames = list(filesinfodict.keys())
unassignedpath = os.path.abspath(config['path_to_batches_unassigned'])
unassigneddirli = os.listdir(unassignedpath)
unsdict = {}
for d in unassigneddirli:
commName = d.split('BID_')[0].strip()
unsdict[commName] = d
unassigneddirNames = list(unsdict.keys())
communliBatch = list(set(newdirsNames).intersection(unassigneddirNames))
auto = False
infoDictli = []
tobinli = []
for opac, vdict in filesinfodict.items():
#similar to uploadFilesCreateBatch, but without flask file object
batchID = generateID()
operator = opac.split(' ')[0]
aircraft = opac.split(' ')[1]
bindir_batch = os.path.join(bindir, batchID)
if opac not in communliBatch:
batchNameFolder = operator+' '+ aircraft +' BID_'+batchID
path = os.path.join(unassignedpath, batchNameFolder)
os.mkdir(path)
else:
auto = True
communOpAc = list(set([opac]).intersection(communliBatch))
batchNameFolder = unsdict[communOpAc[0]]
path = os.path.join(unassignedpath, batchNameFolder)
existingBatchID = batchNameFolder.split('BID_')[-1].replace('_', '')
bindir_batch = os.path.join(bindir, existingBatchID)
tobinli.append({'source': vdict['rootpath'], 'destination': bindir_batch})
filesnameli = []
fileIDli = []
for file in vdict['files']:
if auto:
#print("yuhuu file",file)
filepath = file
fileinfo = getfileSizeMtime(filepath)
fileinfo["FileName"] = file.split("\\")[-1]
responseFileInfo = checkFileInfo(fileinfo)
if responseFileInfo != True:
return responseFileInfo, auto, auto
filename = file.split('\\')[-1]
fileid = generateID()
newFileName = 'FID_'+fileid+' '+filename
save_path = os.path.join(path, newFileName)
filesnameli.append(filename)
fileIDli.append(fileid)
try:
shutil.copy2(file, save_path)
except Exception as e:
return str(e), str(e), str(e)
orgfilesname = ', '.join(filesnameli)
orgfilespath = path
filesId = ', '.join(fileIDli)
addedDate = current_date()
infoaddDict = {'BatchID': batchID,
'Aircraft': aircraft,
'Operator': operator,
'OriginalFilesName': orgfilesname,
'OriginalFilesPath': orgfilespath,
'FilesID': filesId,
'AddedDate': addedDate
}
infoDictli.append(infoaddDict)
#print(infoaddDict)
return infoDictli, auto, tobinli
def originalFilesPaths(infoDict, auto=False):
import os, re
from fup.utils.commun import getDirs
from fup.utils.jsoninfo import configInfo
config = configInfo()
newFilesPath = config["path_to_new_opfiles"]
newFilesPath = os.path.abspath(newFilesPath)
orgdirli = os.listdir(newFilesPath)
if auto:
orgdirs = [os.path.join(newFilesPath, adir) for adir in orgdirli]
orgdirs = getDirs(orgdirs)
dirsdict = {}
for path in orgdirs:
try:
op = path.split('\\')[-1].split(' ')[0].strip()
ac = str(path.split('\\')[-1].split(' ')[1].strip())
if not re.search('A', ac):
ac = 'A'+ac
opac = op+' '+ac
infoDict['Operator'] = op
infoDict['Aircraft'] = ac
filespath = originalFilesPaths(infoDict, auto=False) #recursive
dirsdict[opac] = {'files': filespath, 'rootpath':path}
except:#in case there is no op or ac
pass
#print(dirsdict)
return dirsdict
else:
#Get original files paths to the new files added to batch
try:
orgdirli = [p for p in orgdirli if re.search(infoDict['Operator'], p)]
orgdirli = [p for p in orgdirli if re.search(infoDict['Aircraft'], p) or re.search(infoDict['Aircraft'][1:], p)]
except:
response = "Can't collect Operator and Aircraft info.."
return response
if len(orgdirli) == 1:
orgdir = orgdirli[0]
else:
response = "Operator '{}' with Aircraft '{}' was not found in NEW folder!".format(infoDict['Operator'], infoDict['Aircraft'])
return response
orgpath = os.path.join(newFilesPath, orgdir)
filespath = [os.path.join(orgpath, filepath) for filepath in os.listdir(orgpath)]
#print('asd',filespath)
return filespath
def matchOriginalinNew(orgfiles, newfiles):
#take 2 lists and see if original is found in new, return a dict
import re
fid_pattern = r"^FID_[a-zA-Z0-9]{6}\n*"
newfilesdict = {}
for file in newfiles:
if re.match(fid_pattern, file):
fid = str(re.search(fid_pattern, file).group()).replace('FID_', '')
fileName = str(file.replace(str('FID_' + fid), '')).strip()
#print("fid, file ", fid, fileName)
newfilesdict[fid] = fileName
return newfilesdict
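# Worked example (hypothetical helper and file name): the six-character FID prefix is
# stripped and mapped to the bare file name.
def _exampleMatchOriginalinNew():
    return matchOriginalinNew([], ['FID_ab12cd report.pdf'])  # {'ab12cd': 'report.pdf'}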
def getFileId(filepath, matchedFilesdict):
from fup.utils.commun import delPunctuationMarks
file = filepath.split('\\')[-1]
#print('getFileIdfunc: ', file, matchedFilesdict)
for kid, vfname in matchedFilesdict.items():
if delPunctuationMarks(vfname) == delPunctuationMarks(file):
#print(kid, vfname)
return kid, vfname
def getfileSizeMtime(filepath):
import os, time
from time import mktime
from datetime import datetime
metadata = os.stat(filepath)
file_size = str(metadata.st_size) # bytes
filetime = time.localtime(metadata.st_mtime)
dt = datetime.fromtimestamp(mktime(filetime))
creation_date = dt.strftime('%d-%m-%Y')
fileinfodict = {'FileSizeBytes':file_size,
'ModificationDate': creation_date
}
return fileinfodict
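# Shape of the returned dict (values are hypothetical): an existing file yields
# something like {'FileSizeBytes': '10240', 'ModificationDate': '05-03-2022'} --
# the size in bytes as a string and the mtime formatted dd-mm-YYYY.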
def checkFileInfo(fileinfo):
import re
import pandas as pd
from fup.utils.dbwrap import sql2df
from fup.helpers.files import delDirsnotindb
from fup.utils.commun import delPunctuationMarks
#print("fileinfo ",fileinfo)
histdf = sql2df('fileshistory')
filedict = {}
for k, v in fileinfo.items():
filedict[k] = [v]
filedf = pd.DataFrame.from_dict(filedict)
#print("yuhuu filedict", filedict)
merged_name = filedf.merge(histdf, left_on=['FileName'], right_on=['FileName'], suffixes=('', '_y'))
colstodel = [col for col in merged_name.columns.tolist() if re.search('_y', col)]
for col in colstodel:
merged_name.drop(col, axis=1, inplace=True)
merged_size = filedf.merge(histdf, left_on=['FileSizeBytes'], right_on=['FileSizeBytes'], suffixes=('', '_y'))
colstodel = [col for col in merged_size.columns.tolist() if re.search('_y', col)]
for col in colstodel:
merged_size.drop(col, axis=1, inplace=True)
merged_mtime = filedf.merge(histdf, left_on=['ModificationDate'], right_on=['ModificationDate'], suffixes=('', '_y'))
colstodel = [col for col in merged_mtime.columns.tolist() if re.search('_y', col)]
for col in colstodel:
merged_mtime.drop(col, axis=1, inplace=True)
if (merged_name.shape[0] == 0):
return True
elif (merged_name.shape[0] == 0) and (merged_size.shape[0] == 0):
return True
elif (merged_name.shape[0] == 0) and (merged_size.shape[0] == 0) and (merged_mtime.shape[0] == 0):
return True
else:
try:
filename_merge = merged_name['FileName'].tolist()[0]
for fname in histdf['FileName']:
if delPunctuationMarks(fname) == delPunctuationMarks(filename_merge):
histdf_filtered = histdf[histdf['FileName'] == fname]
filename_hist = histdf_filtered['FileName'].tolist()
batchid_hist = histdf_filtered['AddedInBatch'].tolist()
fileid_hist = histdf_filtered['FileID'].tolist()
delDirsnotindb()
response = "File '{}' was probably added before! Check BID_{}, FID_{}!".format(filename_hist[0], batchid_hist[0], fileid_hist[0])
#print(response)
return response
except Exception as e:
return str("Probably files in NEW are already inserted. Got: {}".format(e))
def delDirsnotindb():
import os
from fup.utils.jsoninfo import configInfo
from fup.utils.commun import deletetree
from fup.helpers.batch import batchExists
config = configInfo()
unassignedpath = os.path.abspath(config['path_to_batches_unassigned'])
unassigneddirli = os.listdir(unassignedpath)
todelDirs = {}
for batchNameFolder in unassigneddirli:
bid = batchNameFolder.split('BID_')[-1].replace('_', '')
if batchNameFolder == '_info.txt':
continue
if not batchExists(bid):
todelDirs[bid] = batchNameFolder
for kbid, vdirName in todelDirs.items():
deldir = os.path.join(unassignedpath, vdirName)
deletetree(deldir)
def updateDBforNewFiles():
#Verify if new files were added to a existing batch if so, update db
import os, re
import pandas as pd
from fup.utils.dbwrap import sql_insertDict, sql_updateDict, get_dftable, sql_deleteRow
from fup.helpers.batch import batchInfo
from fup.helpers.files import getfileSizeMtime
from fup.utils.commun import list_duplicates
#Update followup with the new file added to the batch
followupdf = get_dftable('followup')
orgpaths = followupdf['OriginalFilesPath'].tolist()
orgpaths_nodups = list(set(orgpaths))
newtempbid = {}
for opath in orgpaths_nodups:
bid = opath.split("\\")[-1].split('BID_')[-1].strip()
followupdf_bid = followupdf[followupdf['OriginalFilesPath'].str.contains('|'.join([bid]), na=False)]
bids = followupdf_bid["BatchID"].tolist()
bidtodelli = [b for b in bids if b != bid]
tempd = {}
for biddel in bidtodelli:
infobatch_previous = batchInfo(biddel)
if infobatch_previous != False:
for k in list(infobatch_previous.keys()):
if k not in ['OriginalFilesName', 'FilesID', 'ChangesLog', 'BatchID']:
infobatch_previous.pop(k, None)
tempd["prevInfo"] = infobatch_previous
# else:
# response_notfound = "BatchID {} is not in database! Please delete from unassigned folder {}!".format(existingBatchID, existingBatchID)
# tempd["prevInfo"] = response_notfound
# #return response_notfound, response_notfound, response_notfound
newtempbid[bid] = tempd
orgpaths_dups = list_duplicates(orgpaths)
existingbid = {}
for opath in orgpaths_dups:
tempd = {}
bid = opath.split("\\")[-1].split('BID_')[-1].strip()
infobatch_previous = batchInfo(bid)
if infobatch_previous != False:
for k in list(infobatch_previous.keys()):
if k not in ['OriginalFilesName', 'FilesID', 'ChangesLog', 'BatchID']:
infobatch_previous.pop(k, None)
#print('OK ',infobatch_previous)
tempd["prevInfo"] = infobatch_previous
# else:
# response_notfound = "BatchID {} is not in database! Please delete from unassigned folder {}!".format(existingBatchID, existingBatchID)
# #print('NOK ',response_notfound)
# tempd["prevInfo"] = response_notfound
# #return response_notfound, response_notfound, response_notfound
existingbid[bid] = tempd
tempbidtodel = []
for bidorg, dorg in existingbid.items():
for bidtemp, dtemp in newtempbid.items():
if bidorg == bidtemp:
#make df from dict
dforg = | pd.DataFrame.from_dict(dorg['prevInfo']) | pandas.DataFrame.from_dict |
import numpy as np
import pandas as pd
from colassigner import ColAssigner
from ..data_management import fe_raw_cols as fe_rc
from ..data_management import fe_trepos as fe_t2
from ..data_management import pv_raw_cols as pv_rc
from ..data_management import pv_trepos as pv_t2
# manual part
start_date = pd.to_datetime(["2011-06-01"]).astype(int)[0]
fe_team_replacement_dic = {82: 24341}
fe_player_replacement_dic = {27800: 312814, 322402: 344042, 135468: 229172}
class CorefCols(ColAssigner):
def __init__(self, pvsdf):
super().__init__()
self.pv_season_df = pvsdf
def comp_type(self, df):
return np.where(
self.pv_season_df.reindex(df[pv_rc.CommonCols.season_id])[pv_rc.SeasonInfoCols.season_type].str.contains(
"pokal"
),
"cup",
"league",
)
def correct_height(s):
return np.where(s > 130, s, s.median())
def correct_dob(s):
allowed = pd.to_datetime(["1950-01-01", "2020-01-01"]).astype(int)
return np.where((s > allowed[0]) & (s < allowed[1]), s, s.median())
def get_pv_season_id(df):
return df[pv_rc.CommonCols.competition_id] + "-" + df[pv_rc.CommonCols.season_year_id]
def get_fe_bases():
team_df = fe_t2.teams_table.get_full_df().drop(fe_team_replacement_dic.keys(), errors="ignore")
match_df = (
fe_t2.matches_table.get_full_df()
.assign(
date=lambda df: pd.to_datetime(df[fe_rc.MatchesCols.datetime]).astype(np.int64),
home_teamid=lambda df: df[fe_rc.MatchesCols.TeamId.home].replace(fe_team_replacement_dic),
away_teamid=lambda df: df[fe_rc.MatchesCols.TeamId.away].replace(fe_team_replacement_dic),
)
.loc[lambda df: df["date"] > start_date]
)
season_df = fe_t2.seasons_table.get_full_df().assign(
name=lambda df: df[fe_rc.SeasonsCols.competition_name],
fe_comp_uid=lambda df: df[fe_rc.CommonCols.area_name] + " " + df["name"],
)
comp_df = season_df.groupby("fe_comp_uid")[[fe_rc.CommonCols.area_name]].first()
lineup_df = fe_t2.lineups_table.get_full_df().assign(
starter=lambda df: np.where(df[fe_rc.LineupsCols.position] == "Sub", "sub", "starter"),
fe_player_id=lambda df: df[fe_rc.CommonCols.player_id].replace(fe_player_replacement_dic),
)
player_df = (
fe_t2.players_table.get_full_df()
.drop(fe_player_replacement_dic.keys(), errors="ignore")
.assign(
dob=lambda pdf: lineup_df.merge(match_df.loc[:, ["date"]].reset_index(), how="inner")
.assign(dob=lambda df: df["date"] - df[fe_rc.LineupsCols.age] * 365 * 24 * 60 * 60 * 10 ** 9)
.groupby("fe_player_id")["dob"]
.mean()
.reindex(pdf.index)
.pipe(correct_dob),
height=lambda df: df[fe_rc.PlayersCols.height].pipe(correct_height),
)
.loc[:, [fe_rc.PlayersCols.name, "height", "dob"]]
)
return comp_df, season_df, match_df, player_df, team_df, lineup_df
def get_pv_bases():
country_df = pv_t2.countries_table.get_full_df()
match_df = (
pv_t2.match_info_table.get_full_df()
.assign(
**{
pv_rc.CommonCols.season_id: get_pv_season_id,
"date": lambda df: pd.to_datetime(df[pv_rc.MatchInfoCols.date]).astype(np.int64),
}
)
.loc[lambda df: df["date"] > start_date]
)
season_df = pv_t2.seasons_table.get_full_df()
team_df = (
pv_t2.team_info_table.get_full_df()
.assign(
country=lambda df: pv_t2.countries_table.get_full_df()
.reindex(df[pv_rc.CommonCols.country_id])[pv_rc.CountriesCols.country_name]
.fillna("N/A")
.values
)
.loc[:, [pv_rc.TeamInfoCols.name, "country"]]
)
lineup_df = pv_t2.match_lineups_table.get_full_df()
player_df = (
pv_t2.player_info_table.get_full_df()
.assign(
dob=lambda df: | pd.to_datetime(df[pv_rc.PlayerInfoCols.date_of_birth]) | pandas.to_datetime |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from pandas.compat import lrange, lzip, range
import pandas as pd
from pandas import Index, MultiIndex, Series
import pandas.util.testing as tm
def test_equals(idx):
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert idx.equals(idx.astype(object))
assert not idx.equals(list(idx))
assert not idx.equals(np.array(idx))
same_values = Index(idx, dtype=object)
assert idx.equals(same_values)
assert same_values.equals(idx)
if idx.nlevels == 1:
# do not test MultiIndex
assert not idx.equals(pd.Series(idx))
def test_equals_op(idx):
# GH9947, GH10637
index_a = idx
n = len(index_a)
index_b = index_a[0:-1]
index_c = index_a[0:-1].append(index_a[-2:-1])
index_d = index_a[0:1]
with pytest.raises(ValueError, match="Lengths must match"):
index_a == index_b
expected1 = np.array([True] * n)
expected2 = np.array([True] * (n - 1) + [False])
tm.assert_numpy_array_equal(index_a == index_a, expected1)
tm.assert_numpy_array_equal(index_a == index_c, expected2)
# test comparisons with numpy arrays
array_a = np.array(index_a)
array_b = np.array(index_a[0:-1])
array_c = np.array(index_a[0:-1].append(index_a[-2:-1]))
array_d = np.array(index_a[0:1])
with pytest.raises(ValueError, match="Lengths must match"):
index_a == array_b
tm.assert_numpy_array_equal(index_a == array_a, expected1)
tm.assert_numpy_array_equal(index_a == array_c, expected2)
# test comparisons with Series
series_a = | Series(array_a) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 5 02:12:12 2022
@author: Kraken
Project: MHP Hackathon
"""
import os
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 16})
WORKING_DIR = "model_14"
WORKING_DIR2 = "model_12"
# "model_8": dqn with fixed weights
# "model_4": dqn
MVG_AVG_WINDOW = 5
# =============================================================================
# Queue Plots - Combined
# =============================================================================
QUEUE = "plot_queue_data.txt"
# rl agent
with open(os.path.join(WORKING_DIR, QUEUE), "r") as txtfile:
data = txtfile.readlines()
data = [float(x.rstrip("\n")) for x in data][:250]
# ctl
with open(os.path.join(WORKING_DIR2, QUEUE), "r") as txtfile:
data2 = txtfile.readlines()
data2 = [float(x.rstrip("\n")) for x in data2]
fig = plt.figure(figsize=(12, 8))
plt.plot(data2, "blue", label="Conventional Traffic Lights")
plt.plot(data, "orange", label="RL Agent")
plt.xlabel("# Episodes")
plt.ylabel("Average queue length (vehicles)")
plt.title("Conventional Traffic Lights & RL Optimized Smart Traffic Lights")
plt.grid()
plt.legend(loc="upper right")
plt.savefig(QUEUE.replace("_data.txt", "_combined.png"))
# =============================================================================
# Delay Plots - Combined
# =============================================================================
QUEUE = "plot_delay_data.txt"
# rl agent
with open(os.path.join(WORKING_DIR, QUEUE), "r") as txtfile:
data = txtfile.readlines()
data = [float(x.rstrip("\n")) for x in data][:250]
# ctl
with open(os.path.join(WORKING_DIR2, QUEUE), "r") as txtfile:
data2 = txtfile.readlines()
data2 = [float(x.rstrip("\n")) for x in data2]
fig = plt.figure(figsize=(12, 8))
plt.plot(data, "orange", label="RL Agent")
plt.plot(data2, "blue", label="Conventional Traffic Lights")
plt.xlabel("# Episodes")
plt.ylabel("Cumulative Delay (s)")
plt.title("Conventional Traffic Lights & RL Optimized Smart Traffic Lights")
plt.grid()
plt.legend(loc="upper right")
plt.savefig(QUEUE.replace("_data.txt", "_combined.png"))
# =============================================================================
# Reward Plots - Combined
# =============================================================================
QUEUE = "plot_reward_data.txt"
# rl agent
with open(os.path.join(WORKING_DIR, QUEUE), "r") as txtfile:
data = txtfile.readlines()
data = [float(x.rstrip("\n")) for x in data][:250]
# ctl
with open(os.path.join(WORKING_DIR2, QUEUE), "r") as txtfile:
data2 = txtfile.readlines()
data2 = [float(x.rstrip("\n")) for x in data2]
fig = plt.figure(figsize=(12, 8))
plt.plot(data, "orange", label="RL Agent")
plt.plot(data2, "blue", label="Conventional Traffic Lights")
plt.xlabel("# Episodes")
plt.ylabel("Cumulative Negative Reward")
plt.title("Conventional Traffic Lights & RL Optimized Smart Traffic Lights")
plt.grid()
plt.legend(loc="best")
plt.savefig(QUEUE.replace("_data.txt", "_combined.png"))
WORKING_DIR = "model_14"
MVG_AVG_WINDOW = 5
# =============================================================================
# Queue Plots
# =============================================================================
QUEUE = "plot_queue_data.txt"
with open(os.path.join(WORKING_DIR, QUEUE), "r") as txtfile:
data = txtfile.readlines()
data = [float(x.rstrip("\n")) for x in data]
data_series = | pd.Series(data) | pandas.Series |
# -*- coding: utf-8 -*-
"""Functionality that extends on what the base StatsCan api returns in some way
TODO
----
Function to delete tables
Extend getChangedCubeList with a function that returns all tables updated
within a date range
"""
import os
import json
import zipfile
import h5py
import pandas as pd
import numpy as np
import requests
from stats_can.scwds import get_series_info_from_vector
from stats_can.scwds import get_data_from_vectors_and_latest_n_periods
from stats_can.scwds import get_bulk_vector_data_by_range
from stats_can.scwds import get_cube_metadata
from stats_can.scwds import get_full_table_download
from stats_can.helpers import parse_tables
from stats_can.helpers import parse_vectors
def get_tables_for_vectors(vectors):
""" get a list of dicts mapping vectors to tables
Parameters
----------
vectors : list of str or str
Vectors to find tables for
Returns
-------
tables_list: dict
    each vector number keys the table it belongs to, plus an
    'all_tables' key that holds the list of unique tables used by the vectors
"""
v_json = get_series_info_from_vector(vectors)
vectors = [j["vectorId"] for j in v_json]
tables_list = {j["vectorId"]: str(j["productId"]) for j in v_json}
tables_list["all_tables"] = []
for vector in vectors:
if tables_list[vector] not in tables_list["all_tables"]:
tables_list["all_tables"].append(tables_list[vector])
return tables_list
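# Usage sketch (hypothetical helper; the vector IDs are placeholders): each vector maps
# to its product ID, and 'all_tables' collects the unique product IDs.
def _example_get_tables_for_vectors():
    return get_tables_for_vectors(['v123456', 'v234567'])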
def table_subsets_from_vectors(vectors):
"""get a list of dicts mapping tables to vectors
Parameters
----------
vectors : list of str or str
Vectors to find tables for
Returns
-------
tables_dict: dict
keys for each table used by the vectors, matched to a list of vectors
"""
start_tables_dict = get_tables_for_vectors(vectors)
tables_dict = {t: [] for t in start_tables_dict["all_tables"]}
vecs = list(start_tables_dict.keys())[:-1] # all but the all_tables key
for vec in vecs:
tables_dict[start_tables_dict[vec]].append(vec)
return tables_dict
def download_tables(tables, path=None, csv=True):
"""Download a json file and zip of data for a list of tables to path
Parameters
----------
tables: list of str
tables to be downloaded
path: str, default: None (will do current directory)
Where to download the table and json
csv: boolean, default True
download in CSV format, if not download SDMX
Returns
-------
downloaded: list
list of tables that were downloaded
"""
metas = get_cube_metadata(tables)
for meta in metas:
product_id = meta["productId"]
zip_url = get_full_table_download(product_id, csv=csv)
if csv:
zip_file = product_id + "-eng.zip"
else:
zip_file = product_id + ".zip"
json_file = product_id + ".json"
if path:
zip_file = os.path.join(path, zip_file)
json_file = os.path.join(path, json_file)
# Thanks http://evanhahn.com/python-requests-library-useragent/
response = requests.get(zip_url, stream=True, headers={"user-agent": None})
# Thanks https://bit.ly/2sPYPYw
with open(json_file, "w") as outfile:
json.dump(meta, outfile)
with open(zip_file, "wb") as handle:
for chunk in response.iter_content(chunk_size=512):
if chunk: # filter out keep-alive new chunks
handle.write(chunk)
downloaded = [meta["productId"] for meta in metas]
return downloaded
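# Usage sketch (hypothetical helper; the table number is a placeholder product ID):
# downloads the zipped CSV plus its JSON metadata into `path`.
def _example_download_tables(path="."):
    return download_tables(["18100004"], path=path)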
def zip_update_tables(path=None, csv=True):
"""check local json, update zips of outdated tables
Grabs the json files in path, checks them against the metadata on
StatsCan and grabs updated tables where there have been changes
There isn't actually a "last modified date" part to the metadata
What I'm doing is comparing the latest reference period. Almost all
data changes will at least include incremental releases, so this should
capture what I want
Parameters
----------
path: str, default: None
where to look for tables to update
csv: boolean, default: True
Downloads updates in CSV form by default, SDMX if false
Returns
-------
update_table_list: list
list of the tables that were updated
"""
local_jsons = list_zipped_tables(path=path)
tables = [j["productId"] for j in local_jsons]
remote_jsons = get_cube_metadata(tables)
update_table_list = []
for local, remote in zip(local_jsons, remote_jsons):
if local["cubeEndDate"] != remote["cubeEndDate"]:
update_table_list.append(local["productId"])
download_tables(update_table_list, path, csv=csv)
return update_table_list
def zip_table_to_dataframe(table, path=None):
"""Reads a StatsCan table into a pandas DataFrame
If a zip file of the table does not exist in path, downloads it
Parameters
----------
table: str
the table to load to dataframe from zipped csv
path: str, default: current working directory when module is loaded
where to download the tables or load them
Returns
-------
df: pandas.DataFrame
the table as a dataframe
"""
# Parse tables returns a list, can only do one table at a time here though
table = parse_tables(table)[0]
table_zip = table + "-eng.zip"
if path:
table_zip = os.path.join(path, table_zip)
if not os.path.isfile(table_zip):
download_tables([table], path)
csv_file = table + ".csv"
with zipfile.ZipFile(table_zip) as myzip:
with myzip.open(csv_file) as myfile:
col_names = pd.read_csv(myfile, nrows=0).columns
# reopen the file or it misses the first row
with myzip.open(csv_file) as myfile:
types_dict = {"VALUE": float}
types_dict.update({col: str for col in col_names if col not in types_dict})
df = pd.read_csv(myfile, dtype=types_dict)
possible_cats = [
"GEO",
"DGUID",
"STATUS",
"SYMBOL",
"TERMINATED",
"DECIMALS",
"UOM",
"UOM_ID",
"SCALAR_FACTOR",
"SCALAR_ID",
"VECTOR",
"COORDINATE",
"Wages",
"National Occupational Classification for Statistics (NOC-S)",
"Supplementary unemployment rates",
"Sex",
"Age group",
"Labour force characteristics",
"Statistics",
"Data type",
"Job permanency",
"Union coverage",
"Educational attainment",
]
actual_cats = [col for col in possible_cats if col in col_names]
df[actual_cats] = df[actual_cats].astype("category")
try:
df["REF_DATE"] = pd.to_datetime(df["REF_DATE"], format="%Y-%m")
except TypeError:
df["REF_DATE"] = pd.to_datetime(df["REF_DATE"])
return df
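# Usage sketch (hypothetical helper; placeholder table ID): loads the zipped CSV into a
# typed DataFrame, downloading it first if the zip is not already in `path`.
def _example_zip_table_to_dataframe(path="."):
    return zip_table_to_dataframe("18100004", path=path).head()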
def list_zipped_tables(path=None):
"""List StatsCan tables available
defaults to looking in the current working directory and for zipped CSVs
Parameters
----------
path: string or path, default None
Where to look for zipped tables
csv: boolean, default True
Whether to look for CSV or SDMX files
Returns
-------
tables: list
list of available tables json data
"""
# Find json files
jsons = [f for f in os.listdir(path) if f.endswith(".json")]
if path:
jsons = [os.path.join(path, j) for j in jsons]
tables = []
for j in jsons:
try:
with open(j) as json_file:
result = json.load(json_file)
if "productId" in result:
tables.append(result)
except ValueError as e:
print("failed to read json file" + j)
print(e)
return tables
def tables_to_h5(tables, h5file="stats_can.h5", path=None):
"""Take a table and its metadata and put it in an hdf5 file
Parameters
----------
tables: list of str
tables to add to the h5file
h5file: str, default stats_can.h5
name of the h5file to store the tables in
path: str or path, default = current working directory
path to the h5file
Returns
-------
tables: list
list of tables loaded into the file
"""
if path:
h5file = os.path.join(path, h5file)
tables = parse_tables(tables)
for table in tables:
hkey = "table_" + table
jkey = "json_" + table
zip_file = table + "-eng.zip"
json_file = table + ".json"
if path:
zip_file = os.path.join(path, zip_file)
json_file = os.path.join(path, json_file)
if not os.path.isfile(json_file):
download_tables([table], path)
df = zip_table_to_dataframe(table, path=path)
with open(json_file) as f_name:
df_json = json.load(f_name)
with pd.HDFStore(h5file, "a") as store:
df.to_hdf(store, key=hkey, format="table", complevel=1)
with h5py.File(h5file, "a") as hfile:
if jkey in hfile.keys():
del hfile[jkey]
hfile.create_dataset(jkey, data=json.dumps(df_json))
os.remove(zip_file)
os.remove(json_file)
return tables
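# Usage sketch (hypothetical helper; placeholder table ID): cache a table and its
# metadata in stats_can.h5, then read it back via table_from_h5 (defined below)
# without re-downloading.
def _example_tables_to_h5(path="."):
    tables_to_h5(["18100004"], path=path)
    return table_from_h5("18100004", path=path)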
def table_from_h5(table, h5file="stats_can.h5", path=None):
"""Read a table from h5 to a dataframe
Parameters
----------
table: str
name of the table to read
h5file: str, default stats_can.h5
name of the h5file to retrieve the table from
path: str or path, default = current working directory
path to the h5file
Returns
-------
df: pd.DataFrame
table in dataframe format
"""
table = "table_" + parse_tables(table)[0]
if path:
h5 = os.path.join(path, h5file)
else:
h5 = h5file
try:
with pd.HDFStore(h5, "r") as store:
df = pd.read_hdf(store, key=table)
except (KeyError, OSError):
print("Downloading and loading " + table)
tables_to_h5(tables=table, h5file=h5file, path=path)
with | pd.HDFStore(h5, "r") | pandas.HDFStore |
"""
"""
import numpy as np
import pandas as pd
def parse_data_elecciones_esp(votation_file):
#Headers as rows for now
df = | pd.read_excel(votation_file, 0) | pandas.read_excel |
"""Holds the Dataset class used for managing training and test data."""
import datetime
import pandas
import numpy as np
def LoadData(filenames, split=True):
"""Load a bunch of files as a pandas dataframe.
Input files should have three columns for userid, query, and date.
"""
def Prepare(s):
s = str(s)
return ['<S>'] + list(s) + ['</S>']
dfs = []
for filename in filenames:
df = pandas.read_csv(filename, sep='\t', compression='gzip', header=None)
df.columns = ['user', 'query_', 'date']
if split:
df['query_'] = df.query_.apply(Prepare)
df['user'] = df.user.apply(lambda x: 's' + str(x))
dates = df.date.apply(lambda x: datetime.datetime.strptime(x, '%Y-%m-%d %H:%M:%S'))
df['hourofday'] = [d.hour for d in dates]
df['dayofweek'] = [d.dayofweek for d in dates]
dfs.append(df)
return | pandas.concat(dfs) | pandas.concat |
import operator
from shutil import get_terminal_size
from typing import Dict, Hashable, List, Type, Union, cast
from warnings import warn
import numpy as np
from pandas._config import get_option
from pandas._libs import algos as libalgos, hashtable as htable
from pandas._typing import ArrayLike, Dtype, Ordered, Scalar
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender,
Substitution,
cache_readonly,
deprecate_kwarg,
doc,
)
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.dtypes.cast import (
coerce_indexer_dtype,
maybe_cast_to_extension_array,
maybe_infer_to_datetimelike,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
is_categorical_dtype,
is_datetime64_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_integer_dtype,
is_iterator,
is_list_like,
is_object_dtype,
is_scalar,
is_sequence,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import isna, notna
from pandas.core import ops
from pandas.core.accessor import PandasDelegate, delegate_names
import pandas.core.algorithms as algorithms
from pandas.core.algorithms import _get_data_algo, factorize, take, take_1d, unique1d
from pandas.core.array_algos.transforms import shift
from pandas.core.arrays.base import ExtensionArray, _extension_array_shared_docs
from pandas.core.base import NoNewAttributesMixin, PandasObject, _shared_docs
import pandas.core.common as com
from pandas.core.construction import array, extract_array, sanitize_array
from pandas.core.indexers import check_array_indexer, deprecate_ndim_indexing
from pandas.core.missing import interpolate_2d
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.sorting import nargsort
from pandas.io.formats import console
def _cat_compare_op(op):
opname = f"__{op.__name__}__"
@unpack_zerodim_and_defer(opname)
def func(self, other):
if is_list_like(other) and len(other) != len(self):
# TODO: Could this fail if the categories are listlike objects?
raise ValueError("Lengths must match.")
if not self.ordered:
if opname in ["__lt__", "__gt__", "__le__", "__ge__"]:
raise TypeError(
"Unordered Categoricals can only compare equality or not"
)
if isinstance(other, Categorical):
# Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = "Categoricals can only be compared if 'categories' are the same."
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif self.ordered and not (self.categories == other.categories).all():
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError(
"Categoricals can only be compared if 'ordered' is the same"
)
if not self.ordered and not self.categories.equals(other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
f = getattr(self._codes, opname)
ret = f(other_codes)
mask = (self._codes == -1) | (other_codes == -1)
if mask.any():
# In other series, this leads to False, so do that here too
if opname == "__ne__":
ret[(self._codes == -1) & (other_codes == -1)] = True
else:
ret[mask] = False
return ret
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
ret = getattr(self._codes, opname)(i)
if opname not in {"__eq__", "__ge__", "__gt__"}:
# check for NaN needed if we are not equal or larger
mask = self._codes == -1
ret[mask] = False
return ret
else:
if opname == "__eq__":
return np.zeros(len(self), dtype=bool)
elif opname == "__ne__":
return np.ones(len(self), dtype=bool)
else:
raise TypeError(
f"Cannot compare a Categorical for op {opname} with a "
"scalar, which is not a category."
)
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if opname in ["__eq__", "__ne__"]:
return getattr(np.array(self), opname)(np.array(other))
raise TypeError(
f"Cannot compare a Categorical for op {opname} with "
f"type {type(other)}.\nIf you want to compare values, "
"use 'np.asarray(cat) <op> other'."
)
func.__name__ = opname
return func
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :meth:`Categorical.__contains__`
and :meth:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
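Examples
--------
A minimal illustration, using the codes array as the container (as
:meth:`Categorical.__contains__` does):
>>> cat = Categorical(['a', 'b'], categories=['a', 'b', 'c'])
>>> contains(cat, 'b', cat._codes)
True
>>> contains(cat, 'c', cat._codes)
False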
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except (KeyError, TypeError):
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
class Categorical(ExtensionArray, PandasObject):
"""
Represent a categorical variable in classic R / S-plus fashion.
`Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : bool, default False
Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical.
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : bool
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
See Also
--------
CategoricalDtype : Type for categorical data.
CategoricalIndex : An Index with an underlying ``Categorical``.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html>`_
for more.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
"""
# For comparisons, so that numpy uses our implementation of the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
# tolist is not actually deprecated, just suppressed in the __dir__
_deprecations = PandasObject._deprecations | frozenset(["tolist"])
_typ = "categorical"
def __init__(
self, values, categories=None, ordered=None, dtype=None, fastpath=False
):
dtype = CategoricalDtype._from_values_or_dtype(
values, categories, ordered, dtype
)
# At this point, dtype is always a CategoricalDtype, but
# we may have dtype.categories be None, and we need to
# infer categories in a factorization step further below
if fastpath:
self._codes = coerce_indexer_dtype(values, dtype.categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
# By convention, empty lists result in object dtype:
sanitize_dtype = np.dtype("O") if len(values) == 0 else None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError as err:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError(
"'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument."
) from err
except ValueError as err:
# FIXME
raise NotImplementedError(
"> 1 ndim Categorical are not supported at this time"
) from err
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values.dtype):
old_codes = (
values._values.codes if isinstance(values, ABCSeries) else values.codes
)
codes = recode_for_categories(
old_codes, values.dtype.categories, dtype.categories
)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = -np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""
The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is unequal to the number of old categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if self.dtype.categories is not None and len(self.dtype.categories) != len(
new_dtype.categories
):
raise ValueError(
"new categories need to have the same number of "
"items as the old categories!"
)
self._dtype = new_dtype
@property
def ordered(self) -> Ordered:
"""
Whether the categories have an ordered relationship.
"""
return self.dtype.ordered
@property
def dtype(self) -> CategoricalDtype:
"""
The :class:`~pandas.api.types.CategoricalDtype` for this instance.
"""
return self._dtype
@property
def _constructor(self) -> Type["Categorical"]:
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def _formatter(self, boxed=False):
# Defer to CategoricalFormatter's formatter.
return None
def copy(self) -> "Categorical":
"""
Copy constructor.
"""
return self._constructor(
values=self._codes.copy(), dtype=self.dtype, fastpath=True
)
def astype(self, dtype: Dtype, copy: bool = True) -> ArrayLike:
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
"""
if is_categorical_dtype(dtype):
dtype = cast(Union[str, CategoricalDtype], dtype)
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
if is_extension_array_dtype(dtype):
return array(self, dtype=dtype, copy=copy) # type: ignore # GH 28770
if is_integer_dtype(dtype) and self.isna().any():
raise ValueError("Cannot convert float NaN to integer")
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def size(self) -> int:
"""
Return the number of elements in the Categorical.
"""
return self._codes.size
@cache_readonly
def itemsize(self) -> int:
"""
return the size of a single category
"""
return self.categories.itemsize
def tolist(self) -> List[Scalar]:
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
to_list = tolist
@classmethod
def _from_inferred_categories(
cls, inferred_categories, inferred_codes, dtype, true_values=None
):
"""
Construct a Categorical from inferred values.
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
true_values : list, optional
If none are provided, the default ones are
"True", "TRUE", and "true."
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (
isinstance(dtype, CategoricalDtype) and dtype.categories is not None
)
if known_categories:
# Convert to a specialized type with `dtype` if specified.
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors="coerce")
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors="coerce")
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors="coerce")
elif dtype.categories.is_boolean():
if true_values is None:
true_values = ["True", "TRUE", "true"]
cats = cats.isin(true_values)
if known_categories:
# Recode from observation order to dtype.categories order.
categories = dtype.categories
codes = recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# Sort categories and recode for unknown categories.
unsorted = cats.copy()
categories = cats.sort_values()
codes = recode_for_categories(inferred_codes, unsorted, categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories=None, ordered=None, dtype=None):
"""
Make a Categorical type from codes and categories or dtype.
This constructor is useful if you already have codes and
categories/dtype and so do not need the (computation intensive)
factorization step, which is usually done on the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like of int
An integer array, where each integer points to a category in
categories or dtype.categories, or else is -1 for NaN.
categories : index-like, optional
The categories for the categorical. Items need to be unique.
If the categories are not given here, then they must be provided
in `dtype`.
ordered : bool, optional
Whether or not this categorical is treated as an ordered
categorical. If not given here or in `dtype`, the resulting
categorical will be unordered.
dtype : CategoricalDtype or "category", optional
If :class:`CategoricalDtype`, cannot be used together with
`categories` or `ordered`.
.. versionadded:: 0.24.0
When `dtype` is provided, neither `categories` nor `ordered`
should be provided.
Returns
-------
Categorical
Examples
--------
>>> dtype = pd.CategoricalDtype(['a', 'b'], ordered=True)
>>> pd.Categorical.from_codes(codes=[0, 1, 0, 1], dtype=dtype)
[a, b, a, b]
Categories (2, object): [a < b]
"""
dtype = CategoricalDtype._from_values_or_dtype(
categories=categories, ordered=ordered, dtype=dtype
)
if dtype.categories is None:
msg = (
"The categories must be provided in 'categories' or "
"'dtype'. Both were None."
)
raise ValueError(msg)
if is_extension_array_dtype(codes) and is_integer_dtype(codes):
# Avoid the implicit conversion of Int to object
if isna(codes).any():
raise ValueError("codes cannot contain NA values")
codes = codes.to_numpy(dtype=np.int64)
else:
codes = np.asarray(codes)
if len(codes) and not is_integer_dtype(codes):
raise ValueError("codes need to be array-like integers")
if len(codes) and (codes.max() >= len(dtype.categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and len(categories)-1")
return cls(codes, dtype=dtype, fastpath=True)
@property
def codes(self) -> np.ndarray:
"""
The category codes of this categorical.
Codes are an array of integers which are the positions of the actual
values in the categories array.
There is no setter, use the other categorical methods and the normal item
setter to change values in the categorical.
Returns
-------
ndarray[int]
A non-writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_categories(self, categories, fastpath=False):
"""
Sets new categories inplace
Parameters
----------
fastpath : bool, default False
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories, self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (
not fastpath
and self.dtype.categories is not None
and len(new_dtype.categories) != len(self.dtype.categories)
):
raise ValueError(
"new categories need to have the same number of "
"items than the old categories!"
)
self._dtype = new_dtype
def _set_dtype(self, dtype: CategoricalDtype) -> "Categorical":
"""
Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = recode_for_categories(self.codes, self.categories, dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Set the ordered attribute to the boolean value.
Parameters
----------
value : bool
Set whether this categorical is ordered (True) or not (False).
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a copy of this categorical with ordered set to the value.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Set the Categorical to be ordered.
Parameters
----------
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a copy of this categorical with ordered set to True.
Returns
-------
Categorical
Ordered Categorical.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Set the Categorical to be unordered.
Parameters
----------
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a copy of this categorical with ordered set to False.
Returns
-------
Categorical
Unordered Categorical.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False, inplace=False):
"""
Set the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(less or more items than in old categories will result in values set to
NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand, this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes, which do not consider an S1 string equal to a single-char
Python string.
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : bool, default False
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : bool, default False
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : bool, default False
Whether or not to reorder the categories in-place or return a copy
of this categorical with reordered categories.
Returns
-------
Categorical with reordered categories or None if inplace.
Raises
------
ValueError
If new_categories does not validate as categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if cat.dtype.categories is not None and len(new_dtype.categories) < len(
cat.dtype.categories
):
# remove all _codes which are larger and set to -1/NaN
cat._codes[cat._codes >= len(new_dtype.categories)] = -1
else:
codes = recode_for_categories(
cat.codes, cat.categories, new_dtype.categories
)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
def rename_categories(self, new_categories, inplace=False):
"""
Rename categories.
Parameters
----------
new_categories : list-like, dict-like or callable
New categories which will replace old categories.
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0.
inplace : bool, default False
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories or do not validate as categories
See Also
--------
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, "inplace")
cat = self if inplace else self.copy()
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item) for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
"""
Reorder categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : bool, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : bool, default False
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
See Also
--------
rename_categories : Rename categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if set(self.dtype.categories) != set(new_categories):
raise ValueError(
"items in new_categories are not the same as in old categories"
)
return self.set_categories(new_categories, ordered=ordered, inplace=inplace)
def add_categories(self, new_categories, inplace=False):
"""
Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : bool, default False
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
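Examples
--------
A short illustration (the added category is appended and initially unused):
>>> c = pd.Categorical(['a', 'b'])
>>> c.add_categories('c')
[a, b]
Categories (3, object): [a, b, c]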
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
raise ValueError(
f"new categories must not include old categories: {already_included}"
)
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
def remove_categories(self, removals, inplace=False):
"""
Remove the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : bool, default False
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
Raises
------
ValueError
If the removals are not contained in the categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not is_list_like(removals):
removals = [removals]
removal_set = set(removals)
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = {x for x in not_included if notna(x)}
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
raise ValueError(f"removals must all be in old categories: {not_included}")
return self.set_categories(
new_categories, ordered=self.ordered, rename=False, inplace=inplace
)
def remove_unused_categories(self, inplace=False):
"""
Remove categories which are not used.
Parameters
----------
inplace : bool, default False
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
set_categories : Set the categories to the specified ones.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(
new_categories, ordered=self.ordered
)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned. NaN values are unaffected.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(
self._codes.copy(), categories=new_categories, ordered=self.ordered
)
except ValueError:
# NA values are represented in self._codes with -1
# np.take causes NA values to take final element in new_categories
if np.any(self._codes == -1):
new_categories = new_categories.insert(len(new_categories), np.nan)
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op(operator.eq)
__ne__ = _cat_compare_op(operator.ne)
__lt__ = _cat_compare_op(operator.lt)
__gt__ = _cat_compare_op(operator.gt)
__le__ = _cat_compare_op(operator.le)
__ge__ = _cat_compare_op(operator.ge)
# for Series/ndarray like compat
@property
def shape(self):
"""
Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods, fill_value=None):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
fill_value : object, optional
The scalar value to use for newly introduced missing values.
.. versionadded:: 0.24.0
Returns
-------
shifted : Categorical
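Examples
--------
A short illustration (vacated positions are filled with NaN by default):
>>> pd.Categorical(['a', 'b', 'c']).shift(1)
[NaN, a, b]
Categories (3, object): [a, b, c]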
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
fill_value = self._validate_fill_value(fill_value)
codes = shift(codes.copy(), periods, axis=0, fill_value=fill_value)
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def _validate_fill_value(self, fill_value):
"""
Convert a user-facing fill_value to a representation to use with our
underlying ndarray, raising ValueError if this is not possible.
Parameters
----------
fill_value : object
Returns
-------
fill_value : int
Raises
------
ValueError
"""
if isna(fill_value):
fill_value = -1
elif fill_value in self.categories:
fill_value = self.categories.get_loc(fill_value)
else:
raise ValueError(
f"'fill_value={fill_value}' is not present "
"in this Categorical's categories"
)
return fill_value
def __array__(self, dtype=None) -> np.ndarray:
"""
The numpy array interface.
Returns
-------
numpy.array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype.
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
# for binary ops, use our custom dunder methods
result = ops.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
# for all other cases, raise for now (similarly as what happens in
# Series.__array_prepare__)
raise TypeError(
f"Object with dtype {self.dtype} cannot perform "
f"the numpy op {ufunc.__name__}"
)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception("invalid pickle state")
if "_dtype" not in state:
state["_dtype"] = CategoricalDtype(state["_categories"], state["_ordered"])
for k, v in state.items():
setattr(self, k, v)
@property
def T(self) -> "Categorical":
"""
Return the transpose, which for a 1-D Categorical is the Categorical itself.
"""
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(deep=deep)
@doc(_shared_docs["searchsorted"], klass="Categorical")
def searchsorted(self, value, side="left", sorter=None):
# searchsorted is very performance sensitive. By converting codes
# to same dtype as self.codes, we get much faster performance.
if is_scalar(value):
codes = self.categories.get_loc(value)
codes = self.codes.dtype.type(codes)
else:
locs = [self.categories.get_loc(x) for x in value]
codes = np.array(locs, dtype=self.codes.dtype)
return self.codes.searchsorted(codes, side=side, sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See Also
--------
isna : Top-level isna.
isnull : Alias of isna.
Categorical.notna : Boolean inverse of Categorical.isna.
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See Also
--------
notna : Top-level notna.
notnull : Alias of notna.
Categorical.isna : Boolean inverse of Categorical.notna.
"""
return ~self.isna()
notnull = notna
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Return a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : bool, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
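Examples
--------
A short illustration (unused categories still get a count of 0):
>>> pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c']).value_counts()
a    2
b    1
c    0
dtype: int64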
"""
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = np.bincount(obs, minlength=ncat or 0)
else:
count = np.bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype, fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype="int64")
def _internal_get_values(self):
"""
Return the values.
For internal compatibility with pandas formatting.
Returns
-------
np.ndarray or Index
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods.
"""
# if we are a datetime and period index, return Index to keep metadata
if needs_i8_conversion(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
elif is_integer_dtype(self.categories) and -1 in self._codes:
return self.categories.astype("object").take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError(
f"Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n"
)
def _values_for_argsort(self):
return self._codes
def argsort(self, ascending=True, kind="quicksort", **kwargs):
"""
Return the indices that would sort the Categorical.
.. versionchanged:: 0.25.0
Changed to sort missing values at the end.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
**kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
numpy.array
See Also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
Missing values are placed at the end
>>> cat = pd.Categorical([2, None, 1])
>>> cat.argsort()
array([2, 0, 1])
"""
return super().argsort(ascending=ascending, kind=kind, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position="last"):
"""
Sort the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : bool, default False
Do operation in place.
ascending : bool, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2, 2, NaN, 5]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2, 2, 5, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2, 2, 5]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5, 2, 2]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if na_position not in ["last", "first"]:
raise ValueError(f"invalid na_position: {repr(na_position)}")
sorted_idx = nargsort(self, ascending=ascending, na_position=na_position)
if inplace:
self._codes = self._codes[sorted_idx]
else:
return self._constructor(
values=self._codes[sorted_idx], dtype=self.dtype, fastpath=True
)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy.array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype("float64")
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def view(self, dtype=None):
if dtype is not None:
raise NotImplementedError(dtype)
return self._constructor(values=self._codes, dtype=self.dtype, fastpath=True)
def to_dense(self):
"""
Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
warn(
"Categorical.to_dense is deprecated and will be removed in "
"a future version. Use np.asarray(cat) instead.",
FutureWarning,
stacklevel=2,
)
return np.asarray(self)
def fillna(self, value=None, method=None, limit=None):
"""
Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
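Examples
--------
A short illustration (a scalar fill value must already be one of the categories):
>>> cat = pd.Categorical(['a', None, 'b'], categories=['a', 'b'])
>>> cat.fillna('a')
[a, a, b]
Categories (2, object): [a, b]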
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError(
"specifying a limit for fillna has not been implemented yet"
)
codes = self._codes
# pad / bfill
if method is not None:
# TODO: dispatch when self.categories is EA-dtype
values = np.asarray(self).reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None, value).astype(
self.categories.dtype
)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, (np.ndarray, Categorical, ABCSeries)):
# We get ndarray or Categorical if called via Series.fillna,
# where it will unwrap another aligned Series before getting here
mask = ~algorithms.isin(value, self.categories)
if not isna(value[mask]).all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(codes == -1)
codes = codes.copy()
codes[indexer] = values_codes[indexer]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
# # Planning
# ## Challenge
# This is an open-ended challenge to find something interesting and useful (with a business case!) from a dataset of New York City's restaurant health inspections. The inspections are performed by the Department of Health and Mental Hygiene (DOHMH). Some suggestions include identifying trends or actionable insights, or providing recommendations. The audience could be restaurant customers, inspectors, or restauranteurs.
# I came up with some questions I was interested in answering:
# 1. What factors contribute to inspection failures?
# 2. Is there any evidence of geographic bias in inspections?
# 3. Is there any evidence of cuisine bias in inspections?
# 4. Is there any evidence of inspection timing affecting results?
# ## Approach
# I cleaned, plotted, and examined the data. Documentation describing the inspection process suggested two possible outcome variables to look into: 1) initial inspection failure and 2) closure after reinspection. I wanted to investigate both, but started with initial inspection failure.
# I investigated both logistic regression and random forest classification models. I chose to focus on the logistic regression results because I wanted to be able to interpret the coefficients and odds ratios. I tuned hyperparameters and evaluated the model using AUC ROC, because it is a good overall summary of model performance, considering all cells of the confusion matrix. A logistic regression model with L2 (ridge) regularization and a penalty of 0.1 classifies initial inspection failures with an AUC of 0.932. A minimal illustrative sketch of this approach appears just after this planning section.
# ## Results
# ### 1. What factors contribute to inspection failures?
# Looking at the odds ratios for each of the features in the logistic regression model, here are some of the most important factors affecting initial inspection failure.
# - Features associated with lower odds of passing initial inspection:
# - Violation codes related to the presence of mice, rats, cockroaches, or flies
# - Violation codes related to lack of washing facilities, lack of food safety plan, improper food storage temperature, and lack of a required certificate
# - The borough Queens
# - Many kinds of cuisine, including Bangladeshi, Indian, Moroccan, Asian, Malaysian, Spanish, African, Turkish, Latin, Chinese, Mediterranean, Hawaiian, Egyptian, Thai, etc.
# - The number of violations cited
# - Features associated with higher odds of passing initial inspection:
# - Violation codes with lower stakes issues, such as violation of a recently-introduced ban on styrofoam, improper lighting or ventilation, or reuse of single use items
# - The borough Staten Island
# - Many kinds of cuisine including ice cream, hot dogs, donuts, soups/sandwiches, hamburgers, Continental, cafe/coffee/tea shops, juices/smoothies, Ethiopian, steak, sandwiches, bakeries, bagel/pretzel shops, etc. Many of these seem to be shops that would have less food prep and smaller facilities to maintain, so they make sense.
# - Increasing day of the week
# ### 2. Is there any evidence of geographic bias in inspections?
# Yes, there is some evidence for Queens establishments having lower odds of passing the initial inspection and for Staten Island establishments having higher odds of passing. It's difficult to answer this question without a more sophisticated version of logistic regression to use.
# ### 3. Is there any evidence of cuisine bias in inspections?
# Yes, the cuisine types with the lowest odds of passing the initial inspection include many of the "ethnic" cuisines. Other information is needed to determine if this is a cause or an effect.
# ### 4. Is there any evidence of inspection timing affecting results?
# There might be a slight increase in odds of passing the initial inspection for inspections happening later in the week, but it was slight and of unknown significance. There is no evidence of any effect of the time of year (month) on the odds of passing inspection.
# ## Takeaways
# - Restauranteurs in Queens or those running establishments serving at-risk cuisines (e.g. Bangladeshi, Indian, Moroccan, Malaysian, etc.) should be extra vigilant before inspections.
# - Restauranteurs should pay special attention to the violations most associated with lower odds of passing the inspection, such as presence of vermin, lack of washing facilities, improper food storage temperature, and lack of required certifications or food safety plans.
# - NYC food inspectors should carefully examine their inspection process to see if it is being affected by bias against certain cuisines.
# - Aspiring restauranteurs could open an ice cream, hot dog, donut, soup & sandwich, or coffee & tea shop to start out with lower odds of failing the initial food safety inspection.
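# ### Illustrative modeling sketch
# A minimal, self-contained sketch of the approach described above, using synthetic data
# from `make_classification` rather than the real inspection features (which are built
# later in this notebook); the grid values here are placeholders, not the tuned ones.
# +
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV, train_test_split

X_demo, y_demo = make_classification(n_samples=500, n_features=10, random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X_demo, y_demo, random_state=0)

# L2 (ridge) logistic regression; sklearn's C is the inverse of the penalty strength
demo_search = GridSearchCV(
    LogisticRegression(penalty='l2', solver='lbfgs', max_iter=1000),
    param_grid={'C': [0.01, 0.1, 1.0, 10.0]},
    scoring='roc_auc',
    cv=5,
)
demo_search.fit(X_tr, y_tr)
print(demo_search.best_params_, round(demo_search.score(X_te, y_te), 3))
# -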
# +
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pickle
import seaborn as sns
from datetime import datetime
from IPython.display import display
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import plot_confusion_matrix, plot_roc_curve
from sklearn.model_selection import cross_validate, GridSearchCV, train_test_split, StratifiedKFold
from sklearn.preprocessing import MultiLabelBinarizer, OneHotEncoder
from treeinterpreter import treeinterpreter as ti
sns.set(style="whitegrid", font_scale=1.25)
plt.figure(figsize=(12.8, 9.6), dpi=400)
# -
# +
data_dir = '~/devel/insight-data-challenges/05-nyc-restaurant-inspections/data'
output_dir = '~/devel/insight-data-challenges/05-nyc-restaurant-inspections/output'
# -
# ## Read in and clean the user data
# +
inspections = pd.read_csv(
os.path.join(os.path.expanduser(data_dir), 'DOHMH_New_York_City_Restaurant_Inspection_Results.csv'),
parse_dates=['INSPECTION DATE', 'GRADE DATE', 'RECORD DATE']
)
display(inspections.info())
display(inspections.head(15))
# -
# ### Fix data types
# Find the categorical variables
# +
# Are there any that look categorical based on number of unique values?
values_per_variable = inspections.apply('nunique', 0)
variable_dtypes = inspections.dtypes.apply(lambda x: x.name)
variable_info = pd.DataFrame({'n_categories': values_per_variable,
'dtype': variable_dtypes,
'variable': values_per_variable.index}).reset_index(drop=True)
display(variable_info)
# Convert columns to categorical
cat_threshold = 110 # If n unique values is below this, it's probably categorical
known_cat_cols = [
'ACTION', 'BORO', 'GRADE', 'INSPECTION TYPE', 'CRITICAL FLAG', 'CUISINE DESCRIPTION',
'VIOLATION CODE', 'VIOLATION DESCRIPTION', 'Community Board', 'Council District'
]
variable_info['to_category'] = (variable_info['n_categories'] < cat_threshold)\
& (~variable_info['dtype'].isin(('datetime64[ns]', )))
display(variable_info)
# Are there any known categorical variables missing? Or vice versa?
set(variable_info['variable'].loc[variable_info['to_category']].to_list()) - set(known_cat_cols)
set(known_cat_cols) - set(variable_info['variable'].loc[variable_info['to_category']].to_list())
for v in variable_info['variable'].loc[variable_info['to_category']]:
inspections[v] = inspections[v].astype('category')
display(inspections.info())
variable_info['dtype'] = inspections.dtypes.apply(lambda x: x.name).to_numpy()
# -
# ### Convert zipcode to an int
# +
display(inspections['ZIPCODE'].describe())
display(inspections['ZIPCODE'].isna().sum()) # 5500 NaN values, which is why it's not an int. Leave it for now.
# -
# ### Fix missing value codes
# +
inspections['BORO'] = inspections['BORO'].replace('0', np.NaN)
for v in inspections.select_dtypes(include='category').columns:
print('_' * 20)
print(v)
display(inspections[v].value_counts(dropna=False))
new_establishment_inspection_date = datetime(1900, 1, 1)
inspections['INSPECTION DATE'] = inspections['INSPECTION DATE'].replace(new_establishment_inspection_date, pd.NaT)
for v in inspections.select_dtypes(include='datetime').columns:
print('_' * 20)
print(v)
display(inspections[v].value_counts(dropna=False))
display(inspections.select_dtypes(include='number').describe())
variable_info['n_missing'] = inspections.apply(lambda x: x.isna().sum()).to_numpy()
# -
# ### Make a map from violation code to violation description
# +
# Check if there's more than one description per violation code, to see if it will work to select the first one
display(
inspections[['VIOLATION CODE', 'VIOLATION DESCRIPTION']].groupby(
'VIOLATION CODE').aggregate('nunique')['VIOLATION DESCRIPTION'].value_counts()
)
# -
# There are 15 violation codes without any matching description.
# +
inspections['VIOLATION CODE'].nunique()
violation_descriptions = inspections[['VIOLATION CODE', 'VIOLATION DESCRIPTION']].groupby(
'VIOLATION CODE').aggregate('first')
with pd.option_context('display.max_rows', 200):
display(violation_descriptions)
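# Example lookup against the map above (the specific code '04L' is only an assumed
# illustration; substitute any code present in this extract):
# violation_descriptions.loc['04L']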
# -
# ## Add some derived variables
# ### Use documentation instructions to label gradeable/ungradeable inspections
# +
gradeable_inspection_types = (
'Cycle Inspection / Initial Inspection',
'Cycle Inspection / Re-Inspection',
'Pre-Permit (Operational) / Initial Inspection',
'Pre-Permit (Operational)/Re-Inspection',
)
gradeable_actions = (
'Violations were cited in the following area(s).',
'No violations were recorded at the time of this inspection.',
'Establishment Closed by DOHMH.',
)
gradeable_inspection_date_min = datetime(2010, 7, 27)
inspections['INSPECTION TYPE'].isin(gradeable_inspection_types).sum()
inspections['ACTION'].isin(gradeable_actions).sum()
np.sum(inspections['INSPECTION DATE'] >= gradeable_inspection_date_min)
inspections['is_gradeable'] = ((inspections['INSPECTION TYPE'].isin(gradeable_inspection_types))
& (inspections['ACTION'].isin(gradeable_actions))
& (inspections['INSPECTION DATE'] >= gradeable_inspection_date_min)
)
display(inspections['is_gradeable'].value_counts(dropna=False))
# -
# ### Add variables for what kind of inspection it was
# +
inspections['INSPECTION TYPE'].value_counts()
inspections['is_cycle_inspection'] = inspections['INSPECTION TYPE'].str.contains('Cycle')
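# Note: the patterns below are case-sensitive and their capitalization differs from the
# literals in gradeable_inspection_types above ('Pre-Permit'/'Re-Inspection' vs.
# 'Pre-permit'/'Re-inspection'); if the raw data mixes cases, adding case=False to these
# str.contains() calls would likely be the safer choice.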
inspections['is_opening_inspection'] = inspections['INSPECTION TYPE'].str.contains(
'Pre-permit (Operational)', regex=False)
inspections['is_initial_inspection'] = inspections['INSPECTION TYPE'].str.contains('Initial')
inspections['is_reinspection'] = inspections['INSPECTION TYPE'].str.contains('Re-inspection')
inspections['is_compliance_inspection'] = inspections['INSPECTION TYPE'].str.contains('Compliance')
# -
# ### Add variables for date components
# +
inspections['inspection_year'] = inspections['INSPECTION DATE'].dt.year.astype('category')
inspections['inspection_month'] = inspections['INSPECTION DATE'].dt.month.astype('category')
inspections['inspection_day'] = inspections['INSPECTION DATE'].dt.day
inspections['inspection_dayofyear'] = inspections['INSPECTION DATE'].dt.dayofyear
inspections['inspection_dayofweek'] = inspections['INSPECTION DATE'].dt.dayofweek.astype('category')
inspections['inspection_isweekday'] = inspections['inspection_dayofweek'].isin(range(5))
inspections['inspection_week'] = inspections['INSPECTION DATE'].dt.week.astype('category')
display(inspections.info())
# -
# ## Plot everything
# +
# Try the Pandas built in histogram function, even though it's mediocre
inspections.select_dtypes(exclude='bool').hist(figsize=(20, 15))
plt.show()
# And it fails on boolean columns!
# -
# ### Histograms of the numeric variables
# +
g = sns.FacetGrid(
inspections.select_dtypes(include='number').melt(), col='variable', col_wrap=4,
sharex=False, sharey=False, height=4
)
g.map(plt.hist, 'value', color='steelblue', bins=20)
plt.show()
# -
# ### Barplots of the categorical & boolean variables
# Individual plots for variables with too many categories
# +
cat_col_n_values = inspections.select_dtypes(include='category').apply('nunique', 0)
many_values_cat_vars = cat_col_n_values.loc[cat_col_n_values > 20].index
other_cat_vars = cat_col_n_values.loc[cat_col_n_values <= 20].index
# for v in many_values_cat_vars:
# g = sns.countplot(data=inspections, x=v)
# g.set_xticklabels(g.get_xticklabels(), rotation=60, horizontalalignment='right')
# plt.tight_layout()
# plt.show()
# The best is really just a sorted table of value counts.
for v in many_values_cat_vars:
print('_' * 20)
print(v)
with pd.option_context('display.max_rows', cat_threshold):
display(inspections[v].value_counts(dropna=False))
# -
# A facet grid for those with fewer categories
# +
# tmp = inspections[other_cat_vars].melt()
# tmp['value_trunc'] = tmp['value'].str.slice(stop=25)
# g = sns.catplot(
# data=tmp, col='variable', col_wrap=3,
# x='value_trunc', kind='count',
# facet_kws={'sharex': False, 'sharey': False},
# margin_titles=False
# )
# for ax in g.axes.flat:
# for label in ax.get_xticklabels():
# label.set_rotation(70)
# plt.show()
# I can't get the sharex/sharey arguments to work properly. God do I miss ggplot!
for v in other_cat_vars:
g = sns.countplot(data=inspections, x=v)
g.set_xticklabels(g.get_xticklabels(), rotation=60, horizontalalignment='right')
plt.tight_layout()
plt.show()
# -
# ### Histograms of the datetime variables
# +
g = sns.FacetGrid(
inspections.select_dtypes(include='datetime').melt(), col='variable', col_wrap=3,
sharex=False, sharey=False, height=4
)
g.map(plt.hist, 'value', color='steelblue', bins=20)
plt.show()
# -
# ### Head and tail of the object variables
# +
for v in inspections.select_dtypes(include='object').columns:
print('_' * 20)
print(v)
display(inspections[v].head(15))
display(inspections[v].tail(15))
# -
# ## Filter to most important core inspection types
# +
core_inspections = inspections.loc[(inspections['is_cycle_inspection'] | inspections['is_opening_inspection'])
& (inspections['is_initial_inspection'] | inspections['is_reinspection']), ]
# Make sure it's sorted by ascending inspection date
core_inspections = core_inspections.sort_values('INSPECTION DATE', ascending=True)
# -
# ## Summary of inspections
# ### Summary by business
# +
business_summary = core_inspections.groupby('CAMIS').aggregate(
n_rows=('CAMIS', 'count'),
n_inspections=('INSPECTION DATE', 'nunique'),
avg_inspection_frequency=('INSPECTION DATE', lambda x: np.mean(np.diff(x.unique())).astype('timedelta64[D]'))
)
business_summary['avg_inspection_frequency'] = business_summary['avg_inspection_frequency'].dt.days
display(business_summary.info())
g = sns.FacetGrid(
business_summary.melt(), col='variable',
sharex=False, sharey=False, height=4
)
g.map(plt.hist, 'value', color='steelblue', bins=20)
plt.show()
# -
# ### Summary of initial inspection failures
# +
passing_grades = ('A', )
nonpassing_grades = ('B', 'C', )
pending_grades = ('N', 'Z', 'P', )
# Since there are NaNs in both gradeable and ungradeable, I'm going to infer that GRADE of NaN means non-passing
core_inspections.loc[core_inspections['is_gradeable'], 'GRADE'].value_counts(dropna=False)
core_inspections.loc[~core_inspections['is_gradeable'], 'GRADE'].value_counts(dropna=False)
# When grouping by categorical variables, pandas by default creates a group for every category combination,
# including empty ones, which led to an array allocation error here. Using observed=True fixed it.
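# A tiny toy example (unrelated to the inspections data, names made up) of the difference: with categorical
# keys, the default groupby enumerates every category combination, observed or not, while observed=True
# keeps only the combinations that actually occur.
toy = pd.DataFrame({
    'cat_a': pd.Categorical(['x', 'x', 'y'], categories=['x', 'y', 'z']),
    'cat_b': pd.Categorical(['p', 'q', 'p'], categories=['p', 'q']),
    'val': [1, 2, 3],
})
print(toy.groupby(['cat_a', 'cat_b']).size())                 # 6 groups, including empty ones like (z, p)
print(toy.groupby(['cat_a', 'cat_b'], observed=True).size())  # only the 3 observed groups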
initial_inspections = core_inspections.loc[core_inspections['is_initial_inspection'], ].groupby(
['CAMIS', 'BORO', 'INSPECTION DATE', 'inspection_month', 'inspection_dayofweek',
'CUISINE DESCRIPTION', 'INSPECTION TYPE'], observed=True).aggregate(
passed=('GRADE', lambda x: x.iloc[0] == 'A'),
grade=('GRADE', 'first'),
has_critical_flag=('CRITICAL FLAG', lambda x: np.any(x == 'Y')),
n_violations=('VIOLATION CODE', lambda x: x.loc[~x.isna()].nunique()),
violation_codes=('VIOLATION CODE', lambda x: x.loc[~x.isna()].to_list())
).reset_index()
for v in ['passed', 'grade', 'has_critical_flag', 'n_violations']:
g = sns.countplot(data=initial_inspections, x=v)
g.set_xticklabels(g.get_xticklabels(), rotation=60, horizontalalignment='right')
plt.tight_layout()
plt.show()
# Add one-hot encoding for each violation code, BORO, and CUISINE DESCRIPTION
initial_inspections['violation_codes']
mlb = MultiLabelBinarizer()
expanded_violation_codes = mlb.fit_transform(initial_inspections['violation_codes'])
initial_inspections_violation_code_vars = 'violation_' + mlb.classes_
expanded_violation_codes = pd.DataFrame(expanded_violation_codes, columns=initial_inspections_violation_code_vars)
initial_inspections = pd.concat([initial_inspections, expanded_violation_codes], axis=1)
ohe = OneHotEncoder(sparse=False)
boro_encoding = ohe.fit_transform(initial_inspections['BORO'].to_numpy().reshape(-1, 1))
initial_inspections_boro_vars = 'BORO_' + ohe.categories_[0]
boro_encoding = pd.DataFrame(boro_encoding, columns=initial_inspections_boro_vars)
initial_inspections = pd.concat([initial_inspections, boro_encoding], axis=1)
ohe = OneHotEncoder(sparse=False)
cuisine_encoding = ohe.fit_transform(initial_inspections['CUISINE DESCRIPTION'].to_numpy().reshape(-1, 1))
initial_inspections_cuisine_vars = 'cuisine_' + ohe.categories_[0]
cuisine_encoding = pd.DataFrame(cuisine_encoding, columns=initial_inspections_cuisine_vars)
initial_inspections = pd.concat([initial_inspections, cuisine_encoding], axis=1)
display(initial_inspections.info(max_cols=500))
# -
# +
closed_actions = (
'Establishment Closed by DOHMH. Violations were cited in the following area(s) and those requiring immediate action were addressed.',
'Establishment re-closed by DOHMH',
)
reinspections = core_inspections.loc[core_inspections['is_reinspection'], ].groupby(
['CAMIS', 'BORO', 'INSPECTION DATE', 'inspection_month', 'inspection_dayofweek',
'CUISINE DESCRIPTION', 'INSPECTION TYPE'], observed=True).aggregate(
passed=('GRADE', lambda x: x.iloc[0] == 'A'),
grade=('GRADE', 'first'),
closed=('ACTION', lambda x: x.isin(closed_actions).any()),
has_critical_flag=('CRITICAL FLAG', lambda x: np.any(x == 'Y')),
n_violations=('VIOLATION CODE', lambda x: x.loc[~x.isna()].nunique()),
violation_codes=('VIOLATION CODE', lambda x: x.loc[~x.isna()].to_list())
).reset_index()
# Put some plotting in here
for v in ['passed', 'grade', 'closed', 'has_critical_flag', 'n_violations']:
g = sns.countplot(data=reinspections, x=v)
g.set_xticklabels(g.get_xticklabels(), rotation=60, horizontalalignment='right')
plt.tight_layout()
plt.show()
reinspections['violation_codes']
mlb = MultiLabelBinarizer()
expanded_violation_codes = mlb.fit_transform(reinspections['violation_codes'])
expanded_violation_codes = pd.DataFrame(expanded_violation_codes, columns='violation_' + mlb.classes_)
reinspections = pd.concat([reinspections, expanded_violation_codes], axis=1)
ohe = OneHotEncoder(sparse=False)
boro_encoding = ohe.fit_transform(reinspections['BORO'].to_numpy().reshape(-1, 1))
reinspections_boro_vars = 'BORO_' + ohe.categories_[0]
boro_encoding = pd.DataFrame(boro_encoding, columns=reinspections_boro_vars)
reinspections = pd.concat([reinspections, boro_encoding], axis=1)
ohe = OneHotEncoder(sparse=False)
cuisine_encoding = ohe.fit_transform(reinspections['CUISINE DESCRIPTION'].to_numpy().reshape(-1, 1))
reinspections_cuisine_vars = 'cuisine_' + ohe.categories_[0]
cuisine_encoding = pd.DataFrame(cuisine_encoding, columns=reinspections_cuisine_vars)
reinspections = pd.concat([reinspections, cuisine_encoding], axis=1)
display(reinspections.info(max_cols=500))
# -
# ## Find important features for classification of failed initial inspections using RandomForest
# ### Prepare data for random forest
# Are there low-variance features that should be removed?
# +
initial_inspections_variances = initial_inspections.var(axis=0)
with pd.option_context('display.max_rows', 200):
display(initial_inspections_variances.sort_values())
g = sns.distplot(initial_inspections_variances, rug=False)
plt.show()
# -
# I'm not sure how meaningful variance is for categorical variables
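# For a 0/1 dummy column the variance is essentially p * (1 - p), where p is the share of ones, so a low
# variance here mostly reflects how rare (or how near-universal) a violation code is rather than it being
# uninformative. A quick numeric check of that formula:
# +
for p_demo in (0.5, 0.1, 0.01):
    print(p_demo, p_demo * (1 - p_demo))
# -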
# ### Just try running random forest to get it working
# +
id_vars = ['CAMIS', 'INSPECTION DATE', 'INSPECTION TYPE', 'violation_codes', 'BORO', 'CUISINE DESCRIPTION']
label_vars = ['passed', 'grade']
feature_vars = list(set(initial_inspections.columns) - set(id_vars) - set(label_vars))
feature_vars.sort()
X = initial_inspections[feature_vars].to_numpy()
y = initial_inspections[label_vars[0]]
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=48)
forest = RandomForestClassifier(n_estimators=20, class_weight='balanced', oob_score=True, random_state=48)
forest.fit(X_train, y_train)
forest.predict(X_test)
print(forest.score(X_test, y_test))
print(forest.feature_importances_)
print(forest.oob_score_)
# g = sns.barplot(x=forest.feature_importances_, y=feature_vars)
# plt.show()
# -
# This model looks fine, just based on accuracy (89%). Now to do cross-validation...
# ### Cross validation
# +
chosen_metrics = ['accuracy', 'balanced_accuracy', 'f1', 'f1_weighted', 'recall', 'roc_auc']
forest = RandomForestClassifier(n_estimators=50, class_weight='balanced', random_state=48)
cv_results = cross_validate(forest, X, y,
cv=StratifiedKFold(n_splits=5),
return_train_score=False,
scoring=chosen_metrics
)
cv_means = {k: np.mean(v) for k, v in cv_results.items()}
display(cv_means)
# n_estimators_grid = np.concatenate((np.arange(25, 175, 25), np.arange(200, 600, 100)))
# n_estimators_grid = np.arange(25, 175, 25)
# n_estimators_search = GridSearchCV(
# estimator=RandomForestClassifier(random_state=48),
# param_grid={'n_estimators': n_estimators_grid},
# scoring=chosen_metrics,
# n_jobs=3,
# cv=StratifiedKFold(n_splits=5),
# refit=False
# )
# n_estimators_search.fit(X, y)
# -
# ### Fit the final model
# +
final_forest = RandomForestClassifier(n_estimators=100, class_weight='balanced', random_state=48)
final_forest.fit(X_train, y_train)
final_predictions = final_forest.predict(X_test)
final_forest.feature_importances_
final_feature_importances = pd.DataFrame({'feature': feature_vars, 'importance': final_forest.feature_importances_})
final_feature_importances = final_feature_importances.sort_values('importance', ascending=False)
plt.figure(figsize=(6, 10), dpi=400)
g = sns.barplot(x='importance', y='feature', data=final_feature_importances.head(40))
plt.tight_layout()
plt.show()
plt.figure(figsize=(12.8, 9.6), dpi=400)
plot_confusion_matrix(final_forest, X_test, y_test, values_format=',.0f')
plt.show()
plot_roc_curve(final_forest, X_test, y_test)
plt.plot([0, 1], [0, 1], color='r', linestyle='--', label='Chance')
plt.legend()
plt.show()
# -
# ### Interpret the feature contributions
# +
prediction, bias, contributions = ti.predict(final_forest, X_train[1, ].reshape(1, -1))
print("Prediction"), prediction
print("Bias (trainset prior)"), bias
print("Feature contributions:")
for c, feature in zip(contributions[0], feature_vars):
print(feature, c)
# -
# I don't really find this very helpful. I would need to do this for every sample in the dataset to get something informative.
# Go back to logistic regression so you can actually explain it!
# ## Find important features for classification of failed initial inspections using logistic regression
# ### Fit an initial logistic regression model
# +
logreg = LogisticRegression(random_state=48, max_iter=1000, solver='saga')
logreg.fit(X_train, y_train)
logreg.predict(X_test)
print(logreg.score(X_test, y_test))
# -
# ### Cross validation of a basic logistic regression model
# +
logreg = LogisticRegression(random_state=48, max_iter=1000, solver='saga')
cv_results = cross_validate(logreg, X, y,
cv=StratifiedKFold(n_splits=5),
return_train_score=False,
scoring=chosen_metrics
)
cv_means = {k: np.mean(v) for k, v in cv_results.items()}
display(cv_means)
# -
# ### Grid search to tune over regularization hyperparameters
# +
param_grid = {
# C = inverse of regularization strength; positive float; smaller C = stronger regularization
'C': (10.0, 4.0, 2.0, 1.0, 0.5, 0.1, 0.01, 0.001), # Similar to defaults for LogisticRegressionCV
'penalty': ['l1', 'l2'] # Regularization penalty type; L1 = Lasso, L2 = Ridge
# L1 = penalized by absolute value of coefficient magnitude
# L2 = penalized by squared magnitude of coefficient
}
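# Quick aside (not part of the search itself): on the same training data, a smaller C (stronger
# regularization) typically shrinks the coefficients, which is the effect the penalty terms above describe.
for c_demo in (1.0, 0.01):
    demo_logreg = LogisticRegression(solver='saga', random_state=48, max_iter=1000, C=c_demo)
    demo_logreg.fit(X_train, y_train)
    print('C={:.2f} -> mean |coef| = {:.4f}'.format(c_demo, np.abs(demo_logreg.coef_).mean()))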
logreg_gridsearch_outfile = os.path.join(os.path.expanduser(output_dir), 'logreg_gridsearch_results.pickle')
# The higher the value of C, the longer the fit takes and the higher the max_iter needed.
# Use saga solver because it is faster for large data and supports both L1 and L2 regularization
if not os.path.exists(logreg_gridsearch_outfile):
classifier_grid_search = GridSearchCV(
estimator=LogisticRegression(solver='saga', random_state=48, max_iter=5000),
param_grid=param_grid,
cv=5, scoring='roc_auc', verbose=2, n_jobs=3
)
grid_search_models = classifier_grid_search.fit(X, y)
with open(logreg_gridsearch_outfile, 'wb') as f:
pickle.dump(grid_search_models, f, pickle.HIGHEST_PROTOCOL)
else:
with open(logreg_gridsearch_outfile, 'rb') as f:
grid_search_models = pickle.load(f)
grid_search_results = pd.DataFrame(grid_search_models.cv_results_)
grid_search_results['params_string'] = grid_search_results['params'].apply(
lambda x: 'C={:.3f}\nPenalty={}'.format(x['C'], x['penalty']))
grid_search_results = grid_search_results.sort_values(by='rank_test_score', ascending=True)
display(grid_search_results)
plt.figure(figsize=(8, 8), dpi=400)
g = sns.barplot(x='mean_test_score', y='params_string', data=grid_search_results)
g.set(xlabel='Mean AUC ROC', ylabel='Hyperparameter values')
plt.tight_layout()
plt.show()
plt.figure(figsize=(12.8, 9.6), dpi=400)
# -
# L1 and L2 regularization are basically both good, and any C of 0.1 or higher gives a good AUC ROC.
# I chose C = 0.1 and penalty = l2 because they are the fastest of the combinations with good AUC ROC.
# ### Fit the final logistic regression model
# +
final_C = 0.1
final_penalty = 'l2'
final_logreg = LogisticRegression(random_state=48, max_iter=5000, C=final_C, penalty=final_penalty, solver='saga')
final_logreg.fit(X_train, y_train)
final_logreg_predictions = final_logreg.predict(X_test)
plot_confusion_matrix(final_logreg, X_test, y_test, values_format=',.0f')
plt.show()
plot_roc_curve(final_logreg, X_test, y_test)
plt.plot([0, 1], [0, 1], color='r', linestyle='--', label='Chance')
plt.legend()
plt.show()
# -
# ### Gather coefficients & odds ratios
# coef_ gives the coefficients contributing to classes_[1], which is "True" for passing the initial inspection.
# So the coefficients quantify each feature's contribution to the log-odds of passing the initial inspection.
# Odds ratios greater than 1.0 indicate a higher probability of passing the initial inspection, and ORs less than 1.0 indicate a lower probability of passing it.
# +
final_coefficients = pd.DataFrame({'feature': feature_vars, 'coefficient': final_logreg.coef_[0]})
# coef_ gives the coefficients contributing to classes_[1], which is passed="True"
final_coefficients['magnitude'] = final_coefficients['coefficient'].abs()
final_coefficients['direction'] = np.sign(final_coefficients['coefficient'])
final_coefficients['direction'] = final_coefficients['direction'].replace({-1.0: 'negative', 1.0: 'positive', 0: 'NA'})
final_coefficients = final_coefficients.sort_values('magnitude', ascending=False)
final_coefficients['odds_ratio'] = np.exp(final_coefficients['coefficient'])
# For a given feature, the odds ratio is the factor by which the odds of passing (odds = P(pass) / P(fail)) are multiplied for a one-unit increase in that feature, holding the other features constant.
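# Worked example with made-up numbers (not from this model): a coefficient of +0.69 multiplies the odds of
# passing by exp(0.69) ~= 2.0 for a one-unit increase in the feature (others held fixed); -0.69 halves them.
print(np.exp(0.69), np.exp(-0.69))  # ~2.0, ~0.5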
with pd.option_context('display.max_rows', 200):
display(final_coefficients.sort_values('odds_ratio', ascending=False))
# g = sns.barplot(x='magnitude', y='feature', hue='direction', data=final_coefficients.head(40))
# g.set(xlabel='Coefficient magnitude', ylabel='Feature')
# plt.tight_layout()
# plt.show()
# -
# ### Investigate model results
# #### What are odds ratios for the various violation codes?
# +
plt.figure(figsize=(9, 12), dpi=400)
g = sns.barplot(x='odds_ratio', y='feature', hue='direction',
data=final_coefficients[final_coefficients['feature'].isin(
initial_inspections_violation_code_vars)].sort_values('odds_ratio', ascending=True))
g.axvline(1.0)
g.set(xlabel='Odds ratio', ylabel='Feature')
plt.tight_layout()
plt.show()
top_violation_codes = final_coefficients.loc[final_coefficients['feature'].isin(
initial_inspections_violation_code_vars),
].sort_values('odds_ratio', ascending=False).head(10)['feature'].str.replace('violation_', '')
bottom_violation_codes = final_coefficients.loc[final_coefficients['feature'].isin(
initial_inspections_violation_code_vars),
].sort_values('odds_ratio', ascending=True).head(10)['feature'].str.replace('violation_', '')
with pd.option_context('display.max_colwidth', 150):
print('HIGHEST ODDS RATIO VIOLATION CODES - higher odds of passing initial inspection')
display(violation_descriptions.loc[violation_descriptions.index.isin(top_violation_codes), ])
print('LOWEST ODDS RATIO VIOLATION CODES - lower odds of passing initial inspection')
display(violation_descriptions.loc[violation_descriptions.index.isin(bottom_violation_codes), ])
# -
# Investigating the violation code 22G with a missing description:
# https://rules.cityofnewyork.us/tags/sanitary-inspection
# "In the list of unscored violations, a new violation code 22G containing a penalty for violations of Administrative Code §16-329 (c) which prohibits use of expanded polystyrene single service articles, is being added. "
# https://www1.nyc.gov/office-of-the-mayor/news/295-18/mayor-de-blasio-ban-single-use-styrofoam-products-new-york-city-will-be-effect
# From a recent ban on styrofoam products!
# What is an HACCP plan?
# "Hazard Analysis Critical Control Points (HACCP) is an internationally recognized method of identifying and managing food safety related risk and, when central to an active food safety program, can provide your customers, the public, and regulatory agencies assurance that a food safety program is well managed." (Source)[https://safefoodalliance.com/food-safety-resources/haccp-overview/]
# #### What are odds ratios for the boroughs?
# +
plt.figure(figsize=(8, 4), dpi=400)
g = sns.barplot(x='odds_ratio', y='feature',
data=final_coefficients[final_coefficients['feature'].isin(
initial_inspections_boro_vars)].sort_values('odds_ratio', ascending=True))
g.set(xlabel='Odds ratio', ylabel='Feature')
g.axvline(1.0)
plt.tight_layout()
plt.show()
# -
# The boroughs are all pretty close to having the same odds of passing initial inspection, though Queens and Staten Island are perhaps a bit different. It's hard to say without p values for the coefficients, which I would need to use a different package or do bootstrapping for.
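# A rough sketch (not part of the original analysis) of the bootstrap mentioned above: resample rows with
# replacement, refit the final model, and take percentile intervals of the coefficients. It assumes X, y,
# feature_vars, final_C and final_penalty from the earlier cells; n_boot=200 and the 95% interval are
# arbitrary choices, and fitting that many models is slow, so it is gated behind a flag.
# +
run_coef_bootstrap = False  # flip to True to run; n_boot refits of the final model take a while
if run_coef_bootstrap:
    n_boot = 200
    rng = np.random.default_rng(48)
    boot_coefs = np.empty((n_boot, X.shape[1]))
    for i in range(n_boot):
        idx = rng.integers(0, X.shape[0], size=X.shape[0])  # resample rows with replacement
        boot_logreg = LogisticRegression(random_state=48, max_iter=5000, C=final_C,
                                         penalty=final_penalty, solver='saga')
        boot_logreg.fit(X[idx], y.to_numpy()[idx])
        boot_coefs[i] = boot_logreg.coef_[0]
    coef_ci = pd.DataFrame({
        'feature': feature_vars,
        'ci_lower': np.percentile(boot_coefs, 2.5, axis=0),
        'ci_upper': np.percentile(boot_coefs, 97.5, axis=0),
    })
    display(coef_ci.sort_values('ci_lower'))
# -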
# #### What are odds ratios for the various cuisines?
# +
plt.figure(figsize=(10, 13), dpi=400)
g = sns.barplot(x='odds_ratio', y='feature', hue='direction',
data=final_coefficients[final_coefficients['feature'].isin(
initial_inspections_cuisine_vars)].sort_values('odds_ratio', ascending=True))
g.axvline(1.0)
g.set(xlabel='Odds ratio', ylabel='Feature')
plt.tight_layout()
plt.show()
top_cuisines = final_coefficients.loc[
final_coefficients['feature'].isin(initial_inspections_cuisine_vars) & (final_coefficients['odds_ratio'] > 1.0),
].sort_values('odds_ratio', ascending=False)
bottom_cuisines = final_coefficients.loc[
final_coefficients['feature'].isin(initial_inspections_cuisine_vars) & (final_coefficients['odds_ratio'] < 1.0),
].sort_values('odds_ratio', ascending=True)
with | pd.option_context('display.max_rows', 100) | pandas.option_context |
from __future__ import print_function, division
import pandas as pd
import numpy as np
import numpy.random as rand
import torch as th
from torch.utils.data import Dataset
from torch.autograd import Variable
import random
from sklearn import preprocessing as skl_preprocessing
from sklearn.metrics import roc_curve, roc_auc_score, classification_report, confusion_matrix, f1_score, precision_score, recall_score
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import os.path as ospath
import itertools
try:
xrange
except NameError:
xrange = range
class CollisionDataset(Dataset):
def __init__(self, data, header=None, scaler=None, target_col=0, index_col=None):
if type(data) is np.ndarray:
X = np.concatenate([data[:, :target_col], data[:, target_col+1:]], axis=1)
y = data[:, target_col]
else:
filetype = ospath.splitext(data)[1][1:]
if filetype.lower() == "csv":
self._dataframe = | pd.read_csv(data, header=header, index_col=index_col) | pandas.read_csv |
from src.mlb_fantasy import GetData
from luigi import build
import pandas as pd
from sklearn.cluster import KMeans, MeanShift, DBSCAN, SpectralClustering
from sklearn.mixture import BayesianGaussianMixture, GaussianMixture
import matplotlib.pyplot as plt
colmap = {1: 'r', 2: 'g', 3: 'b'}
b_data = pd.read_csv(GetData(type='batting').output().open(), index_col=0)
p_data = pd.read_csv(GetData(type='pitching').output().open(), index_col=0)
df = pd.DataFrame({'x': p_data['G'], 'y': p_data['IP']})
fantasy_baseball_pitching_stats = ('ERA', 'WHIP', 'SO', 'SV', 'W')
def gaussian_clustering():
p_data_filtered = p_data[(p_data.G > 20) | (p_data.IP > 50)]
df = pd.DataFrame({'x': p_data_filtered['G'], 'y': p_data_filtered['IP']})
labels = GaussianMixture(n_components=2).fit_predict(X=df.values)
colors = list(map(lambda x: colmap[x + 1], labels))
plt.scatter(p_data_filtered['G'], p_data_filtered['IP'], color=colors)
plt.xlabel("Games Played")
plt.ylabel("Innings Pitched")
plt.title("Yearly Pitching Stats")
plt.show()
p_data_filtered = p_data[(p_data.G > 20) | (p_data.IP > 50)]
df = | pd.DataFrame({'x': p_data_filtered['G'], 'y': p_data_filtered['IP']}) | pandas.DataFrame |
import PySimpleGUI as sg
import pandas as pd
from functools import reduce
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from math import pi
def user_input_GUI():
global stock_share_hash, index_hash
layout = [
[sg.Text('Please enter Portfolio and its individual stock share', font=("Helvetica bold", 20))],
[sg.Text('Portfolio', size=(7, 1), font=("Helvetica", 16)),
sg.InputText('', key='stock', font=("Helvetica", 16))],
[sg.Text('Share', size=(7, 1), font=("Helvetica", 16)),
sg.InputText('', key='share', font=("Helvetica", 16))],
[sg.Text('Indices Weight (0 - 1)', font=("Helvetica bold", 16))],
[sg.Text('SPI', size=(3, 1), font=("Helvetica", 16)),
sg.InputText('1', key='spi_weight', size=(3, 1), font=("Helvetica", 16)),
sg.Text('TPI', size=(3, 1), font=("Helvetica", 16)),
sg.InputText('1', key='tpi_weight', size=(3, 1), font=("Helvetica", 16)),
sg.Text('SLI', size=(3, 1), font=("Helvetica", 16)),
sg.InputText('1', key='sli_weight', size=(3, 1), font=("Helvetica", 16)),
sg.Text('PRI', size=(3, 1), font=("Helvetica", 16)),
sg.InputText('1', key='pri_weight', size=(3, 1), font=("Helvetica", 16)),
sg.Text('ATSI', size=(3, 1), font=("Helvetica", 16)),
sg.InputText('1', key='atsi_weight', size=(3, 1), font=("Helvetica", 16))],
[sg.Submit('Analyze', font=("Helvetica", 16)), sg.Exit(font=("Helvetica", 16))]]
window = sg.Window('Client Tool for Finding Optimal ATS').Layout(layout)
while True:
event, stock_share_hash_old = window.Read()
for key, value in stock_share_hash_old.items():
stock_share_hash_old.update({key: value.split(',')})
newlist = []
for value in stock_share_hash_old['share']:
newlist.append(int(value))
stock_share_hash_old.update({'share': newlist})
stock_share_hash = {}
for index in range(len(stock_share_hash_old['stock'])):
stock_share_hash[stock_share_hash_old['stock'][index].upper()] = stock_share_hash_old['share'][index]
index_hash = {}
index_hash.update({'spi_weight': stock_share_hash_old['spi_weight']})
# stock_share_hash.pop('spi_weight')
index_hash.update({'tpi_weight': stock_share_hash_old['tpi_weight']})
# stock_share_hash.pop('tpi_weight')
index_hash.update({'sli_weight': stock_share_hash_old['sli_weight']})
# stock_share_hash.pop('sli_weight')
index_hash.update({'pri_weight': stock_share_hash_old['pri_weight']})
# stock_share_hash.pop('pri_weight')
index_hash.update({'atsi_weight': stock_share_hash_old['atsi_weight']})
# stock_share_hash.pop('atsi_weight')
# Remove spaces in key
stock_share_hash = {k.replace(' ', ''): v for k, v in stock_share_hash.items()}
overall_score(input=stock_share_hash, finra_data=finra_data, sector_data=sector_data)
sg.Popup('Most Optimal ATS for Routing this Portfolio:', stock_share_hash, score_sorted, font=("Helvetica", 16))
if event is None or event == 'Exit':
break
window.Close()
return
# def stock_ratio_filter(portfolio, data, ratio_data):
# data['Week'] = pd.to_datetime(data['Week'])
# last_week = data['Week'].max()
# last_week_data = data[data['Week'] == last_week]
#
# lastweek_shares = last_week_data.groupby(['Symbol'])['Shares'].sum()
# lastweek_shares = pd.DataFrame(lastweek_shares)
#
# ratio_data = pd.merge(left=lastweek_shares, right=ratio_data, left_on="Symbol", right_on="symbol", how="left")
# ratio_data['Total_Volume'] = ratio_data['volume'] / ratio_data['marketPercent']
# ratio_data['ADV'] = ratio_data['Shares'] / 5
# ratio_data['Ratio'] = ratio_data['ADV'] / ratio_data['Total_Volume']
#
# portfolio_data = ratio_data[ratio_data['Symbol'].isin(portfolio)]
# portfolio_data.set_index('Symbol')
# for stock in portfolio:
# if not portfolio_data.loc[stock] < 0.03:
#
#
def portfolio_share_prop_index(portfolio, data):
portfolio_data = data[data['Symbol'].isin(portfolio)]
ats_list = data.ATS_MPID.unique()
hash_portfolio = {stock: [] for stock in portfolio}
for stock in portfolio:
each_stock = portfolio_data[portfolio_data['Symbol'] == stock]
stock_sum_by_ats = each_stock.groupby(['ATS_MPID'])['Shares'].sum()
model = stock_sum_by_ats / sum(stock_sum_by_ats)
# model_normalized = (model - min(model)) / (max(model) - min(model))
for ats in ats_list:
if ats not in model.index:
new_ats = pd.Series([0], index=[ats])
model = model.append(new_ats)
hash_portfolio.update({stock: model.sort_values(ascending=False)})
worthfullness_index = pd.Series()
for ats in ats_list:
if ats not in worthfullness_index.index:
new_ats = pd.Series([0], index=[ats])
worthfullness_index = worthfullness_index.append(new_ats)
for stock in portfolio:
worthfullness_index += hash_portfolio[stock]
worthfullness_index_normalized = \
(worthfullness_index - min(worthfullness_index)) / (max(worthfullness_index) - min(worthfullness_index))
# worthfullness_index /= len(portfolio)
return worthfullness_index_normalized
def portfolio_trade_prop_index(portfolio, data):
portfolio_data = data[data['Symbol'].isin(portfolio)]
ats_list = data.ATS_MPID.unique()
hash_portfolio = {stock: [] for stock in portfolio}
for stock in portfolio:
each_stock = portfolio_data[portfolio_data['Symbol'] == stock]
stock_sum_by_ats = each_stock.groupby(['ATS_MPID'])['Trades'].sum()
model = stock_sum_by_ats / sum(stock_sum_by_ats)
# model_normalized = (model - min(model)) / (max(model) - min(model))
for ats in ats_list:
if ats not in model.index:
new_ats = pd.Series([0], index=[ats])
model = model.append(new_ats)
hash_portfolio.update({stock: model.sort_values(ascending=False)})
worthfullness_index = pd.Series()
for ats in ats_list:
if ats not in worthfullness_index.index:
new_ats = pd.Series([0], index=[ats])
worthfullness_index = worthfullness_index.append(new_ats)
for stock in portfolio:
worthfullness_index += hash_portfolio[stock]
worthfullness_index_normalized = \
(worthfullness_index - min(worthfullness_index)) / (max(worthfullness_index) - min(worthfullness_index))
# worthfullness_index /= len(portfolio)
return worthfullness_index_normalized
# test_portfolio = ['A', 'AA']
# data = pd.read_csv("/Users/TonY/Desktop/capstone/finra.csv")
# portfolio_share_prop_index(test_portfolio, data)
# a = portfolio_share_prop_index(test_portfolio, data) + portfolio_trade_prop_index(portfolio, data)
def sector_liquidity_index(portfolio, data, sector_data):
sector_list = []
sector_stock_hash = {}
hash_index = {}
ats_list = data.ATS_MPID.unique()
for stock in portfolio:
sector_list.append(sector_data.loc[sector_data['Symbol'] == stock, 'sector'].iloc[0])
sector_list = set(sector_list)
for sector in sector_list:
sector_stock_hash.update(
{sector: sector_data.loc[sector_data['sector'] == sector, 'Symbol'].values[:].tolist()})
for sector in sector_stock_hash:
portfolio_data = data[data['Symbol'].isin(sector_stock_hash[sector])]
sector_sum_by_ats = portfolio_data.groupby(['ATS_MPID'])['Shares'].sum()
model = sector_sum_by_ats / sum(sector_sum_by_ats)
# model_normalized = (model - min(model)) / (max(model) - min(model))
for ats in ats_list:
if ats not in model.index:
new_ats = pd.Series([0], index=[ats])
model = model.append(new_ats)
hash_index.update({sector: model})
sl_index = pd.Series()
for ats in ats_list:
if ats not in sl_index.index:
new_ats = pd.Series([0], index=[ats])
sl_index = sl_index.append(new_ats)
for sector in sector_list:
sl_index += hash_index[sector]
sl_index_normalized = (sl_index - min(sl_index)) / (max(sl_index) - min(sl_index))
# sl_index /= len(sector_list)
return sl_index_normalized
# data = pd.read_csv("/Users/TonY/Desktop/capstone/finra.csv")
# sector_data = pd.read_csv('/Users/TonY/Desktop/capstone/market_cap_sector_mktcapcategory_by_symbol.csv', encoding='utf-8')
# test_portfolio = ['A', 'AA', 'ADRO', 'AABA']
# b = sector_liquidity_index(test_portfolio, data, sector_data)
# len(b)
def participation_rate_index(hash_portfolio_share, data):
hash_par_rate_index = {}
ats_list = data.ATS_MPID.unique()
for stock in hash_portfolio_share:
data_selected = data.loc[data['Symbol'] == stock]
result = data_selected.groupby('ATS_MPID')['Shares'].sum() / 85
model = hash_portfolio_share[stock] / result
# model_normalized = (model - min(model)) / (max(model) - min(model))
for ats in ats_list:
if ats not in model.index:
new_ats = pd.Series([0], index=[ats])
model = model.append(new_ats)
hash_par_rate_index.update({stock: model})
pr_index = pd.Series()
for ats in ats_list:
if ats not in pr_index.index:
new_ats = pd.Series([0], index=[ats])
pr_index = pr_index.append(new_ats)
for stock in hash_portfolio_share:
pr_index += hash_par_rate_index[stock]
pr_index_normalized = (pr_index - min(pr_index)) / (max(pr_index) - min(pr_index))
# pr_index /= len(hash_portfolio_share)
for i in range(len(pr_index_normalized)):
if pr_index_normalized[i] != 0:
pr_index_normalized[i] = 1 - pr_index_normalized[i]
return pr_index_normalized
data = pd.read_csv("/Users/TonY/Desktop/capstone/finra.csv")
hash_portfolio_share = {'A': 100, "AA": 200}
participation_rate_index(hash_portfolio_share, data)
def avg_trade_size_index(hash_portfolio_share, data):
hash_par_rate_index = {}
ats_list = data.ATS_MPID.unique()
for stock in hash_portfolio_share:
data_selected = data.loc[data['Symbol'] == stock]
share_sum = data_selected.groupby('ATS_MPID')['Shares'].sum()
trade_sum = data_selected.groupby('ATS_MPID')['Trades'].sum()
model = hash_portfolio_share[stock] / (share_sum / trade_sum)
# model_normalized = (model - min(model)) / (max(model) - min(model))
for ats in ats_list:
if ats not in model.index:
new_ats = pd.Series([0], index=[ats])
model = model.append(new_ats)
hash_par_rate_index.update({stock: model})
pr_index = pd.Series()
for ats in ats_list:
if ats not in pr_index.index:
new_ats = pd.Series([0], index=[ats])
pr_index = pr_index.append(new_ats)
for stock in hash_portfolio_share:
pr_index += hash_par_rate_index[stock]
pr_index_normalized = (pr_index - min(pr_index)) / (max(pr_index) - min(pr_index))
# pr_index /= len(hash_portfolio_share)
return pr_index_normalized
def overall_score(input, finra_data, sector_data):
# input = user_input_GUI()
global spi, tpi, sli, pri, atsi, score_sorted
spi = portfolio_share_prop_index(portfolio=input.keys(), data=finra_data)
tpi = portfolio_trade_prop_index(portfolio=input.keys(), data=finra_data)
sli = sector_liquidity_index(portfolio=input.keys(), data=finra_data, sector_data=sector_data)
pri = participation_rate_index(hash_portfolio_share=input, data=finra_data)
atsi = avg_trade_size_index(hash_portfolio_share=input, data=finra_data)
score = float(index_hash['spi_weight'][0]) * spi + float(index_hash['tpi_weight'][0]) * tpi + \
float(index_hash['sli_weight'][0]) * sli + float(index_hash['pri_weight'][0]) * pri + \
float(index_hash['atsi_weight'][0]) * atsi
score /= 5
score_sorted = round(score.sort_values(ascending=False), 3)[0:5]
# print(stock_share_hash, '\n', score_sorted[0:5])
return radar_chart()
def index_to_dataframe():
data_frame_spi = pd.DataFrame(spi, columns=['SPI'])
data_frame_spi.index.name = 'ATS'
data_frame_tpi = pd.DataFrame(tpi, columns=['TPI'])
data_frame_tpi.index.name = 'ATS'
data_frame_sli = pd.DataFrame(sli, columns=['SLI'])
data_frame_sli.index.name = 'ATS'
data_frame_pri = | pd.DataFrame(pri, columns=['PRI']) | pandas.DataFrame |
import math
from abc import ABC
from typing import Optional, Iterable
import pandas as pd
from django.db import connection
from pandas import DataFrame
from recipe_db.analytics import METRIC_PRECISION, POPULARITY_START_MONTH, POPULARITY_CUT_OFF_DATE
from recipe_db.analytics.scope import RecipeScope, StyleProjection, YeastProjection, HopProjection, \
FermentableProjection
from recipe_db.analytics.utils import remove_outliers, get_style_names_dict, get_hop_names_dict, get_yeast_names_dict, \
get_fermentable_names_dict, RollingAverage, Trending, months_ago
from recipe_db.models import Recipe
class RecipeLevelAnalysis(ABC):
def __init__(self, scope: RecipeScope) -> None:
self.scope = scope
class RecipesListAnalysis(RecipeLevelAnalysis):
def random(self, num_recipes: int) -> Iterable[Recipe]:
scope_filter = self.scope.get_filter()
query = '''
SELECT r.uid AS recipe_id
FROM recipe_db_recipe AS r
WHERE r.name IS NOT NULL {}
ORDER BY random()
LIMIT %s
'''.format(scope_filter.where)
df = pd.read_sql(query, connection, params=scope_filter.parameters + [num_recipes])
recipe_ids = df['recipe_id'].values.tolist()
if len(recipe_ids) == 0:
return []
return Recipe.objects.filter(uid__in=recipe_ids).order_by('name')
class RecipesCountAnalysis(RecipeLevelAnalysis):
def total(self) -> int:
scope_filter = self.scope.get_filter()
query = '''
SELECT
count(r.uid) AS total_recipes
FROM recipe_db_recipe AS r
WHERE
created IS NOT NULL
{}
'''.format(scope_filter.where)
df = pd.read_sql(query, connection, params=scope_filter.parameters)
if len(df) == 0:
return 0
return df['total_recipes'].values.tolist()[0]
def per_day(self) -> DataFrame:
scope_filter = self.scope.get_filter()
query = '''
SELECT
date(r.created) AS day,
count(r.uid) AS total_recipes
FROM recipe_db_recipe AS r
WHERE
created IS NOT NULL
{}
GROUP BY date(r.created)
'''.format(scope_filter.where)
df = pd.read_sql(query, connection, params=scope_filter.parameters)
df = df.set_index('day')
return df
def per_month(self) -> DataFrame:
scope_filter = self.scope.get_filter()
query = '''
SELECT
date(r.created, 'start of month') AS month,
count(r.uid) AS total_recipes
FROM recipe_db_recipe AS r
WHERE
created IS NOT NULL
{}
GROUP BY date(r.created, 'start of month')
ORDER BY month ASC
'''.format(scope_filter.where)
df = | pd.read_sql(query, connection, params=scope_filter.parameters) | pandas.read_sql |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import (
NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning)
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, NaT, Series, Timedelta, TimedeltaIndex,
Timestamp, timedelta_range)
import pandas.util.testing as tm
def get_upcast_box(box, vector):
"""
Given two box-types, find the one that takes priority
"""
if box is DataFrame or isinstance(vector, DataFrame):
return DataFrame
if box is Series or isinstance(vector, Series):
return Series
if box is pd.Index or isinstance(vector, pd.Index):
return pd.Index
return box
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
# Comparison tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_timedelta64_zerodim(self, box_with_array):
# GH#26689 should unbox when comparing with zerodim array
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = pd.timedelta_range('2H', periods=4)
other = np.array(tdi.to_numpy()[0])
tdi = tm.box_expected(tdi, box)
res = tdi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
with pytest.raises(TypeError):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
class TestTimedelta64ArrayComparisons:
# TODO: All of these need to be parametrized over box
def test_compare_timedelta_series(self):
# regression test for GH#5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_tdi_cmp_str_invalid(self, box_with_array):
# GH#13624
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = TimedeltaIndex(['1 day', '2 days'])
tdarr = tm.box_expected(tdi, box_with_array)
for left, right in [(tdarr, 'a'), ('a', tdarr)]:
with pytest.raises(TypeError):
left > right
with pytest.raises(TypeError):
left >= right
with pytest.raises(TypeError):
left < right
with pytest.raises(TypeError):
left <= right
result = left == right
expected = np.array([False, False], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = left != right
expected = np.array([True, True], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_comp_nat(self, dtype):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
        # Check that pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
# TODO: better name
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
expected = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, expected)
# raise TypeError for now
with pytest.raises(TypeError):
rng < rng[3].value
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
# Tests moved from type-specific test files but not
# yet sorted/parametrized/de-duplicated
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dt
with pytest.raises(TypeError, match=msg):
tdi - dti
msg = (r"descriptor '__sub__' requires a 'datetime\.datetime' object"
" but received a 'Timedelta'")
with pytest.raises(TypeError, match=msg):
td - dt
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
td - dti
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = pd.date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = pd.date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt_tz - dt
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts_tz2
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt - dt_tz
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
ts - dt_tz
with pytest.raises(TypeError, match=msg):
ts_tz2 - ts
with pytest.raises(TypeError, match=msg):
ts_tz2 - dt
with pytest.raises(TypeError, match=msg):
ts_tz - ts_tz2
# with dti
with pytest.raises(TypeError, match=msg):
dti - ts_tz
with pytest.raises(TypeError, match=msg):
dti_tz - ts
with pytest.raises(TypeError, match=msg):
dti_tz - ts_tz2
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
tdi + dti[0:1]
with pytest.raises(ValueError, match=msg):
tdi[0:1] + dti
# random indexes
with pytest.raises(NullFrequencyError):
tdi + pd.Int64Index([1, 2, 3])
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
assert result == expected
result = td + dt
expected = Timestamp('20130102')
assert result == expected
# TODO: Needs more informative name, probably split up into
# more targeted tests
@pytest.mark.parametrize('freq', ['D', 'B'])
def test_timedelta(self, freq):
index = pd.date_range('1/1/2000', periods=50, freq=freq)
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
tm.assert_index_equal(index, back)
if freq == 'D':
expected = pd.tseries.offsets.Day(1)
assert index.freq == expected
assert shifted.freq == expected
assert back.freq == expected
else: # freq == 'B'
assert index.freq == pd.tseries.offsets.BusinessDay(1)
assert shifted.freq is None
assert back.freq == pd.tseries.offsets.BusinessDay(1)
result = index - timedelta(1)
expected = index + timedelta(-1)
tm.assert_index_equal(result, expected)
# GH#4134, buggy with timedeltas
rng = pd.date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
tm.assert_index_equal(result1, result4)
tm.assert_index_equal(result2, result3)
class TestAddSubNaTMasking:
# TODO: parametrize over boxes
def test_tdi_add_timestamp_nat_masking(self):
# GH#17991 checking for overflow-masking with NaT
tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT'])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants + ts_pos_variants:
res = tdinat + variant
assert res[1] is pd.NaT
def test_tdi_add_overflow(self):
# See GH#14068
# preliminary test scalar analogue of vectorized tests below
with pytest.raises(OutOfBoundsDatetime):
pd.to_timedelta(106580, 'D') + Timestamp('2000')
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2000') + pd.to_timedelta(106580, 'D')
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([106580], 'D') + Timestamp('2000')
with pytest.raises(OverflowError, match=msg):
Timestamp('2000') + pd.to_timedelta([106580], 'D')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([_NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
(pd.to_timedelta([_NaT, '5 days', '1 hours']) -
pd.to_timedelta(['7 seconds', _NaT, '4 hours']))
# These should not overflow!
exp = TimedeltaIndex([pd.NaT])
result = pd.to_timedelta([pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex(['4 days', pd.NaT])
result = pd.to_timedelta(['5 days', pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex([pd.NaT, pd.NaT, '5 hours'])
result = (pd.to_timedelta([pd.NaT, '5 days', '1 hours']) +
pd.to_timedelta(['7 seconds', pd.NaT, '4 hours']))
tm.assert_index_equal(result, exp)
class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# TODO: moved from frame tests; needs parametrization/de-duplication
def test_td64_df_add_int_frame(self):
# GH#22696 Check that we don't dispatch to numpy implementation,
# which treats int64 as m8[ns]
tdi = pd.timedelta_range('1', periods=3)
df = tdi.to_frame()
other = pd.DataFrame([1, 2, 3], index=tdi) # indexed like `df`
with pytest.raises(TypeError):
df + other
with pytest.raises(TypeError):
other + df
with pytest.raises(TypeError):
df - other
with pytest.raises(TypeError):
other - df
# TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs
# parametrization+de-duplication
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = pd.DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = pd.DataFrame(['00:00:02']).apply(pd.to_timedelta)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
dfn = pd.DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
actual = scalar1 + scalar1
assert actual == scalar2
actual = scalar2 - scalar1
assert actual == scalar1
actual = s1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - s1
tm.assert_series_equal(actual, s1)
actual = s1 + scalar1
tm.assert_series_equal(actual, s2)
actual = scalar1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - scalar1
tm.assert_series_equal(actual, s1)
actual = -scalar1 + s2
tm.assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
with pytest.raises(TypeError):
s1 + np.nan
with pytest.raises(TypeError):
np.nan + s1
with pytest.raises(TypeError):
s1 - np.nan
with pytest.raises(TypeError):
-np.nan + s1
actual = s1 + pd.NaT
tm.assert_series_equal(actual, sn)
actual = s2 - pd.NaT
tm.assert_series_equal(actual, sn)
actual = s1 + df1
tm.assert_frame_equal(actual, df2)
actual = s2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + s1
tm.assert_frame_equal(actual, df2)
actual = df2 - s1
tm.assert_frame_equal(actual, df1)
actual = df1 + df1
tm.assert_frame_equal(actual, df2)
actual = df2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + scalar1
tm.assert_frame_equal(actual, df2)
actual = df2 - scalar1
tm.assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
tm.assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
tm.assert_frame_equal(actual, dfn)
with pytest.raises(TypeError):
df1 + np.nan
with pytest.raises(TypeError):
df1 - np.nan
actual = df1 + pd.NaT # NaT is datetime, not timedelta
tm.assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
tm.assert_frame_equal(actual, dfn)
# TODO: moved from tests.series.test_operators, needs splitting, cleanup,
# de-duplication, box-parametrization...
def test_operators_timedelta64(self):
# series ops
v1 = pd.date_range('2012-1-1', periods=3, freq='D')
v2 = pd.date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
tm.assert_series_equal(rs, xp)
assert rs.dtype == 'timedelta64[ns]'
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == 'timedelta64[ns]'
# series on the rhs
result = df['A'] - df['A'].shift()
assert result.dtype == 'timedelta64[ns]'
result = df['A'] + td
assert result.dtype == 'M8[ns]'
# scalar Timestamp on rhs
maxa = df['A'].max()
assert isinstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
assert resultb.dtype == 'timedelta64[ns]'
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
tm.assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
tm.assert_series_equal(result, expected)
assert result.dtype == 'm8[ns]'
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
assert resulta.dtype == 'm8[ns]'
# roundtrip
resultb = resulta + d
tm.assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(resultb, df['A'])
assert resultb.dtype == 'M8[ns]'
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(df['A'], resultb)
assert resultb.dtype == 'M8[ns]'
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
assert rs[2] == value
def test_timedelta64_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
nat_series_dtype_timedelta = Series([NaT, NaT],
dtype='timedelta64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
tm.assert_series_equal(timedelta_series - NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
# addition
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
# multiplication
tm.assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
tm.assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series * 1, timedelta_series)
tm.assert_series_equal(1 * timedelta_series, timedelta_series)
tm.assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(timedelta_series * np.nan,
nat_series_dtype_timedelta)
tm.assert_series_equal(np.nan * timedelta_series,
nat_series_dtype_timedelta)
# division
tm.assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / np.nan,
nat_series_dtype_timedelta)
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box_with_array):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
def test_td64arr_add_sub_float(self, box_with_array, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdarr = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdarr + other
with pytest.raises(TypeError):
other + tdarr
with pytest.raises(TypeError):
tdarr - other
with pytest.raises(TypeError):
other - tdarr
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box_with_array, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box_with_array, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box_with_array):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box_with_array)
msg = ("cannot subtract a datelike from|"
"Could not operate|"
"cannot perform operation")
with pytest.raises(TypeError, match=msg):
idx - Timestamp('2011-01-01')
def test_td64arr_add_timestamp(self, box_with_array, tz_naive_fixture):
# GH#23215
# TODO: parametrize over scalar datetime types?
tz = tz_naive_fixture
other = Timestamp('2011-01-01', tz=tz)
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'], tz=tz)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx + other
tm.assert_equal(result, expected)
result = other + idx
tm.assert_equal(result, expected)
def test_td64arr_add_sub_timestamp(self, box_with_array):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdi = timedelta_range('1 day', periods=3)
expected = pd.date_range('2012-01-02', periods=3)
tdarr = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(ts + tdarr, expected)
tm.assert_equal(tdarr + ts, expected)
expected2 = pd.date_range('2011-12-31', periods=3, freq='-1D')
expected2 = tm.box_expected(expected2, box_with_array)
tm.assert_equal(ts - tdarr, expected2)
tm.assert_equal(ts + (-tdarr), expected2)
with pytest.raises(TypeError):
tdarr - ts
def test_tdi_sub_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_add_datetime64_nat(self, box_with_array):
# GH#23215
other = np.datetime64('NaT')
tdi = timedelta_range('1 day', periods=3)
expected = pd.DatetimeIndex(["NaT", "NaT", "NaT"])
tdser = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(tdser + other, expected)
tm.assert_equal(other + tdser, expected)
# ------------------------------------------------------------------
# Operations with int-like others
def test_td64arr_add_int_series_invalid(self, box):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
int_ser = Series([2, 3, 4])
with pytest.raises(err):
tdser + int_ser
with pytest.raises(err):
int_ser + tdser
with pytest.raises(err):
tdser - int_ser
with pytest.raises(err):
int_ser - tdser
def test_td64arr_add_intlike(self, box_with_array):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box_with_array)
err = TypeError
if box_with_array in [pd.Index, tm.to_array]:
err = NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box_with_array,
scalar):
box = box_with_array
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box in [pd.Index, tm.to_array] and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
# TODO: this was taken from tests.series.test_ops; de-duplicate
@pytest.mark.parametrize('scalar_td', [timedelta(minutes=5, seconds=4),
Timedelta(minutes=5, seconds=4),
Timedelta('5m4s').to_timedelta64()])
def test_operators_timedelta64_with_timedelta(self, scalar_td):
# smoke tests
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 + scalar_td
scalar_td + td1
td1 - scalar_td
scalar_td - td1
td1 / scalar_td
scalar_td / td1
# TODO: this was taken from tests.series.test_ops; de-duplicate
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
def test_td64arr_add_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
if box is pd.DataFrame and names[1] == 'Venkman':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_add_sub_td64_nat(self, box):
# GH#23320 special handling for timedelta64("NaT")
tdi = pd.TimedeltaIndex([NaT, Timedelta('1s')])
other = np.timedelta64("NaT")
expected = pd.TimedeltaIndex(["NaT"] * 2)
obj = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
result = other - obj
tm.assert_equal(result, expected)
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng = tm.box_expected(rng, box)
expected = | tm.box_expected(expected, box) | pandas.util.testing.box_expected |
from __future__ import division
import numpy as np
import os.path
import sys
import pandas as pd
from base.uber_model import UberModel, ModelSharedInputs
from .therps_functions import TherpsFunctions
import time
from functools import wraps
def timefn(fn):
@wraps(fn)
def measure_time(*args, **kwargs):
t1 = time.time()
result = fn(*args, **kwargs)
t2 = time.time()
print("therps_model_rest.py@timefn: " + fn.func_name + " took " + "{:.6f}".format(t2 - t1) + " seconds")
return result
return measure_time
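# Hedged usage sketch (illustrative only, not in the original module): timefn is an
# ordinary decorator, so any model-side callable can be wrapped to print its
# wall-clock runtime, e.g.:
#
#   @timefn
#   def run_simulation(batch):
#       ...per-run work...
#       return batch
#
# The wrapped function's return value is unchanged; only a timing line is printed.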
class TherpsInputs(ModelSharedInputs):
"""
Input class for Therps.
"""
def __init__(self):
"""Class representing the inputs for Therps"""
super(TherpsInputs, self).__init__()
# Inputs: Assign object attribute variables from the input Pandas DataFrame
"""
Therps constructor.
:param chem_name:
:param use:
:param formu_name:
:param percent_act_ing:
:param foliar_diss_hlife:
:param num_apps:
:param app_interval:
:param application_rate:
:param ld50_bird:
:param lc50_bird:
:param noaec_bird:
:param noael_bird:
:param species_of_the_tested_bird_avian_ld50:
:param species_of_the_tested_bird_avian_lc50:
:param species_of_the_tested_bird_avian_noaec:
:param species_of_the_tested_bird_avian_noael:
:param tw_bird_ld50:
:param tw_bird_lc50:
:param tw_bird_noaec:
:param tw_bird_noael:
:param mineau_sca_fact:
:param aw_herp_sm:
:param aw_herp_md:
:param aw_herp_lg:
:param awc_herp_sm:
:param awc_herp_md:
:param awc_herp_lg:
:param bw_frog_prey_mamm:
:param bw_frog_prey_herp:
:return:
"""
self.use = pd.Series([], dtype="object", name="use")
self.formu_name = pd.Series([], dtype="object", name="formu_name")
self.percent_act_ing = pd.Series([], dtype="float", name="percent_act_ing")
self.foliar_diss_hlife = pd.Series([], dtype="float64", name="foliar_diss_hlife")
self.num_apps = pd.Series([], dtype="int64", name="num_apps")
self.app_interval = pd.Series([], dtype="int", name="app_interval")
self.application_rate = pd.Series([], dtype="float", name="application_rate")
self.ld50_bird = pd.Series([], dtype="float", name="ld50_bird")
self.lc50_bird = pd.Series([], dtype="float", name="lc50_bird")
self.noaec_bird = pd.Series([], dtype="float", name="noaec_bird")
self.noael_bird = pd.Series([], dtype="float", name="noael_bird")
self.species_of_the_tested_bird_avian_ld50 = pd.Series([], dtype="float",
name="species_of_the_tested_bird_avian_ld50")
self.species_of_the_tested_bird_avian_lc50 = pd.Series([], dtype="float",
name="species_of_the_tested_bird_avian_lc50")
self.species_of_the_tested_bird_avian_noaec = pd.Series([], dtype="float",
name="species_of_the_tested_bird_avian_noaec")
self.species_of_the_tested_bird_avian_noael = pd.Series([], dtype="float",
name="species_of_the_tested_bird_avian_noael")
self.tw_bird_ld50 = pd.Series([], dtype="float", name="tw_bird_ld50")
self.tw_bird_lc50 = pd.Series([], dtype="float", name="tw_bird_lc50")
self.tw_bird_noaec = pd.Series([], dtype="float", name="tw_bird_noaec")
self.tw_bird_noael = pd.Series([], dtype="float", name="tw_bird_noael")
self.mineau_sca_fact = pd.Series([], dtype="float", name="mineau_sca_fact")
self.aw_herp_sm = pd.Series([], dtype="float", name="aw_herp_sm")
self.aw_herp_md = pd.Series([], dtype="float", name="aw_herp_md")
self.aw_herp_lg = pd.Series([], dtype="float", name="aw_herp_lg")
self.awc_herp_sm = pd.Series([], dtype="float", name="awc_herp_sm")
self.awc_herp_md = pd.Series([], dtype="float", name="awc_herp_md")
self.awc_herp_lg = pd.Series([], dtype="float", name="awc_herp_lg")
self.bw_frog_prey_mamm = pd.Series([], dtype="float", name="bw_frog_prey_mamm")
self.bw_frog_prey_herp = pd.Series([], dtype="float", name="bw_frog_prey_herp")
## application rates and days of applications
#self.app_rates = pd.Series([], dtype="object") #Series of lists, each list contains app_rates of a model simulation run
#self.day_out = pd.Series([], dtype="object") #Series of lists, each list contains day #'s of applications within a model simulaiton run
class TherpsOutputs(object):
"""
Output class for Therps.
"""
def __init__(self):
"""Class representing the outputs for Therps"""
super(TherpsOutputs, self).__init__()
## application rates and days of applications
#self.day_out = pd.Series([], dtype='object', name='day_out')
#self.app_rates = pd.Series([], dtype='object', name='app_rates')
# TODO: Add these back in after deciding how to handle the numpy arrays
# timeseries of concentrations related to herbaceous food sources
# self.out_c_ts_sg = pd.Series([], dtype='float') # short grass
# self.out_c_ts_blp = pd.Series([], dtype='float') # broad-leafed plants
# self.out_c_ts_fp = pd.Series([], dtype='float') # fruits/pods
#
# self.out_c_ts_mean_sg = pd.Series([], dtype='float') # short grass
# self.out_c_ts_mean_blp = pd.Series([], dtype='float') # broad-leafed plants
# self.out_c_ts_mean_fp = pd.Series([], dtype='float') # fruits/pods
# Table 5
self.out_ld50_ad_sm = pd.Series([], dtype='float', name="out_ld50_ad_sm")
self.out_ld50_ad_md = pd.Series([], dtype='float', name="out_ld50_ad_md")
self.out_ld50_ad_lg = pd.Series([], dtype='float', name="out_ld50_ad_lg")
self.out_eec_dose_bp_sm = pd.Series([], dtype='float', name="out_eec_dose_bp_sm")
self.out_eec_dose_bp_md = pd.Series([], dtype='float', name="out_eec_dose_bp_md")
self.out_eec_dose_bp_lg = pd.Series([], dtype='float', name="out_eec_dose_bp_lg")
self.out_arq_dose_bp_sm = pd.Series([], dtype='float', name="out_arq_dose_bp_sm")
self.out_arq_dose_bp_md = pd.Series([], dtype='float', name="out_arq_dose_bp_md")
self.out_arq_dose_bp_lg = pd.Series([], dtype='float', name="out_arq_dose_bp_lg")
self.out_eec_dose_fr_sm = pd.Series([], dtype='float', name="out_eec_dose_fr_sm")
self.out_eec_dose_fr_md = pd.Series([], dtype='float', name="out_eec_dose_fr_md")
self.out_eec_dose_fr_lg = pd.Series([], dtype='float', name="out_eec_dose_fr_lg")
self.out_arq_dose_fr_sm = pd.Series([], dtype='float', name="out_arq_dose_fr_sm")
self.out_arq_dose_fr_md = pd.Series([], dtype='float', name="out_arq_dose_fr_md")
self.out_arq_dose_fr_lg = pd.Series([], dtype='float', name="out_arq_dose_fr_lg")
self.out_eec_dose_hm_md = pd.Series([], dtype='float', name="out_eec_dose_hm_md")
self.out_eec_dose_hm_lg = pd.Series([], dtype='float', name="out_eec_dose_hm_lg")
self.out_arq_dose_hm_md = pd.Series([], dtype='float', name="out_arq_dose_hm_md")
self.out_arq_dose_hm_lg = pd.Series([], dtype='float', name="out_arq_dose_hm_lg")
self.out_eec_dose_im_md = pd.Series([], dtype='float', name="out_eec_dose_im_md")
self.out_eec_dose_im_lg = pd.Series([], dtype='float', name="out_eec_dose_im_lg")
self.out_arq_dose_im_md = pd.Series([], dtype='float', name="out_arq_dose_im_md")
self.out_arq_dose_im_lg = pd.Series([], dtype='float', name="out_arq_dose_im_lg")
self.out_eec_dose_tp_md = pd.Series([], dtype='float', name="out_eec_dose_tp_md")
self.out_eec_dose_tp_lg = pd.Series([], dtype='float', name="out_eec_dose_tp_lg")
self.out_arq_dose_tp_md = pd.Series([], dtype='float', name="out_arq_dose_tp_md")
self.out_arq_dose_tp_lg = pd.Series([], dtype='float', name="out_arq_dose_tp_lg")
# Table 6
self.out_eec_diet_herp_bl = pd.Series([], dtype='float', name="out_eec_diet_herp_bl")
self.out_eec_arq_herp_bl = pd.Series([], dtype='float', name="out_eec_arq_herp_bl")
self.out_eec_diet_herp_fr = pd.Series([], dtype='float', name="out_eec_diet_herp_fr")
self.out_eec_arq_herp_fr = pd.Series([], dtype='float', name="out_eec_arq_herp_fr")
self.out_eec_diet_herp_hm = pd.Series([], dtype='float', name="out_eec_diet_herp_hm")
self.out_eec_arq_herp_hm = pd.Series([], dtype='float', name="out_eec_arq_herp_hm")
self.out_eec_diet_herp_im = pd.Series([], dtype='float', name="out_eec_diet_herp_im")
self.out_eec_arq_herp_im = pd.Series([], dtype='float', name="out_eec_arq_herp_im")
self.out_eec_diet_herp_tp = pd.Series([], dtype='float', name="out_eec_diet_herp_tp")
self.out_eec_arq_herp_tp = pd.Series([], dtype='float', name="out_eec_arq_herp_tp")
# Table 7
self.out_eec_diet_herp_bl = pd.Series([], dtype='float', name="out_eec_diet_herp_bl")
self.out_eec_crq_herp_bl = pd.Series([], dtype='float', name="out_eec_crq_herp_bl")
self.out_eec_diet_herp_fr = pd.Series([], dtype='float', name="out_eec_diet_herp_fr")
self.out_eec_crq_herp_fr = pd.Series([], dtype='float', name="out_eec_crq_herp_fr")
self.out_eec_diet_herp_hm = pd.Series([], dtype='float', name="out_eec_diet_herp_hm")
self.out_eec_crq_herp_hm = pd.Series([], dtype='float', name="out_eec_crq_herp_hm")
self.out_eec_diet_herp_im = pd.Series([], dtype='float', name="out_eec_diet_herp_im")
self.out_eec_crq_herp_im = pd.Series([], dtype='float', name="out_eec_crq_herp_im")
self.out_eec_diet_herp_tp = pd.Series([], dtype='float', name="out_eec_diet_herp_tp")
self.out_eec_crq_herp_tp = pd.Series([], dtype='float', name="out_eec_crq_herp_tp")
# Table 8
self.out_eec_dose_bp_sm_mean = pd.Series([], dtype='float', name="out_eec_dose_bp_sm_mean")
self.out_eec_dose_bp_md_mean = pd.Series([], dtype='float', name="out_eec_dose_bp_md_mean")
self.out_eec_dose_bp_lg_mean = pd.Series([], dtype='float', name="out_eec_dose_bp_lg_mean")
self.out_arq_dose_bp_sm_mean = pd.Series([], dtype='float', name="out_arq_dose_bp_sm_mean")
self.out_arq_dose_bp_md_mean = pd.Series([], dtype='float', name="out_arq_dose_bp_md_mean")
self.out_arq_dose_bp_lg_mean = pd.Series([], dtype='float', name="out_arq_dose_bp_lg_mean")
self.out_eec_dose_fr_sm_mean = pd.Series([], dtype='float', name="out_eec_dose_fr_sm_mean")
self.out_eec_dose_fr_md_mean = pd.Series([], dtype='float', name="out_eec_dose_fr_md_mean")
self.out_eec_dose_fr_lg_mean = pd.Series([], dtype='float', name="out_eec_dose_fr_lg_mean")
self.out_arq_dose_fr_sm_mean = pd.Series([], dtype='float', name="out_arq_dose_fr_sm_mean")
self.out_arq_dose_fr_md_mean = pd.Series([], dtype='float', name="out_arq_dose_fr_md_mean")
self.out_arq_dose_fr_lg_mean = pd.Series([], dtype='float', name="out_arq_dose_fr_lg_mean")
self.out_eec_dose_hm_md_mean = pd.Series([], dtype='float', name="out_eec_dose_hm_md_mean")
self.out_eec_dose_hm_lg_mean = pd.Series([], dtype='float', name="out_eec_dose_hm_lg_mean")
self.out_arq_dose_hm_md_mean = pd.Series([], dtype='float', name="out_arq_dose_hm_md_mean")
self.out_arq_dose_hm_lg_mean = pd.Series([], dtype='float', name="out_arq_dose_hm_lg_mean")
self.out_eec_dose_im_md_mean = pd.Series([], dtype='float', name="out_eec_dose_im_md_mean")
self.out_eec_dose_im_lg_mean = pd.Series([], dtype='float', name="out_eec_dose_im_lg_mean")
self.out_arq_dose_im_md_mean = pd.Series([], dtype='float', name="out_arq_dose_im_md_mean")
self.out_arq_dose_im_lg_mean = pd.Series([], dtype='float', name="out_arq_dose_im_lg_mean")
self.out_eec_dose_tp_md_mean = pd.Series([], dtype='float', name="out_eec_dose_tp_md_mean")
self.out_eec_dose_tp_lg_mean = pd.Series([], dtype='float', name="out_eec_dose_tp_lg_mean")
self.out_arq_dose_tp_md_mean = pd.Series([], dtype='float', name="out_arq_dose_tp_md_mean")
self.out_arq_dose_tp_lg_mean = pd.Series([], dtype='float', name="out_arq_dose_tp_lg_mean")
# Table 9
self.out_eec_diet_herp_bl_mean = | pd.Series([], dtype='float', name="out_eec_diet_herp_bl_mean") | pandas.Series |
# Import libraries
import mysql.connector
import pandas as pd
import re
import json
# Establish a MySQL connection
mydb = mysql.connector.connect(host="localhost", user="root", password="<PASSWORD>")
# Get a cursor, which is used to execute statements and traverse result rows
cursor = mydb.cursor()
# Create a database
cursor.execute("create database if not exists mytest;")
cursor.execute("use mytest;")
# Create product tables for mobile, laptop, tablet, gaming console, headphone, and speaker
cursor.execute("create table if not exists mobile "
"(id int, "
"title_fa varchar(255), "
"title_en varchar(255), "
"url_code varchar(255), "
"category_fa varchar(255), "
"keywords varchar(255), "
"brand_fa varchar(255), "
"brand_en varchar(255), "
"primary key (id));")
cursor.execute("create table if not exists laptop "
"(id int primary key , "
"title_fa varchar(255), "
"title_en varchar(255), "
"url_code varchar(255), "
"category_fa varchar(255), "
"keywords varchar(255), "
"brand_fa varchar(255), "
"brand_en varchar(255));")
cursor.execute("create table if not exists tablet "
"(id int primary key , "
"title_fa varchar(255), "
"title_en varchar(255), "
"url_code varchar(255), "
"category_fa varchar(255), "
"keywords varchar(255), "
"brand_fa varchar(255), "
"brand_en varchar(255));")
cursor.execute("create table if not exists gamingconsole "
"(id int primary key , "
"title_fa varchar(255), "
"title_en varchar(255), "
"url_code varchar(255), "
"category_fa varchar(255), "
"keywords varchar(255), "
"brand_fa varchar(255), "
"brand_en varchar(255));")
cursor.execute("create table if not exists headphone "
"(id int primary key , "
"title_fa varchar(255), "
"title_en varchar(255), "
"url_code varchar(255), "
"category_fa varchar(255), "
"keywords varchar(255), "
"brand_fa varchar(255), "
"brand_en varchar(255));")
cursor.execute("create table if not exists speaker "
"(id int primary key , "
"title_fa varchar(255), "
"title_en varchar(255), "
"url_code varchar(255), "
"category_fa varchar(255), "
"keywords varchar(255), "
"brand_fa varchar(255), "
"brand_en varchar(255));")
# Create attribute tables for mobile, laptop, tablet, gaming console, headphone, and speaker
cursor.execute("create table if not exists mobile_attribute "
"(id int auto_increment, "
"product_id int, "
"mykey varchar(255), "
"myvalue varchar(255),"
"primary key (id),"
"foreign key (product_id) references mobile(id));")
cursor.execute("create table if not exists laptop_attribute "
"(id int auto_increment, "
"product_id int, "
"mykey varchar(255), "
"myvalue varchar(255),"
"primary key (id),"
"foreign key (product_id) references laptop(id));")
cursor.execute("create table if not exists tablet_attribute "
"(id int auto_increment, "
"product_id int, "
"mykey varchar(255), "
"myvalue varchar(255),"
"primary key (id),"
"foreign key (product_id) references tablet(id));")
cursor.execute("create table if not exists gamingconsole_attribute "
"(id int auto_increment, "
"product_id int, "
"mykey varchar(255), "
"myvalue varchar(255),"
"primary key (id),"
"foreign key (product_id) references gamingconsole(id));")
cursor.execute("create table if not exists headphone_attribute "
"(id int auto_increment, "
"product_id int, "
"mykey varchar(255), "
"myvalue varchar(255),"
"primary key (id),"
"foreign key (product_id) references headphone(id));")
cursor.execute("create table if not exists speaker_attribute "
"(id int auto_increment, "
"product_id int, "
"mykey varchar(255), "
"myvalue varchar(255),"
"primary key (id),"
"foreign key (product_id) references speaker(id));")
# Create title-alternative (description) tables for mobile, laptop, tablet, gaming console, headphone, and speaker
cursor.execute("create table if not exists title_alt_for_mobile "
"(id int auto_increment, "
"product_id int, "
"description varchar(255),"
"primary key (id),"
"foreign key (product_id) references mobile(id));")
cursor.execute("create table if not exists title_alt_for_laptop "
"(id int auto_increment, "
"product_id int, "
"description varchar(255),"
"primary key (id),"
"foreign key (product_id) references laptop(id));")
cursor.execute("create table if not exists title_alt_for_tablet "
"(id int auto_increment, "
"product_id int, "
"description varchar(255),"
"primary key (id),"
"foreign key (product_id) references tablet(id));")
cursor.execute("create table if not exists title_alt_for_gamingconsole "
"(id int auto_increment, "
"product_id int, "
"description varchar(255),"
"primary key (id),"
"foreign key (product_id) references gamingconsole(id));")
cursor.execute("create table if not exists title_alt_for_headphone "
"(id int auto_increment, "
"product_id int, "
"description varchar(255),"
"primary key (id),"
"foreign key (product_id) references headphone(id));")
cursor.execute("create table if not exists title_alt_for_speaker "
"(id int auto_increment, "
"product_id int, "
"description varchar(255),"
"primary key (id),"
"foreign key (product_id) references speaker(id));")
# Read the product-related Excel file
info = pd.read_excel("data/5-awte8wbd.xlsx")
# List of attributes
head = list(info)
# Collect each column of the sheet into its own single-column DataFrame
col = []
for i in range(len(head)):
col.append( | pd.DataFrame(info, columns=[head[i]]) | pandas.DataFrame |
from collections import Counter
import pandas as pd
import pytest
from simplekv import KeyValueStore
from kartothek.api.discover import (
discover_cube,
discover_datasets,
discover_datasets_unchecked,
discover_ktk_cube_dataset_ids,
)
from kartothek.core.cube.constants import (
KTK_CUBE_DF_SERIALIZER,
KTK_CUBE_METADATA_DIMENSION_COLUMNS,
KTK_CUBE_METADATA_KEY_IS_SEED,
KTK_CUBE_METADATA_PARTITION_COLUMNS,
KTK_CUBE_METADATA_STORAGE_FORMAT,
KTK_CUBE_METADATA_SUPPRESS_INDEX_ON,
KTK_CUBE_METADATA_VERSION,
)
from kartothek.core.cube.cube import Cube
from kartothek.core.uuid import gen_uuid
from kartothek.io.eager import (
store_dataframes_as_dataset,
update_dataset_from_dataframes,
)
from kartothek.io_components.metapartition import MetaPartition
@pytest.fixture
def cube():
return Cube(
dimension_columns=["x", "y"],
partition_columns=["p", "q"],
uuid_prefix="cube",
index_columns=["i1"],
seed_dataset="myseed",
)
def store_data(
cube,
function_store,
df,
name,
partition_on="default",
metadata_version=KTK_CUBE_METADATA_VERSION,
metadata_storage_format=KTK_CUBE_METADATA_STORAGE_FORMAT,
metadata=None,
overwrite=False,
new_ktk_cube_metadata=True,
write_suppress_index_on=True,
):
if partition_on == "default":
partition_on = cube.partition_columns
if isinstance(df, pd.DataFrame):
mp = MetaPartition(label=gen_uuid(), data=df, metadata_version=metadata_version)
indices_to_build = set(cube.index_columns) & set(df.columns)
if name == cube.seed_dataset:
indices_to_build |= set(cube.dimension_columns) - set(
cube.suppress_index_on
)
mp = mp.build_indices(indices_to_build)
dfs = mp
else:
assert isinstance(df, MetaPartition)
assert df.metadata_version == metadata_version
dfs = df
if metadata is None:
metadata = {
KTK_CUBE_METADATA_DIMENSION_COLUMNS: cube.dimension_columns,
KTK_CUBE_METADATA_KEY_IS_SEED: (name == cube.seed_dataset),
}
if new_ktk_cube_metadata:
metadata.update(
{KTK_CUBE_METADATA_PARTITION_COLUMNS: cube.partition_columns}
)
if write_suppress_index_on:
metadata.update(
{KTK_CUBE_METADATA_SUPPRESS_INDEX_ON: list(cube.suppress_index_on)}
)
return store_dataframes_as_dataset(
store=function_store,
dataset_uuid=cube.ktk_dataset_uuid(name),
dfs=dfs,
partition_on=list(partition_on) if partition_on else None,
metadata_storage_format=metadata_storage_format,
metadata_version=metadata_version,
df_serializer=KTK_CUBE_DF_SERIALIZER,
metadata=metadata,
overwrite=overwrite,
)
def assert_datasets_equal(left, right):
assert set(left.keys()) == set(right.keys())
for k in left.keys():
ds_l = left[k]
ds_r = right[k]
assert ds_l.uuid == ds_r.uuid
def assert_dataset_issubset(superset, subset):
assert set(subset.keys()).issubset(set(superset.keys()))
for k in subset.keys():
assert subset[k].uuid == superset[k].uuid
def test_discover_ktk_cube_dataset_ids(function_store):
cube = Cube(
dimension_columns=["dim"],
partition_columns=["part"],
uuid_prefix="cube",
seed_dataset="seed",
)
ktk_cube_dataset_ids = ["A", "B", "C"]
for ktk_cube_id in ktk_cube_dataset_ids:
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"dim": [0], "part": [0]}),
name=ktk_cube_id,
)
collected_ktk_cube_dataset_ids = discover_ktk_cube_dataset_ids(
cube.uuid_prefix, function_store()
)
assert collected_ktk_cube_dataset_ids == set(ktk_cube_dataset_ids)
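# Illustrative note (assumption about the naming scheme): cube.ktk_dataset_uuid("A")
# combines the cube's uuid_prefix with the dataset id (e.g. "cube++A"), which is the
# store prefix that discover_ktk_cube_dataset_ids() scans for.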
class TestDiscoverDatasetsUnchecked:
def test_simple(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
),
"enrich": store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name="enrich",
),
}
actual = discover_datasets_unchecked(cube.uuid_prefix, function_store)
assert_datasets_equal(actual, expected)
def test_no_seed(self, cube, function_store):
expected = {
"enrich": store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name="enrich",
)
}
actual = discover_datasets_unchecked(cube.uuid_prefix, function_store)
assert_datasets_equal(actual, expected)
def test_other_files(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
)
}
function_store().put(cube.ktk_dataset_uuid("enrich") + "/foo", b"")
actual = discover_datasets_unchecked(cube.uuid_prefix, function_store)
assert_datasets_equal(actual, expected)
def test_no_common_metadata(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
)
}
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name="enrich",
)
keys = set(function_store().keys())
metadata_key = cube.ktk_dataset_uuid("enrich") + ".by-dataset-metadata.json"
assert metadata_key in keys
for k in keys:
if (k != metadata_key) and k.startswith(cube.ktk_dataset_uuid("enrich")):
function_store().delete(k)
actual = discover_datasets_unchecked(cube.uuid_prefix, function_store)
assert_datasets_equal(actual, expected)
def test_filter_partial_datasets_found(self, cube, function_store):
enrich_dataset = store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name="enrich",
)
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name="mytable",
)
expected = {"enrich": enrich_dataset}
actual = discover_datasets_unchecked(
cube.uuid_prefix, function_store, filter_ktk_cube_dataset_ids=["enrich"]
)
assert_dataset_issubset(actual, expected)
def test_filter_no_datasets_found(self, cube, function_store):
actual = discover_datasets_unchecked(
cube.uuid_prefix, function_store, filter_ktk_cube_dataset_ids=["enrich"]
)
assert actual == {}
def test_msgpack_clean(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
),
"enrich": store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name="enrich",
metadata_storage_format="msgpack",
),
}
actual = discover_datasets_unchecked(cube.uuid_prefix, function_store)
assert_datasets_equal(actual, expected)
def test_msgpack_priority(self, cube, function_store):
"""
json metadata files have priority in kartothek, so the discovery should respect this
"""
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v1": [0]}),
name=cube.seed_dataset,
metadata_storage_format="msgpack",
)
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v2": [0]}),
name=cube.seed_dataset,
overwrite=True,
)
}
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v3": [0]}),
name=cube.seed_dataset,
metadata_storage_format="msgpack",
overwrite=True,
)
actual = discover_datasets_unchecked(cube.uuid_prefix, function_store)
assert_datasets_equal(actual, expected)
def test_msgpack_efficiency(self, cube, function_store):
"""
We should only iterate over the store once, even though we are looking for 2 suffixes.
Furthermore, we must only load every dataset once.
"""
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
metadata_storage_format="msgpack",
)
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
overwrite=True,
)
class StoreMock(KeyValueStore):
def __init__(self, store):
self._store = store
self._iter_keys_called = 0
self._iter_prefixes_called = 0
self._get_called = Counter()
def iter_keys(self, prefix=""):
self._iter_keys_called += 1
return self._store.iter_keys(prefix)
def iter_prefixes(self, delimiter, prefix=""):
self._iter_prefixes_called += 1
return self._store.iter_prefixes(delimiter, prefix)
def get(self, key):
self._get_called[key] += 1
return self._store.get(key)
store = StoreMock(function_store())
discover_datasets_unchecked(cube.uuid_prefix, store)
assert store._iter_keys_called == 0
assert store._iter_prefixes_called == 1
assert max(store._get_called.values()) == 1
class TestDiscoverDatasets:
def test_seed_only(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
)
}
actual = discover_datasets(cube, function_store)
assert_datasets_equal(actual, expected)
def test_2_datasets(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
),
"enrich": store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v1": 100}),
name="enrich",
),
}
actual = discover_datasets(cube, function_store)
assert_datasets_equal(actual, expected)
def test_partitions_superset(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
),
"enrich": store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v1": 100}),
name="enrich",
partition_on=["p", "q", "v1"],
),
}
actual = discover_datasets(cube, function_store)
assert_datasets_equal(actual, expected)
def test_raises_no_seed(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name="enrich",
)
with pytest.raises(ValueError) as exc:
discover_datasets(cube, function_store)
assert str(exc.value) == 'Seed data ("myseed") is missing.'
def test_raises_wrong_partition_on_seed_other(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0]}),
name=cube.seed_dataset,
partition_on=["p"],
)
with pytest.raises(ValueError) as exc:
discover_datasets(cube, function_store)
assert (
str(exc.value) == 'Seed dataset "myseed" has missing partition columns: q'
)
def test_partition_on_nonseed_no_part(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
),
"enrich": store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "v1": [0]}),
name="enrich",
partition_on=[],
),
}
actual = discover_datasets(cube, function_store)
assert_datasets_equal(actual, expected)
def test_raises_wrong_metadata_version(self, cube, function_store):
with pytest.raises(
NotImplementedError, match="Minimal supported metadata version is"
):
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
metadata_version=2,
partition_on=None,
)
def test_raises_dtypes(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
)
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0.0], "p": [0], "q": [0], "v1": 100}),
name="enrich",
)
with pytest.raises(ValueError) as exc:
discover_datasets(cube, function_store)
assert 'Found incompatible entries for column "y"' in str(exc.value)
def test_raises_overlap(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v1": 100}),
name=cube.seed_dataset,
)
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v1": 100}),
name="enrich",
)
with pytest.raises(ValueError) as exc:
discover_datasets(cube, function_store)
assert "Found columns present in multiple datasets" in str(exc.value)
def test_raises_partition_on_overlap(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v1": 100}),
name=cube.seed_dataset,
)
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "v1": 100}),
name="enrich",
partition_on=["v1"],
)
with pytest.raises(ValueError) as exc:
discover_datasets(cube, function_store)
assert "Found columns present in multiple datasets" in str(exc.value)
def test_raises_missing_dimension_columns(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df=MetaPartition(
label=gen_uuid(),
data=pd.DataFrame({"x": [0], "p": [0], "q": [0]}),
metadata_version=KTK_CUBE_METADATA_VERSION,
).build_indices(["x"]),
name=cube.seed_dataset,
)
with pytest.raises(ValueError) as exc:
discover_datasets(cube, function_store)
assert (
str(exc.value) == 'Seed dataset "myseed" has missing dimension columns: y'
)
def test_raises_no_dimension_columns(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v1": 100}),
name=cube.seed_dataset,
)
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"p": [0], "q": [0], "v2": 100}),
name="enrich",
)
with pytest.raises(ValueError) as exc:
discover_datasets(cube, function_store)
assert (
str(exc.value)
== 'Dataset "enrich" must have at least 1 of the following dimension columns: x, y'
)
def test_raises_dimension_index_missing(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df=MetaPartition(
label=gen_uuid(),
data=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
metadata_version=KTK_CUBE_METADATA_VERSION,
),
name=cube.seed_dataset,
)
with pytest.raises(ValueError) as exc:
discover_datasets(cube, function_store)
assert (
str(exc.value)
== 'ExplicitSecondaryIndex "x" is missing in dataset "myseed".'
)
def test_raises_other_index_missing(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df=MetaPartition(
label=gen_uuid(),
data=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
metadata_version=KTK_CUBE_METADATA_VERSION,
).build_indices(["x", "y"]),
name=cube.seed_dataset,
)
store_data(
cube=cube,
function_store=function_store,
df=MetaPartition(
label=gen_uuid(),
data=pd.DataFrame(
{"x": [0], "y": [0], "p": [0], "q": [0], "i1": [1337]}
),
metadata_version=KTK_CUBE_METADATA_VERSION,
),
name="enrich",
)
with pytest.raises(ValueError) as exc:
discover_datasets(cube, function_store)
assert (
str(exc.value)
== 'ExplicitSecondaryIndex or PartitionIndex "i1" is missing in dataset "enrich".'
)
def test_accepts_additional_indices(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=MetaPartition(
label=gen_uuid(),
data=pd.DataFrame(
{"x": [0], "y": [0], "p": [0], "q": [0], "v1": [0]}
),
metadata_version=KTK_CUBE_METADATA_VERSION,
).build_indices(["x", "y", "v1"]),
name=cube.seed_dataset,
),
"enrich": store_data(
cube=cube,
function_store=function_store,
df=MetaPartition(
label=gen_uuid(),
data=pd.DataFrame(
{
"x": [0],
"y": [0],
"p": [0],
"q": [0],
"i1": [1337],
"v2": [42],
}
),
metadata_version=KTK_CUBE_METADATA_VERSION,
).build_indices(["i1", "x", "v2"]),
name="enrich",
),
}
actual = discover_datasets(cube, function_store)
assert_datasets_equal(actual, expected)
def test_accepts_partition_index_for_index(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v1": [0]}),
name=cube.seed_dataset,
),
"enrich": store_data(
cube=cube,
function_store=function_store,
df=MetaPartition(
label=gen_uuid(),
data=pd.DataFrame({"x": [0], "y": [0], "i1": [1337], "v2": [42]}),
metadata_version=KTK_CUBE_METADATA_VERSION,
),
name="enrich",
partition_on=["i1"],
),
}
actual = discover_datasets(cube, function_store)
assert_datasets_equal(actual, expected)
def test_raises_unspecified_partition_columns(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
partition_on=["p", "q"],
)
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v1": [0]}),
name="enrich",
partition_on=["q"],
)
with pytest.raises(ValueError) as exc:
discover_datasets(cube, function_store)
assert (
str(exc.value) == "Unspecified but provided partition columns in enrich: p"
)
def test_accepts_projected_datasets(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=MetaPartition(
label=gen_uuid(),
data=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
metadata_version=KTK_CUBE_METADATA_VERSION,
).build_indices(["x", "y"]),
name=cube.seed_dataset,
),
"x": store_data(
cube=cube,
function_store=function_store,
df=MetaPartition(
label=gen_uuid(),
data=pd.DataFrame({"x": [0], "p": [0], "q": [0], "v1": [42]}),
metadata_version=KTK_CUBE_METADATA_VERSION,
),
name="x",
),
"y": store_data(
cube=cube,
function_store=function_store,
df=MetaPartition(
label=gen_uuid(),
data=pd.DataFrame({"y": [0], "p": [0], "q": [0], "v2": [42]}),
metadata_version=KTK_CUBE_METADATA_VERSION,
),
name="y",
),
}
actual = discover_datasets(cube, function_store)
assert_datasets_equal(actual, expected)
def test_filter_basic(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
),
"enrich": store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v1": 100}),
name="enrich",
),
}
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v2": 100}),
name="foo",
)
actual = discover_datasets(cube, function_store, {"myseed", "enrich"})
assert_datasets_equal(actual, expected)
def test_filter_ignores_invalid(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
),
"enrich": store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v1": 100}),
name="enrich",
),
}
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame(
{
"x": [0],
"y": [0],
"p": [0],
"q": [0],
"v1": 100, # overlapping payload
}
),
name="foo",
)
actual = discover_datasets(cube, function_store, {"myseed", "enrich"})
assert_datasets_equal(actual, expected)
def test_filter_missing(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
)
with pytest.raises(ValueError) as exc:
discover_datasets(cube, function_store, {"myseed", "enrich"})
assert (
str(exc.value) == "Could not find the following requested datasets: enrich"
)
def test_filter_empty(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
)
with pytest.raises(ValueError) as exc:
discover_datasets(cube, function_store, {})
assert str(exc.value) == 'Seed data ("myseed") is missing.'
def test_raises_partial_datasets_found(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name="enrich",
)
with pytest.raises(ValueError) as exc:
discover_datasets(
cube,
function_store,
filter_ktk_cube_dataset_ids=["enrich", "non_existing_table"],
)
assert (
str(exc.value)
== "Could not find the following requested datasets: non_existing_table"
)
def test_raises_no_datasets_found(self, cube, function_store):
with pytest.raises(ValueError) as exc:
discover_datasets(
cube,
function_store,
filter_ktk_cube_dataset_ids=["enrich", "non_existing_table"],
)
assert (
str(exc.value)
== "Could not find the following requested datasets: enrich, non_existing_table"
)
def test_msgpack(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
),
"enrich": store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v1": 100}),
name="enrich",
metadata_storage_format="msgpack",
),
}
actual = discover_datasets(cube, function_store)
assert_datasets_equal(actual, expected)
def test_empty_dataset(self, cube, function_store):
expected = {
cube.seed_dataset: store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
),
"enrich": store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v1": 100}),
name="enrich",
metadata_storage_format="msgpack",
),
}
expected = {
filter_ktk_cube_dataset_id: update_dataset_from_dataframes(
[], store=function_store, dataset_uuid=ds.uuid, delete_scope=[{}]
)
for filter_ktk_cube_dataset_id, ds in expected.items()
}
actual = discover_datasets(cube, function_store)
assert_datasets_equal(actual, expected)
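# Note (summarizing the behavior exercised below, not taken from the library docs):
# discover_datasets() validates datasets against an already-constructed Cube spec,
# whereas discover_cube() first reconstructs the Cube itself (dimension/partition
# columns, suppress_index_on, ...) from the seed dataset's metadata and then returns
# both the cube and its datasets.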
class TestDiscoverCube:
def test_seed_only(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "i1": [0]}),
name=cube.seed_dataset,
)
cube_actual, datasets = discover_cube(cube.uuid_prefix, function_store)
assert cube_actual == cube
assert set(datasets.keys()) == {cube.seed_dataset}
ds = datasets[cube.seed_dataset]
assert ds.primary_indices_loaded
def test_without_partition_timestamp_metadata(self, cube, function_store):
# test that discovery of a cube still works without the metadata keys
# "KLEE_TS" and KTK_CUBE_METADATA_PARTITION_COLUMNS
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame(
{
"x": [0],
"y": [0],
"p": [0],
"q": [0],
"KLEE_TS": [pd.Timestamp("2000")],
"i1": [0],
}
),
partition_on=["p", "q", "KLEE_TS"],
name=cube.seed_dataset,
new_ktk_cube_metadata=False,
)
cube_actual, datasets = discover_cube(cube.uuid_prefix, function_store)
assert cube_actual == cube
assert set(datasets.keys()) == {cube.seed_dataset}
def test_reads_suppress_index(self, cube, function_store):
cube = cube.copy(suppress_index_on=cube.dimension_columns)
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "i1": [0]}),
name=cube.seed_dataset,
)
cube_actual, datasets = discover_cube(cube.uuid_prefix, function_store)
assert cube_actual == cube
def test_reads_suppress_index_default(self, cube, function_store):
# test that reading also works for old metadata that does not contain the suppress_index_on key.
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "i1": [0]}),
name=cube.seed_dataset,
write_suppress_index_on=False,
)
cube_actual, datasets = discover_cube(cube.uuid_prefix, function_store)
assert cube_actual == cube
def test_multiple(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
)
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "p": [0], "q": [0], "i1": [0]}),
name="enrich",
)
cube_actual, datasets = discover_cube(cube.uuid_prefix, function_store)
assert cube_actual == cube
assert set(datasets.keys()) == {cube.seed_dataset, "enrich"}
ds_seed = datasets[cube.seed_dataset]
assert ds_seed.primary_indices_loaded
ds_enrich = datasets["enrich"]
assert ds_enrich.primary_indices_loaded
def test_partitions_superset(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
)
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "p": [0], "q": [0], "i1": [0], "v1": [0]}),
name="enrich",
partition_on=["p", "q", "v1"],
)
cube_actual, datasets = discover_cube(cube.uuid_prefix, function_store)
assert cube_actual == cube
assert set(datasets.keys()) == {cube.seed_dataset, "enrich"}
ds_seed = datasets[cube.seed_dataset]
assert ds_seed.primary_indices_loaded
ds_enrich = datasets["enrich"]
assert ds_enrich.primary_indices_loaded
def test_raises_no_seed(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "i1": [0]}),
name=cube.seed_dataset,
metadata={},
)
with pytest.raises(ValueError) as exc:
discover_cube(cube.uuid_prefix, function_store)
assert str(exc.value) == 'Could not find seed dataset for cube "cube".'
def test_raises_multiple_seeds(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
metadata={KTK_CUBE_METADATA_KEY_IS_SEED: True},
)
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "p": [0], "q": [0], "i1": [0]}),
name="enrich",
metadata={KTK_CUBE_METADATA_KEY_IS_SEED: True},
)
with pytest.raises(ValueError) as exc:
discover_cube(cube.uuid_prefix, function_store)
assert (
str(exc.value)
== 'Found multiple possible seed datasets for cube "cube": enrich, myseed'
)
def test_raises_dimension_columns_missing(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}),
name=cube.seed_dataset,
metadata={KTK_CUBE_METADATA_KEY_IS_SEED: True},
)
with pytest.raises(ValueError) as exc:
discover_cube(cube.uuid_prefix, function_store)
assert (
str(exc.value)
== 'Could not recover dimension columns from seed dataset ("myseed") of cube "cube".'
)
def test_raises_partition_keys_missing_old_metadata(self, cube, function_store):
store_data(
cube=cube,
function_store=function_store,
df= | pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from collections import OrderedDict
from datetime import datetime
import numpy as np
import pytest
from pandas.compat import lrange
from pandas import DataFrame, MultiIndex, Series, date_range, notna
import pandas.core.panel as panelm
from pandas.core.panel import Panel
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal,
makeCustomDataframe as mkdf, makeMixedDataFrame)
from pandas.tseries.offsets import MonthEnd
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class PanelTests(object):
panel = None
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
pytest.raises(TypeError, hash, c_empty)
pytest.raises(TypeError, hash, c)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class SafeForSparse(object):
# issue 7692
def test_raise_when_not_implemented(self):
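# Arithmetic between a Panel and a Series along axis 0 was never implemented,
# so every operator is expected to raise NotImplementedError.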
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).iloc[0]
ops = ['add', 'sub', 'mul', 'truediv',
'floordiv', 'div', 'mod', 'pow']
for op in ops:
with pytest.raises(NotImplementedError):
getattr(p, op)(d, axis=0)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class CheckIndexing(object):
def test_delitem_and_pop(self):
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
tm.assert_frame_equal(panelc[1], panel[1])
tm.assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
tm.assert_frame_equal(panelc[0], panel[0])
tm.assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
tm.assert_frame_equal(panelc[1], panel[1])
tm.assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# bad shape
p = Panel(np.random.randn(4, 3, 2))
msg = (r"shape of value must be \(3, 2\), "
r"shape of given object was \(4, 2\)")
with pytest.raises(ValueError, match=msg):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = | Panel({'Item1': df1, 'Item2': df2}) | pandas.core.panel.Panel |
import sys
import numpy as np
import pandas as pd
import sqlalchemy
from sqlalchemy import create_engine
# import sqlite3
def load_data(messages_filepath, categories_filepath):
'''
Function to load data and merge them into one file
Args:
messages_filepath: Filepath to load the messages.csv
categories_filepath: Filepath to load the categories.csv
Output:
df: combined dataFrame
'''
messages = pd.read_csv(messages_filepath)
categories= | pd.read_csv(categories_filepath) | pandas.read_csv |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over, so nothing gets defined and the values
# should remain unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
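# 'mmm[_]+' is used both as a regular expression and, with regex=False, as a
# literal substring; the expected matches differ accordingly.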
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
| tm.assert_series_equal(result, exp) | pandas.util.testing.assert_series_equal |
"""Tests suite for Period handling.
Parts derived from scikits.timeseries code, original authors:
- <NAME> & <NAME>
- pierregm_at_uga_dot_edu - mattknow_ca_at_hotmail_dot_com
"""
from unittest import TestCase
from datetime import datetime, timedelta
from numpy.ma.testutils import assert_equal
from pandas.tseries.period import Period, PeriodIndex
from pandas.tseries.index import DatetimeIndex, date_range
from pandas.tseries.tools import to_datetime
import pandas.core.datetools as datetools
import numpy as np
from pandas import Series, TimeSeries
from pandas.util.testing import assert_series_equal
class TestPeriodProperties(TestCase):
"Test properties such as year, month, weekday, etc...."
#
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_interval_constructor(self):
i1 = Period('1/1/2005', freq='M')
i2 = Period('Jan 2005')
self.assertEquals(i1, i2)
i1 = Period('2005', freq='A')
i2 = Period('2005')
i3 = Period('2005', freq='a')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i4 = Period('2005', freq='M')
i5 = Period('2005', freq='m')
self.assert_(i1 != i4)
self.assertEquals(i4, i5)
i1 = Period.now('Q')
i2 = Period(datetime.now(), freq='Q')
i3 = Period.now('q')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
# Biz day construction, roll forward if non-weekday
i1 = Period('3/10/12', freq='B')
i2 = Period('3/12/12', freq='D')
self.assertEquals(i1, i2.asfreq('B'))
i3 = Period('3/10/12', freq='b')
self.assertEquals(i1, i3)
i1 = Period(year=2005, quarter=1, freq='Q')
i2 = Period('1/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, quarter=3, freq='Q')
i2 = Period('9/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, month=3, day=1, freq='D')
i2 = Period('3/1/2005', freq='D')
self.assertEquals(i1, i2)
i3 = Period(year=2005, month=3, day=1, freq='d')
self.assertEquals(i1, i3)
i1 = Period(year=2012, month=3, day=10, freq='B')
i2 = Period('3/12/12', freq='B')
self.assertEquals(i1, i2)
i1 = Period('2005Q1')
i2 = Period(year=2005, quarter=1, freq='Q')
i3 = Period('2005q1')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i1 = Period('05Q1')
self.assertEquals(i1, i2)
lower = Period('05q1')
self.assertEquals(i1, lower)
i1 = Period('1Q2005')
self.assertEquals(i1, i2)
lower = Period('1q2005')
self.assertEquals(i1, lower)
i1 = Period('1Q05')
self.assertEquals(i1, i2)
lower = Period('1q05')
self.assertEquals(i1, lower)
i1 = Period('4Q1984')
self.assertEquals(i1.year, 1984)
lower = Period('4q1984')
self.assertEquals(i1, lower)
i1 = Period('1982', freq='min')
i2 = Period('1982', freq='MIN')
self.assertEquals(i1, i2)
i2 = Period('1982', freq=('Min', 1))
self.assertEquals(i1, i2)
def test_freq_str(self):
i1 = Period('1982', freq='Min')
self.assert_(i1.freq[0] != '1')
i2 = Period('11/30/2005', freq='2Q')
self.assertEquals(i2.freq[0], '2')
def test_to_timestamp(self):
intv = Period('1982', freq='A')
start_ts = intv.to_timestamp(which_end='S')
aliases = ['s', 'StarT', 'BEGIn']
for a in aliases:
self.assertEquals(start_ts, intv.to_timestamp(which_end=a))
end_ts = intv.to_timestamp(which_end='E')
aliases = ['e', 'end', 'FINIsH']
for a in aliases:
self.assertEquals(end_ts, intv.to_timestamp(which_end=a))
from_lst = ['A', 'Q', 'M', 'W', 'B',
'D', 'H', 'Min', 'S']
for i, fcode in enumerate(from_lst):
intv = Period('1982', freq=fcode)
result = intv.to_timestamp().to_period(fcode)
self.assertEquals(result, intv)
self.assertEquals(intv.start_time(), intv.to_timestamp('S'))
self.assertEquals(intv.end_time(), intv.to_timestamp('E'))
def test_properties_annually(self):
# Test properties on Periods with annually frequency.
a_date = Period(freq='A', year=2007)
assert_equal(a_date.year, 2007)
def test_properties_quarterly(self):
# Test properties on Periods with daily frequency.
qedec_date = Period(freq="Q-DEC", year=2007, quarter=1)
qejan_date = Period(freq="Q-JAN", year=2007, quarter=1)
qejun_date = Period(freq="Q-JUN", year=2007, quarter=1)
#
for x in range(3):
for qd in (qedec_date, qejan_date, qejun_date):
assert_equal((qd + x).qyear, 2007)
assert_equal((qd + x).quarter, x + 1)
def test_properties_monthly(self):
# Test properties on Periods with daily frequency.
m_date = Period(freq='M', year=2007, month=1)
for x in range(11):
m_ival_x = m_date + x
assert_equal(m_ival_x.year, 2007)
if 1 <= x + 1 <= 3:
assert_equal(m_ival_x.quarter, 1)
elif 4 <= x + 1 <= 6:
assert_equal(m_ival_x.quarter, 2)
elif 7 <= x + 1 <= 9:
assert_equal(m_ival_x.quarter, 3)
elif 10 <= x + 1 <= 12:
assert_equal(m_ival_x.quarter, 4)
assert_equal(m_ival_x.month, x + 1)
def test_properties_weekly(self):
# Test properties on Periods with daily frequency.
w_date = Period(freq='WK', year=2007, month=1, day=7)
#
assert_equal(w_date.year, 2007)
assert_equal(w_date.quarter, 1)
assert_equal(w_date.month, 1)
assert_equal(w_date.week, 1)
assert_equal((w_date - 1).week, 52)
def test_properties_daily(self):
# Test properties on Periods with daily frequency.
b_date = Period(freq='B', year=2007, month=1, day=1)
#
assert_equal(b_date.year, 2007)
assert_equal(b_date.quarter, 1)
assert_equal(b_date.month, 1)
assert_equal(b_date.day, 1)
assert_equal(b_date.weekday, 0)
assert_equal(b_date.day_of_year, 1)
#
d_date = Period(freq='D', year=2007, month=1, day=1)
#
assert_equal(d_date.year, 2007)
assert_equal(d_date.quarter, 1)
assert_equal(d_date.month, 1)
assert_equal(d_date.day, 1)
assert_equal(d_date.weekday, 0)
assert_equal(d_date.day_of_year, 1)
def test_properties_hourly(self):
# Test properties on Periods with hourly frequency.
h_date = Period(freq='H', year=2007, month=1, day=1, hour=0)
#
assert_equal(h_date.year, 2007)
assert_equal(h_date.quarter, 1)
assert_equal(h_date.month, 1)
assert_equal(h_date.day, 1)
assert_equal(h_date.weekday, 0)
assert_equal(h_date.day_of_year, 1)
assert_equal(h_date.hour, 0)
#
def test_properties_minutely(self):
# Test properties on Periods with minutely frequency.
t_date = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=0)
#
assert_equal(t_date.quarter, 1)
assert_equal(t_date.month, 1)
assert_equal(t_date.day, 1)
assert_equal(t_date.weekday, 0)
assert_equal(t_date.day_of_year, 1)
assert_equal(t_date.hour, 0)
assert_equal(t_date.minute, 0)
def test_properties_secondly(self):
# Test properties on Periods with secondly frequency.
s_date = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
#
assert_equal(s_date.year, 2007)
assert_equal(s_date.quarter, 1)
assert_equal(s_date.month, 1)
assert_equal(s_date.day, 1)
assert_equal(s_date.weekday, 0)
assert_equal(s_date.day_of_year, 1)
assert_equal(s_date.hour, 0)
assert_equal(s_date.minute, 0)
assert_equal(s_date.second, 0)
def noWrap(item):
return item
class TestFreqConversion(TestCase):
"Test frequency conversion of date objects"
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq='A', year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
ival_A_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq='WK', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
assert_equal(ival_A.asfreq('Q', 'S'), ival_A_to_Q_start)
assert_equal(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end)
assert_equal(ival_A.asfreq('M', 's'), ival_A_to_M_start)
assert_equal(ival_A.asfreq('M', 'E'), ival_A_to_M_end)
assert_equal(ival_A.asfreq('WK', 'S'), ival_A_to_W_start)
assert_equal(ival_A.asfreq('WK', 'E'), ival_A_to_W_end)
assert_equal(ival_A.asfreq('B', 'S'), ival_A_to_B_start)
assert_equal(ival_A.asfreq('B', 'E'), ival_A_to_B_end)
assert_equal(ival_A.asfreq('D', 'S'), ival_A_to_D_start)
assert_equal(ival_A.asfreq('D', 'E'), ival_A_to_D_end)
assert_equal(ival_A.asfreq('H', 'S'), ival_A_to_H_start)
assert_equal(ival_A.asfreq('H', 'E'), ival_A_to_H_end)
assert_equal(ival_A.asfreq('min', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('min', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('T', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('T', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('S', 'S'), ival_A_to_S_start)
assert_equal(ival_A.asfreq('S', 'E'), ival_A_to_S_end)
assert_equal(ival_AJAN.asfreq('D', 'S'), ival_AJAN_to_D_start)
assert_equal(ival_AJAN.asfreq('D', 'E'), ival_AJAN_to_D_end)
assert_equal(ival_AJUN.asfreq('D', 'S'), ival_AJUN_to_D_start)
assert_equal(ival_AJUN.asfreq('D', 'E'), ival_AJUN_to_D_end)
assert_equal(ival_ANOV.asfreq('D', 'S'), ival_ANOV_to_D_start)
assert_equal(ival_ANOV.asfreq('D', 'E'), ival_ANOV_to_D_end)
assert_equal(ival_A.asfreq('A'), ival_A)
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq='Q', year=2007, quarter=1)
ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
ival_Q_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq='WK', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_Q_to_T_end = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq='D', year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq='D', year=2006, month=9, day=30)
assert_equal(ival_Q.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q_end_of_year.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q.asfreq('M', 'S'), ival_Q_to_M_start)
assert_equal(ival_Q.asfreq('M', 'E'), ival_Q_to_M_end)
assert_equal(ival_Q.asfreq('WK', 'S'), ival_Q_to_W_start)
assert_equal(ival_Q.asfreq('WK', 'E'), ival_Q_to_W_end)
assert_equal(ival_Q.asfreq('B', 'S'), ival_Q_to_B_start)
assert_equal(ival_Q.asfreq('B', 'E'), ival_Q_to_B_end)
assert_equal(ival_Q.asfreq('D', 'S'), ival_Q_to_D_start)
assert_equal(ival_Q.asfreq('D', 'E'), ival_Q_to_D_end)
assert_equal(ival_Q.asfreq('H', 'S'), ival_Q_to_H_start)
assert_equal(ival_Q.asfreq('H', 'E'), ival_Q_to_H_end)
assert_equal(ival_Q.asfreq('Min', 'S'), ival_Q_to_T_start)
assert_equal(ival_Q.asfreq('Min', 'E'), ival_Q_to_T_end)
assert_equal(ival_Q.asfreq('S', 'S'), ival_Q_to_S_start)
assert_equal(ival_Q.asfreq('S', 'E'), ival_Q_to_S_end)
assert_equal(ival_QEJAN.asfreq('D', 'S'), ival_QEJAN_to_D_start)
assert_equal(ival_QEJAN.asfreq('D', 'E'), ival_QEJAN_to_D_end)
assert_equal(ival_QEJUN.asfreq('D', 'S'), ival_QEJUN_to_D_start)
assert_equal(ival_QEJUN.asfreq('D', 'E'), ival_QEJUN_to_D_end)
assert_equal(ival_Q.asfreq('Q'), ival_Q)
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq='M', year=2007, month=1)
ival_M_end_of_year = Period(freq='M', year=2007, month=12)
ival_M_end_of_quarter = Period(freq='M', year=2007, month=3)
ival_M_to_A = Period(freq='A', year=2007)
ival_M_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_M_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq='WK', year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq='B', year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_M_to_H_end = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_M_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_M_to_T_end = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_M_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_M_to_S_end = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
assert_equal(ival_M.asfreq('A'), ival_M_to_A)
assert_equal(ival_M_end_of_year.asfreq('A'), ival_M_to_A)
assert_equal(ival_M.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M_end_of_quarter.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M.asfreq('WK', 'S'), ival_M_to_W_start)
assert_equal(ival_M.asfreq('WK', 'E'), ival_M_to_W_end)
assert_equal(ival_M.asfreq('B', 'S'), ival_M_to_B_start)
assert_equal(ival_M.asfreq('B', 'E'), ival_M_to_B_end)
assert_equal(ival_M.asfreq('D', 'S'), ival_M_to_D_start)
assert_equal(ival_M.asfreq('D', 'E'), ival_M_to_D_end)
assert_equal(ival_M.asfreq('H', 'S'), ival_M_to_H_start)
assert_equal(ival_M.asfreq('H', 'E'), ival_M_to_H_end)
assert_equal(ival_M.asfreq('Min', 'S'), ival_M_to_T_start)
assert_equal(ival_M.asfreq('Min', 'E'), ival_M_to_T_end)
assert_equal(ival_M.asfreq('S', 'S'), ival_M_to_S_start)
assert_equal(ival_M.asfreq('S', 'E'), ival_M_to_S_end)
assert_equal(ival_M.asfreq('M'), ival_M)
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq='WK', year=2007, month=1, day=1)
ival_WSUN = Period(freq='WK', year=2007, month=1, day=7)
ival_WSAT = Period(freq='WK-SAT', year=2007, month=1, day=6)
ival_WFRI = Period(freq='WK-FRI', year=2007, month=1, day=5)
ival_WTHU = Period(freq='WK-THU', year=2007, month=1, day=4)
ival_WWED = Period(freq='WK-WED', year=2007, month=1, day=3)
ival_WTUE = Period(freq='WK-TUE', year=2007, month=1, day=2)
ival_WMON = Period(freq='WK-MON', year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq='D', year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq='D', year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq='D', year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq='D', year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq='D', year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq='D', year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq='D', year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq='D', year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq='D', year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq='D', year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq='D', year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq='D', year=2007, month=1, day=1)
ival_W_end_of_year = Period(freq='WK', year=2007, month=12, day=31)
ival_W_end_of_quarter = Period(freq='WK', year=2007, month=3, day=31)
ival_W_end_of_month = Period(freq='WK', year=2007, month=1, day=31)
ival_W_to_A = Period(freq='A', year=2007)
ival_W_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_W_to_M = Period(freq='M', year=2007, month=1)
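# The expected annual/quarterly/monthly period of a week depends on which
# year/quarter/month the week's last day falls in (the default weekly frequency
# ends on Sunday), hence the weekday checks below.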
if Period(freq='D', year=2007, month=12, day=31).weekday == 6:
ival_W_to_A_end_of_year = Period(freq='A', year=2007)
else:
ival_W_to_A_end_of_year = Period(freq='A', year=2008)
if Period(freq='D', year=2007, month=3, day=31).weekday == 6:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=1)
else:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=2)
if Period(freq='D', year=2007, month=1, day=31).weekday == 6:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=1)
else:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=2)
ival_W_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_W_to_B_end = Period(freq='B', year=2007, month=1, day=5)
ival_W_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_W_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_W_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_W_to_H_end = Period(freq='H', year=2007, month=1, day=7,
hour=23)
ival_W_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_W_to_T_end = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_W_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_W_to_S_end = Period(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59)
assert_equal(ival_W.asfreq('A'), ival_W_to_A)
assert_equal(ival_W_end_of_year.asfreq('A'),
ival_W_to_A_end_of_year)
assert_equal(ival_W.asfreq('Q'), ival_W_to_Q)
assert_equal(ival_W_end_of_quarter.asfreq('Q'),
ival_W_to_Q_end_of_quarter)
assert_equal(ival_W.asfreq('M'), ival_W_to_M)
assert_equal(ival_W_end_of_month.asfreq('M'),
ival_W_to_M_end_of_month)
assert_equal(ival_W.asfreq('B', 'S'), ival_W_to_B_start)
assert_equal(ival_W.asfreq('B', 'E'), ival_W_to_B_end)
assert_equal(ival_W.asfreq('D', 'S'), ival_W_to_D_start)
assert_equal(ival_W.asfreq('D', 'E'), ival_W_to_D_end)
assert_equal(ival_WSUN.asfreq('D', 'S'), ival_WSUN_to_D_start)
assert_equal(ival_WSUN.asfreq('D', 'E'), ival_WSUN_to_D_end)
assert_equal(ival_WSAT.asfreq('D', 'S'), ival_WSAT_to_D_start)
assert_equal(ival_WSAT.asfreq('D', 'E'), ival_WSAT_to_D_end)
assert_equal(ival_WFRI.asfreq('D', 'S'), ival_WFRI_to_D_start)
assert_equal(ival_WFRI.asfreq('D', 'E'), ival_WFRI_to_D_end)
assert_equal(ival_WTHU.asfreq('D', 'S'), ival_WTHU_to_D_start)
assert_equal(ival_WTHU.asfreq('D', 'E'), ival_WTHU_to_D_end)
assert_equal(ival_WWED.asfreq('D', 'S'), ival_WWED_to_D_start)
assert_equal(ival_WWED.asfreq('D', 'E'), ival_WWED_to_D_end)
assert_equal(ival_WTUE.asfreq('D', 'S'), ival_WTUE_to_D_start)
assert_equal(ival_WTUE.asfreq('D', 'E'), ival_WTUE_to_D_end)
assert_equal(ival_WMON.asfreq('D', 'S'), ival_WMON_to_D_start)
assert_equal(ival_WMON.asfreq('D', 'E'), ival_WMON_to_D_end)
assert_equal(ival_W.asfreq('H', 'S'), ival_W_to_H_start)
assert_equal(ival_W.asfreq('H', 'E'), ival_W_to_H_end)
assert_equal(ival_W.asfreq('Min', 'S'), ival_W_to_T_start)
assert_equal(ival_W.asfreq('Min', 'E'), ival_W_to_T_end)
assert_equal(ival_W.asfreq('S', 'S'), ival_W_to_S_start)
assert_equal(ival_W.asfreq('S', 'E'), ival_W_to_S_end)
assert_equal(ival_W.asfreq('WK'), ival_W)
def test_conv_business(self):
# frequency conversion tests: from Business Frequency
ival_B = Period(freq='B', year=2007, month=1, day=1)
ival_B_end_of_year = Period(freq='B', year=2007, month=12, day=31)
ival_B_end_of_quarter = Period(freq='B', year=2007, month=3, day=30)
ival_B_end_of_month = Period(freq='B', year=2007, month=1, day=31)
ival_B_end_of_week = Period(freq='B', year=2007, month=1, day=5)
ival_B_to_A = Period(freq='A', year=2007)
ival_B_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_B_to_M = Period(freq='M', year=2007, month=1)
ival_B_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_B_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_B_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_B_to_H_end = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_B_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_B_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_B_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_B_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
assert_equal(ival_B.asfreq('A'), ival_B_to_A)
assert_equal(ival_B_end_of_year.asfreq('A'), ival_B_to_A)
assert_equal(ival_B.asfreq('Q'), ival_B_to_Q)
assert_equal(ival_B_end_of_quarter.asfreq('Q'), ival_B_to_Q)
assert_equal(ival_B.asfreq('M'), ival_B_to_M)
assert_equal(ival_B_end_of_month.asfreq('M'), ival_B_to_M)
assert_equal(ival_B.asfreq('WK'), ival_B_to_W)
assert_equal(ival_B_end_of_week.asfreq('WK'), ival_B_to_W)
assert_equal(ival_B.asfreq('D'), ival_B_to_D)
assert_equal(ival_B.asfreq('H', 'S'), ival_B_to_H_start)
assert_equal(ival_B.asfreq('H', 'E'), ival_B_to_H_end)
assert_equal(ival_B.asfreq('Min', 'S'), ival_B_to_T_start)
assert_equal(ival_B.asfreq('Min', 'E'), ival_B_to_T_end)
assert_equal(ival_B.asfreq('S', 'S'), ival_B_to_S_start)
assert_equal(ival_B.asfreq('S', 'E'), ival_B_to_S_end)
assert_equal(ival_B.asfreq('B'), ival_B)
def test_conv_daily(self):
# frequency conversion tests: from Daily Frequency
ival_D = Period(freq='D', year=2007, month=1, day=1)
ival_D_end_of_year = Period(freq='D', year=2007, month=12, day=31)
ival_D_end_of_quarter = Period(freq='D', year=2007, month=3, day=31)
ival_D_end_of_month = Period(freq='D', year=2007, month=1, day=31)
ival_D_end_of_week = Period(freq='D', year=2007, month=1, day=7)
ival_D_friday = Period(freq='D', year=2007, month=1, day=5)
ival_D_saturday = Period(freq='D', year=2007, month=1, day=6)
ival_D_sunday = Period(freq='D', year=2007, month=1, day=7)
ival_D_monday = Period(freq='D', year=2007, month=1, day=8)
ival_B_friday = Period(freq='B', year=2007, month=1, day=5)
ival_B_monday = Period(freq='B', year=2007, month=1, day=8)
ival_D_to_A = Period(freq='A', year=2007)
ival_Deoq_to_AJAN = Period(freq='A-JAN', year=2008)
ival_Deoq_to_AJUN = Period(freq='A-JUN', year=2007)
ival_Deoq_to_ADEC = Period(freq='A-DEC', year=2007)
ival_D_to_QEJAN = Period(freq="Q-JAN", year=2007, quarter=4)
ival_D_to_QEJUN = Period(freq="Q-JUN", year=2007, quarter=3)
ival_D_to_QEDEC = Period(freq="Q-DEC", year=2007, quarter=1)
ival_D_to_M = Period(freq='M', year=2007, month=1)
ival_D_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_D_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_D_to_H_end = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_D_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_D_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_D_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_D_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
assert_equal(ival_D.asfreq('A'), ival_D_to_A)
assert_equal(ival_D_end_of_quarter.asfreq('A-JAN'),
ival_Deoq_to_AJAN)
assert_equal(ival_D_end_of_quarter.asfreq('A-JUN'),
ival_Deoq_to_AJUN)
assert_equal(ival_D_end_of_quarter.asfreq('A-DEC'),
ival_Deoq_to_ADEC)
assert_equal(ival_D_end_of_year.asfreq('A'), ival_D_to_A)
assert_equal(ival_D_end_of_quarter.asfreq('Q'), ival_D_to_QEDEC)
assert_equal(ival_D.asfreq("Q-JAN"), ival_D_to_QEJAN)
assert_equal(ival_D.asfreq("Q-JUN"), ival_D_to_QEJUN)
assert_equal(ival_D.asfreq("Q-DEC"), ival_D_to_QEDEC)
assert_equal(ival_D.asfreq('M'), ival_D_to_M)
assert_equal(ival_D_end_of_month.asfreq('M'), ival_D_to_M)
assert_equal(ival_D.asfreq('WK'), ival_D_to_W)
assert_equal(ival_D_end_of_week.asfreq('WK'), ival_D_to_W)
assert_equal(ival_D_friday.asfreq('B'), ival_B_friday)
assert_equal(ival_D_saturday.asfreq('B', 'S'), ival_B_friday)
assert_equal(ival_D_saturday.asfreq('B', 'E'), ival_B_monday)
assert_equal(ival_D_sunday.asfreq('B', 'S'), ival_B_friday)
assert_equal(ival_D_sunday.asfreq('B', 'E'), ival_B_monday)
assert_equal(ival_D.asfreq('H', 'S'), ival_D_to_H_start)
assert_equal(ival_D.asfreq('H', 'E'), ival_D_to_H_end)
assert_equal(ival_D.asfreq('Min', 'S'), ival_D_to_T_start)
assert_equal(ival_D.asfreq('Min', 'E'), ival_D_to_T_end)
assert_equal(ival_D.asfreq('S', 'S'), ival_D_to_S_start)
assert_equal(ival_D.asfreq('S', 'E'), ival_D_to_S_end)
assert_equal(ival_D.asfreq('D'), ival_D)
def test_conv_hourly(self):
# frequency conversion tests: from Hourly Frequency
ival_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_H_end_of_year = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_H_end_of_quarter = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_H_end_of_month = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_H_end_of_week = Period(freq='H', year=2007, month=1, day=7,
hour=23)
ival_H_end_of_day = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_H_end_of_bus = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_H_to_A = Period(freq='A', year=2007)
ival_H_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_H_to_M = Period(freq='M', year=2007, month=1)
ival_H_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_H_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_H_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_H_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_H_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=59)
ival_H_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_H_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=59, second=59)
assert_equal(ival_H.asfreq('A'), ival_H_to_A)
assert_equal(ival_H_end_of_year.asfreq('A'), ival_H_to_A)
assert_equal(ival_H.asfreq('Q'), ival_H_to_Q)
assert_equal(ival_H_end_of_quarter.asfreq('Q'), ival_H_to_Q)
assert_equal(ival_H.asfreq('M'), ival_H_to_M)
assert_equal(ival_H_end_of_month.asfreq('M'), ival_H_to_M)
assert_equal(ival_H.asfreq('WK'), ival_H_to_W)
assert_equal(ival_H_end_of_week.asfreq('WK'), ival_H_to_W)
assert_equal(ival_H.asfreq('D'), ival_H_to_D)
assert_equal(ival_H_end_of_day.asfreq('D'), ival_H_to_D)
assert_equal(ival_H.asfreq('B'), ival_H_to_B)
assert_equal(ival_H_end_of_bus.asfreq('B'), ival_H_to_B)
assert_equal(ival_H.asfreq('Min', 'S'), ival_H_to_T_start)
assert_equal(ival_H.asfreq('Min', 'E'), ival_H_to_T_end)
assert_equal(ival_H.asfreq('S', 'S'), ival_H_to_S_start)
assert_equal(ival_H.asfreq('S', 'E'), ival_H_to_S_end)
assert_equal(ival_H.asfreq('H'), ival_H)
def test_conv_minutely(self):
# frequency conversion tests: from Minutely Frequency
ival_T = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_T_end_of_year = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_T_end_of_quarter = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_T_end_of_month = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_T_end_of_week = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_T_end_of_day = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_T_end_of_bus = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_T_end_of_hour = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=59)
ival_T_to_A = Period(freq='A', year=2007)
ival_T_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_T_to_M = Period(freq='M', year=2007, month=1)
ival_T_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_T_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_T_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_T_to_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_T_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_T_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=59)
assert_equal(ival_T.asfreq('A'), ival_T_to_A)
assert_equal(ival_T_end_of_year.asfreq('A'), ival_T_to_A)
assert_equal(ival_T.asfreq('Q'), ival_T_to_Q)
assert_equal(ival_T_end_of_quarter.asfreq('Q'), ival_T_to_Q)
assert_equal(ival_T.asfreq('M'), ival_T_to_M)
assert_equal(ival_T_end_of_month.asfreq('M'), ival_T_to_M)
assert_equal(ival_T.asfreq('WK'), ival_T_to_W)
assert_equal(ival_T_end_of_week.asfreq('WK'), ival_T_to_W)
assert_equal(ival_T.asfreq('D'), ival_T_to_D)
assert_equal(ival_T_end_of_day.asfreq('D'), ival_T_to_D)
assert_equal(ival_T.asfreq('B'), ival_T_to_B)
assert_equal(ival_T_end_of_bus.asfreq('B'), ival_T_to_B)
assert_equal(ival_T.asfreq('H'), ival_T_to_H)
assert_equal(ival_T_end_of_hour.asfreq('H'), ival_T_to_H)
assert_equal(ival_T.asfreq('S', 'S'), ival_T_to_S_start)
assert_equal(ival_T.asfreq('S', 'E'), ival_T_to_S_end)
assert_equal(ival_T.asfreq('Min'), ival_T)
def test_conv_secondly(self):
# frequency conversion tests: from Secondly Frequency
ival_S = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_S_end_of_year = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_quarter = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_month = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_week = Period(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59)
ival_S_end_of_day = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
ival_S_end_of_bus = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
ival_S_end_of_hour = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=59, second=59)
ival_S_end_of_minute = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=59)
ival_S_to_A = Period(freq='A', year=2007)
ival_S_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_S_to_M = Period(freq='M', year=2007, month=1)
ival_S_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_S_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_S_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_S_to_H = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_S_to_T = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
assert_equal(ival_S.asfreq('A'), ival_S_to_A)
assert_equal(ival_S_end_of_year.asfreq('A'), ival_S_to_A)
assert_equal(ival_S.asfreq('Q'), ival_S_to_Q)
assert_equal(ival_S_end_of_quarter.asfreq('Q'), ival_S_to_Q)
assert_equal(ival_S.asfreq('M'), ival_S_to_M)
assert_equal(ival_S_end_of_month.asfreq('M'), ival_S_to_M)
assert_equal(ival_S.asfreq('WK'), ival_S_to_W)
assert_equal(ival_S_end_of_week.asfreq('WK'), ival_S_to_W)
assert_equal(ival_S.asfreq('D'), ival_S_to_D)
assert_equal(ival_S_end_of_day.asfreq('D'), ival_S_to_D)
assert_equal(ival_S.asfreq('B'), ival_S_to_B)
assert_equal(ival_S_end_of_bus.asfreq('B'), ival_S_to_B)
assert_equal(ival_S.asfreq('H'), ival_S_to_H)
assert_equal(ival_S_end_of_hour.asfreq('H'), ival_S_to_H)
assert_equal(ival_S.asfreq('Min'), ival_S_to_T)
assert_equal(ival_S_end_of_minute.asfreq('Min'), ival_S_to_T)
assert_equal(ival_S.asfreq('S'), ival_S)
class TestPeriodIndex(TestCase):
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_make_time_series(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index)
self.assert_(isinstance(series, TimeSeries))
def test_to_timestamp(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index, name='foo')
exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC')
result = series.to_timestamp('D', 'end')
self.assert_(result.index.equals(exp_index))
self.assertEquals(result.name, 'foo')
exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-DEC')
result = series.to_timestamp('D', 'start')
self.assert_(result.index.equals(exp_index))
def _get_with_delta(delta, freq='A-DEC'):
return date_range(to_datetime('1/1/2001') + delta,
to_datetime('12/31/2009') + delta, freq=freq)
delta = timedelta(hours=23)
result = series.to_timestamp('H', 'end')
exp_index = _get_with_delta(delta)
self.assert_(result.index.equals(exp_index))
delta = timedelta(hours=23, minutes=59)
result = series.to_timestamp('T', 'end')
exp_index = _get_with_delta(delta)
self.assert_(result.index.equals(exp_index))
result = series.to_timestamp('S', 'end')
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
self.assert_(result.index.equals(exp_index))
def test_constructor(self):
ii = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
assert_equal(len(ii), 9)
ii = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2009')
assert_equal(len(ii), 4 * 9)
ii = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
assert_equal(len(ii), 12 * 9)
ii = PeriodIndex(freq='D', start='1/1/2001', end='12/31/2009')
assert_equal(len(ii), 365 * 9 + 2)
ii = PeriodIndex(freq='B', start='1/1/2001', end='12/31/2009')
assert_equal(len(ii), 261 * 9)
ii = PeriodIndex(freq='H', start='1/1/2001', end='12/31/2001 23:00')
assert_equal(len(ii), 365 * 24)
ii = PeriodIndex(freq='Min', start='1/1/2001', end='1/1/2001 23:59')
assert_equal(len(ii), 24 * 60)
ii = PeriodIndex(freq='S', start='1/1/2001', end='1/1/2001 23:59:59')
assert_equal(len(ii), 24 * 60 * 60)
start = Period('02-Apr-2005', 'B')
i1 = PeriodIndex(start=start, periods=20)
assert_equal(len(i1), 20)
assert_equal(i1.freq, start.freq)
assert_equal(i1[0], start)
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), 10)
assert_equal(i1.freq, end_intv.freq)
assert_equal(i1[-1], end_intv)
end_intv = Period('2006-12-31', '1w')
i2 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), len(i2))
self.assert_((i1 == i2).all())
assert_equal(i1.freq, i2.freq)
end_intv = Period('2006-12-31', ('w', 1))
i2 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), len(i2))
self.assert_((i1 == i2).all())
assert_equal(i1.freq, i2.freq)
try:
PeriodIndex(start=start, end=end_intv)
raise AssertionError('Cannot allow mixed freq for start and end')
except ValueError:
pass
end_intv = Period('2005-05-01', 'B')
i1 = PeriodIndex(start=start, end=end_intv)
try:
PeriodIndex(start=start)
raise AssertionError('Must specify periods if missing start or end')
except ValueError:
pass
def test_shift(self):
ii1 = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
ii2 = PeriodIndex(freq='A', start='1/1/2002', end='12/1/2010')
assert_equal(len(ii1), len(ii2))
assert_equal(ii1.shift(1).values, ii2.values)
ii1 = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
ii2 = PeriodIndex(freq='A', start='1/1/2000', end='12/1/2008')
assert_equal(len(ii1), len(ii2))
assert_equal(ii1.shift(-1).values, ii2.values)
ii1 = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
ii2 = PeriodIndex(freq='M', start='2/1/2001', end='1/1/2010')
assert_equal(len(ii1), len(ii2))
assert_equal(ii1.shift(1).values, ii2.values)
ii1 = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
ii2 = PeriodIndex(freq='M', start='12/1/2000', end='11/1/2009')
assert_equal(len(ii1), len(ii2))
assert_equal(ii1.shift(-1).values, ii2.values)
ii1 = PeriodIndex(freq='D', start='1/1/2001', end='12/1/2009')
ii2 = PeriodIndex(freq='D', start='1/2/2001', end='12/2/2009')
assert_equal(len(ii1), len(ii2))
assert_equal(ii1.shift(1).values, ii2.values)
ii1 = PeriodIndex(freq='D', start='1/1/2001', end='12/1/2009')
ii2 = PeriodIndex(freq='D', start='12/31/2000', end='11/30/2009')
assert_equal(len(ii1), len(ii2))
assert_equal(ii1.shift(-1).values, ii2.values)
def test_asfreq(self):
ii1 = PeriodIndex(freq='A', start='1/1/2001', end='1/1/2001')
ii2 = PeriodIndex(freq='Q', start='1/1/2001', end='1/1/2001')
ii3 = PeriodIndex(freq='M', start='1/1/2001', end='1/1/2001')
ii4 = PeriodIndex(freq='D', start='1/1/2001', end='1/1/2001')
ii5 = PeriodIndex(freq='H', start='1/1/2001', end='1/1/2001 00:00')
ii6 = PeriodIndex(freq='Min', start='1/1/2001', end='1/1/2001 00:00')
ii7 = PeriodIndex(freq='S', start='1/1/2001', end='1/1/2001 00:00:00')
self.assertEquals(ii1.asfreq('Q', 'S'), ii2)
self.assertEquals(ii1.asfreq('Q', 's'), ii2)
self.assertEquals(ii1.asfreq('M', 'start'), ii3)
self.assertEquals(ii1.asfreq('D', 'StarT'), ii4)
self.assertEquals(ii1.asfreq('H', 'beGIN'), ii5)
self.assertEquals(ii1.asfreq('Min', 'S'), ii6)
self.assertEquals(ii1.asfreq('S', 'S'), ii7)
self.assertEquals(ii2.asfreq('A', 'S'), ii1)
self.assertEquals(ii2.asfreq('M', 'S'), ii3)
self.assertEquals(ii2.asfreq('D', 'S'), ii4)
self.assertEquals(ii2.asfreq('H', 'S'), ii5)
self.assertEquals(ii2.asfreq('Min', 'S'), ii6)
self.assertEquals(ii2.asfreq('S', 'S'), ii7)
self.assertEquals(ii3.asfreq('A', 'S'), ii1)
self.assertEquals(ii3.asfreq('Q', 'S'), ii2)
self.assertEquals(ii3.asfreq('D', 'S'), ii4)
self.assertEquals(ii3.asfreq('H', 'S'), ii5)
self.assertEquals(ii3.asfreq('Min', 'S'), ii6)
self.assertEquals(ii3.asfreq('S', 'S'), ii7)
self.assertEquals(ii4.asfreq('A', 'S'), ii1)
self.assertEquals(ii4.asfreq('Q', 'S'), ii2)
self.assertEquals(ii4.asfreq('M', 'S'), ii3)
self.assertEquals(ii4.asfreq('H', 'S'), ii5)
self.assertEquals(ii4.asfreq('Min', 'S'), ii6)
self.assertEquals(ii4.asfreq('S', 'S'), ii7)
self.assertEquals(ii5.asfreq('A', 'S'), ii1)
self.assertEquals(ii5.asfreq('Q', 'S'), ii2)
self.assertEquals(ii5.asfreq('M', 'S'), ii3)
self.assertEquals(ii5.asfreq('D', 'S'), ii4)
self.assertEquals(ii5.asfreq('Min', 'S'), ii6)
self.assertEquals(ii5.asfreq('S', 'S'), ii7)
self.assertEquals(ii6.asfreq('A', 'S'), ii1)
self.assertEquals(ii6.asfreq('Q', 'S'), ii2)
self.assertEquals(ii6.asfreq('M', 'S'), ii3)
self.assertEquals(ii6.asfreq('D', 'S'), ii4)
self.assertEquals(ii6.asfreq('H', 'S'), ii5)
self.assertEquals(ii6.asfreq('S', 'S'), ii7)
self.assertEquals(ii7.asfreq('A', 'S'), ii1)
self.assertEquals(ii7.asfreq('Q', 'S'), ii2)
self.assertEquals(ii7.asfreq('M', 'S'), ii3)
self.assertEquals(ii7.asfreq('D', 'S'), ii4)
self.assertEquals(ii7.asfreq('H', 'S'), ii5)
self.assertEquals(ii7.asfreq('Min', 'S'), ii6)
#self.assertEquals(ii7.asfreq('A', 'E'), i_end)
def test_badinput(self):
self.assertRaises(datetools.DateParseError, Period, '1/1/-2000', 'A')
self.assertRaises(ValueError, Period, -2000, 'A')
self.assertRaises(ValueError, Period, 0, 'A')
self.assertRaises(ValueError, PeriodIndex, [-1, 0, 1], 'A')
self.assertRaises(ValueError, PeriodIndex, np.array([-1, 0, 1]), 'A')
def test_dti_to_period(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
ii1 = dti.to_period()
ii2 = dti.to_period(freq='D')
self.assertEquals(ii1[0], Period('Jan 2005', freq='M'))
self.assertEquals(ii2[0], Period('1/31/2005', freq='D'))
self.assertEquals(ii1[-1], Period('Nov 2005', freq='M'))
self.assertEquals(ii2[-1], Period('11/30/2005', freq='D'))
def test_iindex_slice_index(self):
ii = PeriodIndex(start='1/1/10', end='12/31/12', freq='M')
s = Series(np.random.rand(len(ii)), index=ii)
res = s['2010']
exp = s[0:12]
assert_series_equal(res, exp)
res = s['2011']
exp = s[12:24]
assert_series_equal(res, exp)
def test_iindex_qaccess(self):
ii = PeriodIndex(['2Q05', '3Q05', '4Q05', '1Q06', '2Q06'], freq='Q')
s = Series(np.random.rand(len(ii)), index=ii).cumsum()
# Todo: fix these accessors!
self.assert_(s['05Q4'] == s[2])
def test_interval_dt64_round_trip(self):
dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002',
'1/5/2002', '1/6/2002', '1/7/2002'], freq='B')
ii = dti.to_period()
self.assert_(ii.to_timestamp().equals(dti))
dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002',
'1/5/2002', '1/6/2002', '1/7/2002'], freq='B')
ii = dti.to_period(freq='3H')
self.assert_(ii.to_timestamp().equals(dti))
def test_iindex_multiples(self):
ii = PeriodIndex(start='1/1/10', end='12/31/12', freq='2M')
self.assertEquals(ii[0], Period('1/1/10', '2M'))
self.assertEquals(ii[1], Period('3/1/10', '2M'))
self.assertEquals(ii[0].asfreq('6M'), ii[2].asfreq('6M'))
self.assertEquals(ii[0].asfreq('A'), ii[2].asfreq('A'))
self.assertEquals(ii[0].asfreq('M', how='S'),
Period('Jan 2010', '1M'))
self.assertEquals(ii[0].asfreq('M', how='E'),
Period('Feb 2010', '1M'))
self.assertEquals(ii[1].asfreq('M', how='S'),
Period('Mar 2010', '1M'))
i = Period('1/1/2010 12:05:18', '5S')
self.assertEquals(i, Period('1/1/2010 12:05:15', '5S'))
i = Period('1/1/2010 12:05:18', '5S')
self.assertEquals(i.asfreq('1S', how='E'),
Period('1/1/2010 12:05:19', '1S'))
class TestMethods(TestCase):
"Base test class for MaskedArrays."
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_add(self):
dt1 = | Period(freq='D', year=2008, month=1, day=1) | pandas.tseries.period.Period |
import numpy as np
import pandas as pd
from dowhy.interpreters.visual_interpreter import VisualInterpreter
from dowhy.causal_estimators.propensity_score_estimator import PropensityScoreEstimator
from dowhy.causal_estimators.propensity_score_stratification_estimator import PropensityScoreStratificationEstimator
from dowhy.causal_estimator import CausalEstimate
class PropensityBalanceInterpreter(VisualInterpreter):
SUPPORTED_ESTIMATORS=[PropensityScoreStratificationEstimator,]
def __init__(self, estimate, **kwargs):
super().__init__(estimate, **kwargs)
if not isinstance(estimate, CausalEstimate):
error_msg = "The interpreter method expects a CausalEstimate object."
self.logger.error(error_msg)
raise ValueError(error_msg)
self.estimator = self.estimate.estimator
if not any(isinstance(self.estimator, est_class) for est_class in PropensityBalanceInterpreter.SUPPORTED_ESTIMATORS):
error_msg = "The interpreter method only supports propensity score stratification estimator."
self.logger.error(error_msg)
raise ValueError(error_msg)
def interpret(self):
"""Balance plot that shows the change in standardized mean differences for each covariate after propensity score stratification.
"""
cols = self.estimator._observed_common_causes_names + self.estimator._treatment_name +["strata", "propensity_score"]
df = self.estimator._data[cols]
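# Reshape to long format: one row per (observation, common cause), with the
# covariate value in column "W" and its index in "common_cause_id".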
df_long=pd.wide_to_long(df.reset_index(), stubnames=["W"], i='index', j="common_cause_id").reset_index().astype({'W': 'float64'})
# First, calculating mean differences by strata
mean_diff = df_long.groupby(self.estimator._treatment_name+ ["common_cause_id", "strata"]).agg(mean_w = ("W", np.mean))
mean_diff = mean_diff.groupby(["common_cause_id", "strata"]).transform(lambda x: x.max() - x.min()).reset_index()
mean_diff = mean_diff.query("v0==True")
size_by_w_strata = df_long.groupby(["common_cause_id", "strata"]).agg(
size=("propensity_score", np.size)).reset_index()
size_by_strata = df_long.groupby(["common_cause_id"]).agg(
size=("propensity_score", np.size)).reset_index()
size_by_strata = pd.merge(size_by_w_strata, size_by_strata, on="common_cause_id")
mean_diff_strata = pd.merge(mean_diff, size_by_strata, on=("common_cause_id", "strata"))
stddev_by_w_strata = df_long.groupby(["common_cause_id", "strata"]).agg(
stddev=("W", np.std)).reset_index()
mean_diff_strata = pd.merge(mean_diff_strata, stddev_by_w_strata, on=["common_cause_id", "strata"])
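# Standardize each stratum's treated-vs-control mean difference by the stratum's
# standard deviation and weight it by the stratum's share of observations (size_x / size_y).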
mean_diff_strata["scaled_mean"] = (mean_diff_strata["mean_w"]/mean_diff_strata["stddev"])* (mean_diff_strata["size_x"]/mean_diff_strata["size_y"])
mean_diff_strata = mean_diff_strata.groupby("common_cause_id").agg(std_mean_diff = ("scaled_mean", np.sum )).reset_index()
# Second, without strata
mean_diff_overall = df_long.groupby(self.estimator._treatment_name+ ["common_cause_id"]).agg(mean_w = ("W", np.mean))
mean_diff_overall = mean_diff_overall.groupby("common_cause_id").transform(lambda x: x.max() - x.min()).reset_index()
mean_diff_overall=mean_diff_overall[mean_diff_overall[self.estimator._treatment_name[0]]==True] #TODO
stddev_overall = df_long.groupby(["common_cause_id"]).agg(
stddev=("W", np.std)).reset_index()
mean_diff_overall = | pd.merge(mean_diff_overall, stddev_overall, on=["common_cause_id"]) | pandas.merge |
# Code to extract the information from the web
# using the <id> values from the bolivia_power_1.csv file
# input: bolivia_power_1.id.csv
# output: six .npy array files:
# <nodes_ids.lat,lon> <node.tags>
# <way.ids> <way.ref> <way.tags>
# ...
# v. 1.1
#import pandas as pd
import numpy as np
import pandas as pd
# Data from Bolivia_power
path_to_csv_power_data = '/notebooks/Power/data/bolivia_power_1.csv'
df_bolivia_power= pd.read_csv(path_to_csv_power_data,delimiter=',',sep=',', error_bad_lines=False)
df_bolivia_power.columns = ['type','id','name_1','name_2','name_3','name_4']
df_bolivia_power.head()
# As array Type and id
df2_type = np.asarray(df_bolivia_power['type'])
df2_id = np.asarray(df_bolivia_power['id'])
# Return to Pandas DataFrame
data_frame_type = | pd.DataFrame(df2_type) | pandas.DataFrame |
'data engineering'
#%%
import numpy as np
import matplotlib.pyplot as pyplot
import seaborn as sns
import pandas as pd
# import xgboost as xgb
from scipy.stats import skew
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone
from sklearn.preprocessing import LabelEncoder, RobustScaler
from sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC
from sklearn.kernel_ridge import KernelRidge
from sklearn.pipeline import make_pipeline
from scipy.special import boxcox1p
from xgboost.sklearn import XGBRegressor
from sklearn.metrics import mean_squared_error
rawTrain = pd.read_csv('house-pricing/data/train.csv')
# Remove outlier rows (only from the training set)
rawTrain = rawTrain.drop(rawTrain[(rawTrain['GrLivArea'] > 4000) & (rawTrain['SalePrice'] < 300000)].index)
rawTrainX = rawTrain.drop(['SalePrice', 'Id'], axis=1)
rawTrainY = rawTrain['SalePrice']
rawTest = pd.read_csv('house-pricing/data/test.csv')
testId = rawTest['Id']
rawTestX = rawTest.drop(['Id'], axis=1)
# Concatenate train and test before preprocessing, since some categorical values may appear only in the test set
mergedX = pd.concat([rawTrainX, rawTestX])
# Short alias
mx = mergedX
print('merged train info = ' + str(mx.shape))
# Fill in missing feature values
mx['PoolQC'] = mx['PoolQC'].fillna('None')
mx['MiscFeature'] = mx['MiscFeature'].fillna('None')
mx['Alley'] = mx['Alley'].fillna('None')
mx['Fence'] = mx['Fence'].fillna('None')
mx['FireplaceQu'] = mx['FireplaceQu'].fillna('None')
# LotFrontage: use the median of houses in the same neighborhood
mx['LotFrontage'] = mx.groupby('Neighborhood')['LotFrontage'].transform(lambda x: x.fillna(x.median()))
for col in ['GarageType', 'GarageFinish', 'GarageQual', 'GarageCond']:
mx[col] = mx[col].fillna('None')
for col in ['GarageYrBlt', 'GarageArea', 'GarageCars']:
mx[col] = mx[col].fillna(0)
for col in ['BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF','TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath']:
mx[col] = mx[col].fillna(0)
for col in ['BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2']:
mx[col] = mx[col].fillna('None')
mx['MasVnrType'] = mx['MasVnrType'].fillna('None')
mx['MasVnrArea'] = mx['MasVnrArea'].fillna(0)
mx['MSZoning'] = mx['MSZoning'].fillna(mx['MSZoning'].mode()[0])
#For this categorical feature all records are "AllPub", except for one "NoSeWa" and 2 NA .
#Since the house with 'NoSewa' is in the training set, this feature won't help in predictive modelling. We can then safely remove it.
mx = mx.drop(['Utilities'], axis=1)
mx['Functional'] = mx['Functional'].fillna('Typ')
mx['Electrical'] = mx['Electrical'].fillna(mx['Electrical'].mode()[0])
mx['KitchenQual'] = mx['KitchenQual'].fillna(mx['KitchenQual'].mode()[0])
mx['Exterior1st'] = mx['Exterior1st'].fillna(mx['Exterior1st'].mode()[0])
mx['Exterior2nd'] = mx['Exterior2nd'].fillna(mx['Exterior2nd'].mode()[0])
mx['SaleType'] = mx['SaleType'].fillna(mx['SaleType'].mode()[0])
mx['MSSubClass'] = mx['MSSubClass'].fillna("None")
# Convert some numeric features to categorical
mx['MSSubClass'] = mx['MSSubClass'].apply(str)
mx['OverallCond'] = mx['OverallCond'].apply(str)
mx['YrSold'] = mx['YrSold'].apply(str)
mx['MoSold'] = mx['MoSold'].apply(str)
# Label-encode ordinal/categorical variables
cols = ('FireplaceQu', 'BsmtQual', 'BsmtCond', 'GarageQual', 'GarageCond',
'ExterQual', 'ExterCond','HeatingQC', 'PoolQC', 'KitchenQual', 'BsmtFinType1',
'BsmtFinType2', 'Functional', 'Fence', 'BsmtExposure', 'GarageFinish', 'LandSlope',
'LotShape', 'PavedDrive', 'Street', 'Alley', 'CentralAir', 'MSSubClass', 'OverallCond',
'YrSold', 'MoSold')
for c in cols:
lbl = LabelEncoder()
mx[c] = lbl.fit_transform(mx[c].values)
# Add engineered features
mx['TotalSF'] = mx['TotalBsmtSF'] + mx['1stFlrSF'] + mx['2ndFlrSF']
mx['TotalBath'] = mx['FullBath'] + mx['HalfBath']
mx['BathPerBedroom'] = mx['TotalBath'] / mx['BedroomAbvGr']
mx['BathPerBedroom'] = mx['BathPerBedroom'].replace([np.inf, -np.inf], 0)
mx['BathPerBedroom'] = mx['BathPerBedroom'].fillna(0)
mx['KitchenPerBedroom'] = mx['KitchenAbvGr'] / mx['BedroomAbvGr']
mx['KitchenPerBedroom'] = mx['KitchenPerBedroom'].replace([np.inf, -np.inf], 0)
mx['KitchenPerBedroom'] = mx['KitchenPerBedroom'].fillna(0)
mx['KitchenPerBath'] = mx['KitchenAbvGr'] / mx['TotalBath']
mx['KitchenPerBath'] = mx['KitchenPerBath'].replace([np.inf, -np.inf], 0)
mx['KitchenPerBath'] = mx['KitchenPerBath'].fillna(0)
mx['GarageCarsPerBedroom'] = mx['GarageCars'] / mx['BedroomAbvGr']
mx['GarageCarsPerBedroom'] = mx['GarageCarsPerBedroom'].replace([np.inf, -np.inf], 0)
mx['GarageCarsPerBedroom'] = mx['GarageCarsPerBedroom'].fillna(0)
# One-hot encode the remaining categorical features
mx = | pd.get_dummies(mx) | pandas.get_dummies |
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from typing import NoReturn
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
def load_data(filename: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
df = pd.read_csv(filename).dropna().drop_duplicates()
# valid value
df = df[df['view'].isin(range(5))]
df = df[df['condition'].isin(range(1, 6))]
df = df[df['grade'].isin(range(1, 14))]
for col_name in ['price', 'bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'sqft_living15', 'sqft_lot15']:
df = df[df[col_name] > 0]
for col_name in ['sqft_above', 'sqft_basement']:
df = df[df[col_name] >= 0]
df = df[df['yr_built'] >= 1000]
df = df[df['bedrooms'] <= 15]
# categorical
df['yr_built20'] = (df['yr_built'] // 20).astype(int)
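    # Integer division by 20 buckets the build year: 1900-1919 -> 95, 1920-1939 -> 96,
    # ..., 2000+ -> 100, which is why the dummy columns below are renamed to 20-year bins.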
yr_built20 = pd.get_dummies(df.yr_built20)
yr_built20.rename(columns={95: 'yr_1900', 96: 'yr_1920', 97: 'yr_1940', 98: 'yr_1960', 99: 'yr_1980', 100: 'yr_2000'}, inplace= True)
df = df.join(yr_built20)
df['yr_renovated'] = np.where(df['yr_renovated'] < 2000, 0, 1)
zip_dummies = | pd.get_dummies(df.zipcode, prefix='zip:') | pandas.get_dummies |
import numpy as np
import pandas as pd
import csv
import os
import ast
import logging
import random
from paths import *
from pathlib import Path
from data.etl import etl, over_sample
from models.train_model import train_lgb
from models.predict_model import predict_lgb
# logger config
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
fh = logging.FileHandler(log_path / 'log_item.log')
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
ch.setFormatter(formatter)
fh.setFormatter(formatter)
logger.addHandler(ch)
logger.addHandler(fh)
# item-level params
level = 'item' # group or item level
ind = 1942 #1913 to create validation forecast
run = 'yes' #whether or not to run SMOTE
# sales data set, extend to include d_1942 - d_1969
sales_master = pd.read_csv(data_path / 'raw/sales_train_evaluation.csv')
seq = np.arange(1942,1970,1)
for i in seq:
col = ('d_'+ str(i))
sales_master[col] = 0
# read calendar, sell_price datasets
calendar = | pd.read_csv(data_path / 'raw/calendar.csv') | pandas.read_csv |
import requests as rq
from bs4 import BeautifulSoup as bs
import pandas as pd
def getTheNumberOfPages(): #get how many result pages of house ads there are
site ='https://www.hurriyetemlak.com/ankara-satilik?page=1'
links = bs(rq.get(site).content,"html.parser").find_all('a')
total_page = 0
counter = 0
for link in links:
if str(link).find('tabindex')!=-1 :
counter += 1
if counter == 8:
total_page = int(str(link)[(str(link).find('>')+1):str(link).rfind('<')] )
return total_page
def getLinks(total_page): #returns an array of links for house ads
i = 0
advert = ['tmp']
for j in range(1,total_page+1):
site = 'https://www.hurriyetemlak.com/ankara-satilik?page='+str(j)
print('page:',j)
links = bs(rq.get(site).content,"html.parser").find_all('a')
for link in links:
if str(link.get('href')).find('ankara')!=-1 :
temp = str(link.get('href'))
temp2 = 'https://www.hurriyetemlak.com'+temp
if advert[i] != temp2:
advert.append(temp2)
print(temp2)
i = i + 1
return advert
def removeRedundantAdverts(advert): #removes links that do not end with a numeric ad id
temp = ''
advert_range = len(advert)
k = 0
while k<advert_range:
temp = advert[k]
temp = temp[len(temp)-2:]
try:
isinstance(int(temp), int)
except:
advert.remove(advert[k])
advert_range-=1
k=k-1
k+=1
return advert
def getFeatures(advert,features,values): #returns house features and their values
links = bs(rq.get(advert).content,"html.parser").find_all('span')
    # The data is parsed using `009b093a` as a keyword. It changes very often, so check the website's HTML for a tag like `<span data-v-009b093a=` and update it here.
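    # Illustrative example of the markup this loop expects (the hash and the values
    # below are made up; inspect the live page for the real ones):
    #   <span class="txt">Oda Sayısı</span>
    #   <span data-v-009b093a="">3+1</span>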
for link in links:
if str(link).find('txt') != -1 and str(link).find('Cephe') == -1: #ignores 'Cephe'. they make troubles
features.append(str(link)[(str(link).find('>')+1):str(link).rfind('<')]) # to get features' names
if str(link).find('<span data-v-009b093a=') != -1:
if str(link).rfind('<span data-v-009b093a=') == str(link).find('<span data-v-009b093a=') and str(link).find('<span data-v-009b093a="">, </span>')==-1 and str(link).find('Batı')==-1 and str(link).find('Doğu')==-1 and str(link).find('Kuzey')==-1 and str(link).find('Güney')==-1 and str(link).find('/sup')==-1:
values.append(str(link)[ ( str(link).find('>')+1 ) : str(link).rfind('<') ]) # to get features' values
return features,values
def getProvince(advert,features,values): #to get region and neighborhood
link3 = bs(rq.get(advert).content,"html.parser").find_all('ul',{'class':'short-info-list'}) # get Province value
province = str(link3).split('</li> <li data-v-009b093a="">')
values.append(province[1].strip())
values.append(province[2].strip())
features.append('Semt')
features.append('Mahalle')
return features,values
def getPrice(advert,features,values):
link2 = bs(rq.get(advert).content,"html.parser").find_all('p',{'class':'fontRB fz24 price'}) # get Price value
values.append( str(link2)[ (str(link2).find('>')+2) : str(link2).rfind('<')-1 ] )
features.append('Fiyat')
return features,values
def createDataset():
total_page = getTheNumberOfPages()
    advert = getLinks(1) # pass total_page (computed above) to crawl every page; hard-coded to 1 here
advert = removeRedundantAdverts(advert)
df = pd.DataFrame()
for j in range(0,len(advert)):
try:
print(j,' ',advert[j])
features = []
values = []
features.append('Link')
values.append(advert[j])
getFeatures(advert[j],features,values)
values.remove(values[6])
getPrice(advert[j],features,values)
getProvince(advert[j],features,values)
listvalues = [values]
try:
df2 = pd.DataFrame(listvalues,columns=features)
except:
values.remove(values[len(values)-2])
df2 = | pd.DataFrame(listvalues,columns=features) | pandas.DataFrame |
from src.evaluation.gnn_evaluation_module import eval_gnn
from src.models.gat_models import MonoGAT#, BiGAT, TriGAT
from src.models.rgcn_models import MonoRGCN, RGCN2
from src.models.appnp_model import MonoAPPNPModel
from src.models.multi_layered_model import MonoModel#, BiModel, TriModel
from torch_geometric.nn import GCNConv, SAGEConv, GATConv, RGCNConv, SGConv, APPNP, ClusterGCNConv
from src.data.data_loader import GraphDataset
import warnings
import pandas as pd
import os
import argparse
import numpy as np
import pickle
import torch
from src.evaluation.network_split import NetworkSplitShchur
from src.data.create_modified_configuration_model import generate_modified_conf_model
from torch_geometric.utils import from_networkx, to_networkx
from community import best_partition
import networkx as nx
def parse_args():
parser = argparse.ArgumentParser(description="Test accuracy for GCN/SAGE/GAT/RGCN/SGC/APPNP")
parser.add_argument('--size',
type=int,
default=96,
                        help='Channel size. Default is 96.')
parser.add_argument('--lr',
type=float,
default=0.01,
help='Learning rate. Default is 0.01.')
parser.add_argument('--wd',
type=float,
default=0.01,
help='Regularization weight. Default is 0.01.')
parser.add_argument('--dropout',
type=float,
default=0.8,
                        help='Dropout probability. Default is 0.8.')
parser.add_argument('--conf',
type=bool,
default=False,
help='Is configuration model evaluation. Default is False.')
parser.add_argument('--shifting',
type=bool,
default=False,
help='Is shifting evaluation. Default is False.')
parser.add_argument('--sbm',
type=bool,
default=False,
help='Is SBM evaluation. Default is False.')
parser.add_argument('--sbm_label',
type=bool,
default=False,
help='Is SBM_label evaluation. Default is False.')
parser.add_argument('--flipped',
type=bool,
default=False,
help='Evaluating with flipped edges? Default is False.')
parser.add_argument('--removed_hubs',
type=bool,
default=False,
help='Evaluating with removed hubs? Default is False.')
parser.add_argument('--added_2hop_edges',
type=bool,
default=False,
help='Evaluating with added 2-hop edges? Default is False.')
parser.add_argument('--label_sbm',
type=bool,
default=False,
help='Evaluating with SBMs created from labels? Default is False.')
parser.add_argument('--heads',
type=int,
default=4,
help='Attention heads. Default is 4.')
parser.add_argument('--attention_dropout',
type=float,
default=0.4,
help='Attention dropout for GAT. Default is 0.4.')
parser.add_argument('--dataset',
default="cora",
help='Dataset name. Default is cora.')
parser.add_argument('--model',
default="gcn",
help='Model name. Default is GCN.')
parser.add_argument('--splits',
type=int,
default=100,
help='Number of random train/validation/test splits. Default is 100.')
parser.add_argument('--runs',
type=int,
default=20,
help='Number of random initializations of the model. Default is 20.')
parser.add_argument('--conf_inits',
type=int,
default=10,
help='Number of configuration model runs. Default is 10.')
parser.add_argument('--sbm_inits',
type=int,
default=10,
help='Number of SBM runs. Default is 10.')
parser.add_argument('--directionality',
default='undirected',
help='Directionality: undirected/directed/reversed. Default is undirected.')
parser.add_argument('--train_examples',
type=int,
default=20,
help='Number of training examples per class. Default is 20.')
parser.add_argument('--val_examples',
type=int,
default=30,
help='Number of validation examples per class. Default is 30.')
args = parser.parse_args()
return args
name2conv = {'gcn': GCNConv, 'sage': SAGEConv, 'gat': GATConv, 'rgcn': RGCNConv, 'rgcn2':RGCN2, 'sgc':SGConv, 'appnp':APPNP, 'cgcn':ClusterGCNConv}
def eval_archs_gat(dataset, dataset_name, channel_size, dropout, lr, wd, heads,attention_dropout,runs,splits,train_examples,val_examples, models=[MonoGAT],isDirected = False):
if isDirected:
models = [MonoGAT]
return eval_gnn(dataset, dataset_name, GATConv, channel_size, dropout, lr, wd, heads=heads, attention_dropout=attention_dropout,
models=models, num_runs=runs, num_splits=splits, test_score=True,
train_examples = train_examples, val_examples = val_examples)
def eval_archs_gcn(dataset, dataset_name, conv, channel_size, dropout, lr, wd, runs,splits,train_examples,val_examples, models=[MonoModel], isDirected=False):
if isDirected:
models = [MonoModel]
return eval_gnn(dataset, dataset_name, conv, channel_size, dropout, lr, wd, heads=1,attention_dropout=0.3, # dummy values for heads and attention_dropout
models=models, num_runs=runs, num_splits=splits,test_score=True,
train_examples = train_examples, val_examples = val_examples)
def eval_archs_appnp(dataset, dataset_name, conv, channel_size, dropout, lr, wd, runs,splits,train_examples,val_examples, models=[MonoAPPNPModel]):
return eval_gnn(dataset, dataset_name, conv, channel_size, dropout, lr, wd, heads=1,attention_dropout=0.3, # dummy values for heads and attention_dropout
models=models, num_runs=runs, num_splits=splits,test_score=True,
train_examples = train_examples, val_examples = val_examples)
def eval_archs_rgcn(dataset, dataset_name, conv, channel_size, dropout, lr, wd, runs,splits,train_examples,val_examples, models=[MonoRGCN]):
return eval_gnn(dataset, dataset_name, conv, channel_size, dropout, lr, wd, heads=1,attention_dropout=0.3, # dummy values for heads and attention_dropout
models=models, num_runs=runs, num_splits=splits,test_score=True,
train_examples = train_examples, val_examples = val_examples)
def eval(model, dataset, dataset_name, channel_size, dropout, lr, wd, heads, attention_dropout, runs, splits, train_examples, val_examples, isDirected):
if model == 'gat':
return eval_archs_gat(dataset, dataset_name, channel_size, dropout, lr, wd, heads, attention_dropout, splits=splits, runs=runs, train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
elif model == 'rgcn' or model == 'rgcn2':
return eval_archs_rgcn(dataset, dataset_name, name2conv[model], channel_size, dropout, lr, wd, splits=splits, runs=runs, train_examples = train_examples, val_examples = val_examples)
elif model == 'appnp':
return eval_archs_appnp(dataset, dataset_name, name2conv[model], channel_size, dropout, lr, wd, splits=splits, runs=runs, train_examples = train_examples, val_examples = val_examples)
else:
return eval_archs_gcn(dataset, dataset_name, name2conv[model], channel_size, dropout, lr, wd, splits=splits, runs=runs, train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
def eval_original(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}', dataset_name,
f'data/graphs/processed/{dataset_name}/{dataset_name}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads, attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
return df_cur
def eval_shuffled_features(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}', dataset_name,
f'data/graphs/processed/{dataset_name}/{dataset_name}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
dataset.x = dataset.x[torch.randperm(dataset.x.size()[0])]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads, attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
return df_cur
def eval_random_features(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}', dataset_name,
f'data/graphs/processed/{dataset_name}/{dataset_name}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
dataset.x = torch.randint(0, 2, dataset.x.shape, dtype=torch.float)
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads, attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
return df_cur
def eval_cm_communities(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(inits):
dataset = GraphDataset(f'data/tmp/{dataset_name}-cm_communities-{i}', dataset_name,
f'data/graphs/cm_communities/{dataset_name}/{dataset_name}_cm_communities_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
# G = to_networkx(dataset)
# G = nx.DiGraph(G)
# node_communities = best_partition(nx.to_undirected(G))
# nx.set_node_attributes(G,node_communities,'label')
# # print(dataset.edge_index)
# old_edges = dataset.edge_index
# G = generate_modified_conf_model(G)
# # dir_path = f'data/graphs/cm_communities/{dataset_name}'
# # if not os.path.exists(dir_path):
# # os.mkdir(dir_path)
# # nx.write_edgelist(G, f'{dir_path}/{dataset_name}_cm_communities_{i}.cites')
# dataset.edge_index = torch.tensor(data=np.array(list(G.edges)).T,dtype=torch.long)
# print((torch.tensor(data=np.array(list(G.edges)).T,dtype=torch.long)-old_edges).abs().sum())
# print(dataset.edge_index)
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads, attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['graph'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_random(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, random_inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(random_inits):
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-random{i}', dataset_name,
f'data/graphs/random/{dataset_name}/{dataset_name}_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['random_num'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_erdos(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, erdos_inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(erdos_inits):
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-erdos{i}', dataset_name,
f'data/graphs/erdos/{dataset_name}/{dataset_name}_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['erdos_num'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_injected_edges(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, inits, num_edges, hubs_experiment):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
last_edge = None
for e in num_edges:
for i in range(inits):
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-injected_{e}_{i}_{hubs_experiment}', dataset_name,
f'data/graphs/injected_edges/{dataset_name}/{dataset_name}_{hubs_experiment}_{e}_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
# print(f'data/graphs/injected_edges/{dataset_name}/{dataset_name}_{hubs_experiment}_{e}_{i}.cites')
# print(dataset.edge_index.shape)
# print(dataset.edge_index)
# if last_edge is None:
# last_edge = dataset.edge_index
# continue
# print((1-last_edge.eq(last_edge).double()).sum())
# continue
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['init_num'] = i
df_cur['injected_edges'] = e
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_injected_edges_degree_cat(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, inits, num_edges, percentile):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
last_edge = None
e = num_edges
hubs_experiment = 'global_edges'
for i in range(inits):
for frm in range(0,100,percentile):
to = frm + percentile
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-injected_{e}_{i}_{hubs_experiment}_{frm}_to_{to}', dataset_name,
f'data/graphs/injected_edges_degree_cat/{dataset_name}/{dataset_name}_{hubs_experiment}_{e}_{i}_{frm}_to_{to}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['init_num'] = i
df_cur['injected_edges'] = e
df_cur['from'] = frm
df_cur['to'] = to
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_injected_edges_constant_nodes(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, inits, control_ratio, edges_per_node, percentile):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
last_edge = None
hubs_experiment = 'global_edges'
for frm in range(0,100,percentile):
for i in range(inits):
for e in edges_per_node:
to = frm + percentile
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-injected_{e}edges_{control_ratio}nodes_{i}_{hubs_experiment}_{frm}_to_{to}', dataset_name,
f'data/graphs/injected_edges_constant_nodes/{dataset_name}/{dataset_name}_global_edges{e}_nodes{control_ratio:.3f}_{i}_{frm}_to_{to}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['init_num'] = i
df_cur['edges_per_node'] = e
df_cur['control_ratio'] = control_ratio
df_cur['from'] = frm
df_cur['to'] = to
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_injected_edges_attack_target(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, inits, control_ratio, edges_per_node, percentile):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
last_edge = None
hubs_experiment = 'global_edges'
for atkfrm in range(0,100,percentile):
for tgtfrm in range(0,100,percentile):
for i in range(inits):
for e in edges_per_node:
atkto = atkfrm + percentile
tgtto = tgtfrm + percentile
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-injected_{e}edges_{control_ratio:.3f}nodes_{i}_{hubs_experiment}_atk{atkfrm}_{atkto}_tgt{tgtfrm}_{tgtto}', dataset_name,
f'data/graphs/injected_edges_attack_target/{dataset_name}/{dataset_name}_global_edges{e}_nodes{control_ratio:.3f}_{i}_atk{atkfrm}_{atkto}_tgt{tgtfrm}_{tgtto}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['init_num'] = i
df_cur['edges_per_node'] = e
df_cur['control_ratio'] = control_ratio
df_cur['atkfrm'] = atkfrm
df_cur['atkto'] = atkto
df_cur['tgtfrm'] = tgtfrm
df_cur['tgtto'] = tgtto
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_injected_edges_sbm(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, inits, num_edges, hubs_experiment):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
last_edge = None
for e in num_edges:
for i in range(inits):
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-injected_sbm_{e}_{i}_{hubs_experiment}', dataset_name,
f'data/graphs/injected_edges_sbm/{dataset_name}/{dataset_name}_{hubs_experiment}_{e}_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['init_num'] = i
df_cur['injected_edges'] = e
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_label_sbm(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples,hubs_experiment):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-label_sbm_{hubs_experiment}', dataset_name,
f'data/graphs/label_sbm/{dataset_name}/{dataset_name}_{hubs_experiment}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads, attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
return df_cur
def eval_conf(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, conf_inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(conf_inits):
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-confmodel{i}', dataset_name,
f'data/graphs/confmodel/{dataset_name}/{dataset_name}_confmodel_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['confmodel_num'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_shifting(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, shifting_inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for change in 'CL':
for inc in [True, False]:
for r in [0.16,0.32,0.64]: #[0.02,0.04,0.08]:
for i in range(shifting_inits):
output_prefix = f'data/graphs/shifting/{dataset_name}/{dataset_name}_shifting'
output_suffix = '.cites'
graph_path = f'{output_prefix}_{change}_{"inc" if inc else "dec"}_r{r:.2f}_{i}{output_suffix}'
if not os.path.exists(graph_path):
print(f'File not found: {graph_path}')
continue
dataset = GraphDataset(f'data/tmp/{dataset_name}_shifting_{change}_{"inc" if inc else "dec"}_r{r:.2f}_{i}{output_suffix}',
dataset_name, graph_path,
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['graph_num'] = i
df_cur['inc'] = inc
df_cur['change'] = change
df_cur['r'] = r
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_sbm(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, sbm_inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(sbm_inits):
print(f'data/graphs/processed/{dataset_name}/{dataset_name}.content')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-sbm{i}', dataset_name,
f'data/graphs/sbm/{dataset_name}/{dataset_name}_sbm_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['sbm_num'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_sbm_label(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, sbm_inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(sbm_inits):
print(f'data/graphs/processed/{dataset_name}/{dataset_name}.content')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-sbm_label{i}', dataset_name,
f'data/graphs/sbm_label/{dataset_name}/{dataset_name}_sbm_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['sbm_num'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_modcm(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, modcm_inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(modcm_inits):
print(f'data/graphs/processed/{dataset_name}/{dataset_name}.content')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-modcm{i}', dataset_name,
f'data/graphs/modcm/{dataset_name}/{dataset_name}_modcm_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['modcm_num'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_modsbm(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, modsbm_inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(modsbm_inits):
print(f'data/graphs/processed/{dataset_name}/{dataset_name}.content')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-modsbm{i}', dataset_name,
f'data/graphs/modsbm/{dataset_name}/{dataset_name}_modsbm_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['modsbm_num'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_reglabel(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, reglabel_inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(reglabel_inits):
print(f'data/graphs/processed/{dataset_name}/{dataset_name}.content')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-reglabel{i}', dataset_name,
f'data/graphs/reglabel/{dataset_name}/{dataset_name}_reglabel_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['reglabel_num'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
################## Synthetic part #####################################
def load_communities(path):
with open(path, 'rb') as handle:
ret = pickle.load(handle)
return ret
def load_labels(path):
label = {}
with open(path, 'r') as handle:
label = {}
for line in handle:
s = line.strip().split()
label[s[0]] = s[-1]
return label
def agg(x):
return len(x.unique())
def calc_uncertainty(df_community,dataset_name,labeled=False,seed=0):
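    # Sketch of what this returns: the normalized information gain
    #   (H(label) - H(label | community)) / H(label),
    # i.e. how much knowing a node's community reduces uncertainty about its label
    # (0: communities carry no label information, 1: they fully determine the label).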
if dataset_name == 'cora':
df_community.label = df_community.label.apply(lambda x : ''.join([c for c in x if c.isupper()]))
if labeled:
df_community = df_community[df_community[f'labeled{seed}']]
communities = df_community.community.unique()
labels = df_community.label.unique()
mtx = df_community.pivot_table(index='community', columns='label',values='node',aggfunc=agg).fillna(0) / len(df_community)
def Pmarg(c):
return len(df_community[df_community.community == c]) / len(df_community)
def Pcond(l,c):
return mtx.loc[c,l]/Pmarg(c)
H = 0
for c in communities:
h = 0
for l in labels:
if Pcond(l,c) == 0:
continue
h += Pcond(l,c) * np.log2(1./Pcond(l,c))
H += h * Pmarg(c)
def Pl(l):
return len(df_community[df_community.label == l]) / len(df_community)
Hl = 0
for l in labels:
if Pl(l) == 0:
continue
Hl += Pl(l) * np.log2(1./Pl(l))
IG = Hl-H
return IG/Hl
def eval_sbm_swap(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, sbm_inits, is_sbm):
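    # Rough outline of this experiment: evaluate the untouched graph first, then swap
    # features/labels between random node pairs in 10% increments (see `step` below),
    # re-evaluating after each batch of swaps and recording how the community/label
    # uncertainty changes as the original node-to-attribute assignment is destroyed.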
step = 10
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(sbm_inits if is_sbm else 1):
print(f'data/graphs/processed/{dataset_name}/{dataset_name}.content')
if is_sbm:
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-sbm{i}-', dataset_name,
f'data/graphs/sbm/{dataset_name}/{dataset_name}_sbm_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)
else:
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-', dataset_name,
f'data/graphs/processed/{dataset_name}/{dataset_name}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)
data = dataset[0]
community = load_communities(f'data/community_id_dicts/{dataset_name}/{dataset_name}_louvain.pickle')
mapping = data.node_name_mapping
label = load_labels(f'data/graphs/processed/{dataset_name}/{dataset_name}.content')
df_community = pd.DataFrame({'dataset':dataset_name, 'node':node, 'community':community[node], 'label':label[node]} for node in community)
df_community['node_id'] = df_community.node.apply(lambda x:mapping[x])
for seed in range(splits):
split = NetworkSplitShchur(dataset, train_examples_per_class=train_examples,early_examples_per_class=0,
val_examples_per_class=val_examples, split_seed=seed)
df_community[f'labeled{seed}'] = df_community.node_id.apply(lambda x: (split.train_mask[x]).numpy())
n = len(data.y)
# select nodes at random
shuffled = np.arange(n)
np.random.shuffle(shuffled)
row = shuffled[:int(n/2)]
col = shuffled[int(n/2):int(n/2)*2]
assert(len(row) == len(col))
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
if is_sbm:
df_cur['sbm_num'] = i
df_cur['ratio'] = 0
df_cur['uncertainty'] = calc_uncertainty(df_community, dataset_name)
ulc = [calc_uncertainty(df_community, dataset_name, True, seed) for seed in range(splits)]
df_cur['uncertainty_known'] = [ulc]
print(df_cur)
df_val = pd.concat([df_val, df_cur])
for ratio in range(0,100,step):
frm = int(ratio/100 * len(row))
to = int((ratio+step)/100 * len(row))
U = row[frm:to]
V = col[frm:to]
for u,v in zip(U,V):
tmp = data.x[v].detach().clone()
data.x[v] = dataset[0].x[u]
data.x[u] = tmp
tmp = data.y[v].detach().clone()
data.y[v] = dataset[0].y[u]
data.y[u] = tmp
tmp = df_community.loc[df_community.node_id == v, 'community'].values[0]
df_community.loc[df_community.node_id == v, 'community'] = df_community.loc[df_community.node_id == u, 'community'].values[0]
df_community.loc[df_community.node_id == u, 'community'] = tmp
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
if is_sbm:
df_cur['sbm_num'] = i
df_cur['ratio'] = ratio+step
df_cur['uncertainty'] = calc_uncertainty(df_community, dataset_name)
ulc = [calc_uncertainty(df_community, dataset_name, True, seed) for seed in range(splits)]
df_cur['uncertainty_known'] = [ulc]
print(df_cur)
df_val = pd.concat([df_val, df_cur])
return df_val
################## END: Synthetic part #####################################
def eval_flipped(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, percentages=range(10,51,10)):
print(percentages)
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = | pd.DataFrame() | pandas.DataFrame |
"""
Winter Olympics system: 1 km BTH GRIB2 gridded products
"""
from pathlib import Path
import pandas as pd
import numpy as np
import click
import dask
from dask.distributed import Client, progress
from dask_mpi import initialize
from reki.format.grib.eccodes import load_bytes_from_file
import logging
logging.basicConfig(
level=logging.INFO,
format='[%(asctime)s][%(name)s][%(levelname)s] %(message)s'
)
logger = logging.getLogger(__name__)
PRODUCTION_LIST = [
{
"field": {"parameter": "HGT"},
"output": {"name": "HGT"}
},
{
"field": {"parameter": "2t"},
"output": {"name": "2T"}
},
{
"field": {"parameter": "2r"},
"output": {"name": "2RH"}
},
{
"field": {"parameter": "10u"},
"output": {"name": "10U"}
},
{
"field": {"parameter": "10v"},
"output": {"name": "10V"}
},
{
"field": {"parameter": "GUST", "level": 10},
"output": {"name": "10FG1"}
},
{
"field": {"parameter": "PWAT"},
"output": {"name": "PWV"}
},
{
"field": {"parameter": "VIS"},
"output": {"name": "VIS"}
},
{
"field": {"parameter": "PRES"},
"output": {"name": "SP"}
},
{
"field": {"parameter": "t", "level_type": "surface"},
"output": {"name": "SKT"}
},
{
"field": {"parameter": "TCDC"},
"output": {"name": "TCC"}
},
{
"field": {"parameter": "LCDC"},
"output": {"name": "LCC"}
},
{
"field": {"parameter": "APCP"},
"output": {"name": "TP"}
},
{
"field": {"parameter": "ASNOW"},
"output": {"name": "SF"}
},
{
"field": {"parameter": "2d"},
"output": {"name": "2D"}
},
{
"field": {"parameter": {"discipline": 0, "parameterCategory": 3, "parameterNumber": 225}, "level_type": "surface"},
"output": {"name": "DEG0L"}
},
{
"field": {"parameter": {"discipline": 0, "parameterCategory": 16, "parameterNumber": 224}, "level_type": "surface"},
"output": {"name": "CRR"}
},
{
"field": {"parameter": "PTYPE"},
"output": {"name": "PTYPE"}
},
*[
{
"field": {"parameter": "t", "level_type": "pl", "level": level},
"output": {"name": f"{level}T"}
} for level in [925, 850, 800, 700, 500]
],
*[
{
"field": {"parameter": "q", "level_type": "pl", "level": level},
"output": {"name": f"{level}Q"}
} for level in [925, 850, 800, 700, 500]
],
*[
{
"field": {"parameter": "gh", "level_type": "pl", "level": level},
"output": {"name": f"{level}GH"}
} for level in [925, 850, 800, 700, 500]
],
*[
{
"field": {"parameter": "u", "level_type": "pl", "level": level},
"output": {"name": f"{level}U"}
} for level in [925, 850, 800, 700, 500]
],
*[
{
"field": {"parameter": "v", "level_type": "pl", "level": level},
"output": {"name": f"{level}V"}
} for level in [925, 850, 800, 700, 500]
],
*[
{
"field": {"parameter": "wz", "level_type": "pl", "level": level},
"output": {"name": f"{level}W"}
} for level in [925, 850, 800, 700, 500]
],
*[
{
"field": {"parameter": "r", "level_type": "pl", "level": level},
"output": {"name": f"{level}R"}
} for level in [925, 850, 800, 700, 500]
],
]
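# Each entry above pairs a GRIB2 field selector with the short name used for the
# output product, e.g. the {"parameter": "2t"} entry maps 2 m temperature to "2T";
# the pressure-level entries are generated for 925/850/800/700/500 hPa.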
def get_grib2_file_name(start_time: pd.Timestamp, forecast_time: pd.Timedelta):
start_time_str = start_time.strftime("%Y%m%d%H")
forecast_time_str = f"{int(forecast_time/ | pd.Timedelta(hours=1) | pandas.Timedelta |
from gnn_benchmark.common.run_db import RunState
import collections
from gnn_benchmark.common.utils import run_entries_to_df, confidence_interval_95
import seaborn as sns
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from matplotlib import lines
import functools
import copy
class Analysis:
task_col = "run_definition.task_name"
time_col = "results.duration"
mem_usage_col = "results.gpu_mem_usage"
def __init__(self, runs_db, metric_col, metric_comp="max"):
self.runs_db = runs_db
self.metric_col = metric_col
assert metric_comp in ["max", "min"]
self.metric_comp = metric_comp
@functools.lru_cache()
def _read_runs(self):
n_runs = self.runs_db.n_runs()
n_finished = self.runs_db.n_runs(RunState.finished)
if n_runs > n_finished:
print(f"\n\nNot all runs finished! "
f"Currently, {n_finished}/{n_runs} are finished ({(100 * n_finished) // n_runs}%)\n\n")
runs_df = run_entries_to_df(self.runs_db.find_finished(), replace_none="None")
return runs_df
def _best_run_indices(self, runs_df, compare_col):
"""Computes the indices of the best runs for the interesting parameter"""
best_indices = []
model_names = runs_df[compare_col].unique()
op = "idxmax" if self.metric_comp == "max" else "idxmin"
for m in model_names:
best_indices.append(
getattr(runs_df[runs_df[compare_col] == m][self.metric_col], op)()
)
return best_indices
def _get_param_of_best_run(self, compare_col, param):
cmp = self.best_runs_df(compare_col)
cmp = cmp.reset_index(level=[1])
tasks = cmp.index.unique()
evaluation_results = {}
for d in tasks:
best_run_rows = cmp[cmp.index == d]
best_run_rows = best_run_rows.set_index(
compare_col, drop=True
)
evaluation_results[d] = best_run_rows[param]
best_summarized = pd.concat(evaluation_results, axis=1)
return best_summarized
def best_results_df(self, compare_col):
"""Gives a high-level overview dataframe containing the performances of the compare_col x the tasks"""
return self._get_param_of_best_run(compare_col, self.metric_col)
def runtimes_df(self, compare_col):
"""Gives a high-level overview dataframe containing the runtimes of the best compare_col x the tasks"""
return self._get_param_of_best_run(compare_col, self.time_col)
def mem_usage_df(self, compare_col):
"""Gives a high-level overview dataframe containing the memory usage of the best compare_col x the tasks"""
return self._get_param_of_best_run(compare_col, self.mem_usage_col) // 1024 // 1024
def best_runs_df(self, compare_col):
"""Returns, for every task/compare_col combination, the best run and its results"""
runs_df = self._read_runs()
tasks = runs_df[self.task_col].unique()
best_hparams = {}
for d in tasks:
best_run_idxes = self._best_run_indices(runs_df[runs_df[self.task_col] == d], compare_col)
best_run_rows = runs_df.loc[best_run_idxes]
best_run_rows = best_run_rows.set_index(
compare_col, drop=True
)
best_hparams[d] = best_run_rows
best_hparams = pd.concat(best_hparams, axis=0)
return best_hparams
def human_readable(self, df):
def edit_string(s):
if s is None:
return s
s = s.replace("run_definition.", "")
s = s.replace("results.", "")
s = s.replace("_metrics.", ".")
return s
df = copy.deepcopy(df)
columns = df.columns
if isinstance(columns, pd.MultiIndex):
for i, level_names in enumerate(columns.levels):
new_names = [edit_string(n) for n in level_names]
columns = columns.set_levels(new_names, level=i)
else:
columns = columns.to_list()
for i, c in enumerate(columns):
c = edit_string(c)
columns[i] = c
df.columns = columns
df.index.name = edit_string(df.index.name)
return df
def ranking_df(self, compare_col):
best_summarized = self.best_results_df(compare_col)
finished_cols = best_summarized.columns[(~pd.isna(best_summarized).any(axis=0)).values.nonzero()]
ranking = best_summarized[finished_cols].rank(ascending=self.metric_comp == "min")
mean_ranking = ranking.mean(axis=1)
ranking["total"] = mean_ranking
return ranking
def relative_performance(self, compare_col):
best_summarized = self.best_results_df(compare_col)
if self.metric_comp == "max":
max_performances = best_summarized.max(axis=0)
else:
max_performances = best_summarized.min(axis=0)
relative_performances = best_summarized / max_performances
mean_relative_performance = relative_performances.mean(axis=1)
relative_performances["mean"] = mean_relative_performance
return relative_performances
def _plot_overfitting_task(self, df, compare_col, metric_x, metric_y, ax=None, jitter_x=0., jitter_y=0.,
same_scale=False):
if ax is None:
fig, ax = plt.subplots(1, 1)
x = np.array(df[metric_x])
x = x + np.random.normal(0, jitter_x, x.shape)
y = np.array(df[metric_y])
y = y + np.random.normal(0, jitter_y, y.shape)
hue = df[compare_col]
ax = sns.scatterplot(x=x, y=y, hue=hue,
alpha=0.5, ax=ax)
ax.set_xlabel(metric_x)
ax.set_ylabel(metric_y)
if same_scale:
lims = list(zip(ax.get_xlim(), ax.get_ylim()))
newlims = min(lims[0]), max(lims[1])
diagonal = lines.Line2D(newlims, newlims, c=(0, 0, 0, 0.1))
ax.add_line(diagonal)
ax.set_xlim(newlims)
ax.set_ylim(newlims)
# Setting equal size
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
asp = abs((xmax - xmin) / (ymax - ymin))
ax.set_aspect(asp)
return ax
def overfitting_fig(self, compare_col, metric_x, metric_y, jitter_x=0., jitter_y=0., same_scale=False):
df = self._read_runs()
tasks = df[self.task_col].unique()
ntasks = len(tasks)
if ntasks <= 3:
ncols = ntasks
nrows = 1
elif ntasks <= 6:
ncols = 3
nrows = 2
else:
nrows = int(np.ceil((len(tasks) / 1.5)**0.5))
ncols = int(np.ceil(nrows * 1.5)) - 1
fig, axes = plt.subplots(nrows, ncols, squeeze=False)
for ax, t in zip(axes.flatten(), tasks):
self._plot_overfitting_task(
df[df[self.task_col] == t], compare_col, metric_x, metric_y, ax=ax, jitter_x=jitter_x,
jitter_y=jitter_y, same_scale=same_scale
)
ax.set_title(t)
handles, labels = axes[0, 0].get_legend_handles_labels()
fig.legend(handles, labels, loc='lower right')
[ax.get_legend().remove() for ax in axes.flatten() if ax.get_legend() is not None]
return fig
def print_default_analysis(self, interesting_col, metric_col):
best_results_df = self.human_readable(self.best_results_df(interesting_col))
best_runs_df = self.human_readable(self.best_runs_df(interesting_col))
ranking = self.human_readable(self.ranking_df(interesting_col))
overfitting_fig = self.overfitting_fig(
compare_col=interesting_col,
metric_x=metric_col.replace("test_metrics", "train_metrics"),
metric_y=metric_col,
same_scale=True
)
relative = self.human_readable(self.relative_performance(interesting_col))
runtimes = self.runtimes_df(interesting_col)
mem_usage = self.human_readable(self.mem_usage_df(interesting_col))
with pd.option_context("display.width", 0):
print("run summary")
print(best_results_df)
print("\n\nconfigs of the best runs")
print(best_runs_df)
print("\n\nranking")
print(ranking)
print("\n\nrelative performance")
print(relative)
print("\n\nruntimes (s)")
print(runtimes)
print("\n\nGPU mem_usage (MB)")
print(mem_usage)
plt.show()
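# A minimal usage sketch of the class above (the column name and the runs_db
# object are placeholders, not taken from this repository's configuration):
#
#   analysis = Analysis(runs_db, metric_col="results.test_metrics.accuracy")
#   analysis.print_default_analysis("run_definition.model", analysis.metric_col)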
class FoldedAnalysis(Analysis):
# TODO: Print out confidences in the overview evaluation
fold_idx_col = "run_definition.fold_idx"
def _unique_hparams(self, df):
run_def_cols = [
c for c in df.columns if c.startswith("run_definition.")
and c != self.fold_idx_col
]
filtered_hparam_columns = []
for h in run_def_cols:
if isinstance(df[h].iloc[0], collections.abc.Hashable):
if len(df[h].unique()) > 1:
filtered_hparam_columns.append(h)
else:
if len(df[h].transform(tuple).unique()) > 1:
filtered_hparam_columns.append(h)
return filtered_hparam_columns
def _create_hparam_hash(self, df, to_keep=None):
# creates a fake "hyperparameter hash" that uniquely defines hparams. This allows us to find all related folds
# we look for all columns in which there are runs that differ, to later build a string representation (for each run)
# of which hyperparameter they differ in.
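        # e.g. two runs that differ only in learning rate and hidden size might map to
        # the strings "0.01|64" and "0.001|128" (column order is whatever pandas yields).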
to_keep = to_keep or set()
filtered_hparam_columns = self._unique_hparams(df)
filtered_hparam_columns = list(set(filtered_hparam_columns).union(set(to_keep)))
return ["|".join(v) for v in df[filtered_hparam_columns].astype(str).values]
def _statistics_by_fold(self, runs_df, to_keep=None):
to_keep = to_keep or []
metrics = [c for c in runs_df.columns if c.startswith("results.")]
run_parameters = [c for c in runs_df.columns if c.startswith("run_definition.")]
def create_new_run(cur_run, agg_vals, extracted_runs):
concats = pd.concat(agg_vals, axis=1).T
mean_dict = concats.mean().to_dict()
std_dict = concats.agg(confidence_interval_95).to_dict()
conf_dict = {k + ".conf": v for k, v in std_dict.items() if np.isfinite(v)}
extracted_runs.append({**cur_run, **mean_dict, **conf_dict})
extracted_runs = []
runs_df["hparam_config"] = self._create_hparam_hash(
runs_df, to_keep=to_keep
)
runs_df = runs_df.sort_values(by="hparam_config")
cur_run = None
agg_vals = []
cur_hparam_config = None
for (_, row), (_, metrics_row) in zip(runs_df.iterrows(), runs_df[metrics].iterrows()):
if cur_hparam_config is None or cur_hparam_config != row["hparam_config"]:
if cur_hparam_config is not None:
create_new_run(cur_run, agg_vals, extracted_runs)
cur_run = row[run_parameters].to_dict()
cur_hparam_config = row["hparam_config"]
agg_vals = []
agg_vals.append(metrics_row)
create_new_run(cur_run, agg_vals, extracted_runs)
return pd.DataFrame(extracted_runs)
@functools.lru_cache()
def best_runs_df(self, compare_col):
"""Returns, for every task/compare_col combination, the best run and its results"""
runs_df = self._read_runs()
runs_df = self._statistics_by_fold(runs_df, to_keep=[compare_col])
tasks = runs_df[self.task_col].unique()
best_hparams = {}
for d in tasks:
best_run_idxes = self._best_run_indices(runs_df[runs_df[self.task_col] == d], compare_col)
best_run_rows = runs_df.loc[best_run_idxes]
best_run_rows = best_run_rows.set_index(
compare_col, drop=True
)
best_hparams[d] = best_run_rows
best_hparams = | pd.concat(best_hparams, axis=0) | pandas.concat |
import pytest
import xmltodict
import os
import pandas as pd
import cv2 as cv
import numpy as np
@pytest.fixture(scope="session")
def create_good_xml(tmp_path_factory):
input_dir = tmp_path_factory.mktemp("input_dir")
output_dir = tmp_path_factory.mktemp("output_dir")
fiducials = [{'@row': 0,
'@col': 0,
'@spot_type': 'Reference, Diagnostic'},
{'@row': 1,
'@col': 0,
'@spot_type': 'Reference, Diagnostic'}
]
spots = [{'@row': 0,
'@col': 1,
'@id': 'spot-1-2',
'@spot_type': 'Diagnostic'},
{'@row': 1,
'@col': 2,
'@id': 'spot-2-3',
'@spot_type': 'Diagnostic'},
{'@row': 3,
'@col': 4,
'@id': 'spot-4-5',
'@spot_type': 'Diagnostic'}
]
repl = [{'@row': 0,
'@col': 1,
'@id': 'H1 HA',
'id': ['spot-1-2', 'spot-2-3']},
{'@row': 0,
'@col': 1,
'@id': 'H3 HA',
'id': ['spot-4-5']}
]
params = {'@rows': 6,
'@cols': 6,
'@vspace': 0.4,
'@hspace': 0.4,
'@expected_diameter': 0.2,
'@background_offset': 1,
'@background_thickness': 1,
'@max_diameter': 1,
'@min_diameter': 1,
}
doc = {'configuration': {'well_configurations': {'configuration': {'array': {}}}}}
# set the hardware parameters
doc['configuration']['well_configurations']['configuration']['array']['layout'] = params
# set the fiducials
doc['configuration']['well_configurations']['configuration']['array']['layout']['marker'] = fiducials
# set the spot IDs
doc['configuration']['well_configurations']['configuration']['array']['spots'] = {}
doc['configuration']['well_configurations']['configuration']['array']['spots']['spot'] = spots
# set the number of replicates
doc['configuration']['well_configurations']['configuration']['array']['spots']['multiplet'] = repl
with open(os.path.join(str(input_dir), 'temp.xml'), 'w', encoding='utf-8') as temp_xml:
temp_xml.write(xmltodict.unparse(doc))
return input_dir, output_dir
@pytest.fixture(scope="session")
def create_good_xlsx(tmp_path_factory):
input_dir = tmp_path_factory.mktemp("input_dir")
output_dir = tmp_path_factory.mktemp("multisero_output_dir")
# make a dummy worksheet with realistic parameters
params_worksheet = {'': '',
'rows': '6',
'columns': '6',
'v_pitch': '0.4',
'h_pitch': '0.45',
'spot_width': '0.2',
'pixel_size': '0.0049'
}
keys = dict()
vals = dict()
for idx, value in enumerate(params_worksheet.keys()):
keys[idx] = value
for idx, value in enumerate(params_worksheet.values()):
vals[idx] = value
p = pd.Series(keys, name="Parameter")
v = pd.Series(vals, name="Value")
params_df = pd.DataFrame([p, v]).T
# make a dummy antigen array layout with realistic fiducials
fiducials = {0: {0: 'Fiducial', 1: '', 2: '', 3: '', 4: '', 5: 'Fiducial'},
1: {0: 'Fiducial', 1: '', 2: '', 3: '', 4: '', 5: ''},
2: {0: 'Positive Control', 1: '', 2: '', 3: '', 4: '', 5: 'Negative Control'},
3: {0: 'Positive Control', 1: '', 2: '', 3: '', 4: '', 5: 'Negative Control'},
4: {0: 'Positive Control', 1: '', 2: '', 3: '', 4: '', 5: 'Negative Control'},
5: {0: 'Fiducial', 1: '', 2: '', 3: '', 4: '', 5: 'Fiducial'}
}
fiducials_df = | pd.DataFrame(fiducials) | pandas.DataFrame |
##############################################################################################################
# This script reads in the two gold standards Litbank and Dekker et al and compares them to
# the .token files created by booknlp
# The data used here consists of only the 12 overlapping novels with their respective overlapping
# parts of the text.
#
# Output:
# The script appends a csv with the Precision, Recall, and F1 for the respective book and respective tool
# and stores the false positives, false negatives, and correct detections
#
#
# BookNLP recognises the following NER tags/types
# (PERSON, NUMBER, DATE, DURATION, MISC, TIME, LOCATION, ORDINAL, MONEY, ORGANIZATION, SET, O)
# Dekker et al.'s collection covers the entity person (i.e. I-PERSON)
# LitBank covers six of the ACE 2005 categories:
# People (PER), Facilities (FAC), Geo-political entities (GPE), Locations (LOC), Vehicles (VEH), Organizations (ORG)
#
# Therefore we map the BookNLP entities to those of Dekker et al. in the following way:
# O stays O and PERSON turns to PER. We ignore rest for character detection (in particular)
##############################################################################################################
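# A minimal sketch (not part of the original pipeline) of how token-level
# precision/recall/F1 could be derived once gold and predicted tags are aligned
# in one dataframe with 'gs' and 'booknlp' columns holding 'PER'/'O' labels;
# calculate_metrics is assumed to implement something along these lines:
#
#   tp = ((df['gs'] == 'PER') & (df['booknlp'] == 'PER')).sum()
#   fp = ((df['gs'] == 'O') & (df['booknlp'] == 'PER')).sum()
#   fn = ((df['gs'] == 'PER') & (df['booknlp'] == 'O')).sum()
#   precision = tp / (tp + fp)
#   recall = tp / (tp + fn)
#   f1 = 2 * precision * recall / (precision + recall)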
import pandas as pd
import csv
import sys
import re
# import own script
from hyphens import *
from calculate_metrics import *
books_mapping = {'AliceInWonderland': '11_alices_adventures_in_wonderland',
'DavidCopperfield': '766_david_copperfield',
'Dracula': '345_dracula',
'Emma': '158_emma',
'Frankenstein': '84_frankenstein_or_the_modern_prometheus',
'HuckleberryFinn': '76_adventures_of_huckleberry_finn',
'MobyDick': '2489_moby_dick',
'OliverTwist': '730_oliver_twist',
'PrideAndPrejudice': '1342_pride_and_prejudice',
'TheCallOfTheWild': '215_the_call_of_the_wild',
'Ulysses': '4300_ulysses',
'VanityFair': '599_vanity_fair'}
passed_variable = sys.argv[1]
booknlp_filepath = "/mnt/book-nlp/data/tokens/overlap/" + str(passed_variable) + ".tokens"
dekker_filepath = "/mnt/data/gold_standard/overlap/dekker_et_al/" + str(passed_variable) + ".gs"
litbank_filepath = "/mnt/data/gold_standard/overlap/litbank/" + books_mapping.get(str(passed_variable)) + ".tsv"
#######################################
# get current annotated book - BookNLP
#######################################
current_file = pd.read_csv(booknlp_filepath, sep='\t', quoting=csv.QUOTE_NONE, usecols=["originalWord","ner"])
current_file = current_file.rename(columns={"originalWord": "original_word", "ner": "booknlp"})
# alternatively convert all PERSON to PER
current_file["booknlp"].replace('PERSON', 'PER', inplace = True)
# replace rest of entities with O
current_file.loc[~current_file["booknlp"].isin(['PER']), "booknlp"] = "O"
# correct hyphenated words from booknlp (note: Stanford CoreNLP only splits on "most hyphens")
current_file = correct_hyphened(current_file)
# reset the index to avoid all parts of hyphenated words sharing the same index
current_file = current_file.reset_index()
del current_file['index']
# remove chapter separation with stars
if str(passed_variable) == "AliceInWonderland":
current_file = current_file.drop(current_file.index[1911:1931])
current_file = current_file.reset_index(drop=True)
#####################################
# get gold standard - Dekker
#####################################
gs_d = | pd.read_csv(dekker_filepath, sep=' ', quoting=csv.QUOTE_NONE, usecols=[0,1], names=["original_word", "gs"]) | pandas.read_csv |
import pandas as pd
from pandas._testing import assert_frame_equal
import pytest
import numpy as np
from scripts.my_normalize_data import (
normalize_expedition_section_cols,
remove_bracket_text,
remove_whitespace,
normalize_columns
)
class XTestNormalizeColumns:
def test_replace_column_name_with_value_from_columns_mapping(self):
columns_mapping = {"aa": "A"}
data = {"aa": [1]}
df = pd.DataFrame(data)
data = {"A": [1]}
expected = pd.DataFrame(data)
normalize_columns(df, columns_mapping)
assert_frame_equal(df, expected)
def test_replace_multiple_column_name_with_value_from_columns_mapping(self):
columns_mapping = {"aa": "A", "b b": "B"}
data = {"aa": [1], "b b": [2]}
df = pd.DataFrame(data)
data = {"A": [1], "B": [2]}
expected = pd.DataFrame(data)
normalize_columns(df, columns_mapping)
assert_frame_equal(df, expected)
def test_does_not_affect_columns_not_in_columns_mapping(self):
columns_mapping = {"aa": "A", "b b": "B"}
data = {"aa": [1], "b b": [2], "cc": [3]}
df = pd.DataFrame(data)
data = {"A": [1], "B": [2], "cc": [3]}
expected = pd.DataFrame(data)
normalize_columns(df, columns_mapping)
assert_frame_equal(df, expected)
def test_does_not_affect_columns_if_columns_mapping_has_no_value(self):
columns_mapping = {"aa": None, "bb": "", "cc": np.nan}
data = {"aa": [1], "b b": [2], "cc": [3]}
df = pd.DataFrame(data)
expected = pd.DataFrame(data)
normalize_columns(df, columns_mapping)
assert_frame_equal(df, expected)
class XTestRemoveBracketText:
def test_removes_text_within_brackets_at_end_of_cell(self):
df = pd.DataFrame(['aa [A]', 'bb [BB]', 'cc [C] ', 'dd [dd] '])
expected = pd.DataFrame(['aa', 'bb', 'cc', 'dd'])
remove_bracket_text(df)
assert_frame_equal(df, expected)
def test_does_not_remove_text_within_brackets_at_start_of_cell(self):
df = pd.DataFrame(['[A] aa', '[BB] bb', '[C] cc ', ' [dd] dd '])
expected = df.copy()
remove_bracket_text(df)
assert_frame_equal(df, expected)
def test_does_not_remove_text_within_brackets_in_middle_of_cell(self):
df = pd.DataFrame(['aa [A] aa', 'bb [BB] bb', ' cc [C] cc ', ' dd [dd] dd '])
expected = df.copy()
remove_bracket_text(df)
assert_frame_equal(df, expected)
def test_removes_letters_numbers_punctuation_within_brackets(self):
df = pd.DataFrame(['aa [A A]', 'bb [BB 123]', 'cc [123-456.] '])
expected = pd.DataFrame(['aa', 'bb', 'cc'])
remove_bracket_text(df)
| assert_frame_equal(df, expected) | pandas._testing.assert_frame_equal |
import tiledb, numpy as np
import json
import sys
import os
import io
from collections import OrderedDict
import warnings
from tiledb import TileDBError
if sys.version_info >= (3,3):
unicode_type = str
else:
unicode_type = unicode
unicode_dtype = np.dtype(unicode_type)
# TODO
# - handle missing values
# - handle extended datatypes
# - implement distributed CSV import
# - implement support for read CSV via TileDB VFS from any supported FS
TILEDB_KWARG_DEFAULTS = {
'ctx': None,
'sparse': True,
'index_dims': None,
'allows_duplicates': True,
'mode': 'ingest',
'attrs_filters': None,
'coords_filters': None,
'full_domain': False,
'tile': None,
'row_start_idx': None,
'fillna': None,
'column_types': None,
'capacity': None,
'date_spec': None,
'cell_order': 'row-major',
'tile_order': 'row-major',
'debug': None,
}
def parse_tiledb_kwargs(kwargs):
args = dict(TILEDB_KWARG_DEFAULTS)
for key in TILEDB_KWARG_DEFAULTS.keys():
if key in kwargs:
args[key] = kwargs.pop(key)
return args
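# e.g. (illustrative only): parse_tiledb_kwargs({'sparse': False, 'nrows': 10})
# returns the defaults with 'sparse' overridden; keys not in TILEDB_KWARG_DEFAULTS
# (such as 'nrows') are left in the caller's kwargs dict.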
class ColumnInfo:
def __init__(self, dtype, repr=None):
self.dtype = dtype
self.repr = repr
def dtype_from_column(col):
import pandas as pd
col_dtype = col.dtype
# TODO add more basic types here
if col_dtype in (np.int32, np.int64, np.uint32, np.uint64, np.float, np.double,
np.uint8):
return ColumnInfo(col_dtype)
# TODO this seems kind of brittle
if col_dtype.base == np.dtype('M8[ns]'):
if col_dtype == np.dtype('datetime64[ns]'):
return ColumnInfo(col_dtype)
elif hasattr(col_dtype, 'tz'):
raise ValueError("datetime with tz not yet supported")
else:
raise ValueError("unsupported datetime subtype ({})".format(type(col_dtype)))
# Pandas 1.0 has StringDtype extension type
if col_dtype.name == 'string':
return ColumnInfo(unicode_dtype)
if col_dtype == 'bool':
return ColumnInfo(np.uint8, repr=np.dtype('bool'))
if col_dtype == np.dtype("O"):
# Note: this does a full scan of the column... not sure what else to do here
# because Pandas allows mixed string column types (and actually has
# problems w/ allowing non-string types in object columns)
inferred_dtype = pd.api.types.infer_dtype(col)
if inferred_dtype == 'bytes':
return ColumnInfo(np.bytes_)
elif inferred_dtype == 'string':
# TODO we need to make sure this is actually convertible
return ColumnInfo(unicode_dtype)
elif inferred_dtype == 'mixed':
raise ValueError(
"Column '{}' has mixed value dtype and cannot yet be stored as a TileDB attribute".format(col.name)
)
raise ValueError(
"Unhandled column type: '{}'".format(
col_dtype
)
)
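# Illustrative behaviour of dtype_from_column (a sketch, not part of the module):
#
#   import pandas as pd
#   dtype_from_column(pd.Series([1, 2, 3])).dtype      # -> dtype('int64')
#   dtype_from_column(pd.Series(["a", "b"])).dtype     # -> unicode_dtype (numpy str dtype)
#   dtype_from_column(pd.Series([True, False])).repr   # -> dtype('bool'), stored as uint8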
# TODO make this a staticmethod on Attr?
def attrs_from_df(df,
index_dims=None, filters=None,
column_types=None, ctx=None):
attr_reprs = dict()
if ctx is None:
ctx = tiledb.default_ctx()
if column_types is None:
column_types = dict()
attrs = list()
for name, col in df.items():
# ignore any column used as a dim/index
if index_dims and name in index_dims:
continue
if name in column_types:
spec_type = column_types[name]
# Handle ExtensionDtype
if hasattr(spec_type, 'type'):
spec_type = spec_type.type
attr_info = ColumnInfo(spec_type)
else:
attr_info = dtype_from_column(col)
attrs.append(tiledb.Attr(name=name, dtype=attr_info.dtype, filters=filters))
if attr_info.repr is not None:
attr_reprs[name] = attr_info.repr
return attrs, attr_reprs
def dim_info_for_column(ctx, df, col, tile=None, full_domain=False, index_dtype=None):
if isinstance(col, np.ndarray):
col_values = col
else:
col_values = col.values
if len(col_values) < 1:
        raise ValueError("Empty column '{}' cannot be used for dimension!".format(
            getattr(col, 'name', '<unnamed>')))
if index_dtype is not None:
dim_info = ColumnInfo(index_dtype)
elif col_values.dtype is np.dtype('O'):
col_val0_type = type(col_values[0])
if col_val0_type in (bytes, unicode_type):
# TODO... core only supports TILEDB_ASCII right now
dim_info = ColumnInfo(np.bytes_)
else:
raise TypeError("Unknown column type not yet supported ('{}')".format(col_val0_type))
else:
dim_info = dtype_from_column(col_values)
return dim_info
def dim_for_column(ctx, name, dim_info, col, tile=None, full_domain=False, ndim=None):
if isinstance(col, np.ndarray):
col_values = col
else:
col_values = col.values
if tile is None:
if ndim is None:
raise TileDBError("Unexpected Nonetype ndim")
if ndim == 1:
tile = 10000
elif ndim == 2:
tile = 1000
elif ndim == 3:
tile = 100
else:
tile = 10
dtype = dim_info.dtype
if full_domain:
if not dim_info.dtype in (np.bytes_, np.unicode):
# Use the full type domain, deferring to the constructor
(dtype_min, dtype_max) = tiledb.libtiledb.dtype_range(dim_info.dtype)
dim_max = dtype_max
if dtype.kind == 'M':
date_unit = np.datetime_data(dtype)[0]
dim_min = np.datetime64(dtype_min + 1, date_unit)
tile_max = np.iinfo(np.uint64).max - tile
if np.abs(np.uint64(dtype_max) - np.uint64(dtype_min)) > tile_max:
dim_max = np.datetime64(dtype_max - tile, date_unit)
            elif dtype == np.int64:
dim_min = dtype_min + 1
else:
dim_min = dtype_min
if dtype.kind != 'M' and np.issubdtype(dtype, np.integer):
tile_max = np.iinfo(np.uint64).max - tile
if np.abs(np.uint64(dtype_max) - np.uint64(dtype_min)) > tile_max:
dim_max = dtype_max - tile
else:
dim_min, dim_max = (None, None)
else:
dim_min = np.min(col_values)
dim_max = np.max(col_values)
if not dim_info.dtype in (np.bytes_, np.unicode):
if np.issubdtype(dtype, np.integer):
dim_range = np.uint64(np.abs(np.uint64(dim_max) - np.uint64(dim_min)))
if dim_range < tile:
tile = dim_range
elif np.issubdtype(dtype, np.float64):
dim_range = dim_max - dim_min
if dim_range < tile:
tile = np.ceil(dim_range)
dim = tiledb.Dim(
name = name,
domain = (dim_min, dim_max),
dtype = dim_info.dtype,
tile = tile
)
return dim
def get_index_metadata(dataframe):
md = dict()
for index in dataframe.index.names:
# Note: this may be expensive.
md[index] = dtype_from_column(dataframe.index.get_level_values(index)).dtype
return md
def create_dims(ctx, dataframe, index_dims,
tile=None, full_domain=False, sparse=None):
import pandas as pd
index = dataframe.index
index_dict = OrderedDict()
index_dtype = None
per_dim_tile = False
if tile is not None:
if isinstance(tile, dict):
per_dim_tile = True
# input check, can't do until after per_dim_tile
if (per_dim_tile and not all(map(lambda x: isinstance(x,(int,float)), tile.values()))) or \
(per_dim_tile is False and not isinstance(tile, (int,float))):
raise ValueError("Invalid tile kwarg: expected int or tuple of ints "
"got '{}'".format(tile))
if isinstance(index, pd.MultiIndex):
for name in index.names:
index_dict[name] = dataframe.index.get_level_values(name)
elif isinstance(index, (pd.Index, pd.RangeIndex, pd.Int64Index)):
if hasattr(index, 'name') and index.name is not None:
name = index.name
else:
index_dtype = np.dtype('uint64')
name = 'rows'
index_dict[name] = index.values
else:
raise ValueError("Unhandled index type {}".format(type(index)))
# create list of dim types
# we need to know all the types in order to validate before creating Dims
dim_types = list()
for idx,(name, values) in enumerate(index_dict.items()):
if per_dim_tile and name in tile:
dim_tile = tile[name]
elif per_dim_tile:
# in this case we fall back to the default
dim_tile = None
else:
# in this case we use a scalar (type-checked earlier)
dim_tile = tile
dim_types.append(dim_info_for_column(ctx, dataframe, values,
tile=dim_tile, full_domain=full_domain,
index_dtype=index_dtype))
if any([d.dtype in (np.bytes_, np.unicode_) for d in dim_types]):
if sparse is False:
raise TileDBError("Cannot create dense array with string-typed dimensions")
elif sparse is None:
sparse = True
d0 = dim_types[0]
if not all(d0.dtype == d.dtype for d in dim_types[1:]):
if sparse is False:
raise TileDBError("Cannot create dense array with heterogeneous dimension data types")
elif sparse is None:
sparse = True
ndim = len(dim_types)
dims = list()
for idx, (name, values) in enumerate(index_dict.items()):
if per_dim_tile and name in tile:
dim_tile = tile[name]
elif per_dim_tile:
# in this case we fall back to the default
dim_tile = None
else:
# in this case we use a scalar (type-checked earlier)
dim_tile = tile
dims.append(dim_for_column(ctx, name, dim_types[idx], values,
tile=dim_tile, full_domain=full_domain, ndim=ndim))
if index_dims:
for name in index_dims:
if per_dim_tile and name in tile:
dim_tile = tile[name]
elif per_dim_tile:
# in this case we fall back to the default
dim_tile = None
else:
# in this case we use a scalar (type-checked earlier)
dim_tile = tile
col = dataframe[name]
            dims.append(
                dim_for_column(ctx, name,
                               dim_info_for_column(ctx, dataframe, col,
                                                   tile=dim_tile,
                                                   full_domain=full_domain),
                               col, tile=dim_tile, full_domain=full_domain,
                               ndim=ndim)
            )
return dims, sparse
def write_array_metadata(array, attr_metadata = None, index_metadata = None):
"""
:param array: open, writable TileDB array
:param metadata: dict
:return:
"""
if attr_metadata:
attr_md_dict = {n: str(t) for n,t in attr_metadata.items()}
array.meta['__pandas_attribute_repr'] = json.dumps(attr_md_dict)
if index_metadata:
index_md_dict = {n: str(t) for n,t in index_metadata.items()}
array.meta['__pandas_index_dims'] = json.dumps(index_md_dict)
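# Example of the metadata written (illustrative values only):
#   '__pandas_attribute_repr' -> '{"flag": "bool"}'
#   '__pandas_index_dims'     -> '{"id": "int64"}'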
def from_dataframe(uri, dataframe, **kwargs):
# deprecated in 0.6.3
warnings.warn("tiledb.from_dataframe is deprecated; please use .from_pandas",
DeprecationWarning)
from_pandas(uri, dataframe, **kwargs)
def from_pandas(uri, dataframe, **kwargs):
"""Create TileDB array at given URI from pandas dataframe
:param uri: URI for new TileDB array
:param dataframe: pandas DataFrame
:param mode: Creation mode, one of 'ingest' (default), 'schema_only', 'append'
:Keyword Arguments: optional keyword arguments for TileDB, see ``tiledb.from_csv``.
:raises: :py:exc:`tiledb.TileDBError`
:return: None
"""
import pandas as pd
args = parse_tiledb_kwargs(kwargs)
ctx = args.get('ctx', None)
tile_order = args['tile_order']
cell_order = args['cell_order']
allows_duplicates = args.get('allows_duplicates', True)
sparse = args['sparse']
index_dims = args.get('index_dims', None)
mode = args.get('mode', 'ingest')
attrs_filters = args.get('attrs_filters', None)
coords_filters = args.get('coords_filters', None)
full_domain = args.get('full_domain', False)
capacity = args.get('capacity', False)
tile = args.get('tile', None)
nrows = args.get('nrows', None)
row_start_idx = args.get('row_start_idx', None)
fillna = args.pop('fillna', None)
date_spec = args.pop('date_spec', None)
column_types = args.pop('column_types', None)
write = True
create_array = True
if mode is not None:
if mode == 'schema_only':
write = False
elif mode == 'append':
create_array = False
elif mode != 'ingest':
raise TileDBError("Invalid mode specified ('{}')".format(mode))
if capacity is None:
capacity = 0 # this will use the libtiledb internal default
if ctx is None:
ctx = tiledb.default_ctx()
if create_array:
if attrs_filters is None:
attrs_filters = tiledb.FilterList(
[tiledb.ZstdFilter(1, ctx=ctx)])
if coords_filters is None:
coords_filters = tiledb.FilterList(
[tiledb.ZstdFilter(1, ctx=ctx)])
if nrows:
if full_domain is None:
full_domain = False
# create the domain and attributes
# if sparse==None then this function may return a default based on types
dims, sparse = create_dims(ctx, dataframe, index_dims, sparse=sparse,
tile=tile, full_domain=full_domain)
domain = tiledb.Domain(
*dims,
ctx = ctx
)
attrs, attr_metadata = attrs_from_df(dataframe,
index_dims=index_dims,
filters=attrs_filters,
column_types=column_types)
# now create the ArraySchema
schema = tiledb.ArraySchema(
domain=domain,
attrs=attrs,
cell_order=cell_order,
tile_order=tile_order,
coords_filters=coords_filters,
allows_duplicates=allows_duplicates,
capacity=capacity,
sparse=sparse
)
tiledb.Array.create(uri, schema, ctx=ctx)
# apply fill replacements for NA values if specified
if fillna is not None:
dataframe.fillna(fillna, inplace=True)
# apply custom datetime parsing to given {'column_name': format_spec} pairs
    # format_spec should be provided using Python format codes:
# https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
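    # e.g. (illustrative only): date_spec={"sale_date": "%Y-%m-%d %H:%M:%S"}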
if date_spec is not None:
if type(date_spec) is not dict:
raise TypeError("Expected 'date_spec' to be a dict, got {}".format(type(date_spec)))
for name, spec in date_spec.items():
dataframe[name] = pd.to_datetime(dataframe[name], format=spec)
if write:
write_dict = {k: v.values for k,v in dataframe.to_dict(orient='series').items()}
index_metadata = get_index_metadata(dataframe)
try:
A = tiledb.open(uri, 'w', ctx=ctx)
if A.schema.sparse:
coords = []
for k in range(A.schema.ndim):
coords.append(dataframe.index.get_level_values(k))
# TODO ensure correct col/dim ordering
A[tuple(coords)] = write_dict
else:
if row_start_idx is None:
row_start_idx = 0
row_end_idx = row_start_idx + len(dataframe)
A[row_start_idx:row_end_idx] = write_dict
if create_array:
write_array_metadata(A, attr_metadata, index_metadata)
finally:
A.close()
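# Usage sketch for from_pandas (the URI and frame below are examples, not part of
# this module):
#
#   import pandas as pd
#   df = pd.DataFrame({"value": [1.0, 2.0, 3.0]},
#                     index=pd.Index([10, 20, 30], name="id"))
#   from_pandas("my_tiledb_array", df, sparse=True)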
def _tiledb_result_as_dataframe(readable_array, result_dict):
import pandas as pd
# TODO missing key in the rep map should only be a warning, return best-effort?
# TODO this should be generalized for round-tripping overloadable types
# for any array (e.g. np.uint8 <> bool)
repr_meta = None
index_dims = None
if '__pandas_attribute_repr' in readable_array.meta:
# backwards compatibility
repr_meta = json.loads(readable_array.meta['__pandas_attribute_repr'])
if '__pandas_index_dims' in readable_array.meta:
index_dims = json.loads(readable_array.meta['__pandas_index_dims'])
indexes = list()
for col_name, col_val in result_dict.items():
if repr_meta and col_name in repr_meta:
new_col = | pd.Series(col_val, dtype=repr_meta[col_name]) | pandas.Series |
import pandas as pd
import pytest
# Q1 how many total number of days does the flights table cover?
def total_no_of_days_in_flight_table(path_of_flights_csv):
try:
flights = pd.read_csv(path_of_flights_csv)
flights["date"] = flights.year.astype('str') + "-" + flights.month.astype('str') + "-" + flights.day.astype(
'str')
y = flights.date.unique().shape[0]
return y
#Answer: 365
    except Exception as error:
        print("Error: {}".format(error))
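# (Illustrative alternative, assuming the standard nycflights13-style schema:
#  pd.to_datetime(flights[['year', 'month', 'day']]).nunique() yields the same count.)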
# Q2 how many departure cities (not airports) does the flights database cover?
def total_no_of_dep_cities_in_flight_table(path_of_flights_csv, path_of_airport_csv):
try:
airports = | pd.read_csv(path_of_airport_csv) | pandas.read_csv |
import os
import re
from pandas import DataFrame
from kloppy import EPTSSerializer
from kloppy.domain import (
Period,
AttackingDirection,
Orientation,
Point,
BallState,
Team,
)
from kloppy.infra.serializers.tracking.epts.metadata import load_metadata
from kloppy.infra.serializers.tracking.epts.reader import (
build_regex,
read_raw_data,
)
from kloppy.infra.utils import performance_logging
class TestEPTSTracking:
def test_regex(self):
base_dir = os.path.dirname(__file__)
with open(f"{base_dir}/files/epts_meta.xml", "rb") as metadata_fp:
metadata = load_metadata(metadata_fp)
regex_str = build_regex(
metadata.data_format_specifications[0],
metadata.player_channels,
metadata.sensors,
)
regex = re.compile(regex_str)
# NOTE: use broken example of FIFA
result = regex.search(
"1779143:,-2.013,-500,100,9.63,9.80,4,5,177,182;-461,-615,-120,99,900,9.10,4,5,170,179;-2638,3478,120,110,1.15,5.20,3,4,170,175;:-2656,367,100:"
)
assert result is not None
def test_read(self):
base_dir = os.path.dirname(__file__)
with open(f"{base_dir}/files/epts_meta.xml", "rb") as metadata_fp:
metadata = load_metadata(metadata_fp)
with open(f"{base_dir}/files/epts_raw.txt", "rb") as raw_data:
iterator = read_raw_data(raw_data, metadata)
with performance_logging("load"):
assert list(iterator)
def test_read_to_pandas(self):
base_dir = os.path.dirname(__file__)
with open(
f"{base_dir}/files/epts_meta.xml", "rb"
) as metadata_fp, open(
f"{base_dir}/files/epts_raw.txt", "rb"
) as raw_data:
metadata = load_metadata(metadata_fp)
records = read_raw_data(
raw_data, metadata, sensor_ids=["heartbeat", "position"]
)
data_frame = DataFrame.from_records(records)
assert "player_1_max_heartbeat" in data_frame.columns
assert "player_1_x" in data_frame.columns
def test_skip_sensors(self):
base_dir = os.path.dirname(__file__)
with open(
f"{base_dir}/files/epts_meta.xml", "rb"
) as metadata_fp, open(
f"{base_dir}/files/epts_raw.txt", "rb"
) as raw_data:
metadata = load_metadata(metadata_fp)
records = read_raw_data(
raw_data, metadata, sensor_ids=["heartbeat"]
)
data_frame = | DataFrame.from_records(records) | pandas.DataFrame.from_records |
import cv2
import os
import time
import sys
import ctypes
import numpy as np
import pandas as pd
from tracking import analytical_tracker
scriptDir = os.path.dirname(__file__)
tracking_folder = os.path.join(scriptDir, '../tracking/')
path = os.path.abspath(tracking_folder)
sys.path.append(path)
tracker = analytical_tracker()
#gaze = gaze_tracker
def export(delta_since_last_change, pupil_l, pupil_r, project_folder, image):
# Set data structure
data = {
'Time Stamp': delta_since_last_change,
'Pupil Left': pupil_l,
'Pupil Right': pupil_r
}
# Convert to panda data frame
df = pd.DataFrame(data, columns = ['Time Stamp', 'Pupil Left', 'Pupil Right'])
path = r"C:\Users\fedel\Desktop\excelData\PhD_data.xlsx"
writer = pd.ExcelWriter(project_folder, engine='openpyxl')
writer.save()
# Convert & export to excel
# Converted to 1 file with different sheet
#df.to_excel(project_folder + 'results.xlsx', sheet_name=image, index=False)
df.to_excel(writer, sheet_name=image, index=False)
writer.save()
def show_image(img_path):
# Get screen size
user32 = ctypes.windll.user32
size_screen = user32.GetSystemMetrics(1), user32.GetSystemMetrics(0)
# Create white background
background = (np.zeros((int(size_screen[0]), int(size_screen[1]), 3)) + 255).astype('uint8')
# Calculate midpoint of screen
mid_x = int(size_screen[0]) / 2
mid_y = int(size_screen[1]) / 2
# Get images
#img = cv2.imread(img_path)
#from https://stackoverflow.com/questions/31656366/cv2-imread-and-cv2-imshow-return-all-zeros-and-black-image
img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
if img.shape[2] == 4: # we have an alpha channel
a1 = ~img[:,:,3] # extract and invert that alpha
img = cv2.add(cv2.merge([a1,a1,a1,a1]), img) # add up values (with clipping)
img = cv2.cvtColor(img, cv2.COLOR_RGBA2RGB) # strip alpha channel
# Get height & width from image
img_width, img_height = img.shape[:2]
    # Calculate middle of screen
yoff = round((mid_y - img_height)/2)
#xoff = round((mid_x + img_width/8))
xoff = round((mid_x + img_width/8))
# Creating overlay
dst = background.copy()
dst[yoff: yoff + img_height, xoff:xoff + img_width] = img
# Show images
cv2.namedWindow("Display", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("Display", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
cv2.imshow('Display', dst)
def cycle_images(final_folder_path):
# Get file path from current data directory
filename = "original_video.avi"
currentdir_folder = os.path.join(final_folder_path, filename)
project_folder = os.path.abspath(currentdir_folder)
exel_currentdir_folder = os.path.join(final_folder_path, "results.xlsx")
exel_project_folder = os.path.abspath(exel_currentdir_folder)
TIME = 3
    t = TIME * 1000  # transform to milliseconds
# Get file path
scriptDir = os.path.dirname(__file__)
imgdir_folder = os.path.join(scriptDir, '../img/')
img_folder = os.path.abspath(imgdir_folder)
# Get images from folder
images = os.listdir(img_folder)
cnt = len(images)
idx = 1
cap = cv2.VideoCapture(0)
fps = 15
# Video Codec
#fourcc = cv2.VideoWriter_fourcc(*'XVID')
#output = cv2.VideoWriter(project_folder, fourcc, fps, (640, 480))
prev_time = time.time()
delta_since_last_change = 0
#writer = pd.ExcelWriter(final_folder_path + 'results.xlsx', engine='openpyxl')
writer = pd.ExcelWriter(exel_project_folder, engine='openpyxl')
show_image(os.path.join(img_folder, images[0]))
pupil_l_x = []
pupil_l_y = []
pupil_r_x = []
pupil_r_y = []
deltas = []
while (idx < cnt):
ret, frame = cap.read()
if not ret:
print("Can't receive frame (stream end?). Exiting ...")
break
# pupils = tracker.detect_in_frame(tracker,frame)
#pupils = gaze.track_in_frame(gaze,frame)
frame = cv2.flip(frame, 1)
tracker.refresh(frame)
try:
pupils = (tracker.pupil_left_screen_coords(),tracker.pupil_right_screen_coords())
#print(pupils)
# output.write(frame)
#if pupils[0] == None:
# pupil_l.append((0,0))
#else:
# pupil_l.append(pupils[0])
#if pupils[1] == None:
# pupil_r.append((0,0))
#else:
# pupil_r.append(pupils[1])
pupil_l_x.append(pupils[0][0])
pupil_l_y.append(pupils[0][1])
pupil_r_x.append(pupils[1][0])
pupil_r_y.append(pupils[1][1])
#print(pupil_l)
#cv2.circle(frame,(int(pupils[0][0]),int(pupils[0][1])),10,(0, 255, 0),3)
#cv2.circle(frame,(int(pupils[1][0]),int(pupils[1][1])),10,(255, 0, 0),3)
#cv2.imshow('frame', frame)
except Exception:
pupil_l_x.append(0)
pupil_l_y.append(0)
pupil_r_x.append(0)
pupil_r_y.append(0)
delta = time.time() - prev_time
delta_since_last_change += delta
prev_time = time.time()
deltas.append(delta)
if delta_since_last_change >= TIME:
img_path = os.path.join(img_folder, images[idx])
if images[idx] == "Twit(512,512).png":
# Get screen size
user32 = ctypes.windll.user32
size_screen = user32.GetSystemMetrics(1), user32.GetSystemMetrics(0)
# Create white background
background = (np.zeros((int(size_screen[0]), int(size_screen[1]), 3)) + 255).astype('uint8')
dst = background.copy()
cv2.circle(dst,(512,512),10,(0,0,255),-1)
cv2.namedWindow("Display", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("Display", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
cv2.imshow('Display', dst)
else:
show_image(img_path)
print(images[idx])
# Set data structure
data = {
'Time Stamp': deltas,
'Pupil Left x': pupil_l_x,
'Pupil Left y': pupil_l_y,
'Pupil Right x': pupil_r_x,
'Pupil Right y': pupil_r_y
}
# Convert to panda data frame
df = | pd.DataFrame(data, columns = ['Time Stamp', 'Pupil Left x','Pupil Left y', 'Pupil Right x','Pupil Right y']) | pandas.DataFrame |
import pandas as pd
import time
from datetime import datetime
from datetime import timedelta
import numpy as np
import os
import mysql.connector
import pyodbc
from mktcalendar import *
def get_uni(start, end, lookback, uni_size=1400):
unidate = start - TDay * lookback
t_low_price = 2.0
t_high_price = 500.0
t_min_advp = 1000000.0
sql = ("SELECT g.gvkey, t.tradingItemId 'tid', t.tickerSymbol symbol,"
" t.tradingItemStatusId status, ctr.country,"
" curr.currencyName currency, m.marketCap mkt_cap, p.priceClose 'close'"
" FROM ciqTradingItem t"
" INNER JOIN ciqSecurity s ON t.securityId =s.securityId"
" INNER JOIN ciqCompany co ON s.companyId =co.companyId"
" INNER JOIN ciqCountryGeo ctr ON ctr.countryId =co.countryId"
" INNER JOIN ciqCurrency curr ON t.currencyId =curr.currencyId"
" INNER JOIN ciqMarketCap m ON co.companyId=m.companyId"
" INNER JOIN ciqGvKeyIID g ON g.objectId=t.tradingItemId"
" INNER JOIN ciqPriceEquity2 p ON p.tradingItemId=t.tradingItemId"
" AND p.pricingDate = m.pricingDate"
" WHERE ctr.country= 'United States'"
" AND curr.currencyName = 'US Dollar'"
" AND s.securitySubTypeId = 1"
" AND m.pricingDate = '%s'"
% unidate)
cnxn_s = 'Trusted_Connection=yes;Driver={ODBC Driver 17 for SQL Server};Server=dbDevCapIq;Database=xpressfeed'
cnxn = pyodbc.connect(cnxn_s)
uni_df = pd.read_sql(sql, cnxn, index_col=['gvkey', 'tid'])
cnxn.close()
print("Universe size (US/USD): %d" % len(uni_df))
trailingSt = unidate - TDay * 21
trailingEd = unidate - TDay
sql = ("SELECT g.gvkey, p.tradingItemId 'tid', p.pricingDate, p.volume"
" FROM ciqPriceEquity2 p"
" INNER JOIN ciqGvKeyIID g ON g.objectId = p.tradingItemId"
" WHERE p.pricingDate BETWEEN '%s' AND '%s'"
" AND g.gvkey IN %s"
" AND p.tradingItemId In %s"
% (trailingSt, trailingEd, tuple(uni_df.index.levels[0]), tuple(uni_df.index.levels[1])))
cnxn = pyodbc.connect(cnxn_s)
price_df = pd.read_sql(sql, cnxn, index_col=['gvkey', 'tid'])
cnxn.close()
price_df = pd.merge(uni_df, price_df, on=['gvkey', 'tid'])
uni_df['tradable_med_volume_21'] = price_df['volume'].median(level=['gvkey', 'tid'])
print("Universe size (prices): %d" % len(uni_df))
uni_df = uni_df[(uni_df['close'] > t_low_price) & (uni_df['close'] < t_high_price)]
print("Universe size (price range): %d" % len(uni_df))
uni_df['mdvp'] = uni_df['tradable_med_volume_21'] * uni_df['close']
uni_df = uni_df[uni_df['mdvp'] > t_min_advp]
print("Universe size (mdvp): %d" % len(uni_df))
uni_df.reset_index(level=1, inplace=True)
uni_df.sort_values('mdvp', ascending=False, inplace=True)
uni_df = uni_df[~uni_df.index.duplicated()]
print("Universe size (duplicates): %d" % len(uni_df))
sql = ("SELECT gvkey, gics_sector sector, gics_industry_group 'group'"
" FROM factors.stock_info_v6c"
" WHERE trade_date = '%s'"
% unidate)
cnxn = mysql.connector.connect(host='jv-research', port=3306, user='mek_limited', password='<PASSWORD>$')
secdata_df = pd.read_sql(sql, cnxn)
cnxn.close()
secdata_df['gvkey'] = [element[:-3] for element in secdata_df['gvkey']]
uni_df = pd.merge(uni_df, secdata_df, on='gvkey')
print("Universe size (secdata): %d" % len(uni_df))
uni_df = uni_df[uni_df['group'] != 3520]
print("Universe size (bio): %d" % len(uni_df))
uni_df['rank'] = uni_df['mkt_cap'].fillna(0).rank(ascending=False)
uni_df = uni_df[uni_df['rank'] <= uni_size]
print("Universe size (mktcap): %d" % len(uni_df))
uni_df.set_index('gvkey', inplace=True)
end_s = end.strftime("%Y%m%d")
dir = './%s/' % end_s
if not os.path.exists(dir):
os.makedirs(dir)
uni_df.to_csv(r"%suni_df.csv" % dir, "|")
return uni_df[['symbol', 'sector', 'tid']]
def load_barra(uni_df, start, end):
date = end - TDay
print("Loading barra...")
sql1 = ("SELECT trade_date 'date', gvkey, MO1_4 momentum, BP btop, DYLD divyild,"
" SIZE 'size', EP growth"
" FROM factors.loadings_v6c_xmkt "
" WHERE trade_date BETWEEN '%s' AND '%s'"
% (start, date))
sql2 = ("SELECT trade_date 'date', gvkey, gics_industry_group ind1"
" FROM factors.stock_info_v6c i"
" WHERE trade_date BETWEEN '%s' AND '%s'"
% (start, date))
cnxn = mysql.connector.connect(host='jv-research', port=3306, user='mek_limited', password='<PASSWORD>$')
barra_df1 = pd.read_sql(sql1, cnxn)
barra_df2 = pd.read_sql(sql2, cnxn)
cnxn.close()
barra_df = pd.merge(barra_df1, barra_df2, on=['date', 'gvkey'])
barra_df['gvkey'] = [element[:-3] for element in barra_df['gvkey']]
barra_df = pd.merge(barra_df, uni_df, on='gvkey')
barra_df.set_index(['date', 'gvkey'], inplace=True)
end_s = end.strftime("%Y%m%d")
dir = './%s/' % end_s
if not os.path.exists(dir):
os.makedirs(dir)
barra_df.to_csv(r"%sbarra_df.csv" % dir, "|")
return barra_df
def load_price(uni_df, start, end):
print("Loading daily info...")
date = end - TDay
sql = ("SELECT DISTINCT g.gvkey, p.tradingItemId 'tid', p.priceOpen 'open',"
" p.priceClose 'close', p.priceHigh 'high', p.priceLow 'low', p.volume,"
" sp.latestSplitFactor 'split', d.divAmount 'div', p.pricingDate 'date',"
" m.marketCap 'mkt_cap'"
" FROM ciqPriceEquity2 p"
" INNER JOIN ciqGvKeyIID g ON g.objectId=p.tradingItemId"
" INNER JOIN ciqTradingItem t ON t.tradingItemId=p.tradingItemId"
" INNER JOIN ciqSecurity s ON t.securityId =s.securityId"
" INNER JOIN ciqMarketCap m ON s.companyId=m.companyId"
" AND m.pricingDate = p.pricingDate"
" LEFT JOIN ciqSplitCache sp ON sp.tradingItemId = p.tradingItemId"
" AND sp.SplitDate = p.pricingDate"
" LEFT JOIN ciqDividendCache d ON d.tradingItemId = p.tradingItemId"
" AND d.dividendDate = p.pricingDate"
" WHERE p.pricingDate BETWEEN '%s' AND '%s'"
" AND g.gvkey IN %s"
" AND p.tradingItemId In %s"
% (start, date, tuple(uni_df.index.values), tuple(uni_df['tid'].values)))
cnxn_s = 'Trusted_Connection=yes;Driver={ODBC Driver 17 for SQL Server};Server=dbDevCapIq;Database=xpressfeed'
cnxn = pyodbc.connect(cnxn_s)
price_df = | pd.read_sql(sql, cnxn) | pandas.read_sql |
# This is an exploratory test of Geocodio API services. API key: <KEY>
# The website can be found here:https://www.geocod.io/
import pandas as pd
from geocodio import GeocodioClient
from Address_Dictionary import address_dictionary_1 #,address_dictionary_2
from os import walk
from pathlib import Path
def clean_data(df):
# setup dataframe and geocodio client
temp = df.copy()
client = GeocodioClient("<KEY>")
# add additional columns
temp['Cleaned_Location'] = temp['Location']
temp['Coordinates'] = ''
temp['Error_Logging'] = ''
# retrieve all addresses previously geocoded
coordinate_df = pd.read_csv('Coordinate_Dictionary.csv')
for i, row in temp.iterrows():
# use address dictionary for coordinates if location exists in address dictionary
location = temp.loc[i,'Location']
if location in coordinate_df['Location'].unique():
temp.loc[i,'Cleaned_Location'] = coordinate_df.loc[coordinate_df['Location'] == location, 'Cleaned_Location'].iloc[0]
temp.loc[i,'Coordinates'] = coordinate_df.loc[coordinate_df['Location'] == location,'Coordinates'].iloc[0]
continue
# add milwaukee, WI to address if not already present
if 'MKE' in temp.loc[i,'Cleaned_Location']:
temp.loc[i,'Cleaned_Location'] = temp.loc[i,'Cleaned_Location'].replace('MKE',' MILWAUKEE, WI')
else:
temp.loc[i,'Cleaned_Location'] = temp.loc[i,'Cleaned_Location']+ ', MILWAUKEE, WI'
# clean addresses of common abbreviations and typos
temp.loc[i,'Cleaned_Location'] = address_dictionary_1(temp.loc[i,'Cleaned_Location'])
# get and record coordinates of given address
try:
geocoded_location = client.geocode(temp.loc[i,'Cleaned_Location'])
# catch error when our api key has run out of calls
except:
print('No calls remaining...')
# save all geocoded addresses
temp = temp.loc[0:i-1,:]
coordinate_df.to_csv('Coordinate_Dictionary.csv',index=False,mode='w')
return temp
# check whether data exists (works perfectly fine, but can be improved)
if len(geocoded_location['results']) > 0:
coordinates = str(geocoded_location['results'][0]['location'])
# add new coordinates to coordinate dictionary
coordinate_entry = pd.DataFrame({'Location':[temp.loc[i,'Location']],
'Cleaned_Location':[temp.loc[i,'Cleaned_Location']],
'Coordinates':[coordinates]
})
coordinate_df = coordinate_df.append(coordinate_entry, ignore_index=True)
# log errors
else:
coordinates = ''
temp.loc[i,'Error_Logging'] = str(geocoded_location)
error = pd.DataFrame({'location':[temp.loc[i,'Location']],
'cleaned_location':[temp.loc[i,'Cleaned_Location']],
'geocoding_result':[temp.loc[i,'Error_Logging']]})
error.to_csv('../geocoding_data/Error_Logging.csv', mode='a', header=False)
temp.loc[i,'Coordinates'] = coordinates
coordinate_df.to_csv('Coordinate_Dictionary.csv',index=False,mode='w')
return temp
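# Illustrative call (the file name is an assumption, not a real export):
#   geocoded = clean_data(pd.read_csv('../data/example_calls.csv'))
#   geocoded.to_csv('../geocoding_data/example_calls_geocoded.csv', index=False)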
data_path = '../data/'
geocoding_data_path = '../geocoding_data/'
f = []
for (dir_path, dir_names, file_names) in walk(data_path):
f.extend(file_names)
break
if 'readme.txt' in file_names:
file_names.remove('readme.txt')
for file_name in file_names:
data_df = | pd.read_csv(data_path+file_name) | pandas.read_csv |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
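    # A concrete subclass is expected to bind these to a specific parser engine,
    # e.g. (illustrative only, mirroring how the engine-specific test classes do it):
    #
    #   class TestCParser(ParserTests, tm.TestCase):
    #       def read_csv(self, *args, **kwds):
    #           kwds['engine'] = 'c'
    #           return read_csv(*args, **kwds)
    #
    #       def read_table(self, *args, **kwds):
    #           kwds['engine'] = 'c'
    #           return read_table(*args, **kwds)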
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format the values are written as '-999.000', so we
# can't match the string '-999.0' exactly and need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
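# build row i with NA token v in column i and empty strings everywhere else;
# since '' is itself a default NA value, every cell should read back as NaN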
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
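# NaT values (written as empty strings by to_csv) should round-trip and
# come back as NaT when the column is parsed as datetime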
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
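# skiprows given as a list of row numbers and as an integer count should
# produce the same result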
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
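# skipping arbitrary data rows (not just leading rows) should match a file
# with those rows removed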
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
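# two-digit years combined from separate date/time columns should be
# parsed into full datetimes (09 -> 2009)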
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# the header declares too few columns: line 3 has 7 fields but only 6 names
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
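# when chunksize is given, get_chunk() with no argument should return a
# chunk of that size (2 rows here)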
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows=4 drops the first four lines (including the comment lines);
# header=1 then takes the second remaining line ('A,B,C') as the header
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
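# note: 'day_first' (not the real 'dayfirst' keyword) is passed to the
# parser below, so parsing fails and read_csv is expected to raise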
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat artificial, since the code never actually sees bytes here
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
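# convert_days_sentinel is intentionally identical to convert_days; it is
# used for a second read to check that both runs produce the same frame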
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
fh = StringIO(csv)
result2 = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days_sentinel},
na_values=['', None])
tm.assert_frame_equal(result, result2)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
def test_usecols_index_col_conflict(self):
# Issue 4201 Test that index_col as integer reflects usecols
data = """SecId,Time,Price,P2,P3
10000,2013-5-11,100,10,1
500,2013-5-12,101,11,1
"""
expected = DataFrame({'Price': [100, 101]}, index=[
datetime(2013, 5, 11), datetime(2013, 5, 12)])
expected.index.name = 'Time'
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'P3': [1, 1], 'Price': (100, 101), 'P2': (10, 11)})
expected = expected.set_index(['Price', 'P2'])
df = self.read_csv(StringIO(data), usecols=[
'Price', 'P2', 'P3'], parse_dates=True, index_col=['Price', 'P2'])
tm.assert_frame_equal(expected, df)
def test_chunks_have_consistent_numerical_type(self):
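# the ~1,000,000 rows are large enough to be read in several internal
# chunks; mixing ints and floats across chunks should still yield a
# float64 column without a dtype warning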
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
self.assertTrue(type(df.a[0]) is np.float64)
self.assertEqual(df.a.dtype, np.float)
def test_warn_if_chunks_have_mismatched_type(self):
# See test in TestCParserLowMemory.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# 5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_integer_overflow_bug(self):
# #2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
self.assertTrue(result[0].dtype == np.float64)
result = self.read_csv(StringIO(data), header=None, sep='\s+')
self.assertTrue(result[0].dtype == np.float64)
def test_catch_too_many_names(self):
# Issue 5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
tm.assertRaises(Exception, read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# GH 6607, GH 3374
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep='\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_nrows_and_chunksize_raises_notimplemented(self):
data = 'a b c'
self.assertRaises(NotImplementedError, self.read_csv, StringIO(data),
nrows=10, chunksize=5)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# GH 10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
self.assertEqual(len(result), 2)
# GH 9735
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = pd.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = pd.DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# GH 10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# GH 10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_index_col_scenarios(self):
data = 'x,y,z'
# None, no index
index_col, expected = None, DataFrame([], columns=list('xyz'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# False, no index
index_col, expected = False, DataFrame([], columns=list('xyz'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, first column
index_col, expected = 0, DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, not first column
index_col, expected = 1, DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, first column
index_col, expected = 'x', DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, not the first column
index_col, expected = 'y', DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# list of int
index_col, expected = [0, 1], DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str
index_col = ['x', 'y']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of int, reversed sequence
index_col = [1, 0]
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str, reversed sequence
index_col = ['y', 'x']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
def test_empty_with_index_col_false(self):
# GH 10413
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame([], columns=['x', 'y'])
tm.assert_frame_equal(result, expected)
def test_float_parser(self):
# GH 9565
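# scientific notation, trailing-dot floats and +/-inf should all parse as floats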
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = pd.DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['ID'].dtype == object)
self.assertRaises((OverflowError, pandas.parser.OverflowError),
self.read_csv, StringIO(data),
converters={'ID': np.int64})
# Just inside int64 range: parse as integer
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([x])
tm.assert_frame_equal(result, expected)
# Just outside int64 range: parse as string
too_big = i_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# GH 9535
expected = pd.DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
result = pd.read_csv(StringIO('foo,bar\n'), nrows=10, as_recarray=True)
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
result = next(
iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10, as_recarray=True)))
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
def test_eof_states(self):
# GH 10728 and 10548
# With skip_blank_lines = True
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# GH 10728
# WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# GH 10548
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
class TestPythonParser(ParserTests, tm.TestCase):
def test_negative_skipfooter_raises(self):
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
with tm.assertRaisesRegexp(ValueError,
'skip footer cannot be negative'):
df = self.read_csv(StringIO(text), skipfooter=-1)
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_table(*args, **kwds)
def test_sniff_delimiter(self):
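# sep=None makes the python engine detect the delimiter from the data
# (here '|'); the result should match an explicit delimiter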
text = """index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data = self.read_csv(StringIO(text), index_col=0, sep=None)
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
data2 = self.read_csv(StringIO(text), index_col=0, delimiter='|')
tm.assert_frame_equal(data, data2)
text = """ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data3 = self.read_csv(StringIO(text), index_col=0,
sep=None, skiprows=2)
tm.assert_frame_equal(data, data3)
text = u("""ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
""").encode('utf-8')
s = BytesIO(text)
if compat.PY3:
# somewhat artificial, since the code never actually sees bytes here
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
data4 = self.read_csv(s, index_col=0, sep=None, skiprows=2,
encoding='utf-8')
tm.assert_frame_equal(data, data4)
def test_regex_separator(self):
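# with a regex separator the header row yields 4 names while the data rows
# have 5 fields, so the first column becomes an unnamed index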
data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
df = self.read_table(StringIO(data), sep='\s+')
expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
index_col=0)
self.assertIsNone(expected.index.name)
tm.assert_frame_equal(df, expected)
def test_1000_fwf(self):
data = """
1 2,334.0 5
10 13 10.
"""
expected = [[1, 2334., 5],
[10, 13, 10]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (3, 11), (12, 16)],
thousands=',')
tm.assert_almost_equal(df.values, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_comment_fwf(self):
data = """
1 2. 4 #hello world
5 NaN 10.0
"""
expected = [[1, 2., 4],
[5, np.nan, 10.]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (4, 9), (9, 25)],
comment='#')
tm.assert_almost_equal(df.values, expected)
def test_fwf(self):
data_expected = """\
2011,58,360.242940,149.910199,11950.7
2011,59,444.953632,166.985655,11788.4
2011,60,364.136849,183.628767,11806.2
2011,61,413.836124,184.375703,11916.8
2011,62,502.953953,173.237159,12468.3
"""
expected = self.read_csv(StringIO(data_expected), header=None)
data1 = """\
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
df = read_fwf(StringIO(data1), colspecs=colspecs, header=None)
tm.assert_frame_equal(df, expected)
data2 = """\
2011 58 360.242940 149.910199 11950.7
2011 59 444.953632 166.985655 11788.4
2011 60 364.136849 183.628767 11806.2
2011 61 413.836124 184.375703 11916.8
2011 62 502.953953 173.237159 12468.3
"""
df = read_fwf(StringIO(data2), widths=[5, 5, 13, 13, 7], header=None)
tm.assert_frame_equal(df, expected)
# From <NAME>: apparently some non-space filler characters can
# be seen, this is supported by specifying the 'delimiter' character:
# http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html
data3 = """\
201158~~~~360.242940~~~149.910199~~~11950.7
201159~~~~444.953632~~~166.985655~~~11788.4
201160~~~~364.136849~~~183.628767~~~11806.2
201161~~~~413.836124~~~184.375703~~~11916.8
201162~~~~502.953953~~~173.237159~~~12468.3
"""
df = read_fwf(
StringIO(data3), colspecs=colspecs, delimiter='~', header=None)
tm.assert_frame_equal(df, expected)
with tm.assertRaisesRegexp(ValueError, "must specify only one of"):
read_fwf(StringIO(data3), colspecs=colspecs, widths=[6, 10, 10, 7])
with tm.assertRaisesRegexp(ValueError, "Must specify either"):
read_fwf(StringIO(data3), colspecs=None, widths=None)
def test_fwf_colspecs_is_list_or_tuple(self):
with tm.assertRaisesRegexp(TypeError,
'column specifications must be a list or '
'tuple.+'):
pd.io.parsers.FixedWidthReader(StringIO(self.data1),
{'a': 1}, ',', '#')
def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(self):
with tm.assertRaisesRegexp(TypeError,
'Each column specification must be.+'):
read_fwf(StringIO(self.data1), [('a', 1)])
def test_fwf_colspecs_None(self):
# GH 7079
data = """\
123456
456789
"""
colspecs = [(0, 3), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, 3), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(0, None), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, None), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
def test_fwf_regression(self):
# GH 3594
# turns out 'T060' is parsable as a datetime slice!
tzlist = [1, 10, 20, 30, 60, 80, 100]
ntz = len(tzlist)
tcolspecs = [16] + [8] * ntz
tcolnames = ['SST'] + ["T%03d" % z for z in tzlist[1:]]
data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192
2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869
2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657
2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379
2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039
"""
df = read_fwf(StringIO(data),
index_col=0,
header=None,
names=tcolnames,
widths=tcolspecs,
parse_dates=True,
date_parser=lambda s: datetime.strptime(s, '%Y%j%H%M%S'))
for c in df.columns:
res = df.loc[:, c]
self.assertTrue(len(res))
def test_fwf_for_uint8(self):
data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71"""
df = read_fwf(StringIO(data),
colspecs=[(0, 17), (25, 26), (33, 37),
(49, 51), (58, 62), (63, 1000)],
names=['time', 'pri', 'pgn', 'dst', 'src', 'data'],
converters={
'pgn': lambda x: int(x, 16),
'src': lambda x: int(x, 16),
'dst': lambda x: int(x, 16),
'data': lambda x: len(x.split(' '))})
expected = DataFrame([[1421302965.213420, 3, 61184, 23, 40, 8],
[1421302964.226776, 6, 61442, None, 71, 8]],
columns=["time", "pri", "pgn", "dst", "src", "data"])
expected["dst"] = expected["dst"].astype(object)
tm.assert_frame_equal(df, expected)
def test_fwf_compression(self):
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest("Need gzip and bz2 to run this test")
data = """1111111111
2222222222
3333333333""".strip()
widths = [5, 5]
names = ['one', 'two']
expected = read_fwf(StringIO(data), widths=widths, names=names)
if compat.PY3:
data = bytes(data, encoding='utf-8')
comps = [('gzip', gzip.GzipFile), ('bz2', bz2.BZ2File)]
for comp_name, compresser in comps:
with tm.ensure_clean() as path:
tmp = compresser(path, mode='wb')
tmp.write(data)
tmp.close()
result = read_fwf(path, widths=widths, names=names,
compression=comp_name)
tm.assert_frame_equal(result, expected)
def test_BytesIO_input(self):
if not compat.PY3:
raise nose.SkipTest(
"Bytes-related test - only needs to work on Python 3")
result = pd.read_fwf(BytesIO("שלום\nשלום".encode('utf8')), widths=[
2, 2], encoding='utf8')
expected = pd.DataFrame([["של", "ום"]], columns=["של", "ום"])
tm.assert_frame_equal(result, expected)
data = BytesIO("שלום::1234\n562::123".encode('cp1255'))
result = pd.read_table(data, sep="::", engine='python',
encoding='cp1255')
expected = pd.DataFrame([[562, 123]], columns=["שלום", "1234"])
tm.assert_frame_equal(result, expected)
def test_verbose_import(self):
text = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
buf = StringIO()
sys.stdout = buf
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True)
self.assertEqual(
buf.getvalue(), 'Filled 3 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
buf = StringIO()
sys.stdout = buf
text = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True, index_col=0)
self.assertEqual(
buf.getvalue(), 'Filled 1 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
def test_float_precision_specified(self):
# Should raise an error if float_precision (C parser option) is
# specified
with tm.assertRaisesRegexp(ValueError, "The 'float_precision' option "
"is not supported with the 'python' engine"):
self.read_csv(StringIO('a,b,c\n1,2,3'), float_precision='high')
def test_iteration_open_handle(self):
if PY3:
raise nose.SkipTest(
"won't work in Python 3 {0}".format(sys.version_info))
with tm.ensure_clean() as path:
with open(path, 'wb') as f:
f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG')
with open(path, 'rb') as f:
for line in f:
if 'CCC' in line:
break
try:
read_table(f, squeeze=True, header=None, engine='c')
except Exception:
pass
else:
raise ValueError('this should not happen')
result = read_table(f, squeeze=True, header=None,
engine='python')
expected = Series(['DDD', 'EEE', 'FFF', 'GGG'], name=0)
tm.assert_series_equal(result, expected)
def test_iterator(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when on chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_single_line(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_malformed(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_skip_footer(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = self.read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_decompression_regex_sep(self):
# GH 6607
# This is a copy which should eventually be moved to ParserTests
# when the issue with the C parser is fixed
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest('need gzip and bz2 to run')
data = open(self.csv1, 'rb').read()
data = data.replace(b',', b'::')
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='gzip')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean() as path:
tmp = bz2.BZ2File(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='bz2')
tm.assert_frame_equal(result, expected)
self.assertRaises(ValueError, self.read_csv,
path, compression='bz3')
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with multi-level index is fixed in the C parser.
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
# GH 6893
data = ' A B C\na b c\n1 3 7 0 3 6\n3 1 4 1 5 9'
expected = DataFrame.from_records([(1, 3, 7, 0, 3, 6), (3, 1, 4, 1, 5, 9)],
columns=list('abcABC'), index=list('abc'))
actual = self.read_table(StringIO(data), sep='\s+')
tm.assert_frame_equal(actual, expected)
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep='\s+')
tm.assert_almost_equal(df.values, expected)
expected = [[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data), skip_blank_lines=False)
tm.assert_almost_equal(list(df.values), list(expected))
def test_whitespace_lines(self):
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = [[1, 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
class TestFwfColspaceSniffing(tm.TestCase):
def test_full_file(self):
# File with all values
test = '''index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
2000-01-05T00:00:00 0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0.487094399463 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
2000-01-11T00:00:00 0.157160753327 34 foo'''
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_missing(self):
# File with missing values
test = '''index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
34'''
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces(self):
# File with spaces in columns
test = '''
Account Name Balance CreditLimit AccountCreated
101 <NAME> 9315.45 10000.00 1/17/1998
312 <NAME> 90.00 1000.00 8/6/2003
868 <NAME> 0 17000.00 5/25/1985
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 <NAME> 789.65 5000.00 2/5/2007
'''.strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces_and_missing(self):
# File with spaces and missing values in columns
test = '''
Account Name Balance CreditLimit AccountCreated
101 10000.00 1/17/1998
312 <NAME> 90.00 1000.00 8/6/2003
868 5/25/1985
761 <NAME>-Smith 49654.87 100000.00 12/5/2006
317 <NAME> 789.65
'''.strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_messed_up_data(self):
# Completely messed up file
test = '''
Account Name Balance Credit Limit Account Created
101 10000.00 1/17/1998
312 <NAME> 90.00 1000.00
761 <NAME> 49654.87 100000.00 12/5/2006
317 <NAME> 789.65
'''.strip('\r\n')
colspecs = ((2, 10), (15, 33), (37, 45), (49, 61), (64, 79))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_multiple_delimiters(self):
test = r'''
col1~~~~~col2 col3++++++++++++++++++col4
~~22.....11.0+++foo~~~~~~~~~~<NAME>
33+++122.33\\\bar.........<NAME>
++44~~~~12.01 baz~~<NAME>
~~55 11+++foo++++<NAME>-Smith
..66++++++.03~~~bar <NAME>
'''.strip('\r\n')
colspecs = ((0, 4), (7, 13), (15, 19), (21, 41))
expected = read_fwf(StringIO(test), colspecs=colspecs,
delimiter=' +~.\\')
tm.assert_frame_equal(expected, read_fwf(StringIO(test),
delimiter=' +~.\\'))
def test_variable_width_unicode(self):
if not compat.PY3:
raise nose.SkipTest(
'Bytes-related test - only needs to work on Python 3')
test = '''
שלום שלום
ום שלל
של ום
'''.strip('\r\n')
expected = pd.read_fwf(BytesIO(test.encode('utf8')),
colspecs=[(0, 4), (5, 9)], header=None, encoding='utf8')
tm.assert_frame_equal(expected, read_fwf(BytesIO(test.encode('utf8')),
header=None, encoding='utf8'))
class CParserTests(ParserTests):
""" base class for CParser Testsing """
def test_buffer_overflow(self):
# GH9205
# test certain malformed input files that cause buffer overflows in
# tokenizer.c
malfw = "1\r1\r1\r 1\r 1\r" # buffer overflow in words pointer
malfs = "1\r1\r1\r 1\r 1\r11\r" # buffer overflow in stream pointer
malfl = "1\r1\r1\r 1\r 1\r11\r1\r" # buffer overflow in lines pointer
for malf in (malfw, malfs, malfl):
try:
df = self.read_table(StringIO(malf))
except Exception as cperr:
self.assertIn(
'Buffer overflow caught - possible malformed input file.', str(cperr))
def test_buffer_rd_bytes(self):
# GH 12098
# src->buffer can be freed twice leading to a segfault if a corrupt
# gzip file is read with read_csv and the buffer is filled more than
# once before gzip throws an exception
data = '\x1F\x8B\x08\x00\x00\x00\x00\x00\x00\x03\xED\xC3\x41\x09' \
'\x00\x00\x08\x00\xB1\xB7\xB6\xBA\xFE\xA5\xCC\x21\x6C\xB0' \
'\xA6\x4D' + '\x55' * 267 + \
'\x7D\xF7\x00\x91\xE0\x47\x97\x14\x38\x04\x00' \
'\x1f\x8b\x08\x00VT\x97V\x00\x03\xed]\xefO'
for i in range(100):
try:
_ = self.read_csv(StringIO(data),
compression='gzip',
delim_whitespace=True)
except Exception as e:
pass
class TestCParserHighMemory(CParserTests, tm.TestCase):
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = False
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = False
return read_table(*args, **kwds)
def test_compact_ints(self):
if compat.is_platform_windows():
raise nose.SkipTest(
"segfaults on win-64, only when all tests are run")
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True, as_recarray=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
result = read_csv(StringIO(data), delimiter=',', header=None,
as_recarray=True, compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
def test_parse_dates_empty_string(self):
# #2263
s = StringIO("Date, test\n2012-01-01, 1\n,2")
result = self.read_csv(s, parse_dates=["Date"], na_filter=False)
self.assertTrue(result['Date'].isnull()[1])
def test_usecols(self):
raise nose.SkipTest(
"Usecols is not supported in C High Memory engine.")
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
# check with delim_whitespace=True
df = self.read_csv(StringIO(data.replace(',', ' ')), comment='#',
delim_whitespace=True)
tm.assert_almost_equal(df.values, expected)
# check with custom line terminator
df = self.read_csv(StringIO(data.replace('\n', '*')), comment='#',
lineterminator='*')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_skiprows_lineterminator(self):
# GH #9079
data = '\n'.join(['SMOSMANIA ThetaProbe-ML2X ',
'2007/01/01 01:00 0.2140 U M ',
'2007/01/01 02:00 0.2141 M O ',
'2007/01/01 04:00 0.2142 D M '])
expected = pd.DataFrame([['2007/01/01', '01:00', 0.2140, 'U', 'M'],
['2007/01/01', '02:00', 0.2141, 'M', 'O'],
['2007/01/01', '04:00', 0.2142, 'D', 'M']],
columns=['date', 'time', 'var', 'flag',
'oflag'])
# test with the three default lineterminators LF, CR and CRLF
df = self.read_csv(StringIO(data), skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data.replace('\n', '\r')),
skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data.replace('\n', '\r\n')),
skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
def test_trailing_spaces(self):
data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n"
expected = pd.DataFrame([[1., 2., 4.],
[5.1, np.nan, 10.]])
# this should ignore six lines including lines with trailing
# whitespace and blank lines. issues 8661, 8679
df = self.read_csv(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
# test skipping set of rows after a row with trailing spaces, issue
# #8983
expected = pd.DataFrame({"A": [1., 5.1], "B": [2., np.nan],
"C": [4., 10]})
df = self.read_table(StringIO(data.replace(',', ' ')),
delim_whitespace=True,
skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows should skip the first 4 lines (including comments), while
# header should start from the second non-commented line starting
# with line 5
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep='\s+')
tm.assert_almost_equal(df.values, expected)
expected = [[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data), skip_blank_lines=False)
tm.assert_almost_equal(list(df.values), list(expected))
def test_whitespace_lines(self):
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = [[1, 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_passing_dtype(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the dtype argument is supported by all engines.
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
# empty frame
# GH12048
actual = self.read_csv(StringIO('A,B'), dtype=str)
expected = DataFrame({'A': [], 'B': []}, index=[], dtype=str)
tm.assert_frame_equal(actual, expected)
def test_dtype_and_names_error(self):
# GH 8833
# passing both dtype and names resulting in an error reporting issue
data = """
1.0 1
2.0 2
3.0 3
"""
# base cases
result = self.read_csv(StringIO(data), sep='\s+', header=None)
expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]])
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), sep='\s+',
header=None, names=['a', 'b'])
expected = DataFrame(
[[1.0, 1], [2.0, 2], [3.0, 3]], columns=['a', 'b'])
tm.assert_frame_equal(result, expected)
# fallback casting
result = self.read_csv(StringIO(
data), sep='\s+', header=None, names=['a', 'b'], dtype={'a': np.int32})
expected = DataFrame([[1, 1], [2, 2], [3, 3]], columns=['a', 'b'])
expected['a'] = expected['a'].astype(np.int32)
tm.assert_frame_equal(result, expected)
data = """
1.0 1
nan 2
3.0 3
"""
# fallback casting, but not castable
with tm.assertRaisesRegexp(ValueError, 'cannot safely convert'):
self.read_csv(StringIO(data), sep='\s+', header=None,
names=['a', 'b'], dtype={'a': np.int32})
def test_fallback_to_python(self):
# GH 6607
data = 'a b c\n1 2 3'
# specify C engine with unsupported options (raise)
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', sep=None,
delim_whitespace=False)
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', sep='\s')
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', skip_footer=1)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), delim_whitespace=True,
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), lineterminator='\n',
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
class TestCParserLowMemory(CParserTests, tm.TestCase):
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = True
kwds['buffer_lines'] = 2
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = True
kwds['buffer_lines'] = 2
return read_table(*args, **kwds)
def test_compact_ints(self):
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
self.assertEqual(result.to_records(index=False).dtype, ex_dtype)
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
self.assertEqual(result.to_records(index=False).dtype, ex_dtype)
def test_compact_ints_as_recarray(self):
if compat.is_platform_windows():
raise nose.SkipTest(
"segfaults on win-64, only when all tests are run")
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True, as_recarray=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
result = read_csv(StringIO(data), delimiter=',', header=None,
as_recarray=True, compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
def test_precise_conversion(self):
# GH #8002
tm._skip_if_32bit()
from decimal import Decimal
normal_errors = []
precise_errors = []
for num in np.linspace(1., 2., num=500): # test numbers between 1 and 2
text = 'a\n{0:.25}'.format(num) # 25 decimal digits of precision
normal_val = float(self.read_csv(StringIO(text))['a'][0])
precise_val = float(self.read_csv(
StringIO(text), float_precision='high')['a'][0])
roundtrip_val = float(self.read_csv(
StringIO(text), float_precision='round_trip')['a'][0])
actual_val = Decimal(text[2:])
def error(val):
return abs(Decimal('{0:.100}'.format(val)) - actual_val)
normal_errors.append(error(normal_val))
precise_errors.append(error(precise_val))
# round-trip should match float()
self.assertEqual(roundtrip_val, float(text[2:]))
self.assertTrue(sum(precise_errors) <= sum(normal_errors))
self.assertTrue(max(precise_errors) <= max(normal_errors))
def test_pass_dtype(self):
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'})
self.assertEqual(result['one'].dtype, 'u1')
self.assertEqual(result['two'].dtype, 'object')
def test_pass_dtype_as_recarray(self):
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
if compat.is_platform_windows():
raise nose.SkipTest(
"segfaults on win-64, only when all tests are run")
result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'},
as_recarray=True)
self.assertEqual(result['one'].dtype, 'u1')
self.assertEqual(result['two'].dtype, 'S1')
def test_empty_pass_dtype(self):
data = 'one,two'
result = self.read_csv(StringIO(data), dtype={'one': 'u1'})
expected = DataFrame({'one': np.empty(0, dtype='u1'),
'two': np.empty(0, dtype=np.object)})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_index_pass_dtype(self):
data = 'one,two'
result = self.read_csv(StringIO(data), index_col=['one'],
dtype={'one': 'u1', 1: 'f'})
expected = DataFrame({'two': np.empty(0, dtype='f')},
index=Index([], dtype='u1', name='one'))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_multiindex_pass_dtype(self):
data = 'one,two,three'
result = self.read_csv(StringIO(data), index_col=['one', 'two'],
dtype={'one': 'u1', 1: 'f8'})
exp_idx = MultiIndex.from_arrays([np.empty(0, dtype='u1'), np.empty(0, dtype='O')],
names=['one', 'two'])
expected = DataFrame(
{'three': np.empty(0, dtype=np.object)}, index=exp_idx)
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_mangled_column_pass_dtype_by_names(self):
data = 'one,one'
result = self.read_csv(StringIO(data), dtype={
'one': 'u1', 'one.1': 'f'})
expected = DataFrame(
{'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_mangled_column_pass_dtype_by_indexes(self):
data = 'one,one'
result = self.read_csv(StringIO(data), dtype={0: 'u1', 1: 'f'})
expected = DataFrame(
{'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_dup_column_pass_dtype_by_names(self):
data = 'one,one'
result = self.read_csv(
StringIO(data), mangle_dupe_cols=False, dtype={'one': 'u1'})
expected = pd.concat([Series([], name='one', dtype='u1')] * 2, axis=1)
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_dup_column_pass_dtype_by_indexes(self):
### FIXME in GH9424
raise nose.SkipTest(
"GH 9424; known failure read_csv with duplicate columns")
data = 'one,one'
result = self.read_csv(
StringIO(data), mangle_dupe_cols=False, dtype={0: 'u1', 1: 'f'})
expected = pd.concat([Series([], name='one', dtype='u1'),
Series([], name='one', dtype='f')], axis=1)
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_usecols_dtypes(self):
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(0, 1, 2),
names=('a', 'b', 'c'),
header=None,
converters={'a': str},
dtype={'b': int, 'c': float},
)
result2 = self.read_csv(StringIO(data), usecols=(0, 2),
names=('a', 'b', 'c'),
header=None,
converters={'a': str},
dtype={'b': int, 'c': float},
)
self.assertTrue((result.dtypes == [object, np.int, np.float]).all())
self.assertTrue((result2.dtypes == [object, np.float]).all())
def test_usecols_implicit_index_col(self):
# #2654
data = 'a,b,c\n4,apple,bat,5.7\n8,orange,cow,10'
result = self.read_csv(StringIO(data), usecols=['a', 'b'])
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_with_whitespace(self):
data = 'a b c\n4 apple bat 5.7\n8 orange cow 10'
result = self.read_csv(StringIO(data), delim_whitespace=True,
usecols=('a', 'b'))
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_regex_sep(self):
# #2733
data = 'a b c\n4 apple bat 5.7\n8 orange cow 10'
df = self.read_csv(StringIO(data), sep='\s+', usecols=('a', 'b'))
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(df, expected)
def test_pure_python_failover(self):
data = "a,b,c\n1,2,3#ignore this!\n4,5,6#ignorethistoo"
result = self.read_csv(StringIO(data), comment='#')
expected = DataFrame({'a': [1, 4], 'b': [2, 5], 'c': [3, 6]})
tm.assert_frame_equal(result, expected)
def test_decompression(self):
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest('need gzip and bz2 to run')
data = open(self.csv1, 'rb').read()
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, compression='gzip')
tm.assert_frame_equal(result, expected)
result = self.read_csv(open(path, 'rb'), compression='gzip')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean() as path:
tmp = bz2.BZ2File(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, compression='bz2')
tm.assert_frame_equal(result, expected)
# result = self.read_csv(open(path, 'rb'), compression='bz2')
# tm.assert_frame_equal(result, expected)
self.assertRaises(ValueError, self.read_csv,
path, compression='bz3')
with open(path, 'rb') as fin:
if compat.PY3:
result = self.read_csv(fin, compression='bz2')
tm.assert_frame_equal(result, expected)
else:
self.assertRaises(ValueError, self.read_csv,
fin, compression='bz2')
def test_decompression_regex_sep(self):
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest('need gzip and bz2 to run')
data = open(self.csv1, 'rb').read()
data = data.replace(b',', b'::')
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
# GH 6607
# Test currently only valid with the python engine because of
# regex sep. Temporarily copied to TestPythonParser.
# Here test for ValueError when passing regex sep:
with tm.assertRaisesRegexp(ValueError, 'regex sep'): # XXX
result = self.read_csv(path, sep='::', compression='gzip')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean() as path:
tmp = bz2.BZ2File(path, mode='wb')
tmp.write(data)
tmp.close()
# GH 6607
with tm.assertRaisesRegexp(ValueError, 'regex sep'): # XXX
result = self.read_csv(path, sep='::', compression='bz2')
tm.assert_frame_equal(result, expected)
self.assertRaises(ValueError, self.read_csv,
path, compression='bz3')
def test_memory_map(self):
# it works!
result = self.read_csv(self.csv1, memory_map=True)
def test_disable_bool_parsing(self):
# #2090
data = """A,B,C
Yes,No,Yes
No,Yes,Yes
Yes,,Yes
No,No,No"""
result = read_csv(StringIO(data), dtype=object)
self.assertTrue((result.dtypes == object).all())
result = read_csv(StringIO(data), dtype=object, na_filter=False)
self.assertEqual(result['B'][2], '')
def test_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
df2 = self.read_csv(StringIO(data), sep=';', decimal=',')
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_custom_lineterminator(self):
data = 'a,b,c~1,2,3~4,5,6'
result = self.read_csv(StringIO(data), lineterminator='~')
expected = self.read_csv(StringIO(data.replace('~', '\n')))
tm.assert_frame_equal(result, expected)
data2 = data.replace('~', '~~')
result = self.assertRaises(ValueError, read_csv, StringIO(data2),
lineterminator='~~')
def test_raise_on_passed_int_dtype_with_nas(self):
# #2631
data = """YEAR, DOY, a
2001,106380451,10
2001,,11
2001,106380451,67"""
self.assertRaises(Exception, read_csv, StringIO(data), sep=",",
skipinitialspace=True,
dtype={'DOY': np.int64})
def test_na_trailing_columns(self):
data = """Date,Currenncy,Symbol,Type,Units,UnitPrice,Cost,Tax
2012-03-14,USD,AAPL,BUY,1000
2012-05-12,USD,SBUX,SELL,500"""
result = self.read_csv(StringIO(data))
self.assertEqual(result['Date'][1], '2012-05-12')
self.assertTrue(result['UnitPrice'].isnull().all())
def test_parse_ragged_csv(self):
data = """1,2,3
1,2,3,4
1,2,3,4,5
1,2
1,2,3,4"""
nice_data = """1,2,3,,
1,2,3,4,
1,2,3,4,5
1,2,,,
1,2,3,4,"""
result = self.read_csv(StringIO(data), header=None,
names=['a', 'b', 'c', 'd', 'e'])
expected = self.read_csv(StringIO(nice_data), header=None,
names=['a', 'b', 'c', 'd', 'e'])
tm.assert_frame_equal(result, expected)
# too many columns, cause segfault if not careful
data = "1,2\n3,4,5"
result = self.read_csv(StringIO(data), header=None,
names=lrange(50))
expected = self.read_csv(StringIO(data), header=None,
names=lrange(3)).reindex(columns=lrange(50))
tm.assert_frame_equal(result, expected)
def test_tokenize_CR_with_quoting(self):
# #3453, this doesn't work with Python parser for some reason
data = ' a,b,c\r"a,b","e,d","f,f"'
result = self.read_csv(StringIO(data), header=None)
expected = self.read_csv(StringIO(data.replace('\r', '\n')),
header=None)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data))
expected = self.read_csv(StringIO(data.replace('\r', '\n')))
tm.assert_frame_equal(result, expected)
def test_raise_on_no_columns(self):
# single newline
data = "\n"
self.assertRaises(ValueError, self.read_csv, StringIO(data))
# test with more than a single newline
data = "\n\n\n"
self.assertRaises(ValueError, self.read_csv, StringIO(data))
def test_warn_if_chunks_have_mismatched_type(self):
# Issue #3866 If chunks are different types and can't
# be coerced using numerical types, then issue warning.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(DtypeWarning):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_invalid_c_parser_opts_with_not_c_parser(self):
from pandas.io.parsers import _c_parser_defaults as c_defaults
data = """1,2,3,,
1,2,3,4,
1,2,3,4,5
1,2,,,
1,2,3,4,"""
engines = 'python', 'python-fwf'
for default in c_defaults:
for engine in engines:
kwargs = {default: object()}
with tm.assertRaisesRegexp(ValueError,
'The %r option is not supported '
'with the %r engine' % (default,
engine)):
read_csv(StringIO(data), engine=engine, **kwargs)
def test_passing_dtype(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the dtype argument is supported by all engines.
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
def test_fallback_to_python(self):
# GH 6607
data = 'a b c\n1 2 3'
# specify C engine with C-unsupported options (raise)
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', sep=None,
delim_whitespace=False)
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', sep='\s')
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', skip_footer=1)
def test_raise_on_sep_with_delim_whitespace(self):
# GH 6607
data = 'a b c\n1 2 3'
with tm.assertRaisesRegexp(ValueError, 'you can only specify one'):
self.read_table(StringIO(data), sep='\s', delim_whitespace=True)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), delim_whitespace=True,
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), lineterminator='\n',
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_bool_header_arg(self):
# GH 6114
data = """\
MyColumn
a
b
a
b"""
for arg in [True, False]:
with tm.assertRaises(TypeError):
pd.read_csv(StringIO(data), header=arg)
with tm.assertRaises(TypeError):
pd.read_table(StringIO(data), header=arg)
with tm.assertRaises(TypeError):
pd.read_fwf(StringIO(data), header=arg)
def test_multithread_stringio_read_csv(self):
# GH 11786
max_row_range = 10000
num_files = 100
bytes_to_df = [
'\n'.join(
['%d,%d,%d' % (i, i, i) for i in range(max_row_range)]
).encode() for j in range(num_files)]
files = [BytesIO(b) for b in bytes_to_df]
# Read all files in many threads
pool = ThreadPool(8)
results = pool.map(pd.read_csv, files)
first_result = results[0]
for result in results:
tm.assert_frame_equal(first_result, result)
def test_multithread_path_multipart_read_csv(self):
# GH 11786
num_tasks = 4
file_name = '__threadpool_reader__.csv'
num_rows = 100000
df = self.construct_dataframe(num_rows)
with tm.ensure_clean(file_name) as path:
df.to_csv(path)
final_dataframe = self.generate_multithread_dataframe(path,
num_rows,
num_tasks)
tm.assert_frame_equal(df, final_dataframe)
class TestMiscellaneous(tm.TestCase):
# for tests that don't fit into any of the other classes, e.g. those that
# compare results for different engines or test the behavior when 'engine'
# is not passed
def test_compare_whitespace_regex(self):
# GH 6607
data = ' a b c\n1 2 3 \n4 5 6\n 7 8 9'
result_c = pd.read_table(StringIO(data), sep='\s+', engine='c')
result_py = pd.read_table(StringIO(data), sep='\s+', engine='python')
print(result_c)
tm.assert_frame_equal(result_c, result_py)
def test_fallback_to_python(self):
# GH 6607
data = 'a b c\n1 2 3'
# specify C-unsupported options with python-unsupported option
# (options will be ignored on fallback, raise)
with tm.assertRaisesRegexp(ValueError, 'Falling back'):
pd.read_table(StringIO(data), sep=None,
delim_whitespace=False, dtype={'a': float})
with tm.assertRaisesRegexp(ValueError, 'Falling back'):
pd.read_table(StringIO(data), sep='\s', dtype={'a': float})
with tm.assertRaisesRegexp(ValueError, 'Falling back'):
pd.read_table(StringIO(data), skip_footer=1, dtype={'a': float})
# specify C-unsupported options without python-unsupported options
with tm.assert_produces_warning(parsers.ParserWarning):
pd.read_table(StringIO(data), sep=None, delim_whitespace=False)
with tm.assert_produces_warning(parsers.ParserWarning):
pd.read_table(StringIO(data), sep='\s')
with tm.assert_produces_warning(parsers.ParserWarning):
pd.read_table(StringIO(data), skip_footer=1)
class TestParseSQL(tm.TestCase):
def test_convert_sql_column_floats(self):
arr = np.array([1.5, None, 3, 4.2], dtype=object)
result = lib.convert_sql_column(arr)
expected = np.array([1.5, np.nan, 3, 4.2], dtype='f8')
assert_same_values_and_dtype(result, expected)
def test_convert_sql_column_strings(self):
arr = np.array(['1.5', None, '3', '4.2'], dtype=object)
result = lib.convert_sql_column(arr)
expected = np.array(['1.5', np.nan, '3', '4.2'], dtype=object)
assert_same_values_and_dtype(result, expected)
def test_convert_sql_column_unicode(self):
arr = np.array([u('1.5'), None, u('3'), u('4.2')],
dtype=object)
result = lib.convert_sql_column(arr)
expected = np.array([u('1.5'), np.nan, u('3'), u('4.2')],
dtype=object)
assert_same_values_and_dtype(result, expected)
def test_convert_sql_column_ints(self):
arr = np.array([1, 2, 3, 4], dtype='O')
arr2 = np.array([1, 2, 3, 4], dtype='i4').astype('O')
result = lib.convert_sql_column(arr)
result2 = lib.convert_sql_column(arr2)
expected = np.array([1, 2, 3, 4], dtype='i8')
assert_same_values_and_dtype(result, expected)
assert_same_values_and_dtype(result2, expected)
arr = np.array([1, 2, 3, None, 4], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([1, 2, 3, np.nan, 4], dtype='f8')
assert_same_values_and_dtype(result, expected)
def test_convert_sql_column_longs(self):
arr = np.array([long(1), long(2), long(3), long(4)], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([1, 2, 3, 4], dtype='i8')
assert_same_values_and_dtype(result, expected)
arr = np.array([long(1), long(2), long(3), None, long(4)], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([1, 2, 3, np.nan, 4], dtype='f8')
assert_same_values_and_dtype(result, expected)
def test_convert_sql_column_bools(self):
arr = np.array([True, False, True, False], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([True, False, True, False], dtype=bool)
assert_same_values_and_dtype(result, expected)
arr = np.array([True, False, None, False], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([True, False, np.nan, False], dtype=object)
assert_same_values_and_dtype(result, expected)
def test_convert_sql_column_decimals(self):
from decimal import Decimal
arr = np.array([Decimal('1.5'), None, Decimal('3'), Decimal('4.2')])
result = lib.convert_sql_column(arr)
expected = np.array([1.5, np.nan, 3, 4.2], dtype='f8')
assert_same_values_and_dtype(result, expected)
class TestUrlGz(tm.TestCase):
def setUp(self):
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
self.local_table = read_table(localtable)
@tm.network
def test_url_gz(self):
url = 'https://raw.github.com/pydata/pandas/master/pandas/io/tests/data/salary.table.gz'
url_table = read_table(url, compression="gzip", engine="python")
tm.assert_frame_equal(url_table, self.local_table)
@tm.network
def test_url_gz_infer(self):
url = ('https://s3.amazonaws.com/pandas-test/salary.table.gz')
url_table = read_table(url, compression="infer", engine="python")
tm.assert_frame_equal(url_table, self.local_table)
class TestS3(tm.TestCase):
def setUp(self):
try:
import boto
except ImportError:
raise nose.SkipTest("boto not installed")
@tm.network
def test_parse_public_s3_bucket(self):
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
if comp == 'bz2' and compat.PY2:
# The Python 2 C parser can't read bz2 from S3.
self.assertRaises(ValueError, pd.read_csv,
's3://pandas-test/tips.csv' + ext,
compression=comp)
else:
df = pd.read_csv('s3://pandas-test/tips.csv' +
ext, compression=comp)
self.assertTrue(isinstance(df, pd.DataFrame))
self.assertFalse(df.empty)
tm.assert_frame_equal(pd.read_csv(
tm.get_data_path('tips.csv')), df)
# Read public file from bucket with not-public contents
df = pd.read_csv('s3://cant_get_it/tips.csv')
self.assertTrue(isinstance(df, pd.DataFrame))
self.assertFalse(df.empty)
tm.assert_frame_equal(pd.read_csv( | tm.get_data_path('tips.csv') | pandas.util.testing.get_data_path |
'''
Simple vanilla LSTM multiclass classifier for raw EEG data
'''
import scipy.io as spio
import numpy as np
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from keras.optimizers import Adam
from keras.models import load_model
from keras.callbacks import ModelCheckpoint
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
import gc
import h5py
def loadmat(filename):
def _check_keys(d):
'''
checks if entries in the dictionary are mat-objects; if so,
_todict is called to change them into nested dictionaries
'''
for key in d:
if isinstance(d[key], spio.matlab.mio5_params.mat_struct):
d[key] = _todict(d[key])
return d
def _has_struct(elem):
"""Determine if elem is an array and if any array item is a struct"""
return isinstance(elem, np.ndarray) and any(isinstance(
e, spio.matlab.mio5_params.mat_struct) for e in elem)
def _todict(matobj):
'''
A recursive function which constructs nested dictionaries from mat-objects
'''
d = {}
for strg in matobj._fieldnames:
elem = matobj.__dict__[strg]
if isinstance(elem, spio.matlab.mio5_params.mat_struct):
d[strg] = _todict(elem)
elif _has_struct(elem):
d[strg] = _tolist(elem)
else:
d[strg] = elem
return d
def _tolist(ndarray):
'''
A recursive function which constructs lists from cellarrays
(which are loaded as numpy ndarrays), recursing into the elements
if they contain matobjects.
'''
elem_list = []
for sub_elem in ndarray:
if isinstance(sub_elem, spio.matlab.mio5_params.mat_struct):
elem_list.append(_todict(sub_elem))
elif _has_struct(sub_elem):
elem_list.append(_tolist(sub_elem))
else:
elem_list.append(sub_elem)
return elem_list
data = spio.loadmat(filename, struct_as_record=False, squeeze_me=True)
return _check_keys(data)
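# Illustrative usage sketch (hypothetical file name, not part of this script):
# unlike plain scipy.io.loadmat, this wrapper recurses into mat_struct objects,
# so nested MATLAB fields can be read with ordinary dict lookups, e.g.
#   mat = loadmat('EEG_data.mat')
#   prompts = mat['EEG_Data']['prompts']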
"""Helper function to truncate dataframes to a specified shape - usefull to reduce all EEG trials to the same number
of time stamps.
"""
def truncate(arr, shape):
desired_size_factor = np.prod([n for n in shape if n != -1])
if -1 in shape: # implicit array size
desired_size = arr.size // desired_size_factor * desired_size_factor
else:
desired_size = desired_size_factor
return arr.flat[:desired_size].reshape(shape)
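# Quick self-check of truncate(), added as an illustrative sketch (the _demo
# array below is made up and not part of the original script): the helper
# flattens the input and keeps only the leading values that fit the requested
# shape, which is how each EEG trial is later cut to the shortest common length.
_demo = np.arange(12).reshape(4, 3)              # pretend trial: 4 time stamps x 3 channels
assert truncate(_demo, (2, 3)).shape == (2, 3)   # keeps only the first 2*3 values
assert truncate(_demo, (-1, 3)).shape == (4, 3)  # -1 keeps as many full rows of 3 as fit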
def main():
PATH = "G:\\UWA_MDS\\2021SEM1\\Research_Project\\KARA_ONE_Data\\ImaginedSpeechData\\"
subjects = ['MM05', 'MM08', 'MM09', 'MM10', 'MM11', 'MM12', 'MM14', 'MM15', 'MM16', 'MM18', 'MM19', 'MM20', 'MM21', 'P02']
for subject in subjects:
print("Working on Subject: " + subject)
print("Loading .set data")
""" Load EEG data with loadmat() function"""
SubjectData = loadmat(PATH + subject + '\\EEG_data.mat')
print("Setting up dataframes")
""" Setup target and EEG dataframes"""
targets = pd.DataFrame(SubjectData['EEG_Data']['prompts'])
targets.columns = ['prompt']
sequences = pd.DataFrame(SubjectData['EEG_Data']['activeEEG'])
sequences.columns = ['trials']
EEG = pd.concat([sequences.reset_index(drop=True),targets.reset_index(drop=True)], axis=1)
words = ['gnaw', 'pat', 'knew', 'pot']
EEG = EEG.loc[EEG['prompt'].isin(words)]
EEG = EEG.reset_index(drop=True)
sequences = pd.DataFrame(EEG['trials'])
targets = pd.DataFrame(EEG['prompt'])
seq = np.asarray(sequences['trials'])
# transpose each trial so time stamps run along the first axis and the 62 channels along the second
for i in range(len(seq)):
seq[i] = seq[i].transpose()
sequences['trials'] = seq
print("Train / Test splitting data")
#Stratified train test splits
train_x, test_x, train_y, test_y = train_test_split(sequences, targets, stratify=targets, test_size=0.2, random_state=9)
#Encode target prompts to 0/1
train_y= pd.get_dummies(train_y['prompt'])
test_y= pd.get_dummies(test_y['prompt'])
#need train_x and test_x as arrays in order to truncate them down to the shortest trial
train_x = np.asarray(train_x['trials'])
test_x = np.asarray(test_x['trials'])
#find minimum length of all the trials present in both test and train trials
min_ln = min(min(i.shape for i in train_x)[0], min(i.shape for i in test_x)[0])
#reduce all trials down to common length set by min_ln
for arr in [train_x, test_x]:
for i, trial in enumerate(arr):
arr[i] = truncate(trial, (min_ln, 62))
#for the LSTM model we need the data in a 3D array: (samples, time steps, features)
train_x = np.rollaxis(np.dstack(train_x), -1)
test_x = np.rollaxis(np.dstack(test_x), -1)
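# Shape sketch (illustrative only; the _fake list is hypothetical, not real data):
# np.dstack stacks the per-trial (time, channel) arrays along a new last axis and
# np.rollaxis moves that trial axis to the front, yielding the
# (samples, time steps, features) layout that the Keras LSTM layer expects, e.g.
#   _fake = [np.zeros((5, 62)) for _ in range(3)]
#   np.rollaxis(np.dstack(_fake), -1).shape  # -> (3, 5, 62)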
#Make directories to store model results if they do not exist
save_path = PATH + subject + '\\lstm_model'
import os
from os import path
if not os.path.exists(save_path):
os.mkdir(save_path)
save_model = save_path + '\\lstm_vanilla_model'
# Build and fit model
from keras.callbacks import EarlyStopping
with tf.device('/cpu:0'):
print("Building LSTM")
model = Sequential()
model.add(LSTM(256, return_sequences=True, input_shape=(train_x.shape[1], train_x.shape[2])))
model.add(LSTM(256))
model.add(Dropout(0.5))
model.add(Dense(100, activation='relu'))
model.add(Dense(4, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print("Fitting Model")
chk = ModelCheckpoint(save_model, monitor='loss', save_best_only=True, mode='min', verbose=1)
es = EarlyStopping(monitor='loss', min_delta=0.01, verbose=1, patience=5, mode='auto')
model.fit(train_x, train_y, epochs=30, batch_size=train_x.shape[0], verbose=1, callbacks=[chk, es])
#history = model.fit(train_x, train_y, epochs=50, batch_size=train_x.shape[0], verbose=1, callbacks=[chk, es])
print("Model successfully trained!")
# Store a printout of the model summary
model_sum = save_path + '\\lstm_summary.png'
from keras.utils import plot_model
plot_model(model, to_file=model_sum, show_shapes=True, show_layer_names=True)
#Plots of model training
#img_loc = PATH + subject + '\\lstm_training_loss.png'
#plt.plot(history.history['loss'], label='train')
#plt.plot(history.history['val_loss'], label='test')
#plt.legend()
#plt.savefig(img_loc)
print("Performing model evaluation...")
model2 = load_model(save_model)
test_preds = model2.predict_classes(test_x)
test_preds = pd.DataFrame(test_preds)
test_preds.columns = ['prompts']
test_preds = test_preds.replace({0 : 'gnaw', 1 : 'knew', 2 : 'pat', 3 : 'pot'})
new_df = test_y.idxmax(axis=1)
accuracy_score(new_df, test_preds['prompts'])
from sklearn.metrics import confusion_matrix
my_mat = confusion_matrix(new_df, test_preds['prompts'])
my_mat = pd.DataFrame(my_mat, index=[i for i in ['gnaw', 'knew', 'pat', 'pot']],
columns=[i for i in ['gnaw', 'knew', 'pat', 'pot']])
hdf_loc = PATH + subject + '\\lstm_conf_mat.h5'
my_mat.to_hdf(hdf_loc, key='conf_mat', mode='w')
import seaborn as sn
sn.heatmap(my_mat, annot=True)
img_loc = PATH + subject + '\\lstm_conf_mat.png'
plt.savefig(img_loc)
plt.clf()
print("Model evaluation complete, results stored to subject folder, resetting Keras.")
del model
gc.collect()
K.clear_session()
tf.compat.v1.reset_default_graph() # TF graph isn't same as Keras graph
#Compute subject model accuracies in dict, save to csv
matrix = "\\lstm_conf_mat.h5"
lstm_acc = {}
for subject in subjects:
file_path = PATH + subject + matrix
with h5py.File(file_path, 'r') as f:
# Get the HDF5 group
group = f['conf_mat']
acc = {}
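        # The loop below assumes the HDF5 layout pandas.to_hdf uses for a DataFrame:
        # 'block0_values' holds the confusion-matrix rows and 'block0_items' the class
        # labels, so diagonal / row-sum gives the per-class accuracy (recall).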
i = 0
for block in group['block0_values'].value:
acc[group['block0_items'].value[i]] = block[i] / sum(block)
i += 1
lstm_acc[subject] = np.array(list(acc.values())).mean()
del group
print("LSTM subject accuracies:")
for k, v in lstm_acc.items():
print(k, v)
print("LSTM Cross Subject Accuracy:")
print(np.array(list(lstm_acc.values())).mean())
#Save lstm_acc to csv files
lstm_acc_df = pd.DataFrame.from_dict(data=lstm_acc, orient='index')
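# Hedged completion: the original CSV destination is not preserved in this excerpt,
# so the file name below is an assumption.
lstm_acc_df.to_csv(PATH + 'lstm_subject_accuracies.csv', header=False)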
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright © Spyder Project Contributors
#
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
# ----------------------------------------------------------------------------
"""
Tests for the Variable Explorer Collections Editor.
"""
# Standard library imports
import os  # Example module for testing display inside CollectionsEditor
from os import path
import copy
import datetime
from xml.dom.minidom import parseString
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock # Python 2
# Third party imports
import numpy
import pandas
import pytest
from flaky import flaky
from qtpy.QtCore import Qt
from qtpy.QtWidgets import QWidget
# Local imports
from spyder.plugins.variableexplorer.widgets.collectionseditor import (
RemoteCollectionsEditorTableView, CollectionsEditorTableView,
CollectionsModel, CollectionsEditor, LARGE_NROWS, ROWS_TO_LOAD)
from spyder.plugins.variableexplorer.widgets.namespacebrowser import (
NamespacesBrowserFinder)
from spyder.plugins.variableexplorer.widgets.tests.test_dataframeeditor import \
generate_pandas_indexes
from spyder.py3compat import PY2
# =============================================================================
# Constants
# =============================================================================
# Full path to this file's parent directory for loading data
LOCATION = path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
# =============================================================================
# Utility functions
# =============================================================================
def data(cm, i, j):
return cm.data(cm.index(i, j))
def data_table(cm, n_rows, n_cols):
return [[data(cm, i, j) for i in range(n_rows)] for j in range(n_cols)]
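# Note: data_table returns the model contents column-major, i.e. a list of
# n_cols lists, each holding the n_rows display strings of one column.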
# =============================================================================
# Pytest Fixtures
# =============================================================================
@pytest.fixture
def nonsettable_objects_data():
"""Rturn Python objects with immutable attribs to test CollectionEditor."""
test_objs = [pandas.Period("2018-03"), pandas.Categorical([1, 2, 42])]
    expected_objs = [pandas.Period("2018-03"), pandas.Categorical([1, 2, 42])]
from src.typeDefs.aggregateMonthlyDataRecord import IAggregateDataRecord
import datetime as dt
from typing import List
from src.repos.metricsData.metricsDataRepo import MetricsDataRepo
from src.utils.addMonths import addMonths
from src.utils.getPrevFinYrDt import getPrevFinYrDt,getFinYrDt
import pandas as pd
from src.config.appConfig import getConstituentsMappings
import numpy as np
from src.typeDefs.section_1_3.section_1_3_a import ISection_1_3_a
def fetchSection1_3_aContext(appDbConnStr: str, startDt: dt.datetime, endDt: dt.datetime) -> ISection_1_3_a:
constituentsInfos = getConstituentsMappings()
constConfig = {}
for c in constituentsInfos:
constConfig[c["entity_tag"]] = c["display_name"]
    dataRecords = pd.DataFrame()
from selenium import webdriver as wd
from selenium.webdriver.chrome.options import Options
import time
import csv
import os
import random
import json
import shutil
import pandas as pd
from modules.checker import Checker
from modules.basic_scraping_module import get_response #, get_soup
from modules.supplier_utils.uniform_category_transformer import query_uniform_category
def read_scrapy_setting():
img_hist = "./res3/img_html_source/img_hist.txt"
with open(img_hist, "r", encoding="utf-8-sig") as fp:
data = fp.readlines()
break_point = int(data[1].split(":")[-1].strip())
avg_wait_time = int(data[2].split(":")[-1].strip())
return break_point, avg_wait_time
class Webdriver():
def get_webdriver(self):
chrome_options = Options()
chrome_options.headless = True
wd_path = "D:/geckodriver/chromedriver.exe"
driver = wd.Chrome(wd_path, options=chrome_options)
driver.implicitly_wait(10)
return driver
class Clothes_crawler():
def imgID_padding(self):
csv_path = "./res3/tier_2.csv"
df = pd.read_csv(csv_path)
#print(data.head())
new_col_data = [i for i in range(1, len(df)+1)]
new_col_name = "img_id"
df[new_col_name] = new_col_data
#print(data.tail())
out_csv_path = "./res3/tier_2_modified.csv"
df.to_csv(out_csv_path, encoding="utf-8-sig", index=False)
###########################################################
def copy_single_prod_img(self, img_id, existing_img_id):
img_dir = "./res3/img_html_source/"
shutil.copy(f"{img_dir}{existing_img_id}.jpg", f"{img_dir}{img_id}.jpg")
def download_single_prod_img(self, prod_img_link, img_id, wait_time):
img_path = f"./res3/img_html_source/{img_id}.jpg"
if os.path.exists(img_path):
print(f"[img {img_id}] Image is already exists.")
return 0
# [***] send requests to image link
# put all correct image links to the new csv file
# path: ./res3/img_html_source
if "grey.gif" not in prod_img_link:
try:
r = get_response(prod_img_link)
with open(img_path, "wb") as fp:
fp.write(r.content)
print(f"[img {img_id}] Successfully downloaded.")
                # Wait a randomized amount of time (centered on the wait_time argument)
self.wait_some_seconds(wait_time + random.randint(-53,41)/10)
return 1
except:
print(f"[img {img_id}] ERR-2: Fail to access image link when scrapying image")
return -1
else:
print("跳過")
def wait_some_seconds(self, wait_time):
#print(f"(隨機)等待 {wait_time} 秒")
print(f"等待 {wait_time} 秒")
time.sleep(wait_time)
def download_multiple_prod_imgs(self, break_point=-1, wait_time=10):
# reset crawler
self.set_driver()
# read image history if exists
img_hist = "./res3/img_html_source/img_hist.txt"
if os.path.exists(img_hist):
with open(img_hist, "r", encoding="utf-8-sig") as fp:
data = fp.readlines()
img_id_start = int(data[0].split(":")[-1].strip()) # starts from next image of last image in the directory
else:
img_id_start = 5001 # 1
# read image mapping if exists
img_mapping_json = "./res3/img_html_source/img_record.json"
if os.path.exists(img_mapping_json):
with open(img_mapping_json, "r", encoding="utf-8-sig") as fp:
img_mapping = json.load(fp)
else:
img_mapping = dict() # k: prod_link, v: img_id
# create env
env_path = r"./res3/img_html_source"
if not os.path.exists(env_path):
os.mkdir(env_path)
# read product urls from existing tier-2 csv
csv_path = "./res3/tier_2_modified.csv"
prod_data = pd.read_csv(csv_path)
#print(prod_data.tail())
'''
prodIDs, prod_SKU_IDs, prod_links = prod_data["productID"], prod_data["product_SKU_ID"], prod_data["product_link"]
'''
prodIDs, prod_SKU_IDs, prod_img_links = prod_data["productID"], prod_data["product_SKU_ID"], prod_data["product_img_link"]
# test
#print(prodIDs.head())
#print(prod_SKU_IDs.head())
#print(prod_links.head())
for i in range(img_id_start-1, len(prodIDs)): # i starts from 0
prod_img_link = prod_img_links[i]
img_id = i+1 # integer
if i == break_point: # break_point starts from 1
break
print("\n", f"No: {img_id}", sep="")
print(f"prodID: {prodIDs[i]}")
print(f"prod_SKU_ID: {prod_SKU_IDs[i]}")
print(f"prod_img_link: {prod_img_link}")
#if prod_link not in img_mapping.keys():
if not os.path.exists(f"{env_path}/{img_id}.jpg"):
img_mapping[prod_img_link] = img_id
                ''' Fetch the image from the server's image store '''
print(f"[img {img_id}] 圖片不存在,正在抓圖片")
return_val = self.download_single_prod_img(prod_img_link, img_id, wait_time)
if return_val == -1:
break
else:
                ''' Copy an already-downloaded image '''
print(f"[img {img_id}] 相同圖片已存在本機,正在複製圖片")
existing_img_id = img_mapping[prod_img_link]
self.copy_single_prod_img(img_id, existing_img_id)
#print("img_mapping:", img_mapping, sep="\n")
            # Record img_id
with open(img_hist, "r", encoding="utf-8-sig") as fp:
data = fp.readlines()
msg = ""
msg += data[0].split(":")[0] + ": " + str(img_id) + "\n" # 更新開始索引
msg += data[1].split(":")[0] + ": " + "\n" # 清空結束索引
msg += data[2]
'''
with open(img_hist, "w", encoding="utf-8-sig") as fp:
fp.write(str(img_id))
'''
with open(img_hist, "w", encoding="utf-8-sig") as fp:
fp.write(msg)
            # Record img_mapping
with open(img_mapping_json, "w", encoding="utf-8-sig") as fp:
json.dump(img_mapping, fp, ensure_ascii=False)
def set_driver(self):
webdriver = Webdriver()
self.driver = webdriver.get_webdriver()
def get_genres(self):
return ["WOMEN","MEN","KIDS","BABY","SPORTS"]
def scroll(self):
        # Scroll to the bottom of the page to fetch all items
for i in range(4):
self.driver.execute_script("window.scrollTo(0,document.body.scrollHeight)")
time.sleep(1)
def save_to_csv(self, list_obj, csv_path, col_names):
if not os.path.exists("./res3"):
os.mkdir("./res3")
        record_amount = 0 # in case the csv file doesn't exist yet
if os.path.exists(csv_path):
with open(csv_path, mode='r', encoding="utf-8-sig") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
                # total number of records, including the header row:
record_amount = len([record for record in csv_reader])
with open(csv_path, mode='a', newline="", encoding="utf-8-sig") as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=col_names)
if record_amount == 0: # 該csv檔沒有header
writer.writeheader()
for dict_obj in list_obj:
writer.writerow(dict_obj)
print("csv檔案儲存完畢!")
""" Clothes Website: Lativ, Tier-2 Scrapying """
    def detailPage_links_crawling(self, page_n): # sales label page: page n of 190
try:
self.set_driver()
            # First, read in the saved tier_1.csv data
path = "./res3/tier_1.csv"
#print(os.path.exists(path))
self.lativ_labels = pd.read_csv(path, header=0)
sales_category_link = self.lativ_labels["link"] # in first, scrapying the label_page
#print(sales_category_link)
data_amount = len(sales_category_link)
print(f"共有 {data_amount} 個銷售分頁")
#####################
            ''' The most important info '''
            prod_info_list = list() # [{product ID, product link, product price, product image, ...}, {...}]
            child_category_list = list() # unique style (child category) names
#####################
xpaths = dict()
xpaths.setdefault("child_categories", "//div[@class='child-category-name is-style']")
xpaths.setdefault("productPage_links", "//td/div[contains(text(),'@@@')]/following-sibling::ul[1]/li[contains(@style,'margin-right')]/a")
xpaths.setdefault("SKU_ID", "//li/a[contains(@href,'!!!')]/following-sibling::div[contains(@class,'product-color-list')]/a")
#####################
print("開始爬蟲...") # 開始爬各個標籤分頁
            # Crawl page n
sales_categoryID = page_n
link = list(sales_category_link)[page_n-1]
print(f"開始搜尋第 {sales_categoryID} 個銷售分頁 ...")
print(f"網址: {link}")
self.driver.implicitly_wait(10)
self.driver.get(link) # 某一個銷售分頁
self.scroll()
            # First pull out all the "child category" style names (i.e. the text labels such as crew-neck tops, hooded tops)
tags = self.driver.find_elements_by_xpath(xpaths["child_categories"])
child_category_names = [tag.text.strip() for tag in tags]
print(f"共有 {len(child_category_names)} 種服飾款式")
path = "./res3/child_categories.csv"
            # Iterate over every style name and grab the product info listed under it
for i, child_category in enumerate(list(child_category_names)):
#for i, child_category in enumerate(list(child_category_names)[:3]):
print(f"正在抓第 {i+1} 種服飾款式:{child_category}")
                ''' Work out the child_categoryID '''
need_to_append = False
if not os.path.exists(path): # 第一次執行
if child_category not in child_category_list:
need_to_append = True
else:
child_categories = pd.read_csv(path, header=0)
if not any(child_categories["child_category"]==child_category):
[child_category_list.append(-1) for _ in range(len(child_categories["child_categoryID"]))]
need_to_append = True
if need_to_append:
child_category_list.append(child_category)
                ''' Extract: the product links of all clothes in this style '''
xpath_link = xpaths["productPage_links"].replace("@@@", child_category)
tags = self.driver.find_elements_by_xpath(xpath_link)
product_links = [tag.get_attribute("href") for tag in tags]
                ''' Extract: the product IDs of all clothes in this style '''
productIDs = [url.split("/")[-1] for url in product_links]
                ''' Extract: the product SKU_IDs of all clothes in this style '''
product_SKU_IDs = dict()
for productID in productIDs:
xpath = xpaths["SKU_ID"].replace("!!!", productID)
tags = self.driver.find_elements_by_xpath(xpath)
prod_SKU_links = [tag.get_attribute("href").split("/")[-1] for tag in tags]
product_SKU_IDs.setdefault(productID, prod_SKU_links)
                ''' Extract: the product prices of all clothes in this style '''
xpath2 = xpath_link + "/following-sibling::span"
tags = self.driver.find_elements_by_xpath(xpath2)
product_prices = [tag.text.strip() for tag in tags]
                ''' Extract: the product image URLs of all clothes in this style '''
xpath3 = xpath_link + "/img"
tags = self.driver.find_elements_by_xpath(xpath3)
product_img_links = [tag.get_attribute("src") for tag in tags]
                ''' Extract: the product names of all clothes in this style '''
xpath4 = xpath_link + "/following-sibling::div[@class='productname']"
tags = self.driver.find_elements_by_xpath(xpath4)
product_names = [tag.text.strip() for tag in tags]
                ''' Buffer the product info '''
for i in range(len(productIDs)):
productID = productIDs[i]
                    # Find all SKU_IDs of this product
product_SKU_ID_list = product_SKU_IDs[productID]
for j in range(len(product_SKU_ID_list)):
product_SKU_ID = product_SKU_ID_list[j]
prod_info_list.append({"productID": productID,
"product_SKU_ID": product_SKU_ID,
"product_name": product_names[i],
"product_price": product_prices[i],
"product_img_link": product_img_links[i],
"product_link": product_links[i],
"child_category": child_category,
"sales_categoryID": sales_categoryID
})
self.save_to_csv(prod_info_list,
"./res3/tier_2.csv",
["productID",
"product_SKU_ID",
"product_name",
"product_price",
"product_img_link",
"product_link",
"child_category",
"sales_categoryID"
])
print(f"第 {sales_categoryID} 個銷售分頁爬蟲成功!")
except:
#print(f"第 {sales_categoryID} 個銷售分頁爬蟲失敗")
print("為保持原子性(Atomicity),不儲存此銷售分頁目前爬到的所有記錄")
finally:
self.driver.close()
""" Clothes Website: Lativ, Tier-1 Scrapying """
def labelPage_links_crawling(self):
print("開始爬蟲...")
self.driver.implicitly_wait(10)
# genre_label_category => category => sales_category
# E.g., {"WOMEN":{"上衣類":{"聯名印花長T","厚棉系列"},"襯衫類":{...},...}}
genre_label_recorder = dict()
#csv_saving_type = 1
for genre in self.get_genres():
print(f"正在爬 {genre} 類商品")
url = "https://www.lativ.com.tw/{}".format(genre)
self.driver.get(url)
            # 1. Grab the text of each category
            # 2. Use that text to find all sales-categories under it
label_recorder = dict()
categories_text = list()
categories = self.driver.find_elements_by_xpath("//li/h2")
for category in categories:
categories_text.append(category.text)
label_recorder.setdefault(category.text, dict())
for category_text in categories_text:
print(f" 正在爬 {category_text} 標籤下的銷售類別")
xpath = f"//h2[contains(text(),'{category_text}')]" + "/../ul/li/a"
sales_categories = self.driver.find_elements_by_xpath(xpath)
for tag in sales_categories:
label_recorder[category_text].setdefault(tag.text, tag.get_attribute("href"))
genre_label_recorder[genre] = label_recorder
print("爬蟲結束!")
self.driver.close()
        # Return all the crawled labels
return genre_label_recorder #, csv_saving_type
def save_duplicated_SKUID_as_json():
checker = Checker()
path = "./res3/duplicated_SKU_IDs.json"
duplicated_SKU_IDs = checker.check_duplicate_SKU_IDs()
checker.save_to_json(duplicated_SKU_IDs, path)
""" Clothes Website: Lativ, Tier-4 Scrapying
(P.S. Tier-3 is for image crawling,
and Tier-4 over there is for color info recrawling)
"""
def product_scrapying(self, csv_tier_2_path, output_csv_path):
# data-structures for providing input info
df = pd.read_csv(csv_tier_2_path)
SPUs, prod_SKU_links = df["product_SPU_ID"], df["product_link"]
# data-structures for the verification use
prod_names = df["product_name"]
spu_value_counts = SPUs.value_counts()
# data-structures for recording output info
output_info = dict()
output_info.setdefault("product_SPU_ID", list())
output_info.setdefault("new_prod_ID", list())
output_info.setdefault("SKU_color_name", list())
xpaths = dict()
xpaths.setdefault("SKU_link", "//div[@class='color']/a")
xpaths.setdefault("SKU_img", "//div[@class='color']/a/img")
recorded_SPUs = dict()
recorded_SPUs.setdefault("valid", list())
recorded_SPUs.setdefault("invalid", list())
#n = 2
for i, v in enumerate(zip(SPUs, prod_SKU_links)):
#for i, v in enumerate(zip(SPUs[:n], prod_SKU_links[:n])):
SPU, prod_link = v[0], v[1]
if SPU not in recorded_SPUs["valid"]+recorded_SPUs["invalid"]:
try:
#recorded_SPUs.append(SPU)
''' Visit `prod_link` '''
self.set_driver()
self.driver.get(prod_link)
wait_time = 6 + random.randint(-26, 26)/10
self.wait_some_seconds(wait_time)
''' Append the current `prod_link` into one type of list in `recorded_SPUs` '''
# Verify the prod_link is REAL or not
# by extracting the product name and comparing
curr_prod_name = self.driver.find_element_by_xpath("//span[@class='title1']")
curr_prod_name = curr_prod_name.text
if prod_names[i] not in curr_prod_name:
recorded_SPUs["invalid"].append(SPU)
print("[WARNING] 因商品名稱與記錄不符,推測應為先前抓取時重導向到無效連結,故此輪爬蟲提早結束(進入下一輪)")
print(f"prod_names[{i}]: {prod_names[i]}")
print(f"curr_prod_name: {curr_prod_name}")
continue
''' Crawl info '''
SKU_links = self.driver.find_elements_by_xpath(xpaths["SKU_link"])
new_prod_IDs = [link.get_attribute("href").split('/')[-1] for link in SKU_links]
# Double-verify the prod_link is REAL or not
# by getting the amount of all SKU products
# and comparing with recorded # of all SKU prods under the same SPU prods
if len(new_prod_IDs) != spu_value_counts[SPU]:
recorded_SPUs["invalid"].append(SPU)
print("[WARNING] 因同一SPU商品下的SKU商品數量與記錄不符,無法正確將欲爬取資訊與先前資料做連結(需手工檢查),故此輪爬蟲提早結束(進入下一輪)")
continue
else:
recorded_SPUs["valid"].append(SPU)
print(f"[INFO] 正在記錄尚未記錄的正確 SPU_ID: {SPU}")
print("[INFO] 正在爬取 商品SKU顏色 資訊...")
imgs = self.driver.find_elements_by_xpath(xpaths["SKU_img"])
SKU_color_names = [img.get_attribute("alt") for img in imgs]
#tmp_SPUs = [str(SPU)] * len(imgs)
for new_prod_ID, SKU_color_name in zip(new_prod_IDs, SKU_color_names):
output_info["product_SPU_ID"].append(SPU)
output_info["new_prod_ID"].append(new_prod_ID)
output_info["SKU_color_name"].append(SKU_color_name)
'''
output_info["product_SPU_ID"].append(", ".join(tmp_SPUs))
output_info["new_prod_ID"].append(", ".join(new_prod_IDs))
output_info["SKU_color_name"].append(", ".join(SKU_color_names))
'''
#wait_time = 3
#print(f"[INFO] 爬蟲結束,等待 {wait_time} 秒")
#time.sleep(wait_time)
except:
print("[WARNING] 此輪發生未知錯誤")
output_df = pd.DataFrame.from_dict(output_info)
output_df.to_csv(output_csv_path,
index=False,
encoding="utf-8-sig")
###########################################################
def make_dir(self, dir_path):
if not os.path.exists(dir_path):
print(f"[INFO] 正在建立資料夾: \"{dir_path}\"",
end='\n'*2)
os.mkdir(dir_path)
else:
print(f"[INFO] 資料夾: \"{dir_path}\" 已存在")
def make_dirs(self, dir_paths):
for path in dir_paths:
self.make_dir(path)
def generate_download_link(self, server_id, spu_id, sku_id):
return f"https://s{server_id}.lativ.com.tw/i/"+\
f"{spu_id}/{spu_id}{sku_id}1/{spu_id}{sku_id}_500.jpg"
def prepare_empty_dirs_and_record_crawling_info(self, tier_1_csv_path, tier_2_csv_path, output_dir, tier_3_csv_path):
        ''' Create the top-level folders of the target path (media/products/) '''
paths_to_create = list()
tmp = output_dir.split('/')
#MIN_IDX = 1
#MAX_IDX = len(tmp)+1
MIN_IDX = 2
MAX_IDX = len(tmp)
for i in range(MIN_IDX, MAX_IDX):
#print(f"({i})", end=' ')
#print('/'.join(tmp[:i]))
paths_to_create.append('/'.join(tmp[:i]))
#print(paths_to_create)
self.make_dirs(paths_to_create)
df1 = pd.read_csv(tier_1_csv_path)
sales_cat_table = dict()
genre_category_combs = set()
for _, record in df1.iterrows():
sales_cat_id = record["sales-category ID"]
sales_cat_table.setdefault(sales_cat_id, dict())
genre = record["genre"]
uniform_category = record["uniform_category"]
sales_cat_table[sales_cat_id]["genre"] = genre
sales_cat_table[sales_cat_id]["uniform_category"] = uniform_category
genre_category_combs.add(f"{output_dir}{genre}/{uniform_category}")
# =============================================================================
# example: query `genre`, `category` for `sales-category ID`: 67
# =============================================================================
'''
test_sales_cat_id = 67
print(sales_cat_table[test_sales_cat_id]["genre"])
print(sales_cat_table[test_sales_cat_id]["uniform_category"])
'''
# =============================================================================
# example: list all unrepeated directory
# =============================================================================
'''print(genre_category_combs)'''
        ''' Create the mid-level folders of the target path (genre/category/) '''
genre_dirs = ['/'.join(e.split('/')[:-1]) for e in genre_category_combs]
self.make_dirs(genre_dirs)
self.make_dirs(genre_category_combs)
        ''' Using:
            (1) the rotating server_id (target static file server)
            (2) spu_id
            (3) sku_id
            Note: each spu_id + sku_id pair identifies one unique SKU product, which has exactly one product image
            => the code below uses the above to produce:
            (1) product_ID (spu_id + sku_id)
            (2) server_id (target static file server)
            (3) dl_link (image download link)
            (4) img_path (local image path)
            (5) is_dl (downloaded yet?) | choices: ('Y','N')
            and writes a csv file `tier_3.csv`,
            so that each product record in tier_2_v??.csv can be mapped
            through that csv file to its `local image path` & `online download URL`
        '''
df2 = pd.read_csv(tier_2_csv_path)
product_IDs = df2["product_ID"]
sales_category_IDs = df2["sales_categoryID"]
#print(sales_category_IDs[:1000])
# =============================================================================
# example: get `sales_categoryID` for given `product_ID`
# =============================================================================
#test_product_ID = "52552___03" # expect for "80"
#test_product_ID = "53005___01" # expect for "81"
#print(sales_category_IDs[list(product_IDs).index(test_product_ID)])
product_dirs = list()
#download_links = list()
df3_info = {"product_ID": list(),
"server_id": list(),
"dl_link": list(),
"sales_cat_id": list(),
"img_path": list()}
server_id = 0
SERVER_NUM = 4
for product_ID in set(product_IDs):
spu_id, sku_id = product_ID.split("___")
server_id += 1
if server_id > SERVER_NUM:
server_id = 1
dl_link = self.generate_download_link(server_id, spu_id, sku_id)
#download_links.append(dl_link)
sales_cat_id = sales_category_IDs[list(product_IDs).index(product_ID)]
uniform_category = sales_cat_table[sales_cat_id]["uniform_category"]
product_dir_path = f"{output_dir}"+\
f"{sales_cat_table[sales_cat_id]['genre']}"+\
f"/{uniform_category}/{spu_id}"
img_path = f"{product_dir_path}/{product_ID}.jpg"
'''
print(f"product_ID: {product_ID}")
print(f"server_id: s{server_id}")
print(f"dl_link: {dl_link}")
print(f"sales_cat_id: {sales_cat_id}")
print(f"img_path: {img_path}\n")
'''
df3_info["product_ID"].append(product_ID)
df3_info["server_id"].append(f"s{server_id}")
df3_info["dl_link"].append(dl_link)
df3_info["sales_cat_id"].append(sales_cat_id)
df3_info["img_path"].append(img_path)
product_dirs.append(product_dir_path)
df3 = pd.DataFrame(df3_info)
df3.to_csv(tier_3_csv_path,
index=False,
encoding="utf-8-sig")
'''
#print(len(list(set(df2["product_ID"]))))
#print(len(list(set(df2["product_SPU_ID"]))))
#print(len(list(set(df2["product_SKU_ID"]))))
#print(len(list(set(df2["product_link"]))))
#print(len(download_links))
unrepeated spu+sku spu sku prod_link dl
"tier_2_v2": 4267, 1560, 3296, 1560, 4267
"tier_2_v3": 3296, 1235, 20, 1339, 3296
'''
        ''' Create the bottom-level folders of the target path (one directory per product) '''
self.make_dirs(product_dirs)
#print(product_dirs)
def download_single_image(self, link, img_path, wait_time):
if not os.path.exists(img_path):
try:
print("[INFO] 正在下載圖片")
r = get_response(link)
with open(img_path, "wb") as fp:
fp.write(r.content)
print("[INFO] 圖片獲取成功!\n"+\
f"圖片路徑:\n{img_path}")
                # Wait a randomized amount of time (centered on the wait_time argument)
self.wait_some_seconds(wait_time + random.randint(-21,21)/10)
except:
print("[WARNING] 無法獲取圖片")
else:
#print(f"[INFO] 圖片已存在 (路徑: {img_path})")
print("[INFO] 圖片已存在")
def crawl_images(self, tier_1_csv_path, tier_2_csv_path, output_dir, tier_3_csv_path):
        ''' Pre-crawl preparation:
            1. Build the hierarchical image directory structure (environment)
            2. Record the image URLs, local paths, etc. into `tier_3_csv_path`
        '''
self.prepare_empty_dirs_and_record_crawling_info(tier_1_csv_path, tier_2_csv_path, output_dir, tier_3_csv_path)
        '''
        Crawl the images:
            1. Read the info recorded in the csv file by the pre-crawl step
            2. Fetch each image and save it
        '''
df3 = pd.read_csv(tier_3_csv_path)
dl_links = df3["dl_link"]
img_paths = df3["img_path"]
wait_time = 5
# =============================================================================
# example: download one image to `test_img_path` from `test_dl_link`
# =============================================================================
'''
test_dl_link = "https://www.apowersoft.tw/wp-content/uploads/2017/07/add-ass-subtitles-to-video-logo.jpg"
test_img_path = "D:/MyPrograms/Clothes2U/functions/台灣服飾商 ETL/Lativ_Crawler/res3/media_example/products/WOMEN/內衣類/46431___03/52202___01.jpg"
self.download_single_image(test_dl_link, test_img_path, wait_time)
'''
for dl_link, img_path in zip(dl_links, img_paths):
self.download_single_image(dl_link, img_path, wait_time)
#print(f"{dl_link}\n{img_path}\n")
class Content_Analyzer():
def deduplicate(self, input_csv_path, output_csv_path):
if not os.path.exists(output_csv_path):
''' 1. Get unrepeated data '''
df = pd.read_csv(input_csv_path)
#print(df.shape)
'''x = df[df.duplicated()]
print(x)
print(type(x))
print(len(x))'''
#spu_sku_list = list()
unique_prods = dict()
for index, row in list(df.iterrows()):
#print(row)
spu_id = str(row['productID'])[:5]
sku_id = str(row['product_SKU_ID'])[-3:-1]
uni_product_id = f"{spu_id}___{sku_id}"
if uni_product_id not in unique_prods:
# tier2_v2
'''
unique_prods.setdefault(uni_product_id,
{"product_ID": uni_product_id,
"product_SPU_ID": spu_id,
"product_SKU_ID": sku_id,
"product_name": row["product_name"],
"product_price": row["product_price"],
"product_link": row["product_link"],
"child_category": row["child_category"],
"sales_categoryID": row["sales_categoryID"]})
'''
# tier2_v3
unique_prods.setdefault(uni_product_id,
{"product_ID": uni_product_id,
"product_SPU_ID": spu_id,
"product_SKU_ID": sku_id,
"product_name": row["product_name"],
"product_price": row["product_price"],
"product_link": row["product_link"],
"child_category": row["child_category"],
"sales_categoryID": row["sales_categoryID"]})
else:
curr_child_category = row['child_category']
if not any([curr_child_category == existing_child_cat
for existing_child_cat
in unique_prods[uni_product_id]["child_category"].split("___")
if curr_child_category == existing_child_cat]):
unique_prods[uni_product_id]["child_category"] += f"___{curr_child_category}"
#spu_sku_list.append(f"{row['productID']}___{row['product_SKU_ID']}")
#print(f"{row['productID']}___{row['product_SKU_ID']}")
#print(len(unique_prods))
#print(unique_prods["52010011___52010021"])
#print(len(spu_sku_list))
#print(len(set(spu_sku_list)))
#spu_sku_list = list(set(spu_sku_list))
#print(len(spu_sku_list))
df = pd.DataFrame.from_dict(unique_prods,
orient='index',
columns=["product_ID","product_SPU_ID","product_SKU_ID",
"product_name","product_price","product_link",
"child_category","sales_categoryID"])
#print(df.iloc[0])
''' 2. Save unrepeated data to the new csv file '''
product_SPU_IDs, product_links = df["product_SPU_ID"], df["product_link"]
if len(product_SPU_IDs)==len(product_links):
df.to_csv(output_csv_path,
index=False,
encoding="utf-8-sig")
print(f"[INFO] Writing csv file: {output_csv_path}")
else:
print("[WARNING] The number of `product_SPU_ID` does not equal the number of `product_link`")
def modify_tier_1(self, tier_1_csv_path, output_tier_1_csv_path):
df = pd.read_csv(tier_1_csv_path)
categories = df["category"]
uniform_categories = [query_uniform_category(category) for category in categories]
df["uniform_category"] = | pd.Series(uniform_categories, index=df.index) | pandas.Series |
# coding: utf-8
"""Load SNP data, create SNP signatures
Author: <NAME> - Vector Engineering Team (<EMAIL>)
"""
import gzip
import io
import numpy as np
import pandas as pd
from pathlib import Path
def extract_ids(fasta_file):
"""Extract Accession IDs (entry names) from a fasta file
Parameters
----------
fasta_file: str
Returns
-------
out: list of tuples, (Accession ID, date)
"""
out = []
# Read sequences
cur_entry = ""
cur_seq = ""
# Get the date from the fasta file name, as a string
file_date = Path(fasta_file).name.replace(".fa.gz", "")
with gzip.open(fasta_file, "rt") as fp:
lines = fp.readlines()
for i, line in enumerate(lines):
# Strip whitespace
line = line.strip()
# Ignore empty lines that aren't the last line
if not line and i < (len(lines) - 1):
continue
# If not the name of an entry, add this line to the current sequence
# (some FASTA files will have multiple lines per sequence)
if line[0] != ">":
cur_seq = cur_seq + line
# Start of another entry = end of the previous entry
if line[0] == ">" or i == (len(lines) - 1):
# Avoid capturing the first one and pushing an empty sequence
if cur_entry:
out.append((cur_entry, file_date,))
# Clear the entry and sequence
cur_entry = line[1:]
# Ignore anything past the first whitespace
if cur_entry:
cur_entry = cur_entry.split()[0]
cur_seq = ""
# print("Read {} entries for file {}".format(len(out), fasta_file))
return out
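# Example of the records returned above (illustrative; the date comes from the
# file name, one tuple per FASTA entry):
# extract_ids("2020-06-01.fa.gz") -> [("AccessionID_1", "2020-06-01"), ...]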
def process_snps(
processed_fasta_files,
snp_files,
# SNPs must occur at least this many times to pass filters
count_threshold=3,
mode="dna", # dna, gene_aa, protein_aa
):
manifest = []
for fasta_file in sorted(Path(processed_fasta_files).glob("*.fa.gz")):
manifest.extend(extract_ids(fasta_file))
manifest = pd.DataFrame.from_records(manifest, columns=["Accession ID", "date"])
pruned_manifest = manifest.drop_duplicates(["Accession ID"], keep="last")
# Dump all SNP chunks into a text buffer
snp_df_io = io.StringIO()
for i, chunk in enumerate(snp_files):
file_date = Path(chunk).name.replace("_" + mode + "_snp.csv", "")
with open(chunk, "r") as fp_in:
# Write dates, so we can remove duplicate sequences
# and default to the SNVs of the latest sequence, by date
for j, line in enumerate(fp_in):
# Write the header of the first file
if i == 0 and j == 0:
snp_df_io.write(line.strip() + ",date\n")
# Or write any line that's not the header
# (to avoid writing the header more than once)
elif j > 0:
snp_df_io.write(line.strip() + "," + file_date + "\n")
# Read the buffer into a dataframe, then discard the buffer
snp_df_io.seek(0)
    snp_df = pd.read_csv(snp_df_io)
import streamlit as st
import pandas as pd
from pyvis.network import Network
import networkx as nx
import matplotlib.pyplot as plt
import bz2
import pickle
import _pickle as cPickle
import pydot
import math
import numpy as num
def decompress_pickle(file):
data = bz2.BZ2File(file, 'rb')
data = cPickle.load(data)
return data
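# Usage note: each uploaded .pbz2 file is assumed to be a bz2-compressed pickle
# produced by an upstream analysis step; decompress_pickle returns the original
# Python object (here, a dict of concept structures consumed in do_this_first below).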
uploaded_files = st.sidebar.file_uploader("Choose files", accept_multiple_files=True)
# sidebar for navigating pages
page_nav = st.sidebar.selectbox("Select view:",('Document overviews','Focus concepts','Path views','Active Study view','Study phenomena','Study sets'))
@st.cache
def do_this_first(uploaded_files):
#st.write(st.__version__)
# Load any compressed pickle file
# for uploaded_file in uploaded_files:
# concepts = decompress_pickle(uploaded_file)
# st.write("filename:", uploaded_file.name)
filenames = [file.name for file in uploaded_files] # return this
import pandas as pd
Agg_Conceptdata = pd.DataFrame()
All_Conceptdata = pd.DataFrame()
Agg_np_to_sent = dict()
Agg_sent_to_npflat = dict()
Agg_sent_to_phen = dict()
Agg_phen_to_sent = dict()
Agg_att_to_sent = dict()
Agg_sent_to_att = dict()
Agg_ins_to_sent = dict()
Agg_sent_to_ins = dict()
Agg_set_to_sent = dict()
Agg_sent_to_set = dict()
Agg_np_to_forms = dict()
doc_to_np = dict()
np_to_doc = dict()
    Agg_df = pd.DataFrame()
Agg_np_to_roles = dict()
Agg_sent_to_clt = dict()
Agg_sents = dict()
#Agg_sents_df = pd.DataFrame()
#Agg_docs_df = pd.DataFrame()
All_df = pd.DataFrame()
for uploaded_file in uploaded_files:
concepts = decompress_pickle(uploaded_file)
filename = uploaded_file.name
#st.write("filename:", uploaded_file.name)
Conceptdata = concepts['Conceptdata']
sent_to_npflat = concepts['sent_to_npflat']
np_to_sent = concepts['np_to_sent']
np_to_forms = concepts['np_to_forms']
sent_to_phen = concepts['sent_to_phen']
phen_to_sent = concepts['phen_to_sent']
sent_to_att = concepts['sent_to_att']
att_to_sent = concepts['att_to_sent']
        att_to_sent = concepts['att_to_sent']
sent_to_ins = concepts['sent_to_ins']
set_to_sent = concepts['set_to_sent']
sent_to_set = concepts['sent_to_set']
np_to_roles = concepts['np_to_roles']
sent_to_clt = concepts['sent_to_clt']
sents = concepts['sents']
df = concepts['df']
Conceptdata['docname'] = filename
Agg_Conceptdata = Agg_Conceptdata.append(Conceptdata,ignore_index=True)
Agg_sent_to_clt[filename.replace(".pbz2","")] = sent_to_clt
Agg_np_to_sent[filename.replace(".pbz2","")] = np_to_sent
Agg_sents[filename.replace(".pbz2","")] = sents
Agg_sent_to_npflat[filename.replace(".pbz2","")] = sent_to_npflat
Agg_sent_to_set[filename.replace(".pbz2","")] = sent_to_set
Agg_sent_to_att[filename.replace(".pbz2","")] = sent_to_att
Agg_sent_to_phen[filename.replace(".pbz2","")] = sent_to_phen
Agg_sent_to_ins[filename.replace(".pbz2","")] = sent_to_ins
Agg_df = Agg_df.append(df,ignore_index=True)
doc_to_np[filename] = list(np_to_sent.keys()) # return this
for np in np_to_sent:
# if np in Agg_np_to_sent:
# Agg_np_to_sent[np] = Agg_np_to_sent[np] + [(filename,s) for s in np_to_sent[np]]
# else:
# Agg_np_to_sent[np] = [(filename,s) for s in np_to_sent[np]]
if np in np_to_doc:
np_to_doc[np] = np_to_doc[np] + [filename]
else:
np_to_doc[np] = [filename]
for np in np_to_forms:
if np in Agg_np_to_forms:
Agg_np_to_forms[np] = Agg_np_to_forms[np] + np_to_forms[np]
else:
Agg_np_to_forms[np] = np_to_forms[np]
for np in np_to_roles:
if np in Agg_np_to_roles:
Agg_np_to_roles[np] = Agg_np_to_roles[np] + np_to_roles[np]
else:
Agg_np_to_roles[np] = np_to_roles[np]
for np in phen_to_sent:
if np in Agg_phen_to_sent:
Agg_phen_to_sent[np] = Agg_phen_to_sent[np] + [(filename,s) for s in phen_to_sent[np]]
else:
Agg_phen_to_sent[np] = [(filename,s) for s in phen_to_sent[np]]
for np in att_to_sent:
if np in Agg_att_to_sent:
Agg_att_to_sent[np] = Agg_att_to_sent[np] + [(filename,s) for s in att_to_sent[np]]
else:
Agg_att_to_sent[np] = [(filename,s) for s in att_to_sent[np]]
for np in set_to_sent:
if np in Agg_set_to_sent:
Agg_set_to_sent[np] = Agg_set_to_sent[np] + [(filename,s) for s in set_to_sent[np]]
else:
Agg_set_to_sent[np] = [(filename,s) for s in set_to_sent[np]]
for np in ins_to_sent:
if np in Agg_ins_to_sent:
Agg_ins_to_sent[np] = Agg_ins_to_sent[np] + [(filename,s) for s in ins_to_sent[np]]
else:
Agg_ins_to_sent[np] = [(filename,s) for s in ins_to_sent[np]]
#st.write(Agg_Conceptdata.columns)
All_Conceptdata = pd.DataFrame()
def most_common_form(np):
return pd.Series(Agg_np_to_forms[np]).value_counts().sort_values(ascending=False).index[0]
Agg_np_to_mcform = dict()
for np in Agg_np_to_forms:
Agg_np_to_mcform[np] = most_common_form(np)
All_Conceptdata = Agg_Conceptdata.groupby('Concept').agg(doc_Occurence = pd.NamedAgg('docname',lambda x: list(x)),
doc_Frequency = pd.NamedAgg('docname',lambda x: x.shape[0]),
Raw_Frequency = pd.NamedAgg('Frequency','sum'),
Mean = pd.NamedAgg('Mean','mean'),
Median = pd.NamedAgg('Median','mean'),
Sdev = pd.NamedAgg('Sdev','mean'),
Ext_IDF = pd.NamedAgg('IDF',num.nanmin))
All_Conceptdata['Mean_Frequency'] = All_Conceptdata['Raw_Frequency']/All_Conceptdata['doc_Frequency']
All_Conceptdata['normalized_RawFreq'] = All_Conceptdata['Raw_Frequency']/All_Conceptdata['Raw_Frequency'].max()
All_Conceptdata['normalized_MeanFreq'] = All_Conceptdata['Mean_Frequency']/All_Conceptdata['Mean_Frequency'].max()
All_Conceptdata['intIDF'] = All_Conceptdata['doc_Frequency'].apply(lambda x: math.log(len(filenames),2)-abs(math.log(1+x,2)))
All_Conceptdata['intmeanTFIDF'] = All_Conceptdata['normalized_MeanFreq']*All_Conceptdata['intIDF']
for filename in filenames:
colname = filename.replace(".pbz2","")
All_Conceptdata = pd.merge(left = All_Conceptdata,
right = Agg_Conceptdata.loc[Agg_Conceptdata['docname']==filename,['Concept','Frequency']],
how='left',
left_on = 'Concept',
right_on = 'Concept')
All_Conceptdata[colname+'_TF'] = All_Conceptdata['Frequency']
del All_Conceptdata['Frequency']
All_Conceptdata[colname+'_TF'].fillna(0,inplace=True)
All_Conceptdata[colname+'_IntTFIDF'] = All_Conceptdata[colname+'_TF']*All_Conceptdata['intIDF']
All_Conceptdata['MCForm'] = All_Conceptdata['Concept'].apply(lambda x: Agg_np_to_mcform[x])
All_Conceptdata['role_frac'] = All_Conceptdata['Concept'].apply(lambda x: dict(pd.Series(Agg_np_to_roles[x]).value_counts(normalize=True)))
All_Conceptdata['phen_frac'] = All_Conceptdata['role_frac'].apply(lambda x: x.get('phen',0))
All_Conceptdata['att_frac'] = All_Conceptdata['role_frac'].apply(lambda x: x.get('att',0))
All_Conceptdata['set_frac'] = All_Conceptdata['role_frac'].apply(lambda x: x.get('set',0))
All_Conceptdata['ins_frac'] = All_Conceptdata['role_frac'].apply(lambda x: x.get('ins',0))
del All_Conceptdata['role_frac']
All_df = pd.DataFrame()
Agg_df['tuple'] = Agg_df[['Concept1','Concept2']].apply(lambda x:tuple(x),axis=1)
All_df = Agg_df.groupby('tuple').agg(Concept1 = pd.NamedAgg('Concept1',lambda x: list(x)[0]),
Concept2 = pd.NamedAgg('Concept2',lambda x: list(x)[0]),
Bondstrength = pd.NamedAgg('Bondstrength','sum'),
mean_dAB = pd.NamedAgg('dAB',num.nanmean),
mean_dBA = pd.NamedAgg('dBA',num.nanmean),
ExtIDFA = pd.NamedAgg('IDFA',num.nanmean),
ExtIDFB = pd.NamedAgg('IDFB',num.nanmean),
SdevA = pd.NamedAgg('SdevA',num.nanmean),
                                        SdevB = pd.NamedAgg('SdevB',num.nanmean))