prompt (stringlengths 19 - 1.03M) | completion (stringlengths 4 - 2.12k) | api (stringlengths 8 - 90) |
---|---|---|
import pandas as pd
import numpy as np
class Utilities:
def one_hot_encoder(self, data, col, ax=1):
"""
This converts a single column of sequential target values into the
appropriate number of one-hot columns, each prefixed with the
column's original name.
For example, 'rank' becomes ['rank_1','rank_2',...,'rank_n'].
Input:
* data: labeled data (collection of features and targets)
* col: the string label of the column to modify
* ax: the axis of modification
"""
# Make dummy variables for rank
one_hot_data = pd.concat([data, | pd.get_dummies(data[col], prefix=col) | pandas.get_dummies |
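# Editor's sketch of the completed call above (toy frame assumed, not part of the dataset row):
# pd.get_dummies expands one categorical column into prefixed indicator columns,
# which pd.concat then attaches alongside the original data.
import pandas as pd
_toy = pd.DataFrame({'rank': [1, 2, 3, 1]})
_one_hot = pd.concat([_toy, pd.get_dummies(_toy['rank'], prefix='rank')], axis=1)
# _one_hot columns: ['rank', 'rank_1', 'rank_2', 'rank_3']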
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
import pandas as pd
import pandas_should # noqa
class TestEqualAccessorMixin(object):
def test_equal_true(self):
s1 = pd.Series([1, 2, 3])
s2 = pd.Series([1, 2, 3])
assert s1.should.equal(s2)
def test_equal_false(self):
s1 = pd.Series([1, 2, 3])
s2 = pd.Series([1, 2, 3, 4])
assert not s1.should.equal(s2)
@pytest.mark.parametrize('alias_name', [
'be_equal_to', 'be_equals_to', 'be_eq_to', 'eq',
])
def test_equal_aliases(self, alias_name):
s = pd.Series([1, 2, 3])
assert hasattr(s.should, alias_name)
def test_not_equal_true(self):
s1 = pd.Series([1, 2, 3])
s2 = pd.Series([1, 2, 3, 4])
assert s1.should.not_equal(s2)
def test_not_equal_false(self):
s1 = pd.Series([1, 2, 3])
s2 = pd.Series([1, 2, 3])
assert not s1.should.not_equal(s2)
@pytest.mark.parametrize('alias_name', [
'be_not_equal_to', 'be_not_equals_to', 'be_neq_to', 'neq',
])
def test_not_equal_aliases(self, alias_name):
s = pd.Series([1, 2, 3])
assert hasattr(s.should, alias_name)
def test_have_same_length_true(self):
s1 = pd.Series([1, 2, 3])
s2 = pd.Series([1, 2, 3])
assert s1.should.have_same_length(s2)
def test_have_same_length_false(self):
s1 = pd.Series([1, 2, 3])
s2 = pd.Series([1, 2, 3, 4])
assert not s1.should.have_same_length(s2)
def test_have_same_length_multiple(self):
s1 = pd.Series([1, 2, 3])
s2 = pd.Series([1, 2])
s3 = pd.Series([3])
assert s1.should.have_same_length(s2, s3)
class TestNullAccessorMixin(object):
def test_have_null_true(self):
s = pd.Series([1, None, 3])
assert s.should.have_null()
def test_have_null_false(self):
s = pd.Series([1, 2, 3])
assert not s.should.have_null()
def test_have_null_count(self):
s = pd.Series([1, None, 3])
assert s.should.have_null(count=True) == (True, 1)
def test_have_not_null_true(self):
s = pd.Series([1, 2, 3])
assert s.should.have_not_null()
def test_have_not_null_false(self):
s = pd.Series([1, None, 3])
assert not s.should.have_not_null()
@pytest.mark.parametrize('alias_name', ['havent_null'])
def test_have_not_null_aliases(self, alias_name):
s = pd.Series([1, 2, 3])
assert hasattr(s.should, alias_name)
class TestLengthAccessorMixin(object):
@pytest.mark.parametrize('s, length', [
(pd.Series([1, 2, 3]), 3),
(pd.Series([1, 2]), 2),
])
def test_have_length(self, s, length):
assert s.should.have_length(length)
@pytest.mark.parametrize('alias_name', ['length'])
def test_have_length_aliases(self, alias_name):
s = pd.Series([1, 2, 3])
assert hasattr(s.should, alias_name)
class TestValueRangeAccessorMixin(object):
@pytest.mark.parametrize('min_, max_, expect', [
(0, 5, True),
(1, 4, True),
(2, 4, False),
(1, 3, False),
])
def test_fall_within_the_range(self, min_, max_, expect):
data = [1, 2, 3, 4]
s = pd.Series(data)
assert s.should.fall_within_range(min_, max_) == expect
@pytest.mark.parametrize('alias_name', ['value_range'])
def test_fall_within_the_range_aliases(self, alias_name):
s = pd.Series([1, 2, 3])
assert hasattr(s.should, alias_name)
def test_greater_than(self):
s = pd.Series([1, 2, 3])
assert s.should.greater_than(0)
assert not s.should.greater_than(1)
assert not s.should.greater_than(2)
@pytest.mark.parametrize('alias_name', ['gt'])
def test_greater_than_aliases(self, alias_name):
s = pd.Series([1, 2, 3])
assert hasattr(s.should, alias_name)
def test_greater_than_or_equal(self):
s = pd.Series([1, 2, 3])
assert s.should.greater_than_or_equal(0)
assert s.should.greater_than_or_equal(1)
assert not s.should.greater_than_or_equal(2)
@pytest.mark.parametrize('alias_name', ['gte'])
def test_greater_than_or_equal_aliases(self, alias_name):
s = pd.Series([1, 2, 3])
assert hasattr(s.should, alias_name)
def test_less_than(self):
s = pd.Series([1, 2, 3])
assert s.should.less_than(4)
assert not s.should.less_than(3)
assert not s.should.less_than(2)
@pytest.mark.parametrize('alias_name', ['lt'])
def test_less_than_aliases(self, alias_name):
s = | pd.Series([1, 2, 3]) | pandas.Series |
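# Quick behavioural sketch of the ".should" accessor exercised above (toy values;
# assumes pandas_should has been imported so the accessor is registered):
# pd.Series([1, 2, 3]).should.equal(pd.Series([1, 2, 3]))    -> True
# pd.Series([1, 2, 3]).should.have_length(3)                 -> True
# pd.Series([1, 2, 3, 4]).should.fall_within_range(0, 5)     -> True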
import sys, os, shutil
import subprocess
import re
from collections import defaultdict
import pandas as pd
from copy import deepcopy
import numpy as np
pd.set_option('display.max_rows', None, 'display.max_columns', None)
def line_simplify(line): # e.g., change a line "int a; {a=1;}" to four lines "int a; \\ { \\ a=1; \\ }"
remaining_line = line.strip()
simplified_lines = []
match_result = re.search('[{};]', remaining_line)
while match_result is not None: # may find ";{}" in a raw string, currently does not consider this
pos, char = match_result.start(), match_result.group(0)
if char == ';':
simplified_lines.append(remaining_line[:pos + 1])
else:
assert char == '{' or char == '}'
simplified_lines += [remaining_line[:pos], char]
remaining_line = remaining_line[pos + 1:]
match_result = re.search('[{};]', remaining_line)
simplified_lines.append(remaining_line)
empty_removed = [line.strip() + '\n' for line in simplified_lines if not line == '']
return empty_removed
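# Editor's usage sketch for line_simplify (defined above); note that a segment
# consisting only of whitespace survives the emptiness check and comes back as a
# bare newline:
_example = line_simplify("int a; {a=1;}")
# _example == ['int a;\n', '\n', '{\n', 'a=1;\n', '}\n']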
def instrument(path, basename, max_iter=50, record_initial_state=False):
src = path + "/c/" + basename + ".c"
intermediate = path + "/instrumented/" + basename + ".c.tmp"
out = path + "/instrumented/" + basename + ".c"
header_file = path + "/csv/"+basename+"_header.csv"
while_index = 0
free_vars = []
sampled_vars = []
unreferenced_sampled_vars = []
unknown_count = 0
precondition = []
last_line_is_while = False
loop_reached = False
# this first pass will find the free variables, sampled variables and the preconditions
# most instrumenting is done in this pass, including iteration capping
with open (intermediate, "w") as tmpfile, open (src, "r") as f:
tmpfile.write("#include <stdlib.h>\n")
tmpfile.write("#include <stdio.h>\n")
tmpfile.write("#include <assert.h>\n")
tmpfile.write("#include <time.h>\n")
for full_line in f:
if full_line.strip().startswith('//'):
tmpfile.write(full_line.strip() + '\n')
continue
simplified_lines = line_simplify(full_line)
for line in simplified_lines:
if line.find('unknown()') >= 0:
unknown_count += 1
line = line.replace('unknown()', 'rand()%2 < unknown_' + str(unknown_count))
if line.startswith('while'):
while_index += 1
loop_reached = True
loop_condition = line[5:].strip()
tmpfile.write("int while_counter_" + str(while_index) + " = 0;\n")
tmpfile.write('while (while_counter_' + str(while_index) + ' < ' + str(max_iter) + ')\n')
tmpfile.write('{\n')
last_line_is_while = True
tmpfile.write('if (!' + loop_condition + ') break;\n')
continue
if last_line_is_while:
assert line == '{\n'
last_line_is_while = False
continue
if line.find('main') >= 0: # function declaration, hacking with the dataset
tmpfile.write('int main(int argc, char** argv)\n')
continue
if line.startswith('assume'):
line = line.replace('assume', 'assert')
tmpfile.write(line)
line = line.strip()
# find and remove unreferenced variables
unreferenced_sampled_vars_old = unreferenced_sampled_vars.copy()
for var in unreferenced_sampled_vars_old:
if re.search(r'(\W|^)' + var + r'\W', line) is not None:
unreferenced_sampled_vars.remove(var)
if not loop_reached:
if line.startswith('int'):
new_int_strs = line[4:-1].split(',')
for new_int_str in new_int_strs:
if new_int_str.find('=') >= 0: # initialized var
new_int = new_int_str.split('=')[0]
sampled_vars.append(new_int.strip())
unreferenced_sampled_vars.append(new_int.strip())
else:
free_vars.append(new_int_str.strip())
sampled_vars.append(new_int_str.strip())
unreferenced_sampled_vars.append(new_int_str.strip())
elif line.startswith('assert'):
print(line)
precondition.append(re.search(r'\(.*\);$', line).group(0)[1:-2].strip('()'))
elif re.search('[^<>]=', line) is not None: # single assignment initialization
var_name = line.strip('()').split('=')[0].strip()
assert var_name in free_vars
free_vars.remove(var_name)
for var in unreferenced_sampled_vars:
sampled_vars.remove(var)
if var in free_vars:
free_vars.remove(var)
assert while_index == 1 # no nested loop
# this second pass add the command line argument reading, and print statement
with open(intermediate, "r") as tmpfile, open(out, "w") as outfile, open(header_file, "w") as header_fd:
free_var_index = 1
loop_reached, last_line_is_while, last_line_is_main = False, False, False
for line in tmpfile:
# read initial values of free variables from command line
if not loop_reached and line.startswith('int') and line.find('main') < 0:
line = line.strip()
new_line_exprs = []
new_int_strs = line[4:-1].split(',')
for new_int_str in new_int_strs:
new_int_str = new_int_str.strip()
if new_int_str.find('=') < 0 and new_int_str in free_vars:
new_line_exprs.append(new_int_str + '=atoi(argv[' + str(free_var_index) + '])')
free_var_index += 1
else:
new_line_exprs.append(new_int_str)
outfile.write('int ' + ', '.join(new_line_exprs) + ';\n')
continue
elif line.startswith('while') and record_initial_state:
# record the initial values of all sampled variables
initial_vars = []
for var in sampled_vars:
outfile.write('int ' + var + '0 = ' + var + ';\n')
initial_vars.append(var + '0')
sampled_vars += initial_vars
outfile.write(line)
if line.startswith('while'):
loop_reached, last_line_is_while = True, True
elif last_line_is_while:
print_list = [str(while_index), "while_counter_" + str(while_index) + "++", "1"] + sampled_vars
format_str = ["%d ", "%d ", "%d "] + ["%d " for _ in range(len(sampled_vars))]
print_stmt = "printf(\"{} \\n\", {});\n".format(", ".join(format_str), ", ".join(print_list))
outfile.write(print_stmt)
# write the separate header file
header_fd.write("init,final,1," + ",".join(print_list[3:]) + "\n")
last_line_is_while = False
elif line.startswith('int main'):
last_line_is_main = True
elif last_line_is_main and unknown_count > 0:
last_line_is_main = False
outfile.write('srand(time(0));\n')
for i in range(1, unknown_count + 1):
outfile.write('int unknown_' + str(i) + ' = atoi(argv[' + str(len(free_vars) + i) + ']);\n')
for i in range(1, unknown_count + 1):
free_vars.append('unknown_' + str(i))
precondition += ['unknown_' + str(i) + ' >= 0', 'unknown_' + str(i) + ' <= 2']
os.remove(intermediate)
print('free vars:', free_vars)
print('precondition:', precondition)
return free_vars, precondition
def gen_initial_points(params, precondition, width, large_sample_num):
def str_to_numeral(str):
try:
return float(eval(str))
except:
return None
def shuffle_return(arr):
new_arr = deepcopy(arr)
np.random.shuffle(new_arr)
return new_arr
bounds = {param: {'upper': float('inf'), 'lower': float('-inf')} for param in params}
for equation in precondition:
match_result = re.match('^(.+)(==|>=|<=|>|<)(.+)$', equation)
first, op, second = match_result.group(1).strip(), match_result.group(2), match_result.group(3).strip()
if op == '>':
op = '>='
second = second + ' + 1'
elif op == '<':
op = '<='
second = second + ' - 1'
if first in params: # currently assumes that variable is on the left side
bound_num = str_to_numeral(second)
if bound_num is not None:
if op == '>=':
bounds[first]['lower'] = np.maximum(bounds[first]['lower'], bound_num)
elif op == '<=':
bounds[first]['upper'] = np.minimum(bounds[first]['upper'], bound_num)
# now we have the (optional) upper and lower bound for each variable
values_for_each_var = defaultdict(list)
def random_norepeat(low, high, num):
return np.random.choice(np.arange(low, high), np.minimum(num, int(high-low)), replace=False)
for param in params:
upper, lower = bounds[param]['upper'], bounds[param]['lower']
if lower != float('-inf') and upper != float('inf'):
if upper - lower < width:
values_for_each_var[param] = np.concatenate((np.array([lower, upper]), shuffle_return(np.arange(lower + 1, upper, 1))))
else:
values_for_each_var[param] = np.concatenate((np.array([lower, upper]), random_norepeat(lower+1, upper, 4 * width)))
elif lower != float('-inf') and upper == float('inf'):
values_for_each_var[param] = np.concatenate((np.arange(lower, lower + width, 1), random_norepeat(lower + width, lower + width * 3, width),
random_norepeat(lower + width * 3, lower + width * 10, 2 * width), random_norepeat(lower + width * 10, lower + width * 20, 2 * width)))
elif lower == float('-inf') and upper != float('inf'):
values_for_each_var[param] = np.concatenate((np.arange(upper - width + 1, upper + 1, 1), random_norepeat(upper - 3 * width + 1, upper - width + 1, width),
random_norepeat(upper - 10 * width + 1, upper - 3 * width + 1, 2 * width), random_norepeat(upper - 20 * width + 1, upper - 10 * width + 1, 2 * width)))
elif lower == float('-inf') and upper == float('inf'):
values_for_each_var[param] = np.concatenate((np.array([0, 1, -1]), shuffle_return(np.concatenate((np.arange(2, width), np.arange(-width, -1)))),
shuffle_return(np.concatenate((random_norepeat(width, 5*width, 2 * width), random_norepeat(-5*width, -width, 2 * width))))))
else:
assert False
return values_for_each_var
def sample_core(initial_points, uniq, start_run_id=0):
run_dir = 'runtemp'
if not os.path.exists(run_dir):
os.mkdir(run_dir)
run_id = start_run_id
for initial_point in initial_points:
value_list = ['{}'.format(value) for value in initial_point]
with open(run_dir + "/" + basename + str(run_id) + ".csv", "w") as outfile:
subprocess.run([path + "/bin/" + basename] + value_list, stdout=outfile, stderr=subprocess.PIPE)
run_id += 1
if len(initial_points) == 0: # no free var for this program
with open(run_dir + "/" + basename + '0' + ".csv", "w") as outfile:
subprocess.run([path + "/bin/" + basename], stdout=outfile, stderr=subprocess.PIPE)
run_id += 1
with open(path + "/csv/" + basename + "_header.csv", 'r') as header_file:
line = header_file.readline()
line_splited = line.strip().split(',')
dfhs = line_splited + ['run_id']
run_traces = []
for i in range(start_run_id, run_id):
logf = run_dir + "/" + basename + str(i) + ".csv"
with open(logf, 'r') as run_out_file:
lines = run_out_file.readlines()
for line in lines:
splited_line = line.strip().split(',')
line_list = [float(word.strip()) for word in splited_line] + [i]
run_traces.append(line_list)
dfs = | pd.DataFrame(run_traces, columns=dfhs) | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
from shutil import copyfile
import ast
dataset_path='../hotel_mask_dataset/'
if not os.path.exists(dataset_path):
os.mkdir(dataset_path)
processed_path=os.path.join(dataset_path,'processed')
if not os.path.exists(processed_path):
os.mkdir(processed_path)
test=pd.read_csv('./test.csv')
train= | pd.read_csv('./train.csv') | pandas.read_csv |
import argparse
import logging
import os
import json
import boto3
import subprocess
import sys
from urllib.parse import urlparse
#os.system('pip install autogluon')
from autogluon import TabularPrediction as task
import pandas as pd # this should come after the pip install.
logging.basicConfig(level=logging.DEBUG)
logging.info(subprocess.call('ls -lR /opt/ml/input'.split()))
# ------------------------------------------------------------ #
# Hosting methods #
# ------------------------------------------------------------ #
def model_fn(model_dir):
"""
Load the gluon model. Called once when hosting service starts.
:param: model_dir The directory where model files are stored.
:return: a model (in this case an AutoGluon network)
"""
net = task.load(model_dir)
return net
def transform_fn(net, data, input_content_type, output_content_type):
"""
Transform a request using the Gluon model. Called once per request.
:param net: The AutoGluon model.
:param data: The request payload.
:param input_content_type: The request content type.
:param output_content_type: The (desired) response content type.
:return: response payload and content type.
"""
# we can use content types to vary input/output handling, but
# here we just assume json for both
data = json.loads(data)
# the input request payload has to be deserialized twice since it has a discrete header
data = json.loads(data)
df_parsed = pd.DataFrame(data)
prediction = net.predict(df_parsed)
response_body = json.dumps(prediction.tolist())
return response_body, output_content_type
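# Editor's sketch of the double deserialisation above (assumed payload shape, not
# the service's actual wire format): the body arrives as a JSON-encoded string whose
# content is itself JSON, so json.loads runs twice before pandas sees the records.
import json
import pandas as pd
_payload = json.dumps(json.dumps({"feature_a": [1, 2], "feature_b": [3.0, 4.0]}))
_records = json.loads(json.loads(_payload))
_frame = pd.DataFrame(_records)   # 2 rows x 2 columns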
# ------------------------------------------------------------ #
# Training methods #
# ------------------------------------------------------------ #
def train(args):
# SageMaker passes num_cpus, num_gpus and other args we can use to tailor training to
# the current container environment, but here we just use simple cpu context.
model_dir = args.model_dir
target = args.label_column
train_file_path = get_file_path(args.train, args.train_filename)
train_data = task.Dataset(file_path= train_file_path )
subsample_size = int(args.train_rows) # subsample subset of data for faster demo, try setting this to much larger values
train_data = train_data.sample(n=subsample_size, random_state=0)
predictor = task.fit(train_data = train_data, label=target, output_directory=model_dir)
return predictor
# ------------------------------------------------------------ #
# Training execution #
# ------------------------------------------------------------ #
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--model_dir', type=str, default=os.environ['SM_MODEL_DIR'])
parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAIN'])
parser.add_argument('--test', type=str, default=os.environ['SM_CHANNEL_TEST'])
parser.add_argument('--train_filename', type=str, default='train.csv')
parser.add_argument('--test_filename', type=str, default='train.csv')
parser.add_argument('--s3_output', type=str, default=os.environ['SM_HP_S3_OUTPUT'])
parser.add_argument('--train_job_name', type=str, default='autogluon')
parser.add_argument('--label_column', type=str, default='target')
parser.add_argument('--train_rows', type=str, default=50)
parser.add_argument('--target', type=str, default='target')
return parser.parse_args()
# ------------------------------------------------------------ #
# Util Functions
# ------------------------------------------------------------ #
def get_file_path(folder, file_name):
file_path = folder + '/' + file_name
print("file_path: ", file_path)
print(subprocess.check_output(["ls", file_path]))
return file_path
def display_args(args):
'''
Display all of the parameters.
'''
for arg in vars(args):
print (f'{arg}: {getattr(args, arg)}')
def get_bucket_prefix(args):
'''
Get the bucket and prefix.
'''
u = urlparse(args.s3_output, allow_fragments=False)
bucket = u.netloc
print("bucket: ", bucket)
prefix = u.path.strip('/')
print("prefix: ", prefix)
return bucket, prefix
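# Quick sketch of the urlparse split above (hypothetical S3 URI for illustration):
from urllib.parse import urlparse
_u = urlparse('s3://my-bucket/some/prefix/output', allow_fragments=False)
# _u.netloc          -> 'my-bucket'
# _u.path.strip('/') -> 'some/prefix/output'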
def inference(test_data, predictor):
s3 = boto3.client('s3')
try:
y_test = test_data[args.label_column] # values to predict
test_data_nolab = test_data.drop(labels=[args.label_column], axis=1) # delete label column to prove we're not cheating
y_pred = predictor.predict(test_data_nolab)
y_pred_df = | pd.DataFrame.from_dict({'True': y_test, 'Predicted': y_pred}) | pandas.DataFrame.from_dict |
import requests
import json
import datetime as dt
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from fbprophet import Prophet
from .slack_api import SlackApi
class ForecastPrice(object):
"""
A class that forecasts prices for the specified exchange using time-series analysis.
"""
def __init__(self, data_url):
"""
:param str data_url: URL from which to fetch historical data
"""
self.__data_url = data_url
self.__get_data_count = 60 * 3
self.__periods = 10
def forecast(self):
"""
Run time-series forecasting on each of the historical open, high, low and close series.
:rtype: object
:return: time-series forecast results
"""
h_base_date_list, h_date_list, h_price_list = self.__get_price_data(
type="high")
l_base_date_list, l_date_list, l_price_list = self.__get_price_data(
type="low")
high_forecast = self.__forecast(
type="high",
base_date_list=h_base_date_list,
date_list=h_date_list,
price_list=h_price_list)
low_forecast = self.__forecast(
type="low",
base_date_list=l_base_date_list,
date_list=l_date_list,
price_list=l_price_list)
del h_base_date_list
del h_date_list
del h_price_list
del l_base_date_list
del l_date_list
del l_price_list
return {
"high": high_forecast,
"low": low_forecast,
}
def __get_price_data(self, type="close"):
"""
Fetch historical price data.
:param str type: price type (open, high, low, close)
:rtype: object
:return: timestamp, date and price lists
"""
if type == "open":
type_index = 1
if type == "high":
type_index = 2
if type == "low":
type_index = 3
if type == "close":
type_index = 4
now_date = dt.datetime.now()
startDate = now_date - dt.timedelta(hours=self.__get_data_count / 60)
endDate = now_date
startTimestamp = startDate.timestamp()
endTimestamp = endDate.timestamp()
after = str(int(startTimestamp))
before = str(int(endTimestamp))
query = {"periods": "60", "after": after, "before": before}
url = self.__data_url
res = json.loads(requests.get(url, params=query).text)["result"]["60"]
res = np.array(res)
# Build the datetime list
time_stamp = res[:, 0].reshape(len(res), 1)
time_stamp = [dt.datetime.fromtimestamp(time_stamp[i]) for i in range(
len(time_stamp))]
date_list = []
base_date = dt.datetime.now() - dt.timedelta(hours=len(time_stamp))
tmp_date = base_date
for item in time_stamp:
tmp_date += dt.timedelta(hours=1)
tmp_datetime = tmp_date
date_list.append(tmp_datetime)
# Build the price list
price_list = res[:, type_index].reshape(len(res), 1)
price_list = [float(price_list[i]) for i in range(len(price_list))]
# Clean up the lists (drop entries with non-positive prices)
index = 0
remove_indexs = []
for item in price_list:
if item <= 0:
remove_indexs.insert(0, index)
index += 1
for remove_index in remove_indexs:
del time_stamp[remove_index]
del price_list[remove_index]
del date_list[remove_index]
return time_stamp, date_list, price_list
def __forecast(self, type, base_date_list, date_list, price_list):
"""
Run time-series forecasting on the date and price lists.
Sends the resulting chart to the specified Slack channel.
:param list base_date_list: list of dates
:param list date_list: date list modified for the analysis (minutes -> days)
:param list price_list: list of prices
:rtype: object
:return: forecast results
"""
# The most recent data is set aside separately so it can be used for validation
periods = int(self.__periods)
# Build the forecasting model
fit_data = pd.DataFrame([date_list, price_list]).T
fit_data.columns = ["ds", "y"]
model = Prophet()
model.fit(fit_data)
# Run the forecast
future_data = model.make_future_dataframe(periods=periods, freq="H")
forecast_data = model.predict(future_data)
# Extract the dates and values from the forecast data
forecast_dss = forecast_data.ds.values
forecast_dss = [pd.to_datetime(forecast_dss[i]) for i in range(
len(forecast_dss))]
forecast_yhats = forecast_data.yhat.values
forecast_yhats = [round(float(forecast_yhats[i])) for i in range(
len(forecast_yhats))]
# Restore the timestamps that were modified for the time-series analysis
index = 0
last_timestamp = base_date_list[-1]
future_values = []
for item in forecast_dss:
if len(base_date_list) <= index:
last_timestamp += dt.timedelta(minutes=1)
tmp_date = last_timestamp
base_date_list.append(tmp_date)
price_list.append(price_list[-1])
future_values.append(forecast_yhats[index])
index += 1
# Compute the moving average
ma25 = | pd.Series(price_list) | pandas.Series |
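# Editor's sketch (assumption: 'ma25' above denotes a 25-period moving average built
# from the truncated pd.Series call; toy prices and a shorter window used here):
import pandas as pd
_prices = pd.Series([100.0, 101.5, 99.8, 102.2, 103.0])
_ma = _prices.rolling(window=3, min_periods=1).mean()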
# -*- coding: utf-8 -*-
"""
.. codeauthor:: <NAME> <<EMAIL>>
.. affiliation::
Laboratory of Protein Design and Immunoengineering <lpdi.epfl.ch>
<NAME> <<EMAIL>>
.. func:: open_rosetta_file
.. func:: parse_rosetta_file
.. func:: parse_rosetta_json
.. func:: parse_rosetta_contacts
.. func:: parse_rosetta_fragments
.. func:: write_rosetta_fragments
.. func:: write_fragment_sequence_profiles
.. func:: get_sequence_and_structure
.. func:: make_structures
"""
# Standard Libraries
import os
import sys
import re
import glob
import gzip
import json
import string
import shutil
from collections import OrderedDict
# External Libraries
import six
import pandas as pd
import numpy as np
import yaml
# This Library
import rstoolbox.core as core
import rstoolbox.components as rc
from rstoolbox.utils import baseline, make_rosetta_app_path, execute_process
__all__ = ['open_rosetta_file', 'parse_rosetta_file', 'parse_rosetta_contacts',
'parse_rosetta_fragments', 'write_rosetta_fragments',
'write_fragment_sequence_profiles', 'get_sequence_and_structure',
'make_structures', 'parse_rosetta_json', 'parse_rosetta_pdb']
_headers = ["SCORE", "REMARK", "RES_NUM", "FOLD_TREE", "RT",
"ANNOTATED_SEQUENCE", "NONCANONICAL_CONNECTION",
"SYMMETRY_INFO", "CHAIN_ENDINGS"]
def _file_vs_json( data ):
"""
Transform file into json if needed.
"""
if data is None:
return {}
if isinstance( data, str ):
if not os.path.isfile( data ):
raise IOError("{0}: file not found.".format(data))
try:
fd = gzip.open( data ) if data.endswith(".gz") else open( data )
text = "".join([x.strip() for x in fd])
data = json.loads(text)
except ValueError:
fd = gzip.open( data ) if data.endswith(".gz") else open( data )
data = yaml.safe_load(fd.read())
fd.close()
return data
def _check_type( value ):
"""
Makes sure that, upon reading a value, it gets assigned
the correct type.
"""
try:
int(value)
except ValueError:
try:
float(value)
except ValueError:
return value
else:
return float(value)
else:
return int(value)
def _gather_file_list( filename, multi=False ):
"""
Provided a file name or pattern, generates a list
with all the files that are expected to be read.
:param str filename: file name or pattern (without "*"). If filename
ends with ``$`` it is assumed that that is the end of the name.
:param bool multi: Tell if a file name or pattern is provided.
Default is 'False' (single file name)
:return: list of str names of files
"""
files = []
if isinstance(filename, list):
multi = True
if not multi:
if not os.path.isfile( filename ):
raise IOError("{0}: file not found.".format(filename))
files.append( filename )
else:
if isinstance(filename, six.string_types):
if filename.endswith('$'):
filename = filename.rstrip('$')
elif not filename.endswith("*"):
filename = filename + "*"
files = glob.glob( filename )
else:
files = filename
if len(files) == 0:
raise IOError("Input pattern did not find any file.")
for _ in files:
if not os.path.isfile( _ ):
raise IOError("{0}: file not found.".format(_))
return files
def _add_sequences( manager, data, chains ):
"""
Fix and load requested sequence/structure into the data.
Also takes labels into account.
:return: data
"""
# Correct by non polymer residues
nonPoly = [x for x, v in enumerate(chains["seq"]) if v == 'Z']
ochaini = chains["id"]
if len(nonPoly) != len(chains["seq"]):
for index in sorted(nonPoly, reverse=True):
try:
del chains["id"][index]
del chains["seq"][index]
if len(chains["dssp"]) > 0:
del chains["dssp"][index]
except IndexError:
pass
for seqname, seq in manager.get_expected_sequences( chains ):
data.setdefault( seqname, [] ).append( seq )
if len(chains["dssp"]) > 0:
for ssename, str3d in manager.get_expected_structures( chains ):
data.setdefault( ssename, [] ).append( str3d )
if len(chains["psipred"]) > 0:
for ssename, str3d in manager.get_expected_psipred( chains ):
data.setdefault( ssename, [] ).append( str3d )
if len(chains["phi"]) > 0:
for ssename, str3d in manager.get_expected_dihedrals( chains, "phi" ):
data.setdefault( ssename, [] ).append( str3d )
if len(chains["psi"]) > 0:
for ssename, str3d in manager.get_expected_dihedrals( chains, "psi" ):
data.setdefault( ssename, [] ).append( str3d )
for x in [i for i in data if i.startswith("lbl_")]:
data[x][-1] = rc.Selection(data[x][-1]).map_to_sequences(ochaini)
return data
def _fix_unloaded( data ):
"""Check errors in which data has not been loaded properly and
add :data:`~numpy.nan` to those.
Assumes the maximum amount of columns is that of the first content
read.
:return: data
"""
if (len(data)) > 0:
datalens = [len(data[k]) for k in data]
if len(set(datalens)) == 1:
return data
datamax = max(datalens)
for k in data:
if len(data[k]) < datamax:
data[k].append(np.nan)
return data
def open_rosetta_file( filename, multi=False, check_symmetry=True ):
"""
*Internal function*; reads through a Rosetta silent file and yields only
the lines that the library knows how to parse.
For each *"parsable"* line, it yields 4 values:
===== ============= ====================================
order type content
===== ============= ====================================
1 :class:`str` data of the line
2 :class:`bool` is line header?
3 :class:`int` name of the readed file
4 :class:`bool` does the file contain symmetry info?
===== ============= ====================================
:param filename: file name, file pattern to search or list of files.
:type filename: Union[:class:`str`, :func:`list`]
:param multi: Tell if a file name (single file) or pattern (multifile) is provided.
:type multi: :class:`bool`
:param check_symmetry: Check if the silent file contains symmetry info.
:type check_symmetry: :class:`bool`
:yields: Union[:class:`str`, :class:`bool`, :class:`int`, :class:`bool`]
:raises:
:IOError: if ``filename`` cannot be found.
:IOError: if ``filename`` pattern (``multi=True``) generates no files.
.. seealso:
:func:`parse_rosetta_file`
"""
symm = False
files = _gather_file_list( filename, multi )
for file_count, f in enumerate( files ):
if check_symmetry:
counter = 0
fd = gzip.open( f ) if f.endswith(".gz") else open( f )
for line in fd:
line = line.decode('utf8') if f.endswith(".gz") else line
if line.startswith('SYMMETRY_INFO'):
symm = True
if line.startswith('SCORE') and not line.strip().split()[-1] == "description":
counter += 1
if counter == 2:
break
fd.close()
fd = gzip.open( f ) if f.endswith(".gz") else open( f )
for line in fd:
line = line.decode('utf8') if f.endswith(".gz") else line
if line.strip().split()[0].strip(":") in _headers:
yield line, line.strip().split()[-1] == "description", file_count, symm
fd.close()
def parse_rosetta_file( filename, description=None, multi=False ):
"""Read a Rosetta score or silent file and returns the design population
in a :class:`.DesignFrame`.
By default, it will pick the data contained in **all the score columns**
with the exception of positional scores (such as *per-residue ddg*). The
user can specify scores to be ignored.
When working with *silent files*, extra information can be picked, such as
*sequence* and *secondary structure* data, *residue labels* or positional
scores. The fine control of these options is explained in detail in
:ref:`tutorial: reading Rosetta <readrosetta>`.
Some basic usage cases::
# (1) The default scenario, just read scores from a single file.
df = rstoolbox.io.parse_rosetta_file("silentfile")
# (2) Reading from multiple files. Assumes all files start with
# the particular prefix.
df = rstoolbox.io.parse_rosetta_file("silentfile", multi=True)
# (3) Getting all scores and the sequence of each design.
description = {'sequence': 'A'}
df = rstoolbox.io.parse_rosetta_file("silentfile", description)
# (4) Get only total_score and RMSD, and rename total_score to score.
description = {'scores': ['RMSD'], 'scores_rename': {'total_score': 'score'}}
df = rstoolbox.io.parse_rosetta_file("silentfile", description)
:param filename: file name, file pattern to search or list of files.
:type filename: Union[:class:`str`, :func:`list`]
:param description: Parsing rules. It can be a dictionary describing
the rules or the name of a file containing such dictionary. The
dictionary definition is explained in :ref:`tutorial: reading Rosetta <readrosetta>`.
:type description: Union[:class:`str`, :class:`dict`]
:param bool multi: When :data:`True`, indicates that data is readed from multiple files.
:return: :class:`.DesignFrame`.
:raises:
:IOError: if ``filename`` cannot be found.
:IOError: if ``filename`` pattern (``multi=True``) generates no files.
.. rubric:: Example
.. ipython::
In [1]: from rstoolbox.io import parse_rosetta_file
...: import pandas as pd
...: pd.set_option('display.width', 1000)
...: pd.set_option('display.max_columns', 500)
...: df = parse_rosetta_file("../rstoolbox/tests/data/input_2seq.minisilent.gz")
...: df.head(2)
"""
manager = rc.Description( **_file_vs_json( description ) )
header = []
data = OrderedDict()
for line, is_header, _, symm in open_rosetta_file( filename, multi ):
if is_header:
header = manager.check_graft_columns(line.strip().split()[1:])
continue
if line.startswith("SCORE"):
per_res = {}
chains = {"id": [], "seq": "", "dssp": "", "psipred": "", "phi": [], "psi": []}
_fix_unloaded( data )
# General scores
for cv, value in enumerate( manager.manage_missing(header[:-1], line.strip().split()[1:-1])):
hcv = header[cv]
if manager.wanted_per_residue_score( hcv ):
hcvn = re.sub(r'\d+$', "", hcv)
per_res.setdefault( hcvn, {} )
per_res[hcvn][int(re.findall(r'\d+$', hcv)[0])] = _check_type( value )
continue
if manager.wanted_score( hcv ):
data.setdefault( manager.score_name(hcv), []).append( _check_type( value ) )
# Namings from the description
# Also, description is added separately from the rest... in case there are weird
# changes in the number of score terms without the previously expected header line.
dscptn = line.strip().split()[-1]
if manager.wanted_score( 'description' ):
data.setdefault( manager.score_name('description'), []).append(_check_type(dscptn))
manager.check_naming( header )
for namingID, namingVL in manager.get_naming_pairs(dscptn):
data.setdefault( namingID, [] ).append( _check_type( namingVL ) )
# Fix per-residue
for k in per_res:
data.setdefault( k, [] ).append( OrderedDict(sorted(per_res[k].items())).values() )
# Setup labels
data = manager.setup_labels( data )
continue
if line.startswith("RES_NUM"): # In multichains and not starting in A1.
for x in line.split()[1:-1]:
chain, numbers = x.split(":")
nums = numbers.split("-")
if len(nums) == 1 or nums[0] == "":
nums = 1
else:
nums = (int(nums[1]) - int(nums[0])) + 1
chains["id"].extend([chain, ] * nums)
continue
if line.startswith("SYMMETRY_INFO"): # When working with symmetry, RES_NUM is not there...
chain = "".join(string.ascii_uppercase[:int(line.split()[2])])
for c in chain:
chains["id"].extend([c, ] * int(line.split()[4]))
data = _add_sequences( manager, data, chains )
continue
if line.startswith("ANNOTATED_SEQUENCE"):
chains["seq"] = list(re.sub( r'\[[^]]*\]', '', line.strip().split()[1] ))
if not symm:
# When info is chain A starting in 1, it is not printed in the silent file
if len(chains["id"]) == 0:
chains["id"].extend(["A", ] * len(chains["seq"]))
data = _add_sequences( manager, data, chains )
else:
chains["seq"] = list("".join(chains["seq"]).rstrip("X"))
continue
if line.startswith("REMARK DSSP"):
chains["dssp"] = list(line.split()[2].strip())
continue
if line.startswith("REMARK PSIPRED"):
chains["psipred"] = list(line.split()[2].strip())
continue
if line.startswith("REMARK LABELS"):
for label in line.split()[2].split(";"):
labinfo = label.split(":")
if "lbl_" + labinfo[0].upper() in data:
data["lbl_" + labinfo[0].upper()][-1] = labinfo[1]
continue
if line.startswith("REMARK PHI"):
try:
chains["phi"] = [float(x) for x in line.split()[2].strip().split(",")]
except IndexError:
chains["phi"] = []
continue
if line.startswith("REMARK PSI"):
try:
chains["psi"] = [float(x) for x in line.split()[2].strip().split(",")]
except IndexError:
chains["psi"] = []
continue
df = rc.DesignFrame( _fix_unloaded( data ) )
df.add_source_files( _gather_file_list( filename, multi ) )
return df
def parse_rosetta_json( filename ):
"""Read a json formated rosetta score file.
Only reads back scores, as those are the only content present in a ``JSON`` file.
:param str filename: File containing the Rosetta score file.
:return: :class:`.DesignFrame`.
.. note::
To be coherent with the silent files, the decoy id column name ``decoy`` is
changed to ``description``.
:raises:
:IOError: if ``filename`` cannot be found.
.. rubric:: Example
.. ipython::
In [1]: from rstoolbox.io import parse_rosetta_json
...: import pandas as pd
...: pd.set_option('display.width', 1000)
...: pd.set_option('display.max_columns', 500)
...: df = parse_rosetta_json("../rstoolbox/tests/data/score.json.gz")
...: df.head(2)
"""
is_gz = filename.endswith(".gz")
fd = gzip.open( filename ) if is_gz else open( filename )
data = {}
for line in fd:
if is_gz:
dt = json.loads(line.decode('utf8').strip())
else:
dt = json.loads(line.strip())
for k in dt:
data.setdefault(k, []).append(dt[k])
df = rc.DesignFrame( data )
df = df.rename(columns={'decoy': 'description'})
df.add_source_file( filename )
return df
def parse_rosetta_pdb( filename, keep_weights=False, per_residue=False, dropna=True ):
"""Read the ``POSE_ENERGIES_TABLE`` from a Rosetta output PDB file.
The ``POSE_ENERGIES_TABLE`` only contain the score terms contained inside
the executed score function. It will not add other score terms added through
filters.
:param str filename: Name of the PDB file.
:param bool keep_weights: If :data:`True`, keep the weights row.
:param bool per_residue: If :data:`True`, keep a row of data for each residue.
Otherwise, compress the sequence into ``sequence_{}`` columns.
:param bool dropna: If :data:`True`, non-standard residues are dropped when making
the sequence. Otherwise, it appears as ``X``. Consider that modifications of
residues that are known by Rosetta such as ``LYS:CtermProteinFull`` or ``HIS_D``
are considered standard in this context.
:return: :class:`.DesignFrame`
"""
def chain_ids(infile):
with open(infile) as fp:
for result in re.findall(r'ATOM.{17}(\w)', fp.read(), re.S):
yield result
def data_between(infile):
with open(infile) as fp:
for result in re.findall(r'(#BEGIN_POSE_ENERGIES_TABLE.*?#END_POSE_ENERGIES_TABLE)',
fp.read(), re.S):
return result
d = {'CYS': 'C', 'ASP': 'D', 'SER': 'S', 'GLN': 'Q', 'LYS': 'K',
'ILE': 'I', 'PRO': 'P', 'THR': 'T', 'PHE': 'F', 'ASN': 'N',
'GLY': 'G', 'HIS': 'H', 'LEU': 'L', 'ARG': 'R', 'TRP': 'W',
'ALA': 'A', 'VAL': 'V', 'GLU': 'E', 'TYR': 'Y', 'MET': 'M'}
chains = list(pd.Series(chain_ids(filename)).unique())
idata = data_between(filename)
name = idata.split('\n')[0].strip().split()[-1].replace('.pdb', '')
df = pd.read_csv(six.StringIO(idata), comment='#', sep=r'\s+')
df = df.assign(description=[name, ] * df.shape[0])[~df['label'].str.startswith('VRT_')]
chcol = ['', '']
pick = ['pose', ]
if not keep_weights:
df = df[df['label'] != 'weights']
else:
pick.append('weights')
if len(chains) == 1:
chcol.extend([chains[0], ] * (df.shape[0] - len(chcol) + 1))
else:
chain_chng = list(df[df['label'].str.contains('NtermProteinFull')].index)
chain_chng.append(int(df.iloc[-1].name) + 1)
for i in range(0, len(chain_chng) - 1):
chcol.extend([chains[i], ] * (int(chain_chng[i + 1]) - int(chain_chng[i])))
df = df.assign(chain=pd.Series(chcol), )
if not per_residue:
sdata = {'description': [name, ]}
for g, gdf in df[df['chain'] != ''].groupby('chain'):
sdata.setdefault('sequence_{}'.format(g),
[''.join(gdf['label'].str.split('[:_]').str[0].map(d).fillna('X'))])
if dropna:
sdata['sequence_{}'.format(g)][-1] = sdata['sequence_{}'.format(g)][-1].replace('X', '')
df = df[df['label'].isin(pick)].merge(pd.DataFrame(sdata), on='description')
df = df.drop(columns=['chain'])
if not keep_weights:
df = df.drop(columns=['label'])
return rc.DesignFrame( df )
def parse_rosetta_contacts( filename ):
"""Read a residue contact file as generated by **ContactMapMover**.
Returns three objects:
===== ============================ =================================================
order type content
===== ============================ =================================================
1 :class:`~pandas.DataFrame` matrix with boolean :data:`True` in
contacts; **Rosetta numbering** (no ``seqID``)
2 :func:`list` of :class:`str` list of 3-letter code amino acids for row axis
3 :func:`list` of :class:`str` list of 3-letter code amino acids for column axis
===== ============================ =================================================
In a regular run for the **ContactMapMover** without selectors,
list 2 and 3 will be identical.
:param str filename: File containing the Rosetta fragments.
:return: Union[:class:`~pandas.DataFrame`,
:func:`list` of :class:`str`,
:func:`list` of :class:`str`]
:raises:
:IOError: if ``filename`` cannot be found.
"""
if not os.path.isfile(filename):
raise IOError("{} not found!".format(filename))
df = | pd.read_csv(filename, comment="#", delim_whitespace=True) | pandas.read_csv |
from collections import defaultdict
from functools import lru_cache
from itertools import product, chain
import pandas as pd
import requests
@lru_cache
def perform_query(query):
"""Performs a SPARQL query to the wikidata endpoint
Args:
query: A string containing a functional sparql query
Returns:
A json with the response content.
"""
endpoint_url = "https://query.wikidata.org/sparql"
try:
response = requests.get(
endpoint_url,
params={"query": query},
headers={"Accept": "application/sparql-results+json"},
)
response.raise_for_status()
except requests.exceptions.HTTPError as err:
raise requests.exceptions.HTTPError(err)
else:
raw_results = response.json()
return raw_results
def parse_query_results(query_result):
"""Parse wikidata query results into a nice dataframe
Args:
query_result: A json dict with the results from the query
Returns:
A pandas dataframe with a column for each component from field_list.
"""
parsed_results = defaultdict(list)
data = query_result["results"]["bindings"]
keys = frozenset(chain.from_iterable(data))
for json_key, item in product(data, keys):
try:
parsed_results[item].append(json_key[item]["value"])
except:
# If there is no data for a key, append a null string
parsed_results[item].append("")
results_df = | pd.DataFrame.from_dict(parsed_results) | pandas.DataFrame.from_dict |
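# Usage sketch for the helpers above (hypothetical SPARQL query; performs a live
# request against the Wikidata endpoint, so shown commented out):
# _query = "SELECT ?item WHERE { ?item wdt:P31 wd:Q146. } LIMIT 3"
# _raw = perform_query(_query)
# _df = parse_query_results(_raw)   # one DataFrame column per bound variable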
import unittest
import import_ipynb
import pandas as pd
import pandas.testing as pd_testing
from sklearn.cluster import KMeans
class Test(unittest.TestCase):
def setUp(self):
import Exercise12_01
self.exercises = Exercise12_01
self.file_url = 'https://raw.githubusercontent.com/PacktWorkshops/The-Data-Science-Workshop/master/Chapter12/Dataset/taxstats2015.csv'
self.df = pd.read_csv(self.file_url)
self.postcode_url = 'https://github.com/PacktWorkshops/The-Data-Science-Workshop/blob/master/Chapter12/Dataset/taxstats2016individual06taxablestatusstateterritorypostcodetaxableincome%20(2).xlsx?raw=true'
self.postcode_df = pd.read_excel(self.postcode_url, sheet_name='Individuals Table 6B', header=2)
self.merged_df = | pd.merge(self.df, self.postcode_df, how='left', on='Postcode') | pandas.merge |
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 18 15:04:50 2018
@authors: a.pakbin, <NAME>
"""
import numpy as np
from copy import copy
import pandas as pd
pd.set_option('mode.chained_assignment', None)
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score
import random as rnd
from xgboost.sklearn import XGBClassifier
import sys
import os
import matplotlib.pyplot as plt
import re
def data_reader(data_address, file_name, non_attribute_column_names=None,label_column_name=None):
data=pd.read_csv(data_address+'/'+file_name)
if non_attribute_column_names:
columns_to_drop=list(set(non_attribute_column_names)-set([label_column_name]))
data=data.drop(columns_to_drop, axis=1)
return data
def matrix_partitioner(df, proportion, label=None):
number_of_ones=int(round(proportion*len(df)))
ones=np.ones(number_of_ones)
zeros=np.zeros(len(df)-number_of_ones)
ones_and_zeros=np.append(ones,zeros)
permuted=np.random.permutation(ones_and_zeros)
boolean_permuted=permuted>0
if label:
return [df[boolean_permuted].reset_index(),df[~boolean_permuted].reset_index(),label[boolean_permuted],label[~boolean_permuted]]
else:
return [df[boolean_permuted].reset_index(),df[~boolean_permuted].reset_index()]
def dataframe_partitioner(df, output_label, proportion):
y=df[output_label].values
X=df.drop([output_label], axis=1)
return matrix_partitioner(X,label=y,proportion=proportion)
def one_hot_detacher(X, categorical_column_names):
one_hot_column_names=list()
for categorical_column in categorical_column_names:
for column_name in X.columns:
if column_name.startswith(categorical_column):
one_hot_column_names.append(column_name)
one_hot=X[one_hot_column_names]
X.drop(one_hot_column_names, axis=1, inplace=True)
return [X, one_hot]
def one_hot_attacher(X, one_hot):
return X.join(one_hot)
def normalize(X, data_type, categorical_column_names, training_mean=None, training_std=None):
[X, one_hot]=one_hot_detacher(X, categorical_column_names)
if data_type=='train_set':
mean=np.mean(X,axis=0)
std=np.var(X, axis=0)
elif data_type=='test_set':
mean=training_mean
std=training_std
aux_std=copy(std)
aux_std[aux_std==0]=1
normalized=(X-mean)/aux_std
complete_normalized=one_hot_attacher(normalized, one_hot)
if data_type=='train_set':
return [complete_normalized, mean, std]
elif data_type=='test_set':
return complete_normalized
def train_test_normalizer(X_train, X_test, categorical_column_names):
[X_TRAIN_NORMALIZED, X_TRAIN_MEAN, X_TRAIN_STD]=normalize(X=X_train, data_type='train_set', categorical_column_names=categorical_column_names)
X_TEST_NORMALIZED=normalize(X=X_test, data_type='test_set', categorical_column_names=categorical_column_names, training_mean=X_TRAIN_MEAN, training_std=X_TRAIN_STD)
return [X_TRAIN_NORMALIZED, X_TEST_NORMALIZED]
def possible_values_finder(data, categorical_column_names):
column_dict = dict()
for categorical_column_name in categorical_column_names:
unique_vals = list(set([str(x) for x in data[categorical_column_name].unique()])-set(['nan','NaN','NAN','null']))
column_dict[categorical_column_name]=unique_vals
return column_dict
def one_hot_encoder(X, categorical_column_names, possible_values):
for categorical_column_name in categorical_column_names:
possible_values_ = possible_values[categorical_column_name]
new_vals = [categorical_column_name + '_' + str(s) for s in possible_values_]
dummies = pd.get_dummies(X[categorical_column_name], prefix=categorical_column_name)
dummies = dummies.T.reindex(new_vals).T.fillna(0)
X = X.drop([categorical_column_name], axis=1)
X = X.join(dummies)
return X
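# Editor's sketch of the reindex/fillna alignment above (toy values assumed):
# transposing the dummies and reindexing against the full category list guarantees a
# column for every possible value, even when this particular split lacks some of them.
import pandas as pd
_col = pd.Series(['a', 'c'])
_dummies = pd.get_dummies(_col, prefix='cat')
_dummies = _dummies.T.reindex(['cat_a', 'cat_b', 'cat_c']).T.fillna(0)
# _dummies columns: cat_a, cat_b (all zeros), cat_c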
def train_test_one_hot_encoder(X_train, X_test, categorical_column_names, possible_values):
X_TRAIN=one_hot_encoder(X_train, categorical_column_names, possible_values)
X_TEST=one_hot_encoder(X_test, categorical_column_names, possible_values)
return [X_TRAIN, X_TEST]
def categorical_distribution_finder(X, categorical_column_names):
NAMES=list()
DISTS=list()
for categorical_column_name in categorical_column_names:
names=list()
nom_of_all=0
quantity=list()
grouped= X.groupby([categorical_column_name])
for category, group in grouped:
names.append(category)
quantity.append(len(group))
nom_of_all=nom_of_all+len(group)
distribution = [float(x) / nom_of_all for x in quantity]
NAMES.append(names)
DISTS.append(distribution)
return(NAMES, DISTS)
def categorical_imputer(X, categorical_column_names, data_type='train', names=None, distributions=None):
if data_type=='train':
[names, distributions]=categorical_distribution_finder(X, categorical_column_names)
for idx, categorical_column_name in enumerate(categorical_column_names):
for i in range(0, len(X)):
if pd.isnull(X[categorical_column_name].iloc[i]):
X[categorical_column_name].iloc[i]=np.random.choice(names[idx], p=distributions[idx])
if data_type=='train':
return [X, names, distributions]
elif data_type=='test':
return X
def numerical_imputer(X, training_mean=None):
if training_mean is None:
training_mean=X.mean()
imputed=X.fillna(training_mean)
return [imputed, training_mean]
else:
imputed=X.fillna(training_mean)
return imputed
#
# X_train and X_test are data-frames of MIMIC3 data with certain columns dropped
# - the numerical imputation is straightforward: any missing values are replaced
# with the mean value for that column
#
def train_test_imputer(X_train, X_test, categorical_column_names):
[X_TRAIN_CAT_IMPUTED, NAMES, DISTS]=categorical_imputer(X_train, categorical_column_names)
X_TEST_CAT_IMPUTED=categorical_imputer(X_test, categorical_column_names, 'test', NAMES, DISTS)
[X_TRAIN_IMPUTED, X_TRAIN_MEAN]=numerical_imputer(X_TRAIN_CAT_IMPUTED)
X_TEST_IMPUTED=numerical_imputer(X_TEST_CAT_IMPUTED, X_TRAIN_MEAN)
return [X_TRAIN_IMPUTED, X_TEST_IMPUTED]
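# Minimal sketch of the mean-imputation path described in the comment above
# train_test_imputer (toy frame assumed): the training mean is computed once and
# reused to fill the test set.
import numpy as np
import pandas as pd
_train = pd.DataFrame({'age': [20.0, np.nan, 40.0]})
_test = pd.DataFrame({'age': [np.nan]})
_train_imputed, _train_mean = numerical_imputer(_train)   # NaN -> 30.0 (train mean)
_test_imputed = numerical_imputer(_test, _train_mean)     # same mean reused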
def auc_calculator(model, X, y, num_of_folds):
auc_list=list()
skf=StratifiedKFold(n_splits=num_of_folds, shuffle=True, random_state=rnd.randint(1,1e6))
for train_index, test_index in skf.split(X,y):
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
y_train, y_test = y[train_index], y[test_index]
model.fit(X_train, y_train)
predictions=model.predict_proba(X_test)[:,1]
try:
auc=roc_auc_score(y_true=y_test, y_score=predictions)
except ValueError:
print("Exception in roc_auc_score(): trying to ignore")
auc = 0
auc_list.append(auc)
return sum(auc_list)/len(auc_list)
def grid_search(X, y, num_of_folds, verbose, first_dim, second_dim=None, third_dim=None, return_auc_values=False):
best_auc=0
best_auc_setting=None
auc_matrix=np.zeros((len(first_dim),len(second_dim),len(third_dim)))
for max_depth_index, max_depth in enumerate(first_dim):
for n_estimator_index, n_estimator in enumerate(second_dim):
for learning_rate_index, learning_rate in enumerate(third_dim):
model=XGBClassifier(max_depth=int(max_depth), n_estimators=int(n_estimator), learning_rate=learning_rate)
auc=auc_calculator(model, X, y, num_of_folds)
auc_matrix[max_depth_index, n_estimator_index, learning_rate_index]=auc
if auc>best_auc:
best_auc=auc
best_auc_setting=[max_depth,n_estimator,learning_rate]
if verbose==True:
sys.stdout.write('\r GRID SEARCHING XGB: progress: {0:.3f} % ...'.format(
(max_depth_index*(len(second_dim)*len(third_dim))+
n_estimator_index*(len(third_dim))+
learning_rate_index
+1)/(len(first_dim)*len(second_dim)*len(third_dim))*100))
print ('\n')
if return_auc_values:
return [best_auc_setting,auc_matrix]
else:
return best_auc_setting
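# Usage sketch for grid_search above (editor's assumption of typical XGBoost ranges;
# X, y would be the imputed/normalised training frame and its labels):
# best_setting = grid_search(X, y, num_of_folds=5, verbose=True,
#                            first_dim=[3, 5, 7],        # max_depth
#                            second_dim=[100, 200],      # n_estimators
#                            third_dim=[0.05, 0.1])      # learning_rate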
def vectors_to_csv(address, file_name, vector_one, label_one, vector_two=None, label_two=None,vector_three=None, label_three=None):
if vector_two is None:
df=pd.DataFrame(data={label_one:vector_one})
elif vector_three is None:
df=pd.DataFrame(data={label_one:vector_one, label_two:vector_two})
else:
df=pd.DataFrame(data={label_one:vector_one, label_two:vector_two, label_three:vector_three})
df.to_csv(address+'/'+file_name+'.csv')
def create_subfolder_if_not_existing(dir):
if not os.path.exists(dir):
os.makedirs(dir)
def save_roc_curve(data_address, TPR, FPR, auc):
plt.figure()
plt.title('Receiver Operating Characteristic')
plt.plot(FPR, TPR, 'b', label = 'AUC = %0.2f' % auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
# plt.show()
plt.savefig(data_address)
plt.close()
def feature_importance_updator(accumulative_feature_importance, new_importance):
if accumulative_feature_importance is None:
return new_importance
else:
return accumulative_feature_importance+new_importance
def feature_importance_saver(address, col_names, accumulative_feature_importance, num_of_folds):
mean_feature_importances=accumulative_feature_importance/num_of_folds
DF= | pd.DataFrame(data={'FEATURE': col_names, 'IMPORTANCE': mean_feature_importances}) | pandas.DataFrame |
import pandas as pd
import numpy as np
EP_COLUMNS_NAME = ['chrom-Enh','chromStart','chromEnd','Gene ID','chrom-Gene','TSS','Transcript',
'signalValue','EP Score']
EP_COLUMNS_NAME_NEW = ['chrom-Enh','chromStart','chromEnd','Gene ID','chrom-Gene','TSS','Transcript',
'signalValue','EP Score','label']
TAD_COLUMNS_NAME = ['chrom','tad_start','tad_end']
def load_ep(ep_fpath, seps):
ep_df = | pd.read_csv(ep_fpath, sep=seps, names=EP_COLUMNS_NAME) | pandas.read_csv |
"""
Provide a generic structure to support window functions,
similar to how we have a Groupby object.
"""
from collections import defaultdict
from datetime import timedelta
from textwrap import dedent
from typing import List, Optional, Set
import warnings
import numpy as np
import pandas._libs.window as libwindow
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, cache_readonly
from pandas.core.dtypes.common import (
ensure_float64,
is_bool,
is_float_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_scalar,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDateOffset,
ABCDatetimeIndex,
ABCPeriodIndex,
ABCSeries,
ABCTimedeltaIndex,
)
from pandas._typing import Axis, FrameOrSeries
from pandas.core.base import DataError, PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.generic import _shared_docs
from pandas.core.groupby.base import GroupByMixin
_shared_docs = dict(**_shared_docs)
_doc_template = """
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.%(name)s : Series %(name)s.
DataFrame.%(name)s : DataFrame %(name)s.
"""
class _Window(PandasObject, SelectionMixin):
_attributes = [
"window",
"min_periods",
"center",
"win_type",
"axis",
"on",
"closed",
] # type: List[str]
exclusions = set() # type: Set[str]
def __init__(
self,
obj,
window=None,
min_periods: Optional[int] = None,
center: Optional[bool] = False,
win_type: Optional[str] = None,
axis: Axis = 0,
on: Optional[str] = None,
closed: Optional[str] = None,
**kwargs
):
self.__dict__.update(kwargs)
self.obj = obj
self.on = on
self.closed = closed
self.window = window
self.min_periods = min_periods
self.center = center
self.win_type = win_type
self.win_freq = None
self.axis = obj._get_axis_number(axis) if axis is not None else None
self.validate()
@property
def _constructor(self):
return Window
@property
def is_datetimelike(self) -> Optional[bool]:
return None
@property
def _on(self):
return None
@property
def is_freq_type(self) -> bool:
return self.win_type == "freq"
def validate(self):
if self.center is not None and not is_bool(self.center):
raise ValueError("center must be a boolean")
if self.min_periods is not None and not is_integer(self.min_periods):
raise ValueError("min_periods must be an integer")
if self.closed is not None and self.closed not in [
"right",
"both",
"left",
"neither",
]:
raise ValueError("closed must be 'right', 'left', 'both' or " "'neither'")
def _create_blocks(self):
"""
Split data into blocks & return conformed data.
"""
obj = self._selected_obj
# filter out the on from the object
if self.on is not None:
if obj.ndim == 2:
obj = obj.reindex(columns=obj.columns.difference([self.on]), copy=False)
blocks = obj._to_dict_of_blocks(copy=False).values()
return blocks, obj
def _gotitem(self, key, ndim, subset=None):
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : str / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
self = self._shallow_copy(subset)
self._reset_cache()
if subset.ndim == 2:
if is_scalar(key) and key in subset or is_list_like(key):
self._selection = key
return self
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError(
"%r object has no attribute %r" % (type(self).__name__, attr)
)
def _dir_additions(self):
return self.obj._dir_additions()
def _get_window(self, other=None):
return self.window
@property
def _window_type(self) -> str:
return self.__class__.__name__
def __repr__(self) -> str:
"""
Provide a nice str repr of our rolling object.
"""
attrs = (
"{k}={v}".format(k=k, v=getattr(self, k))
for k in self._attributes
if getattr(self, k, None) is not None
)
return "{klass} [{attrs}]".format(
klass=self._window_type, attrs=",".join(attrs)
)
def __iter__(self):
url = "https://github.com/pandas-dev/pandas/issues/11704"
raise NotImplementedError("See issue #11704 {url}".format(url=url))
def _get_index(self) -> Optional[np.ndarray]:
"""
Return index as an ndarray.
Returns
-------
None or ndarray
"""
if self.is_freq_type:
return self._on.asi8
return None
def _prep_values(self, values: Optional[np.ndarray] = None) -> np.ndarray:
"""Convert input to numpy arrays for Cython routines"""
if values is None:
values = getattr(self._selected_obj, "values", self._selected_obj)
# GH #12373 : rolling functions error on float32 data
# make sure the data is coerced to float64
if is_float_dtype(values.dtype):
values = ensure_float64(values)
elif is_integer_dtype(values.dtype):
values = ensure_float64(values)
elif needs_i8_conversion(values.dtype):
raise NotImplementedError(
"ops for {action} for this "
"dtype {dtype} are not "
"implemented".format(action=self._window_type, dtype=values.dtype)
)
else:
try:
values = ensure_float64(values)
except (ValueError, TypeError):
raise TypeError(
"cannot handle this type -> {0}" "".format(values.dtype)
)
# Always convert inf to nan
values[np.isinf(values)] = np.NaN
return values
def _wrap_result(self, result, block=None, obj=None) -> FrameOrSeries:
"""
Wrap a single result.
"""
if obj is None:
obj = self._selected_obj
index = obj.index
if isinstance(result, np.ndarray):
# coerce if necessary
if block is not None:
if is_timedelta64_dtype(block.values.dtype):
from pandas import to_timedelta
result = to_timedelta(result.ravel(), unit="ns").values.reshape(
result.shape
)
if result.ndim == 1:
from pandas import Series
return Series(result, index, name=obj.name)
return type(obj)(result, index=index, columns=block.columns)
return result
def _wrap_results(self, results, blocks, obj, exclude=None) -> FrameOrSeries:
"""
Wrap the results.
Parameters
----------
results : list of ndarrays
blocks : list of blocks
obj : conformed data (may be resampled)
exclude: list of columns to exclude, default to None
"""
from pandas import Series, concat
from pandas.core.index import ensure_index
final = []
for result, block in zip(results, blocks):
result = self._wrap_result(result, block=block, obj=obj)
if result.ndim == 1:
return result
final.append(result)
# if we have an 'on' column
# we want to put it back into the results
# in the same location
columns = self._selected_obj.columns
if self.on is not None and not self._on.equals(obj.index):
name = self._on.name
final.append(Series(self._on, index=obj.index, name=name))
if self._selection is not None:
selection = ensure_index(self._selection)
# need to reorder to include original location of
# the on column (if its not already there)
if name not in selection:
columns = self.obj.columns
indexer = columns.get_indexer(selection.tolist() + [name])
columns = columns.take(sorted(indexer))
# exclude nuisance columns so that they are not reindexed
if exclude is not None and exclude:
columns = [c for c in columns if c not in exclude]
if not columns:
raise DataError("No numeric types to aggregate")
if not len(final):
return obj.astype("float64")
return concat(final, axis=1).reindex(columns=columns, copy=False)
def _center_window(self, result, window) -> np.ndarray:
"""
Center the result in the window.
"""
if self.axis > result.ndim - 1:
raise ValueError(
"Requested axis is larger then no. of argument " "dimensions"
)
offset = _offset(window, True)
if offset > 0:
if isinstance(result, (ABCSeries, ABCDataFrame)):
result = result.slice_shift(-offset, axis=self.axis)
else:
lead_indexer = [slice(None)] * result.ndim
lead_indexer[self.axis] = slice(offset, None)
result = np.copy(result[tuple(lead_indexer)])
return result
def aggregate(self, func, *args, **kwargs):
result, how = self._aggregate(func, *args, **kwargs)
if result is None:
return self.apply(func, raw=False, args=args, kwargs=kwargs)
return result
agg = aggregate
_shared_docs["sum"] = dedent(
"""
Calculate %(name)s sum of given DataFrame or Series.
Parameters
----------
*args, **kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed value.
Returns
-------
Series or DataFrame
Same type as the input, with the same index, containing the
%(name)s sum.
See Also
--------
Series.sum : Reducing sum for Series.
DataFrame.sum : Reducing sum for DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4, 5])
>>> s
0 1
1 2
2 3
3 4
4 5
dtype: int64
>>> s.rolling(3).sum()
0 NaN
1 NaN
2 6.0
3 9.0
4 12.0
dtype: float64
>>> s.expanding(3).sum()
0 NaN
1 NaN
2 6.0
3 10.0
4 15.0
dtype: float64
>>> s.rolling(3, center=True).sum()
0 NaN
1 6.0
2 9.0
3 12.0
4 NaN
dtype: float64
For DataFrame, each %(name)s sum is computed column-wise.
>>> df = pd.DataFrame({"A": s, "B": s ** 2})
>>> df
A B
0 1 1
1 2 4
2 3 9
3 4 16
4 5 25
>>> df.rolling(3).sum()
A B
0 NaN NaN
1 NaN NaN
2 6.0 14.0
3 9.0 29.0
4 12.0 50.0
"""
)
_shared_docs["mean"] = dedent(
"""
Calculate the %(name)s mean of the values.
Parameters
----------
*args
Under Review.
**kwargs
Under Review.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.mean : Equivalent method for Series.
DataFrame.mean : Equivalent method for DataFrame.
Examples
--------
The below examples will show rolling mean calculations with window sizes of
two and three, respectively.
>>> s = pd.Series([1, 2, 3, 4])
>>> s.rolling(2).mean()
0 NaN
1 1.5
2 2.5
3 3.5
dtype: float64
>>> s.rolling(3).mean()
0 NaN
1 NaN
2 2.0
3 3.0
dtype: float64
"""
)
class Window(_Window):
"""
Provide rolling window calculations.
.. versionadded:: 0.18.0
Parameters
----------
window : int, or offset
Size of the moving window. This is the number of observations used for
calculating the statistic. Each window will be a fixed size.
        If it is an offset, then this will be the time period of each window.
        Each window will be variably sized based on the observations included
        in the time-period. This is only valid for datetimelike indexes. This
        is new in 0.19.0.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). For a window that is specified by an offset,
`min_periods` will default to 1. Otherwise, `min_periods` will default
to the size of the window.
center : bool, default False
Set the labels at the center of the window.
win_type : str, default None
Provide a window type. If ``None``, all points are evenly weighted.
See the notes below for further information.
on : str, optional
For a DataFrame, a datetime-like column on which to calculate the rolling
window, rather than the DataFrame's index. Provided integer column is
ignored and excluded from result since an integer index is not used to
calculate the rolling window.
axis : int or str, default 0
closed : str, default None
Make the interval closed on the 'right', 'left', 'both' or
'neither' endpoints.
For offset-based windows, it defaults to 'right'.
For fixed windows, defaults to 'both'. Remaining cases not implemented
for fixed windows.
.. versionadded:: 0.20.0
Returns
-------
a Window or Rolling sub-classed for the particular operation
See Also
--------
expanding : Provides expanding transformations.
ewm : Provides exponential weighted functions.
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
To learn more about the offsets & frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
The recognized win_types are:
* ``boxcar``
* ``triang``
* ``blackman``
* ``hamming``
* ``bartlett``
* ``parzen``
* ``bohman``
* ``blackmanharris``
* ``nuttall``
* ``barthann``
* ``kaiser`` (needs beta)
* ``gaussian`` (needs std)
* ``general_gaussian`` (needs power, width)
* ``slepian`` (needs width)
* ``exponential`` (needs tau), center is set to None.
If ``win_type=None`` all points are evenly weighted. To learn more about
different window types see `scipy.signal window functions
<https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__.
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
Rolling sum with a window length of 2, using the 'triang'
window type.
>>> df.rolling(2, win_type='triang').sum()
B
0 NaN
1 0.5
2 1.5
3 NaN
4 NaN
Rolling sum with a window length of 2, min_periods defaults
to the window length.
>>> df.rolling(2).sum()
B
0 NaN
1 1.0
2 3.0
3 NaN
4 NaN
Same as above, but explicitly set the min_periods
>>> df.rolling(2, min_periods=1).sum()
B
0 0.0
1 1.0
2 3.0
3 2.0
4 4.0
A ragged (meaning not-a-regular frequency), time-indexed DataFrame
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
... index = [pd.Timestamp('20130101 09:00:00'),
... pd.Timestamp('20130101 09:00:02'),
... pd.Timestamp('20130101 09:00:03'),
... pd.Timestamp('20130101 09:00:05'),
... pd.Timestamp('20130101 09:00:06')])
>>> df
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 2.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
Contrasting to an integer rolling window, this will roll a variable
length window corresponding to the time period.
The default for min_periods is 1.
>>> df.rolling('2s').sum()
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 3.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
"""
def validate(self):
super().validate()
window = self.window
if isinstance(window, (list, tuple, np.ndarray)):
pass
elif is_integer(window):
if window <= 0:
raise ValueError("window must be > 0 ")
import_optional_dependency(
"scipy", extra="Scipy is required to generate window weight."
)
import scipy.signal as sig
if not isinstance(self.win_type, str):
raise ValueError("Invalid win_type {0}".format(self.win_type))
if getattr(sig, self.win_type, None) is None:
raise ValueError("Invalid win_type {0}".format(self.win_type))
else:
raise ValueError("Invalid window {0}".format(window))
def _prep_window(self, **kwargs):
"""
        Provide validation for our window type and return the window
        that has already been validated.
"""
window = self._get_window()
if isinstance(window, (list, tuple, np.ndarray)):
return com.asarray_tuplesafe(window).astype(float)
elif is_integer(window):
import scipy.signal as sig
# the below may pop from kwargs
def _validate_win_type(win_type, kwargs):
arg_map = {
"kaiser": ["beta"],
"gaussian": ["std"],
"general_gaussian": ["power", "width"],
"slepian": ["width"],
"exponential": ["tau"],
}
if win_type in arg_map:
win_args = _pop_args(win_type, arg_map[win_type], kwargs)
if win_type == "exponential":
# exponential window requires the first arg (center)
# to be set to None (necessary for symmetric window)
win_args.insert(0, None)
return tuple([win_type] + win_args)
return win_type
def _pop_args(win_type, arg_names, kwargs):
msg = "%s window requires %%s" % win_type
all_args = []
for n in arg_names:
if n not in kwargs:
raise ValueError(msg % n)
all_args.append(kwargs.pop(n))
return all_args
win_type = _validate_win_type(self.win_type, kwargs)
# GH #15662. `False` makes symmetric window, rather than periodic.
return sig.get_window(win_type, window, False).astype(float)
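    # Illustrative note (not from the original pandas source): a parameterised
    # win_type is handed to scipy as a (name, args...) tuple, e.g.
    #   >>> import scipy.signal as sig
    #   >>> sig.get_window(("gaussian", 2.0), 5, False)   # symmetric window, std=2.0
    #   array([0.60653066, 0.8824969 , 1.        , 0.8824969 , 0.60653066])
    # while a plain string such as "triang" needs no extra keyword arguments.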
def _apply_window(self, mean=True, **kwargs):
"""
Applies a moving window of type ``window_type`` on the data.
Parameters
----------
mean : bool, default True
If True computes weighted mean, else weighted sum
Returns
-------
y : same type as input argument
"""
window = self._prep_window(**kwargs)
center = self.center
blocks, obj = self._create_blocks()
block_list = list(blocks)
results = []
exclude = []
for i, b in enumerate(blocks):
try:
values = self._prep_values(b.values)
except (TypeError, NotImplementedError):
if isinstance(obj, ABCDataFrame):
exclude.extend(b.columns)
del block_list[i]
continue
else:
raise DataError("No numeric types to aggregate")
if values.size == 0:
results.append(values.copy())
continue
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, len(window))
return libwindow.roll_window(
np.concatenate((arg, additional_nans)) if center else arg,
window,
minp,
avg=mean,
)
result = np.apply_along_axis(f, self.axis, values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, block_list, obj, exclude)
_agg_see_also_doc = dedent(
"""
See Also
--------
pandas.DataFrame.rolling.aggregate
pandas.DataFrame.aggregate
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3, win_type='boxcar').agg('mean')
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -0.885035 0.212600 -0.711689
3 -0.323928 -0.200122 -1.093408
4 -0.071445 -0.431533 -1.075833
5 0.504739 0.676083 -0.996353
6 0.358206 1.903256 -0.774200
7 0.906020 1.283573 0.085482
8 -0.096361 0.818139 0.472290
9 0.070889 0.134399 -0.031308
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/DataFrame",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
# these must apply directly
result = arg(self)
return result
agg = aggregate
    @Substitution(name="window")
"""Generate HVTN505 dataset for Michael on statsrv"""
import pandas as pd
import numpy as np
import re
import itertools
__all__ = ['parseProcessed',
'parseRaw',
'unstackIR',
'compressSubsets',
'subset2vec',
'vec2subset',
'itersubsets',
'subset2label',
'subsetdf',
'applyResponseCriteria',
'computeMarginals',
'generateGzAPerfExceptions']
def unstackIR(df, uVars):
"""Return a response and magnitude df with one row per ptid
and columns for each combination of uVars"""
varFunc = lambda r: ' '.join(r[uVars])
tmpDf = df.copy()
tmpDf['var'] = tmpDf.apply(varFunc, axis=1)
responseDf = tmpDf.pivot(index='ptid', columns='var', values='response')
magDf = tmpDf.pivot(index='ptid', columns='var', values='mag')
return responseDf, magDf
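# Minimal usage sketch (the uVars column names below are hypothetical, not from
# the original dataset): starting from a long-format df with one row per
# (ptid, visit, antigen) measurement,
#   responseDf, magDf = unstackIR(df, uVars=['visit', 'antigen'])
# gives one row per ptid and one column per "visit antigen" combination, with
# responseDf holding the response calls and magDf the magnitudes.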
def _parsePTID(v):
"""Returns a string version of a PTID"""
    if pd.isnull(v):
import pytest
import os
from mapping import util
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
@pytest.fixture
def price_files():
cdir = os.path.dirname(__file__)
path = os.path.join(cdir, 'data/')
files = ["CME-FVU2014.csv", "CME-FVZ2014.csv"]
return [os.path.join(path, f) for f in files]
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key])
def test_read_price_data(price_files):
# using default name_func in read_price_data()
df = util.read_price_data(price_files)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "CME-FVU2014"),
(dt1, "CME-FVZ2014"),
(dt2, "CME-FVZ2014")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def name_func(fstr):
file_name = os.path.split(fstr)[-1]
name = file_name.split('-')[1].split('.')[0]
return name[-4:] + name[:3]
df = util.read_price_data(price_files, name_func)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "2014FVU"), (dt1, "2014FVZ"),
(dt2, "2014FVZ")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def test_calc_rets_one_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([0.1, 0.05, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([0.1, 0.075, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15], [0.075, 0.45], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_nans_in_second_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, np.NaN, 0.05, 0.1, np.NaN, -0.5, 0.2],
index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, np.NaN], [0.075, np.NaN], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_non_unique_columns():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL1'])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
def test_calc_rets_two_generics_two_asts():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets1 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')])
rets2 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.4], index=idx)
rets = {"CL": rets1, "CO": rets2}
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights1 = pd.DataFrame(vals, index=widx, columns=["CL0", "CL1"])
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
                                      (TS('2015-01-04'), 'COH5')])
#!/usr/bin/env python
# coding: utf-8
# # Diff DFs
# Compute various diffs between two Pandas DataFrames
#
# See [examples](#examples) / [usage](#diff) below.
# In[1]:
from IPython.display import HTML
from numpy import nan
from pandas import concat, Index, IndexSlice as idx, isna, MultiIndex
from re import sub
def neq(l, r): return l!=r and not (isna(l) and isna(r))
import time
import pandas as pd
import numpy as np
# import seaborn as sns
from . import connect_path as cc
from .build_connectivity import *
from igraph import *
import scipy as sp
from scipy.stats import chi2_contingency
from functools import reduce
class GlomKcConnectivityMatrix(ConnectivityMatrix):
"""docstring for ."""
def __init__(self, name, conn, col_ids=[], row_ids=[]):
ConnectivityMatrix.__init__(self, name, conn, col_ids, row_ids)
self.fill_diagonal()
def fill_diagonal(self, syn='1s'):
conn = self.conn[syn]
idx = np.where(conn >= 2)
for i in range(idx[0].shape[0]):
row = idx[0][i]
col = int(idx[1][i])
n = conn[row, col]
self.co_matrix[syn][col, col] += n * (n - 1) / 2
def get_co_matrix(conn):
"""
A much simpler implementation of class GlomKcConnectivityMatrix(ConnectivityMatrix).
Given conn, produce co_matrix with simple functions
"""
co_matrix = get_ci_matrix(conn.transpose())
idx = np.where(conn >= 2)
for i in range(idx[0].shape[0]):
row = idx[0][i]
col = int(idx[1][i])
n = conn[row, col]
co_matrix[col, col] += n * (n - 1) / 2
return co_matrix
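# Worked example of the diagonal fill above (assuming, as in
# shuffle_glom_kc_w_prob below, that conn rows index KCs and columns index
# glomeruli): a cell value of n = 3 means one KC receives 3 claws from the same
# glomerulus, which adds n * (n - 1) / 2 = 3 identical-input claw pairs to that
# glomerulus' diagonal entry of the co-occurrence matrix.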
def freq_identical_pairs(co_m):
# frequency distribution in the diagonal of the glom-glom matrix (Fig 4b)
diagonal = np.diagonal(co_m)
return pd.Series(diagonal[diagonal != 0]).value_counts(sort=False)
def freq_non_identical_pairs(co_m):
t2 = co_m[np.triu_indices(co_m.shape[0], k=1)]
t2 = t2[t2 > 1]
return pd.Series(t2).value_counts(sort=False)
def to_df(ser, col_i): return pd.DataFrame(ser, columns=[col_i])
def join_freq_table(tbl):
# join a list of frequency tables to a single DataFrame
# and calculate mean and std
r = to_df(tbl[0], 0).join([to_df(e, i)
for i, e in enumerate(tbl) if i > 0], how='outer')
r[pd.isnull(r)] = 0
ci_lower = []
ci_upper = []
for i, row in r.iterrows():
s = np.sort(row)
ci_lower.append(s[int(np.floor(r.shape[1] * 0.025)) - 1])
ci_upper.append(s[int(np.ceil(r.shape[1] * 0.975)) - 1])
r = pd.DataFrame({'mean': r.mean(axis=1), 'sd': r.std(axis=1),
'ci_lower': ci_lower, 'ci_upper': ci_upper},
columns=['mean', 'sd', 'ci_lower', 'ci_upper'])
return r
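# Note on the confidence-interval arithmetic above (a worked example, no new
# logic): with r.shape[1] = 1000 permutation columns, ci_lower takes the value
# at sorted position floor(1000 * 0.025) - 1 = 24 (the 25th smallest) and
# ci_upper the value at ceil(1000 * 0.975) - 1 = 974 (the 975th smallest),
# i.e. an empirical 95% interval for each row.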
def shuffle_glom_claw_conn(gc_obj, seg_id_mapping):
# 1) take a glom_claw conn object (note that each claw only has 1 glom input)
# 2) permute the connections as in Caron et al. to construct permuted glom_claw conn
# 3) combine from glom_claw to glom_kc matrix
# 4) get common output matrix and fill in the diagonal
gc_conn = gc_obj.conn['5s']
perm = np.zeros(gc_conn.shape)
perm[np.nonzero(gc_conn)[0], np.random.permutation(
np.nonzero(gc_conn)[1])] = 5
# combine from glom_claw_conn to glom_kc_conn
gc_perm = ConnectivityMatrix('perm', perm, gc_obj.col_ids, gc_obj.row_ids)
gk_perm = gc_perm.combine(seg_id_mapping, 'combined', 0, '1s')
# fill in the diagonal, plot 4b, 4c
co_matrix = gk_perm.co_matrix['1s']
co_matrix = gc_perm.fill_co_matrix_diagonal(
seg_id_mapping, co_matrix, syn='1s')
return {'glom_kc': gk_perm, 'glom_claw': gc_perm}
def make_freq_table(pm, ob, group):
# join permuted and observed and assign a group type
pm = join_freq_table(pm)
ob = pd.DataFrame(ob, columns=['observed_pairs'])
pm = pd.concat([pm, pd.DataFrame(pm.index.values, columns=['num_kc'],
index=pm.index.values)], axis=1)
r = ob.join(pm, how='outer')
r = r.where(~pd.isnull(r), other=0)
r['num_kc'] = r.index.values.astype(int)
r.assign(type=group)
return r
def reorder_glom_seq(glom_ids):
# glom_ids - a list of glomerular ids. For example, ana.conn_data['glom_kc_contracted'].col_ids
# glom_ids = ana.conn_data['glom_kc_contracted'].col_ids
fafb_c = cc.fafb_connection()
t1 = [str(i) for i in cc.ids_to_annotations(fafb_c, glom_ids)]
fpath = '/Users/zhengz11/myscripts/data_results/161021-caron_equiv/161024_FAFB_glom_seq.xlsx'
glom_seq = pd.read_excel(fpath).fafb.tolist()
glom_seq.pop(17)
# glom_seq += [i for i in t1 if i not in glom_seq]
reorder_idx = [t1.index(i) for i in glom_seq]
reorder_glom = [t1[i] for i in reorder_idx]
reorder_glom_id = [glom_ids[i] for i in reorder_idx]
return reorder_idx, reorder_glom, reorder_glom_id
def observed_vs_shuffle(co_matrix, gc_obj, seg_skid, num_exp=10):
# gc_obj - a glom_claw connectivity object. For example, ana.conn_data['glom_claw_contracted']
# seg_skid - a mapping from segments to skids for KCs. For example, ana.kc_mapping.segment_skid
fafb_ob_f4b = freq_identical_pairs(co_matrix)
fafb_ob_f4c = freq_non_identical_pairs(co_matrix)
fafb_pm_f4b = []
fafb_pm_f4c = []
for i in range(num_exp):
# permute and build glom claw conn
perm = shuffle_glom_claw_conn(gc_obj, seg_skid)
co_matrix = perm['glom_kc'].co_matrix['1s']
fafb_pm_f4b.append(freq_identical_pairs(co_matrix))
fafb_pm_f4c.append(freq_non_identical_pairs(co_matrix))
print(i)
f4b = make_freq_table(fafb_pm_f4b, fafb_ob_f4b, 'identical')
f4c = make_freq_table(fafb_pm_f4c, fafb_ob_f4c, 'non-identical')
return f4b, f4c
'''
whenever re-run, set diagonal to zeros
co_matrix = ana.conn_data['glom_kc_contracted'].co_matrix['5s']
co_matrix[np.diag_indices(co_matrix.shape[0])]=0
co_matrix = ana.conn_data['glom_kc_contracted'].co_matrix['5s']
co_matrix = ana.conn_data['glom_claw_contracted'].fill_co_matrix_diagonal(ana.kc_mapping.segment_skid, co_matrix, syn='5s')
'''
def group_division(total, num_group):
r = []
for i in range(num_group - 1):
r.extend([i] * int(round(float(total) / num_group, 0)))
r.extend([num_group - 1] * (total - len(r)))
return r
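# Worked example: group_division(10, 3) appends round(10 / 3) = 3 copies each of
# the labels 0 and 1, then pads with label 2 up to the total, returning
# [0, 0, 0, 1, 1, 1, 2, 2, 2, 2].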
def get_gk_conn_list(gk_conn):
'''Given a glomerulus-KC connectivity matrix, generate a list (idx) of 2 elements.
idx[0] - represent each output instance of a bouton
idx[1] - represent each input instance of a claw. Namely, a claw since one claw only receives input from 1 bouton
'''
idx_list = [np.nonzero(gk_conn)]
for i in range(2, int(np.max(gk_conn) + 1)):
idx = np.where(gk_conn == i)
idx = [np.repeat(idx[j], i - 1) for j in (0, 1)]
idx_list.append(idx)
idx = [np.concatenate([idx_list[i][j]
for i in range(len(idx_list))]) for j in (0, 1)]
return idx
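# Worked example (hypothetical 2x2 matrix, rows = KCs, columns = glomeruli):
#   gk_conn = np.array([[2, 0],
#                       [1, 1]])
# expands every cell value n into n connection instances, so (KC 0, glom 0)
# appears twice and the single-claw cells once each:
#   idx[0] = array([0, 1, 1, 0]), idx[1] = array([0, 0, 1, 0])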
def shuffle_glom_kc(gk_conn):
    '''Given a glomerulus-KC connectivity matrix, shuffle the connections
    while maintaining the numbers of boutons and claws, and return the shuffled matrix.
    Note that individual claw connections are not identifiable but appear as counts in the glom-KC cells,
    e.g. 2 in a cell means the KC and glomerulus connect with 2 claws.'''
idx = get_gk_conn_list(gk_conn)
shuffled_conn = np.zeros(gk_conn.shape)
idx[1] = np.random.permutation(idx[1])
for i in range(len(idx[0])):
shuffled_conn[idx[0][i], idx[1][i]] += 1
return shuffled_conn
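# Minimal usage sketch (gk_conn is assumed to be the observed KC x glomerulus
# claw-count matrix): the shuffle keeps each KC's claw count (row sums) and the
# overall per-glomerulus connection counts (column sums) while randomising
# which claw connects to which glomerulus, e.g.
#   shuffled = shuffle_glom_kc(gk_conn)
#   assert shuffled.sum() == gk_conn.sum()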
def shuffle_glom_kc_iterate(gk_conn, num_exp):
'''same as shuffle_glom_kc but add num_exp'''
idx = get_gk_conn_list(gk_conn)
r = []
for j in range(num_exp):
shuffled_conn = np.zeros(gk_conn.shape)
idx[1] = np.random.permutation(idx[1])
for i in range(len(idx[0])):
shuffled_conn[idx[0][i], idx[1][i]] += 1
r.append(shuffled_conn)
return r
def shuffle_glom_kc_w_prob(gk_conn, col_prob):
    '''Given a glomerulus-KC connectivity matrix, shuffle the connections
    while maintaining the number of CLAWS ONLY, and return the shuffled matrix.
    Note that individual claw connections are not identifiable but appear as counts in the glom-KC cells,
    e.g. 2 in a cell means the KC and glomerulus connect with 2 claws.
    This version draws each glomerulus (each column) with a given probability.'''
sfl_conn = np.zeros(gk_conn.shape)
num_col = sfl_conn.shape[1]
for i in range(sfl_conn.shape[0]):
t1 = np.random.choice(int(num_col), size=int(sum(gk_conn[i,:])), p=col_prob)
for j in t1:
sfl_conn[i, j] += 1
return sfl_conn
def simulated_vs_shuffle(gk_obj, num_exp=10):
# gk_obj - a glom_KC connectivity object. For example, ana.conn_data['glom_claw_contracted']
co_matrix = gk_obj.co_matrix['1s']
fafb_ob_f4b = freq_identical_pairs(co_matrix)
fafb_ob_f4c = freq_non_identical_pairs(co_matrix)
fafb_pm_f4b = []
fafb_pm_f4c = []
for i in range(num_exp):
# permute and build glom claw conn
perm_obj = GlomKcConnectivityMatrix(
'shuffled', shuffle_glom_kc(gk_obj.conn['1s']))
fafb_pm_f4b.append(freq_identical_pairs(perm_obj.co_matrix['1s']))
fafb_pm_f4c.append(freq_non_identical_pairs(perm_obj.co_matrix['1s']))
# print(i)
f4b = make_freq_table(fafb_pm_f4b, fafb_ob_f4b, 'identical')
f4c = make_freq_table(fafb_pm_f4c, fafb_ob_f4c, 'non-identical')
return f4b, f4c
def simulated_vs_shuffle_simple(gk_conn):
# for example, gk_conn=gk_obj.conn['1s']
# similar to simulated_vs_shuffle but only permute once and therefore trim all sd, ci, mean, etc.
gk_co=get_co_matrix(gk_conn)
perm_co = get_co_matrix(shuffle_glom_kc(gk_conn))
r_f4b = pd.DataFrame({'observed': freq_identical_pairs(
gk_co), 'permuted': freq_identical_pairs(perm_co)})
r_f4c = pd.DataFrame({'observed': freq_non_identical_pairs(
gk_co), 'permuted': freq_non_identical_pairs(perm_co)})
r_f4b = r_f4b.where(~pd.isnull(r_f4b), other=0)
r_f4c = r_f4c.where(~pd.isnull(r_f4c), other=0)
return r_f4b, r_f4c
def get_weighted_transitivity(ci_matrix, conn):
simi = get_ci_similarity(ci_matrix, conn)
return get_local_transitivity(simi, 'weighted')
def get_local_transitivity(ci_matrix, graph_type='binary'):
    ci_matrix = ci_matrix.copy()
    if graph_type == 'binary':
        # binarise: any shared connection counts as an (unweighted) edge
        ci_matrix[ci_matrix > 0] = 1
        g = Graph.Adjacency(ci_matrix.tolist(), mode=ADJ_UPPER)
        return g.transitivity_local_undirected(mode="zero")
    elif graph_type == 'weighted':
        g = Graph.Weighted_Adjacency(ci_matrix.tolist(), mode=ADJ_UPPER)
        return g.transitivity_local_undirected(mode="zero", weights="weight")
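# Hedged usage sketch: a co-occurrence matrix such as the common-input matrix
# can be summarised per node as a local clustering coefficient, e.g.
#   local_cc = get_local_transitivity(co_matrix, graph_type='binary')
# where the binary variant treats any shared connection as an unweighted edge
# and the weighted variant uses igraph's weighted local transitivity on a
# similarity matrix (see get_weighted_transitivity above).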
def pick_random_neighbour(conn_row, geom_row, threshold=3000):
if np.count_nonzero(conn_row) > 0:
data = np.zeros((geom_row.shape))
# conn_idx = np.nonzero(conn_row)[0]
to_sample = np.where(geom_row < threshold)[0]
if len(to_sample) == 0:
data = conn_row
else:
data[np.random.choice(to_sample, size=1)[0]] = 5
return data
def pick_partner(conn, geom, func):
perm_matrix = np.zeros((conn.shape))
for i in range(conn.shape[0]):
if np.count_nonzero(conn[i, :]) > 0:
perm_matrix[i, :] = func(conn[i, :], geom[i, :])
return perm_matrix
def pick_random_bouton(conn_row, geom_row):
if np.count_nonzero(conn_row) > 0:
data = np.zeros((geom_row.shape))
# conn_idx = np.nonzero(conn_row)[0]
# to_sample = np.setdiff1d(range(len(conn_row)), conn_idx, True)
data[np.random.choice(list(range(len(conn_row))), size=1)[0]] = 5
return data
def pick_next_neighbour(conn_row, geom_row):
if np.count_nonzero(conn_row) > 0:
data = np.zeros((geom_row.shape))
geom_sort = np.argsort(geom_row)
conn_idx = np.nonzero(conn_row)[0]
to_sample = np.setdiff1d(geom_sort[:5], conn_idx, True)
data[to_sample[0]]=5
return data
def pick_random_from_neighbours(conn_row, geom_row, n=5):
if np.count_nonzero(conn_row) > 0:
data = np.zeros((geom_row.shape))
geom_sort = np.argsort(geom_row)
conn_idx = np.nonzero(conn_row)[0]
to_sample = np.setdiff1d(geom_sort[:n], conn_idx, True)
data[np.random.choice(to_sample, 1)]=5
return data
def detect_structured_matrix(sampled_kc=200, ratio=0.2, nglom=52, ngroup=5, nkc=2000, p=1, num_exp=1000, unfilled_claw=True):
fpath = "/Users/zhengz11/myscripts/data_results/160928-caron_equiv/160928-Caron_suppl_table.xlsx"
    suppl_tbl = pd.read_excel(fpath, 'combined')
import math
import pandas as pd
import numpy as np
import os
import json
from dataV3 import *
def eval_dependency(directory, iaa_dir, schema_dir, out_dir):
print("DEPENDENCY STARTING")
schema = []
iaa = []
for dirpath, dirnames, files in os.walk(schema_dir):
for file in files:
# minimal check here; everything in the schema directory should be a schema csv
if file.endswith('.csv'):
file_path = os.path.join(dirpath, file)
print("found schema " + file_path)
schema.append(file_path)
print("looking for IAA", iaa_dir)
for dirpath, dirnames, files in os.walk(iaa_dir):
for file in files:
print("IAA OUTPUT",file)
if file.endswith('.csv'):
file_path = os.path.join(dirpath, file)
print("evaluating dependencies for " + file_path)
iaa.append(file_path)
temp = []
print("IAA files found", iaa)
for h in iaa:
hdf = pd.read_csv(h, encoding = 'utf-8')
if len(hdf.index) == 0:
raise Exception("TOFIX: eval_dependency has S_IAA with length 0.")
schem_sha = hdf['schema_sha256'].iloc[0]
matched_schema = False
for sch in schema:
if schem_sha in sch:
temp.append(sch)
matched_schema = True
break
if not matched_schema:
raise NameError("No schema matching file:", h)
schema = temp
print('Schemas found', schema)
ins = []
for i in range(len(iaa)):
print(i)
ins.append((schema[i], iaa[i], out_dir))
handleDependencies(schema[i], iaa[i], out_dir)
def unpack_dependency_ins(input):
return handleDependencies(input[0], input[1], input[2])
def handleDependencies(schemaPath, iaaPath, out_dir):
print(out_dir)
print("+++++++")
schemData = pd.read_csv(schemaPath, encoding = 'utf-8')
iaaData = pd.read_csv(iaaPath,encoding = 'utf-8')
#we don't know if it'll get read in as int or str, but forcing str resolves edge cases when failed IAA
iaaData['agreed_Answer'] = iaaData['agreed_Answer'].apply(str)
assert schemData['namespace'].iloc[0] == iaaData['namespace'].iloc[0], "schema IAA mismatch_"+schemData['namespace'].iloc[0]+"\\/"+iaaData['namespace'].iloc[0]
dependencies = create_dependencies_dict(schemData)
tasks = np.unique(iaaData['source_task_uuid'].tolist())
iaaData['prereq_passed'] = iaaData['agreed_Answer']
iaaData = iaaData.sort_values(['question_Number'])
iaaData['question_Number'] = iaaData["question_Number"].apply(int)
    # filter out questions that should never have been asked because there was no agreement on the prerequisites
for q in range(len(iaaData)):
qnum = iaaData['question_Number'].iloc[q]
ans = iaaData['agreed_Answer'].iloc[q]
tsk = iaaData['source_task_uuid'].iloc[q]
iaaData['prereq_passed'].iloc[q] = checkPassed(qnum, dependencies, iaaData, tsk, ans)
iaaData = iaaData.sort_values(["article_sha256",'prereq_passed','question_Number'])
iaaData = iaaData[iaaData['prereq_passed'] == True]
for t in tasks:
iaaTask = iaaData[iaaData['source_task_uuid'] == t]
#childQuestions
#TODO: speed this up by only checking the
for ch in dependencies.keys():
child = dependencies[ch]
needsLove = checkNeedsLove(iaaTask, ch)
if needsLove:
indices = np.zeros(0)
#check if this question even got a score
iaaQ = iaaTask[(iaaTask['question_Number']) == (ch)]
answers = iaaQ['agreed_Answer'].tolist()
answers = find_real_answers(answers)
rows = find_index(iaaQ, answers, 'agreed_Answer')
                # refresh out here so children can pull highlights from multiple parents, if they exist
validParent = False
newInds = []
if len(answers)>0:
#questions the child depends on
for par in child.keys():
iaaPar = iaaTask[iaaTask['question_Number'] == (par)]
neededAnswers = child[par]
#Potential for multiple answers from parent to lead to same child question
#We don't want to favor one prerequisite's highlight over another
for ans in neededAnswers:
for i in range(len(iaaPar)):
if iaaPar['agreed_Answer'].iloc[i].isdigit():
if int(iaaPar['agreed_Answer'].iloc[i]) == ans:
validParent = True
inds_str = iaaPar['highlighted_indices'].iloc[i]
inds = get_indices_hard(inds_str)
newInds.append(inds)
if validParent:
for i in range(len(newInds)):
indices = np.append(indices, newInds[i])
                    # If the parent didn't pass, this question should not have been asked.
                    # This should be handled by the previous step; the if statement below is an artifact of an older version
                    # and could be useful for debugging if we make changes.
if not validParent:
for row in rows:
iaaData.at[row,'agreed_Answer'] = -1
iaaData.at[row, 'coding_perc_agreement'] = -1
indices = np.unique(indices).tolist()
for row in rows:
row_indices = get_indices_hard(iaaData.at[row, 'highlighted_indices'])
indices = merge_indices(row_indices, indices).tolist()
iaaData.at[row, 'highlighted_indices'] = json.dumps(indices)
path, name = get_path(iaaPath)
outputpath = os.path.join(out_dir, 'Dep_'+name)
print("outputting dependency to", outputpath)
iaaData.to_csv(outputpath, encoding = 'utf-8', index = False)
return out_dir
def checkNeedsLove(df, qNum):
#Checks if the question's parent prompts users for a highlight
#true if it does
qdf = df[df['question_Number'] == qNum]
hls = (qdf['highlighted_indices'])
#If no rows correspond to the child question
if qdf.empty:
return False
for h in hls:
if len(json.dumps(h))>3:
return True
return False
def checkPassed(qnum, dependencies, iaadata, task, answer):
"""
checks if the question passed and if a prerequisite question passed
"""
iaatask = iaadata[iaadata['source_task_uuid'] == task]
qdata = iaatask[iaatask['question_Number'] == qnum]
if not checkIsVal(answer):
return False
if not checkIsNum(qnum) or pd.isna(qnum):
return False
if qnum in dependencies.keys():
#this loop only triggered if child question depends on a prereq
for parent in dependencies[qnum].keys():
#Can't ILOC because checklist questions have many answers
pardata = iaatask[iaatask['question_Number'] == parent]
parAns = pardata['agreed_Answer'].tolist()
valid_answers = dependencies[qnum][parent]
for v in valid_answers:
#cast to string because all answers(even numeric) were forced to be strings
strv = str(v)
#Won't be found if it doesn't pass
if strv in parAns:
par_ans_data = pardata[pardata['agreed_Answer'] == strv]
#print(len(par_ans_data['prereq_passed']), 'ppassed', par_ans_data['prereq_passed'])
#In case the parent's prereq didn't pass
if par_ans_data['prereq_passed'].iloc[0] == True:
return True
return False
return True
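# Assumed shape of `dependencies` (as read from the create_dependencies_dict
# usage above; the concrete numbers are hypothetical):
#   {child_question_number: {parent_question_number: [answers that unlock the child]}}
# e.g. {3: {1: [1, 2]}} means question 3 is only asked when question 1 was
# agreed as 1 or 2; checkPassed additionally requires the parent row's own
# prereq_passed flag to be True.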
def checkIsVal(value):
#returns true if value is a possible output from IAA that indicates the child q had user highlights
if value == "M" or value == "L":
return True
    # if it's NaN
    if pd.isna(value):
import sys
import os
sys.path.extend([os.getcwd() + '/NetworkVariables_Analysis/'])
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from Aux_funcs import generate_holidays, PlotKernelDensityEstimator
import pickle
import numpy as np
import datetime as dt
import networkx as nx
import seaborn
full_figsize = (16, 7)
tick_size = 16
label_size = 18
title_size = 20
bar_width = 0.65
bar_thick_width = 2
date_range_used = [dt.datetime(2016, 1, 1), dt.datetime(2021, 1, 1)]
lead_articles = pd.read_csv(os.getcwd() + '/NetworkVariables_Analysis/SPX_articles_lead.csv', index_col=0, parse_dates=True)
"""
Article number evolving
"""
def holidays_weekends_filter(ts_df):
exchange_holidays = generate_holidays()
ts_df['days'] = ts_df.index.dayofweek
workingdays = ts_df[~(ts_df.days.isin([5, 6]))]
holidays = pd.DataFrame()
for date in exchange_holidays:
print(date)
holiday = workingdays[workingdays.index.date == date]
        holidays = pd.concat([holidays, holiday], axis=0)
import gc
import os
import sqlite3
# modify = frame.groupby("SAMODIFY")['USUBJID'].apply(pd.unique).apply(len)
# modify = frame.groupby("SAMODIFY")['USUBJID'].apply(pd.unique)
import numpy as np
import pandas as pd
from . import functions
# pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
# pd.set_option('display.max_rows', None)
pd.set_option('display.expand_frame_repr', False)
class Domain:
"""
A generic class that loads a domain and provides basic exploratory data analysis
"""
# TODO add function to return list of current USUBJID
# TODO change to use sqlite as backend / for calculations (will save memory etc) still use dataframes for return
# and presentation
def __init__(self, domain: str, data_directory: str, num_rows=None):
# Load domain as a dataframe and store as a class field
self.frame = self.read_domain(domain, data_directory, num_rows)
"""
A Pandas DataFrame which is the data structure where we store the information about this domain.
"""
# Store the name of domain as a class field
self.domain = domain
"""
A string that contains the name of the domain that we have currently loaded.
"""
# We handle term based domains slightly different
if domain in ['HO', 'SA', 'IN']:
# Save term based domain information as a protected attribute - we use this behind the scenes
self.__is_term_outcome = True
# Process outcome column - this is appended to the end of our class specific frame object
self.process_occur()
else:
self.__is_term_outcome = False
@staticmethod
def __read_domain_deprecated(domain, data_folder, data_file):
"""
:param domain: Domain to load e.g. DM, SA, IN
:param data_folder: Directory where database is located
:param data_file: Filename of sqlite database
:return: dataframe that contains the whole domain
"""
try:
db_file = os.path.join(data_folder, data_file)
con = f'sqlite:///{db_file}'
# con = sqlite3.connect(db_file)
# df = pd.read_sql_table(domain, uri)
if domain == "SA":
columns = "STUDYID, USUBJID, SASEQ, SADY, SATERM, SAMODIFY, SACAT, SAPRESP, SAOCCUR"
elif domain == "IN":
columns = "USUBJID, INSEQ, INDY, INSTDY, INTRT, INMODIFY, INCAT, INPRESP, INOCCUR, INREFID"
# columns = "*"
else:
columns = "*"
df = pd.read_sql("SELECT {} FROM '{}'".format(columns, domain), con)
df = df.rename(columns=lambda x: x.strip())
return df
except Exception as e2:
print("Domain could not be loaded from sqlite database", e2)
return
finally:
print("Domain {} Loaded".format(domain))
# con.close()
gc.collect()
@staticmethod
def read_domain(domain: str, data_folder: str, num_rows: int) -> pd.DataFrame:
"""
Loads a domain from auxiliary generated pickle files for faster Python I/O than with SQL table reads
:param num_rows: Integer (optional): Number of rows to load from dataframe (default loads all)
:param domain: String name of domain
:param data_folder: String, Path to folder containing .pickle files
:return: pd.DataFrame containing the full domain (all columns and rows)
"""
db_file = os.path.join(data_folder, domain)
df = pd.read_pickle(f"{db_file}.pickle")
if num_rows is None:
return df
else:
return df[:num_rows]
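    # Hedged usage sketch (the directory path is a placeholder, not from the
    # original project):
    #   sa = Domain("SA", data_directory="/path/to/pickles", num_rows=1000)
    #   sa.columns()
    # loads the first 1000 rows of the SA domain from its pickle and lists its columns.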
def columns(self):
"""
:return: prints list of columns contained in self.frame
"""
print(self.frame.columns.to_list())
def exclude_columns(self, columns: list):
"""
Excludes some columns from the class variable 'frame'
:param columns: Columns to drop from domain
:return: None (operates on class variable)
"""
try:
self.frame.drop(labels=columns, axis=1, inplace=True)
except KeyError:
print(print(f"At leas one column: '{columns}' is not in the current domain: '{self.domain}'"))
def include_columns(self, columns: list):
"""
:param columns: (list) Columns to include in dataframe.
:return: None (operates on class variable)
"""
try:
self.frame = self.frame[columns]
except KeyError:
print(print(f"At leas one column: '{columns}' is not in the current domain: '{self.domain}'"))
def column_events(self, column: str):
try:
print(self.frame[column].unique())
except KeyError:
print(f"Column '{column}' is not in the current domain: '{self.domain}'")
def select_variables_from_column(self, column: str, *variables: str) -> pd.DataFrame:
"""
Filters and returns a dataframe based off column and variable information, Returns an error if column is not
found within the current domain.
:param variable: String (or Strings) containing variables to be selected from column
:param column: String containing the column within self.frame to selct variable from
:return: Filtered dataframe containing only entries where self.frame[column] contains the value of variable
"""
try:
mask = self.frame[column].isin(variables)
filtered = self.frame[mask]
# df = self.frame[self.frame[column] == variable]
if len(filtered) == 0:
print(f"There were no occurences of {variables} within {column}")
print(f"There is {filtered.USUBJID.nunique()} unique patients in filtered dataframe")
return filtered
except KeyError as e:
print(f"Column '{column}' is not in the current domain: '{self.domain}'")
def table_missingness(self, column=None, variable=None):
"""
        Prints a missingness table for either a whole table, or a filtered table where we have selected
frame.column == variable
:param column: (optional) column to search for term variable
:param variable: (optional) variable to search for
:return: None
"""
if variable is None and column is None:
n_unique = self.frame.USUBJID.nunique()
print(f"Total number of rows: {len(self.frame)}")
print(f"Total number of unique patients: {n_unique}")
print(self.frame.isna().sum())
elif column is None or variable is None:
print("Must specify both a column and a variable or neither")
else:
try:
trimmed = self.frame[self.frame[column] == variable]
n_unique = trimmed.USUBJID.nunique()
print(f"Total number of rows: {len(trimmed)}")
print(f"Total number of unique patients: {n_unique}")
print(trimmed.isna().sum())
except KeyError as e:
print(f"Column '{column}' is not in the current domain: '{self.domain}'")
def column_summary(self, column: str, *variables, proportions=False, status=False, ):
"""
Summarises and returns column information
:param column: String, Column name
:param variables: String, optional name of variables to filter by
:param status: If True, include Y, N or U information from self.frame.status
:param proportions: Boolean: If True print normalised proportions for items in column, by default: False returns
counts of events in column.
:return:
"""
print(f"Number of unique patients in domain: {self.frame.USUBJID.nunique()}")
unique_ids = self.frame.groupby(column)['USUBJID'].apply(pd.unique).apply(len).rename("Unique Patients")
if self.__is_term_outcome:
try:
# Loads column as pd.Series
if len(variables) == 0:
filtered = self.frame
else:
# filtered = self.frame[column]
mask = self.frame[column].isin(variables)
filtered = self.frame[mask]
if status:
with pd.option_context('display.max_rows', None):
test = filtered.groupby([column, "status"]).size().rename("Number of rows")
unique_ids = filtered.groupby([column, "status"])['USUBJID'].apply(pd.unique).apply(
len).rename("Unique patients")
print(pd.concat((test, unique_ids), axis = 1))
else:
                    with pd.option_context('display.max_rows', None):
'''
Script Purpose:
Traverse content directory looking for classes that inherit from prototype classes and have test methods wrapped by JungleController.
If no modification has been made to a content file since last successful run of test_tree.py it will not be re run. (No Wasted Effort)
Definitions:
Prototype Classes = classes with 'proto' in their name
Test Methods = Bound methods belonging to a class that inherits from a prototype class
'''
import glob
import copy
import importlib
from jungle.utils.jungleprofiler import JungleExperiment, JungleEncoder
import json
from json.decoder import JSONDecodeError
import os
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_style('whitegrid')
class TestTreeAutomation:
''' Automation Code for discovering test functions used in conjunction with JungleController '''
def __init__(self, dev=False):
directory = 'code'
self.json_mod_log = 'content_mod_log.json'
self.file_dict_path = 'test_tree.json'
self.file_mod_dict = {}
self.dev = dev
# Load File Mod Log
self.load()
# Iterate over all of the files in content
for filename in glob.iglob('%s/**/*.py' % directory, recursive=True):
if self.isfile_modified(filename) or self.dev:
print('File %s has been modified since last TestTreeAutomation Call' % filename)
self.test_file(filename)
else:
print('File %s has NOT been modified since last TestTreeAutomation Call' % filename)
# Write the Test Tree and File Mod Log to JSONs
self.post_process()
self.write()
def load(self):
''' Load Mod Log and prior TestTree '''
# Mod Log
try:
with open(self.json_mod_log, mode='r') as f:
self.old_file_mod_dict = json.load(f)
except FileNotFoundError:
self.old_file_mod_dict = {}
# Last TestTree
try:
with open(self.file_dict_path, mode='r') as f:
self.file_dict = json.load(f)
except (FileNotFoundError, JSONDecodeError):
self.file_dict = {}
def isfile_modified(self, filename):
''' Check if file needs to be updated, also update the last modified '''
latest_mod_time = os.stat(filename).st_mtime
# update last mod time
self.file_mod_dict[filename] = latest_mod_time
try:
if latest_mod_time <= self.old_file_mod_dict[filename]:
return False
except KeyError:
print('New File Found: %s' % filename)
return True
def test_file(self, filename):
''' Discover and complete tests '''
        module_text = filename.replace('\\', '.')[:-3]  # drop the ".py" suffix (str.strip('.py') would remove characters, not the suffix)
print('\nFile: %s\tSanitized: %s' % (filename, module_text))
temp_module = importlib.import_module(module_text)
prototypes = {}
for obj_name in dir(temp_module):
obj = getattr(temp_module, obj_name)
try:
obj_base = obj.__bases__
obj_base_name = ' - '.join(ob.__name__ for ob in obj_base)
local_test_methods = [method for method in dir(obj) if 'test' in method.lower()]
print('\n\tObject Name: %s' % obj_name)
print('\tObject: %s' % obj)
print('\tObject Base: %s' % obj_base_name)
if 'proto' in obj_base_name.lower():
for test_name in local_test_methods:
print('\t\tTest Name: %s' % test_name)
test_method = getattr(obj(), test_name)
print('\t\tTest Method: %s' % test_method)
try:
test_return = test_method()
print(test_return.__repr__)
if isinstance(test_return, JungleExperiment):
if obj_base_name in prototypes:
if test_name in prototypes[obj_base_name]:
prototypes[obj_base_name][test_name].update({obj_name: test_return})
# prototypes[obj_base_name][test_name][obj_name] = test_return
else:
prototypes[obj_base_name][test_name] = {obj_name: test_return}
else:
prototypes[obj_base_name] = {test_name: {obj_name: test_return}}
except Exception as e:
raise e
except AttributeError:
pass
if prototypes:
self.file_dict[module_text] = prototypes
return prototypes
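    # Shape of the returned `prototypes` mapping, as built above (example names
    # are hypothetical):
    #   {'ProtoModel': {'test_fit': {'MyModel': <JungleExperiment>}}}
    # i.e. prototype base name -> test method name -> concrete class -> result.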
def write(self):
''' Write data to file '''
print('Writing Filename Mod Log')
with open(self.json_mod_log, mode='w') as out_file:
json.dump(self.file_mod_dict, out_file, sort_keys=True, indent=3)
print('Writing Test Tree')
with open(self.file_dict_path, mode='w') as out_file:
jdump = json.dump(self.file_dict, out_file, sort_keys=True, indent=3, cls=JungleEncoder)
def post_process(self):
''' Scoop all of the related jungle controllers and combine them for reporting'''
for file, prototype_dicts in self.file_dict.items():
for prototype, test_dicts in prototype_dicts.items():
for test, methods_dict in test_dicts.items():
print('Just called post process')
print(test)
print(methods_dict)
print('Calling combine jungle controllers')
self.combine_junglecontrollers(methods_dict)
def combine_junglecontrollers(self, methods_dict):
''' dict of method keys and JungleController dictionaries'''
df_list = []
print('---------------------------------------\nMethods Dict')
print(methods_dict)
for method, jc in methods_dict.items():
if not isinstance(jc, JungleExperiment):
raise TypeError('arg: %s is not of type JungleController' % type(jc))
else:
cdf = jc.controller_df
cdf['Method'] = method
df_list.append(cdf)
        concat_df = pd.concat(df_list)
"""
# @Description:
Script: Train the recurrent neural network model for face finding tasks with continuous likelihood.
"""
import argparse
import numpy as np
import pandas as pd
import torch
from torch import nn
import pyro
import pyro.distributions as dist
from pyro.infer.util import torch_item
from tqdm import trange
import mlflow
import mlflow.pytorch
import matplotlib.pyplot as plt
import os
from neural.modules import (
SetEquivariantDesignRNN,
BatchDesignBaseline,
RandomDesignBaseline,
rmv,
)
from oed.primitives import observation_sample, latent_sample, compute_design
from experiment_tools.pyro_tools import auto_seed
from oed.design import OED
from contrastive.mi import PriorContrastiveEstimation
from gru_net import GRUEncoderNetwork as EncoderNetwork, GRUEmitterNetwork as EmitterNetwork
from plotters import plot_trace_2d, plot_trace_3d, plot_trace
# <editor-fold desc="[FB] Load libraries ...">
from face.face_model import AppearanceModel
import pickle
from PIL import Image
# </editor-fold>
class HiddenObjects(nn.Module):
"""Face location finding example"""
def __init__(
self,
design_net,
# base_signal=0.1, # G-map hyperparam
# max_signal=1e-4, # G-map hyperparam
theta_loc=None, # prior on theta mean hyperparam
theta_covmat=None, # prior on theta covariance hyperparam
noise_scale=None, # this is the scale of the noise term
p=1, # physical dimension
K=1, # number of sources
T=2, # number of experiments
):
super().__init__()
self.design_net = design_net
# self.base_signal = base_signal
# self.max_signal = max_signal
# Set prior:
self.theta_loc = theta_loc if theta_loc is not None else torch.zeros((K, p))
self.theta_covmat = theta_covmat if theta_covmat is not None else torch.eye(p)
self.theta_prior = dist.MultivariateNormal(
self.theta_loc, self.theta_covmat
).to_event(1)
# Observations noise scale:
self.noise_scale = noise_scale if noise_scale is not None else torch.tensor(1.0)
self.n = 1 # batch=1
self.p = p # dimension of theta (location finding example will be 1, 2 or 3).
self.K = K # number of sources
self.T = T # number of experiments
def forward_map(self, xi, theta):
"""Defines the forward map for the hidden object example
y = G(xi, theta) + Noise.
"""
# two norm squared Acoustic example
# sq_two_norm = (xi - theta).pow(2).sum(axis=-1)
# sq_two_norm_inverse = (self.max_signal + sq_two_norm).pow(-1)
# sum over the K sources, add base signal and take log.
# mean_y = torch.log(self.base_signal + sq_two_norm_inverse.sum(-1, keepdim=True))
# Face finder likelihood - gaussian for mean response
# beta = 1
# alpha = beta / self.p # Control likelihood spread
# sq_two_norm = (xi - theta).pow(2).sum(axis=-1) # axis =-1 = SUM ALONG ALL DIMS ?
# mean_y = torch.exp((-alpha * sq_two_norm).sum(-1, keepdim=True))
# return mean_y
# Face finder likelihood - exponential of absolute distance for mean response
#beta = 5
#alpha = beta / self.p # Control likelihood spread
#sq_two_norm = (xi - theta).pow(2).sum(axis=-1)
#absdist = sq_two_norm.sqrt()
#mean_y = torch.exp((-alpha * absdist).sum(-1, keepdim=True))
#return mean_y
# Cauchy-lorentz distance ...
# f(x, x_0, γ) = 1 / [ πγ ( 1 + ((x-x_0)/γ)^2 ) ]
# γ: {0.05, 0.1, 0.15}, the half-width at half-maximum
# Parameter
#gamma = 0.05
#pi = 3.14
# Equation
#term1 = (xi - theta).pow(2).sum(axis=-1) / gamma
#term1 = term1.pow(2) + 1
#denominator = pi * gamma * term1
#mean_y = 1 / denominator
#return mean_y
# Laplace>
# y_scale = 9.
# b = 0.5
# term1 = -(torch.abs(xi - theta).sum(axis=-1) / b)
# term1 = torch.exp(term1)
# mean_y = y_scale * (1. / (2. * b)) * term1
# Sum of normals
alpha = 20
# alpha = 12 #Try this for p = 10 only
beta = 1 / self.p
        sq_two_norm = (xi - theta).pow(2).sum(axis=-1)  # axis=-1: sum squared differences over the parameter dimension
term1 = torch.exp((-alpha * sq_two_norm ).sum(-1, keepdim=True))
term2 = torch.exp((-beta * sq_two_norm).sum(-1, keepdim=True))
mean_y = term1 + term2
return mean_y
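    # Sanity note on the mean response above (worked arithmetic, not new code):
    # at xi == theta the squared distance is 0, so mean_y = exp(0) + exp(0) = 2,
    # and it decays as exp(-20 * d^2) + exp(-d^2 / p) with d^2 the squared
    # Euclidean distance, i.e. a sharp peak on top of a broad bump.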
def model(self):
if hasattr(self.design_net, "parameters"):
pyro.module("design_net", self.design_net)
########################################################################
# Sample latent variables theta
########################################################################
theta = latent_sample("theta", self.theta_prior)
xi, y = None, None
y_outcomes = []
# T-steps experiment
for t in range(self.T):
####################################################################
# Get a design xi; shape is [num-outer-samples x 1 x 1]
####################################################################
xi = compute_design(
f"xi{t + 1}", self.design_net.lazy(xi, y)
)
####################################################################
# Sample y at xi; shape is [num-outer-samples x 1]
####################################################################
mean = self.forward_map(xi, theta) # Get mean of the observation
sd = self.noise_scale
y = observation_sample(f"y{t + 1}", dist.Normal(mean, sd).to_event(1)) # Sample observation from likelihood
# y = y.detach()
y_outcomes.append(y)
return y_outcomes
def forward(self, theta=None):
"""Run the policy"""
self.design_net.eval()
if theta is not None:
model = pyro.condition(self.model, data={"theta": theta})
else:
model = self.model
designs = []
observations = []
with torch.no_grad():
trace = pyro.poutine.trace(model).get_trace()
for t in range(self.T):
xi = trace.nodes[f"xi{t + 1}"]["value"]
designs.append(xi)
y = trace.nodes[f"y{t + 1}"]["value"]
observations.append(y)
return torch.cat(designs).unsqueeze(1), torch.cat(observations).unsqueeze(1)
def eval(self, n_trace=25, theta=None, verbose=True):
"""run the policy, print output and return in a pandas df"""
self.design_net.eval()
if theta is not None:
model = pyro.condition(self.model, data={"theta": theta})
else:
model = self.model
# <editor-fold desc="[FB] Load appearance model ...">
output_path = "./face/output" # path to "app_model.pkl" file TODO
with open(os.path.join(output_path, "app_model.pkl"), "rb") as f:
app_model = pickle.load(f)
# </editor-fold>
output = []
true_thetas = []
with torch.no_grad():
for i in range(n_trace):
print("\nExample run {}".format(i + 1))
trace = pyro.poutine.trace(model).get_trace()
true_theta = trace.nodes["theta"]["value"].cpu()
# Save target face params ...">
if i % 5 == 0:
recon = app_model.decode(true_theta)
_img = Image.fromarray((recon * 255).astype(np.uint8))
_img.save(os.path.join(output_path, f"target_{i}.jpg"))
# </editor-fold>
if verbose:
print(f"*True Theta: {true_theta}*")
run_xis = []
run_ys = []
# Print optimal designs, observations for given theta
for t in range(self.T):
xi = trace.nodes[f"xi{t + 1}"]["value"].cpu().reshape(-1)
run_xis.append(xi)
y = trace.nodes[f"y{t + 1}"]["value"].cpu().item()
run_ys.append(y)
if verbose:
print(f"xi{t + 1}: {xi}")
print(f" y{t + 1}: {y}")
# Save design face params ..">
if i % 5 == 0 and t % 5 == 0:
recon = app_model.decode(xi)
_img = Image.fromarray((recon * 255).astype(np.uint8))
_img.save(os.path.join(output_path, f"target_{i}_recon_{t}.jpg"))
# </editor-fold>
run_df = pd.DataFrame(torch.stack(run_xis).numpy())
run_df.columns = [f"xi_{i}" for i in range(self.p)]
run_df["observations"] = run_ys
run_df["order"] = list(range(1, self.T + 1))
run_df["run_id"] = i + 1
output.append(run_df)
true_thetas.append(true_theta.numpy())
# Output target and designs as images at this point. Need loop for each design !
# Load appearance model
# with open(os.path.join(output_path, "app_model.pkl"), "rb") as f:
# app_model = pickle.load(f)
# Now decode
# recon = app_model.decode(latent)
# _img = Image.fromarray((recon * 255).astype(np.uint8))
# _img.save(os.path.join(output_path, "recon.jpg"))
# {FB} The latest plotting function that works for any p-dimension data
if i % 5 == 0:
plot_trace(i, self.p, self.T, run_df, true_theta.numpy(), face_finding=True, face_folder='./face/output')
# -------------- Deprecated old plotting function --------------
# if true_theta.shape[1] == 1:
# # 1D plot
# fig, ax = plt.subplots()
# ax.plot(run_df["order"], run_df[f"xi_0"], 'ro--')
# ax.plot(self.T, true_theta, 'bo')
# ax.set(xlabel='order', ylabel='location', title=f"*True Theta: {true_theta}*")
# ax.grid()
# # save plot
# plt.savefig(f"trace_{i}.png")
# plt.close()
# elif true_theta.shape[1] == 2:
# plot_trace_2d(run_df['xi_0'], run_df['xi_1'], i, true_theta)
# elif true_theta.shape[1] == 3:
# plot_trace_3d(run_df['xi_0'], run_df['xi_1'], run_df['xi_2'], i, true_theta)
# -------------- Deprecated old plotting function --------------
print( | pd.concat(output) | pandas.concat |
import pandas as pd
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import warnings
import itertools
import datetime
import os
from math import sqrt
#import seaborn as sns
class ContagionAnalysis():
def __init__(self, world):
self.world = world
        # time stamp used to label output files
now = datetime.datetime.now()
self.now = now.strftime("%Y-%m-%d_%H:%M")
def run_contaigon_analysis(self, opinion_type, analysis="expo_frac", n_bins = 20, binning = True, save_plots = False, show_plot=True, write_data = True, output_folder = ""):
        ''' Runs a full contagion analysis.
        Parameters:
            opinion_type: (str) name of the trait
            analysis: (str) type of analysis (expo_frac, expo_nmb)
            n_bins: (int) number of bins
            binning: (bool) whether to bin the data
            save_plots: (bool) whether to save plots to disk
            show_plot: (bool) whether to display the plots
            write_data: (bool) whether to write data to disk
            output_folder: (str) folder in which to save data and plots
        '''
        # name used to label output files
name = self.world.name + \
"_" + analysis + \
"_" + self.now
self.output_folder = output_folder
print("Write into: " + self.TEMP_DIR + output_folder)
if not os.path.exists(self.TEMP_DIR + output_folder):
os.makedirs(self.TEMP_DIR + output_folder)
# calc exposure
exposure = self.calc_exposure(analysis, opinion_type)
#write data
if write_data:
exposure.to_pickle(self.TEMP_DIR + output_folder + "exposure_" + name + ".pkl")
# calc trait change
data, expo_agg = self.opinion_change_per_exposure(exposure, opinion_type)
#write data
if write_data:
data.to_pickle(self.TEMP_DIR + output_folder + "data_" + name + ".pkl")
# plot
plot_data = self.plot_opinion_change_per_exposure_number(data, analysis, binning, n_bins, \
save_plots, show_plot)
return [data, plot_data]
def _get_neighbors(self,g,i):
''' returns neighbors of node i in graph g '''
try:
return [n for n in g[i]]
except KeyError:
return []
def _calc_expo_frac(self, node_id, opinion_type, t, op_nodes, graph, all_opinions):
''' Calculate exposure as fraction of encounters to people with other opinion '''
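        # Example: a node with 4 neighbours of whom 3 hold the opinion True ends up
        # with frac_influencer = 3/4 for True and 1/4 for False (after the normalisation below).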
neighbors = self._get_neighbors(graph, node_id)
opinions = op_nodes.loc[neighbors]
nmb_1 = opinions.loc[opinions[opinion_type] == True, opinion_type].count()
nmb_2 = opinions.loc[opinions[opinion_type] == False, opinion_type].count()
exposure = pd.DataFrame({ opinion_type: [True, False],\
'n_influencer': [nmb_1, nmb_2],\
'frac_influencer': [nmb_1, nmb_2] })
if (len(neighbors) <= 2) & (self.world.type == "SYN"):
if self.world.cc == True:
exposure *= 0
# normalize exposure
if len(neighbors) > 0:
exposure.frac_influencer /= len(neighbors)
exposure['n_nbs'] = len(neighbors)
exposure['node_id'] = node_id
exposure['time'] = t
return exposure
def calc_exposure(self, analysis, opinion_type, exposure_time = 7):
''' Calculate exposure for opinion type, distinguish between different analysis types '''
print("INFO: Calc exposure...")
        # prepare some variables for later use
all_opinions = pd.DataFrame( self.world.op_nodes[opinion_type].unique(), \
columns=[opinion_type])
nodes = self.world.op_nodes.node_id.unique()
self.world.op_nodes.time = pd.to_datetime(self.world.op_nodes.time)
op_nodes = [self.world.op_nodes[self.world.op_nodes.time == t].set_index('node_id') \
for t in self.world.time.time]
# distinguish between analysis types and calc exposure
if analysis == "expo_frac":
print("INFO: Calc expo frac")
expo = []
for t in self.world.time.time:
rel_graph = self.world.get_relation_graph_t(t = t)
op_nodes_t = self.world.op_nodes.loc[self.world.op_nodes.time == t].set_index('node_id')
expo += [ self._calc_expo_frac( node_id, opinion_type, t, op_nodes_t, rel_graph, all_opinions) \
for node_id in nodes]
expo = pd.concat(expo)
# calc mean over last exposure_time days
sigma = pd.to_timedelta(exposure_time, unit='d').total_seconds() #seconds
two_sigma_sqr = 2* sigma * sigma
expo.time = pd.to_datetime(expo.time)
        expo = expo.groupby(['node_id',opinion_type])[["time", "n_influencer", "n_nbs", "frac_influencer"]].apply( \
lambda p: self._agg_expo(p, two_sigma_sqr, analysis) \
).reset_index()
if analysis == "expo_frac":
expo.set_index(['node_id','time',opinion_type],inplace=True)
expo["exposure"] = expo.n_influencer_mean / expo.n_nbs_mean
expo.reset_index(inplace=True)
expo.set_index(['node_id','time'],inplace=True)
return expo
def _agg_expo(self, expo_slice, two_sigma_sqr, analysis):
''' weighted temporal mean of expo_slice '''
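        # Each observation is weighted with a Gaussian kernel in time,
        #   w_ij = exp(-(t_i - t_j)^2 / (2 * sigma^2)),
        # and the lower-triangular filter below keeps only observations up to the current row.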
expo_slice = expo_slice.copy()
expo_slice.time = expo_slice.time.astype('int')/1000000000.0 # to seconds
time_matrix = np.array([expo_slice.time.values]*len(expo_slice.time))
diff = (time_matrix - time_matrix.transpose()) #seconds
matrix = np.exp(-(diff * diff)/two_sigma_sqr)
filter_past = np.tril(np.ones_like(matrix))
matrix *= filter_past
if analysis == "expo_nmb":
expo_slice["exposure"] = np.dot(matrix, expo_slice.exposure)
else:
norm = np.dot(matrix, np.ones_like(expo_slice.frac_influencer))
expo_slice["frac_influencer_mean"] = np.dot(matrix, expo_slice.frac_influencer)
expo_slice["frac_influencer_mean"] /= norm
expo_slice["n_influencer_summed"] = np.dot(matrix, expo_slice.n_influencer)
expo_slice["n_influencer_mean"] = expo_slice["n_influencer_summed"] / norm
expo_slice["n_nbs_summed"] = np.dot(matrix, expo_slice.n_nbs)
expo_slice["n_nbs_mean"] = expo_slice["n_nbs_summed"] / norm
expo_slice.time = | pd.to_datetime(expo_slice.time, unit="s") | pandas.to_datetime |
"""Store the data in a nice big dataframe"""
import sys
from datetime import datetime, timedelta
import pandas as pd
import geopandas as gpd
import numpy as np
class Combine:
"""Combine defined countries together"""
THE_EU = [ 'Austria', 'Italy', 'Belgium', 'Latvia',
'Bulgaria', 'Lithuania', 'Croatia',
'Luxembourg', 'Cyprus', 'Malta',
'Czechia', 'Netherlands', 'Denmark',
'Poland', 'Estonia', 'Portugal',
'Finland', 'Romania', 'France',
'Slovakia', 'Germany', 'Slovenia',
'Greece', 'Spain', 'Hungary',
'Sweden', 'Ireland' ]
def __init__(self, options):
"""Init"""
self.options = options
self.timeseries = []
self.countries = None
self.description = None
self.merged = None
self.cc = None
self.populations = []
self.national_populations = None
self.get_populations()
self.countries_long = {'nl': 'The Netherlands', 'sco': 'Scotland', 'eng': 'England',
'wal': 'Wales', 'ni': 'Northern Ireland'}
self.jhu = JHU(self)
def judat(self):
"""Dumb helper for another library"""
self.timeseries.append(NLTimeseries(False).national(False))
self.combine_national(False)
#self.merged['Week'] = self.merged.index.strftime('%U')
#self.merged = self.merged.groupby(['Week']) \
#.agg({'Aantal': 'sum'})
print(self.merged)
def process(self):
"""Do it"""
cumulative = False
if self.options.pivot:
cumulative = True
for nation in self.cc:
usejhu = True
if self.options.nation:
print(f'Processing National data {nation}')
if nation in ['wal', 'sco', 'eng']:
self.timeseries.append(UKTimeseries(False).national(nation,cumulative))
usejhu = False
#if nation == 'nl':
#self.timeseries.append(NLTimeseries(False).national(cumulative))
#usejhu = False
if usejhu:
self.timeseries.append(XXTimeseries(False,
{nation: self.countries_long[nation]}).national(cumulative))
else:
print(f'Processing combined data {nation}')
if nation in ['wal', 'sco', 'eng']:
self.timeseries.append(UKTimeseries(True).get_data())
usejhu = False
if nation == 'nl':
self.timeseries.append(NLTimeseries(True).get_data())
usejhu = False
if usejhu:
self.timeseries.append(XXTimeseries(True).get_data())
if len(self.timeseries) == 0:
print('No country Data to process')
sys.exit()
if self.options.pivot:
self.combine_pivot()
return
if self.options.nation:
self.combine_national()
return
self.get_combined_data()
def combine_pivot(self):
"""Pivot data for pandas_alive"""
print('Pivotting data')
self.merged = pd.concat(self.timeseries)
self.merged['Datum'] = pd.to_datetime(self.merged['Datum'])
# So we can add it as an option later
column = 'Overleden'
#column = 'Aantal'
# Convert to 100K instead of millions
for country in self.cc:
self.merged.loc[(self.merged.country == country), 'population'] \
= self.national_populations[country] * 10
# Per-Capita
self.merged[column] = self.merged[column] / self.merged['population']
self.merged = self.merged.pivot(index='Datum',
columns='country',
values=column).fillna(0)
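        # After the pivot: rows are dates, columns are country codes, values are
        # deaths per 100k (given the population scaling above); missing entries become 0.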
self.trim_data()
print(self.merged)
def combine_national(self, trim=True):
"""Combine national totals"""
self.merged = pd.concat(self.timeseries)
self.merged['Datum'] = | pd.to_datetime(self.merged['Datum']) | pandas.to_datetime |
import pandas as pd
import numpy as np
import random as rnd
import os
# visualization
import matplotlib.pyplot as plt
import matplotlib
# Preprocessing
from sklearn import preprocessing
import datetime
# machine learning
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import f1_score
from sklearn import model_selection as ms
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import roc_curve, auc, roc_auc_score
from sklearn.model_selection import cross_val_score
import xgboost as clf1
import copy
class SepsisPrediction:
def feature_fun(self, col, df):
        standard_deviation = df[col].std()
kurtosis = df[col].kurtosis()
skewness = df[col].skew()
mean = df[col].mean()
minimum = df[col].min()
maximum = df[col].max()
rms_diff = (sum(df[col].diff().fillna(0, inplace=False).apply(lambda x: x*x))/(len(df)+1))**0.5
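        # rms_diff: root-mean-square of successive differences,
        #   sqrt( sum((x_t - x_{t-1})^2) / (n + 1) )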
        return standard_deviation, kurtosis, skewness, mean, minimum, maximum, rms_diff
def process(self, demo_df, opt, time_prior, time_duration):
demo_df = demo_df[['patientunitstayid', 'offset', 'paO2_FiO2', 'platelets_x_1000',
'total_bilirubin', 'urinary_creatinine', 'creatinine', 'HCO3', 'pH',
'paCO2', 'direct_bilirubin', 'excess', 'ast', 'bun', 'calcium',
'glucose', 'lactate', 'magnesium', 'phosphate', 'potassium', 'hct',
'hgb', 'ptt', 'wbc', 'fibrinogen', 'troponin', 'GCS_Score', 'heartrate', 'respiration', 'label']]
demo_df[['patientunitstayid', 'offset']] = demo_df[['patientunitstayid', 'offset']].astype('int32')
demo_df[['label', 'paO2_FiO2', 'platelets_x_1000',
'total_bilirubin', 'urinary_creatinine', 'creatinine', 'HCO3', 'pH',
'paCO2', 'direct_bilirubin', 'excess', 'ast', 'bun', 'calcium',
'glucose', 'lactate', 'magnesium', 'phosphate', 'potassium', 'hct',
'hgb', 'ptt', 'wbc', 'fibrinogen', 'troponin', 'GCS_Score', 'heartrate', 'respiration']] = demo_df[['label','paO2_FiO2', 'platelets_x_1000',
'total_bilirubin', 'urinary_creatinine', 'creatinine', 'HCO3', 'pH',
'paCO2', 'direct_bilirubin', 'excess', 'ast', 'bun', 'calcium',
'glucose', 'lactate', 'magnesium', 'phosphate', 'potassium', 'hct',
'hgb', 'ptt', 'wbc', 'fibrinogen', 'troponin', 'GCS_Score', 'heartrate', 'respiration']].astype('float32')
dt = {}
colms = ['paO2_FiO2', 'platelets_x_1000',
'total_bilirubin', 'urinary_creatinine', 'creatinine', 'HCO3', 'pH',
'paCO2', 'direct_bilirubin', 'excess', 'ast', 'bun', 'calcium',
'glucose', 'lactate', 'magnesium', 'phosphate', 'potassium', 'hct',
'hgb', 'ptt', 'wbc', 'fibrinogen', 'troponin', 'GCS_Score', 'heartrate', 'respiration']
col_names = demo_df.columns
sorted_df = demo_df
pids = demo_df.patientunitstayid.unique()
colm = ['paO2_FiO2', 'platelets_x_1000',
'total_bilirubin', 'urinary_creatinine', 'creatinine', 'HCO3', 'pH',
'paCO2', 'direct_bilirubin', 'excess', 'ast', 'bun', 'calcium',
'glucose', 'lactate', 'magnesium', 'phosphate', 'potassium', 'hct',
'hgb', 'ptt', 'wbc', 'fibrinogen', 'troponin', 'GCS_Score', 'heartrate', 'respiration']
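        # For each of the 27 variables in colm, 7 summary statistics are collected
        # below (std, kurtosis, skewness, mean, min, max, rms_diff), plus the label.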
dct = {}
for col in colm:
dct[col+'_std'] = []
dct[col+'_kurtosis'] = []
dct[col+'_skewness'] = []
dct[col+'_mean'] = []
dct[col+'_minimum'] = []
dct[col+'_maximum'] = []
dct[col+'_rms_diff'] = []
dct['label'] = []
for pid in pids:
if sum(sorted_df[sorted_df.patientunitstayid==pid]['label'])==0:
for col in colm:
                    extracted_feature = self.feature_fun(col, sorted_df[sorted_df.patientunitstayid==pid])
dct[col+'_std'].append(extracted_feature[0])
dct[col+'_kurtosis'].append(extracted_feature[1])
dct[col+'_skewness'].append(extracted_feature[2])
dct[col+'_mean'].append(extracted_feature[3])
dct[col+'_minimum'].append(extracted_feature[4])
dct[col+'_maximum'].append(extracted_feature[5])
dct[col+'_rms_diff'].append(extracted_feature[6])
dct['label'].append(0)
else:
                sepsis_onset_idx = sorted_df[(sorted_df.patientunitstayid==pid) & (sorted_df['label']==1)].index.values.astype(int)[0]
                sepsis_onset_offset = sorted_df[sorted_df.patientunitstayid==pid].loc[sepsis_onset_idx]['offset']
                # print(sorted_df[sorted_df.patientunitstayid==pid])
                data_start = sorted_df[(sorted_df.patientunitstayid==pid) & (sorted_df['offset']>sepsis_onset_offset-(time_duration+time_prior)*60)].index.values.astype(int)[0]
                data_end = sorted_df[(sorted_df.patientunitstayid==pid) & (sorted_df['offset']>sepsis_onset_offset-(time_prior)*60)].index.values.astype(int)[0]
# print(sorted_df[sorted_df.patientunitstayid==pid])
# print("Possible",data_start, data_end)
if time_prior*60<sorted_df[sorted_df.patientunitstayid==pid].loc[sepsis_onset_idx]['offset']-sorted_df[sorted_df.patientunitstayid==pid].iloc[0]['offset']:
if data_start<data_end:
# print(sorted_df.loc[data_start:data_end+1])
for col in colm:
                            extracted_feature = self.feature_fun(col, sorted_df.loc[data_start:data_end])
dct[col+'_std'].append(extracted_feature[0])
dct[col+'_kurtosis'].append(extracted_feature[1])
dct[col+'_skewness'].append(extracted_feature[2])
dct[col+'_mean'].append(extracted_feature[3])
dct[col+'_minimum'].append(extracted_feature[4])
dct[col+'_maximum'].append(extracted_feature[5])
dct[col+'_rms_diff'].append(extracted_feature[6])
dct['label'].append(1)
df = pd.DataFrame.from_dict(dct)
df.to_csv('Sepsis'+str(time_prior)+'-'+str(time_duration)+str(opt)+'.csv')
def case_preprocess(self, df):
temp_df=df.drop(columns=['Unnamed: 0'])
temp_df=temp_df.dropna()
sepsis_df = temp_df[temp_df['label']==1]
return sepsis_df
    def control_preprocess(self, df):
temp_df=df.drop(columns=['Unnamed: 0'])
temp_df=temp_df.dropna()
controls_df = temp_df[temp_df['label']==0]
return controls_df
def get_controls(self, df):
downsampled_df, _, _, _ = train_test_split(df, df['label'], test_size=0.01)
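        # Note: with test_size=0.01 the first return value is a random ~99% subset of
        # the control rows; the held-out 1% and the label splits are discarded.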
return downsampled_df
def run_xgboost(self, runs, sepsis_X_train, sepsis_x_cv, sepsis_y_cv, X_train, x_cv, y_cv):
params = {'eta': 0.1, 'max_depth': 6, 'scale_pos_weight': 1, 'objective': 'reg:linear','subsample':0.25,'verbose': False}
xgb_model = None
Temp_X_cv = copy.copy(sepsis_x_cv)
Temp_y_cv = copy.copy(sepsis_y_cv)
for i in range(runs):
            pf = pd.concat([sepsis_X_train, self.get_controls(X_train).reset_index(drop=True)])
labels = pf['label']
print("count: ", i+1)
print(sum(labels), len(labels))
if True:
temp, X_cv, label, Y_cv = train_test_split(pf, labels, test_size=0.05)
xg_train_1 = clf1.DMatrix(temp.drop(['label'],axis=1), label=label)
xg_test = clf1.DMatrix(X_cv.drop(['label'],axis=1), label=Y_cv)
model = clf1.train(params, xg_train_1, 50, xgb_model=xgb_model)
model.save_model('model.model')
xgb_model = 'model.model'
print("Fold"+str(i)+'training')
print(classification_report(Y_cv, (model.predict(xg_test)>0.5).astype(int)))
print('F1 score:', f1_score(Y_cv, (model.predict(xg_test)>0.5).astype(int)))
print("Fold"+str(i)+'test')
CV_X = pd.concat([sepsis_x_cv, x_cv])
cv_y = | pd.concat([sepsis_y_cv, y_cv]) | pandas.concat |
import logging
import os
import pandas as pd
from os.path import join as join_path
from app.utils import kpi_metrics
from collections import defaultdict
from app.utils import utils
from app.config.load_config import LoadJson
class Evaluate_model:
def __init__(self,data_txt='val.txt',metadata='metadata.pickle',kpi_name='KPI_metrics.csv'):
self.json_pipeline = LoadJson()
self.classes = self.json_pipeline.get_labels()
# get inference parameters
self.iou ,self.cnf,self.outpath = self.json_pipeline.get_inferparams()
self.metadata_path = os.path.join(self.outpath ,metadata)
self.kpiresult = os.path.join(self.json_pipeline.dir_path, kpi_name)
self.val_text = os.path.join(self.json_pipeline.model_path, data_txt)
self.val_csv = join_path(self.json_pipeline.model_path,data_txt[:-4]+'.csv')
# define parameters
self.gt_dict = defaultdict(list)
self.pred_dict = defaultdict(list)
self.result_csv = join_path(self.outpath,'predictions.csv')
self.transform()
self.fit()
def transform(self):
with open(self.val_text, 'r+')as fd:
val_imgs = fd.readlines()
val_imgs = list(map(lambda x: x.strip(),val_imgs))
utils.read_and_save_ascsv(self.metadata_path,val_imgs,csv_path=self.result_csv)
# read both Csv and create dataframe
test_df = pd.read_csv(self.result_csv)
val_df = | pd.read_csv(self.val_csv) | pandas.read_csv |
import pytest
import collections
from pathlib import Path
import pandas as pd
from mbf_genomics import DelayedDataFrame
from mbf_genomics.annotator import Constant, Annotator
import pypipegraph as ppg
from pypipegraph.testing import run_pipegraph, force_load
from pandas.testing import assert_frame_equal
from mbf_genomics.util import find_annos_from_column
class LenAnno(Annotator):
def __init__(self, name):
self.columns = [name]
def calc(self, df):
return pd.DataFrame(
{self.columns[0]: ["%s%i" % (self.columns[0], len(df))] * len(df)}
)
@pytest.mark.usefixtures("no_pipegraph")
@pytest.mark.usefixtures("clear_annotators")
class Test_DelayedDataFrameDirect:
def test_create(self):
test_df = pd.DataFrame({"A": [1, 2]})
def load():
return test_df
a = DelayedDataFrame("shu", load)
assert_frame_equal(a.df, test_df)
assert a.non_annotator_columns == "A"
def test_create_from_df(self):
test_df = pd.DataFrame({"A": [1, 2]})
a = DelayedDataFrame("shu", test_df)
assert_frame_equal(a.df, test_df)
assert a.non_annotator_columns == "A"
def test_write(self):
test_df = pd.DataFrame({"A": [1, 2]})
def load():
return test_df
a = DelayedDataFrame("shu", load, result_dir="sha")
assert Path("sha").exists()
assert_frame_equal(a.df, test_df)
assert a.non_annotator_columns == "A"
fn = a.write()[1]
assert "/sha" in str(fn.parent.absolute())
assert fn.exists()
assert_frame_equal(pd.read_csv(fn, sep="\t"), test_df)
def test_write_excel(self):
test_df = pd.DataFrame({"A": [1, 2]})
def load():
return test_df
a = DelayedDataFrame("shu", load, result_dir="sha")
assert Path("sha").exists()
assert_frame_equal(a.df, test_df)
assert a.non_annotator_columns == "A"
fn = a.write("sha.xls")[1]
assert fn.exists()
assert_frame_equal(pd.read_excel(fn), test_df)
def test_write_excel2(self):
data = {}
for i in range(0, 257):
c = "A%i" % i
d = [1, 1]
data[c] = d
test_df = pd.DataFrame(data)
def load():
return test_df
a = DelayedDataFrame("shu", load, result_dir="sha")
fn = a.write("sha.xls")[1]
assert fn.exists()
assert_frame_equal(pd.read_excel(fn), test_df)
def test_write_mangle(self):
test_df = pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
def load():
return test_df
a = DelayedDataFrame("shu", load)
assert_frame_equal(a.df, test_df)
assert (a.non_annotator_columns == ["A", "B"]).all()
def mangle(df):
df = df.drop("A", axis=1)
df = df[df.B == "c"]
return df
fn = a.write("test.csv", mangle)[1]
assert fn.exists()
assert_frame_equal(pd.read_csv(fn, sep="\t"), mangle(test_df))
def test_magic(self):
test_df = pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
a = DelayedDataFrame("shu", lambda: test_df)
assert hash(a)
assert a.name in str(a)
assert a.name in repr(a)
def test_annotator(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
a += Constant("column", "value")
a.annotate()
assert "column" in a.df.columns
assert (a.df["column"] == "value").all()
def test_add_non_anno(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
with pytest.raises(TypeError):
a += 5
def test_annotator_wrong_columns(self):
class WrongConstant(Annotator):
def __init__(self, column_name, value):
self.columns = [column_name]
self.value = value
def calc(self, df):
return pd.DataFrame({"shu": self.value}, index=df.index)
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
with pytest.raises(ValueError):
a += WrongConstant("column", "value")
def test_annotator_minimum_columns(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
assert "Direct" in str(a.load_strategy)
class MissingCalc(Annotator):
column_names = ["shu"]
with pytest.raises(AttributeError):
a += MissingCalc()
class EmptyColumnNames(Annotator):
columns = []
def calc(self, df):
return pd.DataFrame({})
with pytest.raises(IndexError):
a += EmptyColumnNames()
class EmptyColumnNamesButCacheName(Annotator):
cache_name = "shu"
columns = []
def calc(self, df):
return pd.DataFrame({})
with pytest.raises(IndexError):
a += EmptyColumnNamesButCacheName()
class MissingColumnNames(Annotator):
def calc(self, df):
pass
with pytest.raises(AttributeError):
a += MissingColumnNames()
class NonListColumns(Annotator):
columns = "shu"
def calc(self, df):
pass
with pytest.raises(ValueError):
a += NonListColumns()
def test_DynamicColumNames(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
class Dynamic(Annotator):
@property
def columns(self):
return ["a"]
def calc(self, df):
return pd.DataFrame({"a": ["x", "y"]})
a += Dynamic()
a.annotate()
assert_frame_equal(
a.df, pd.DataFrame({"A": [1, 2], "B": ["c", "d"], "a": ["x", "y"]})
)
def test_annos_added_only_once(self):
count = [0]
class CountingConstant(Annotator):
def __init__(self, column_name, value):
count[0] += 1
self.columns = [column_name]
self.value = value
def calc(self, df):
return pd.DataFrame({self.columns[0]: self.value}, index=df.index)
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
c = CountingConstant("hello", "c")
a += c
a.annotate()
assert "hello" in a.df.columns
assert count[0] == 1
a += c # this get's ignored
def test_annos_same_column_different_anno(self):
count = [0]
class CountingConstant(Annotator):
def __init__(self, column_name, value):
count[0] += 1
self.columns = [column_name]
self.value = value
def calc(self, df):
return pd.DataFrame({self.columns[0]: self.value}, index=df.index)
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
c = CountingConstant("hello", "c")
a += c
a.annotate()
assert "hello" in a.df.columns
assert count[0] == 1
c = CountingConstant("hello2", "c")
a += c
a.annotate()
assert "hello2" in a.df.columns
assert count[0] == 2
d = CountingConstant("hello2", "d")
assert c is not d
with pytest.raises(ValueError):
a += d
def test_annos_same_column_different_anno2(self):
class A(Annotator):
cache_name = "hello"
columns = ["aa"]
def calc(self, df):
return pd.DataFrame({self.columns[0]: "a"}, index=df.index)
class B(Annotator):
cache_name = "hello2"
columns = ["aa"]
def calc(self, df):
return pd.DataFrame({self.columns[0]: "a"}, index=df.index)
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
a += A()
with pytest.raises(ValueError):
a += B()
def test_annos_dependening(self):
class A(Annotator):
cache_name = "hello"
columns = ["aa"]
def calc(self, df):
return pd.DataFrame({self.columns[0]: "a"}, index=df.index)
class B(Annotator):
cache_name = "hello2"
columns = ["ab"]
def calc(self, df):
return df["aa"] + "b"
def dep_annos(self):
return [A()]
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
a += B()
a.annotate()
assert "ab" in a.df.columns
assert "aa" in a.df.columns
assert (a.df["ab"] == (a.df["aa"] + "b")).all()
def test_annos_dependening_none(self):
class A(Annotator):
cache_name = "hello"
columns = ["aa"]
def calc(self, df):
return pd.DataFrame({self.columns[0]: "a"}, index=df.index)
class B(Annotator):
cache_name = "hello2"
columns = ["ab"]
def calc(self, df):
return df["aa"] + "b"
def dep_annos(self):
return [None, A(), None]
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
a += B()
a.annotate()
assert "ab" in a.df.columns
assert "aa" in a.df.columns
assert (a.df["ab"] == (a.df["aa"] + "b")).all()
def test_filtering(self):
class A(Annotator):
cache_name = "A"
columns = ["aa"]
def calc(self, df):
return pd.DataFrame({self.columns[0]: "a"}, index=df.index)
class B(Annotator):
cache_name = "B"
columns = ["ab"]
def calc(self, df):
return df["aa"] + "b"
def dep_annos(self):
return [A()]
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
a += Constant("C", "c")
assert "C" in a.df.columns
b = a.filter("sha", lambda df: df["A"] == 1)
assert "C" in b.df.columns
a += A()
assert "aa" in a.df.columns
assert "aa" in b.df.columns
b += B()
assert "ab" in b.df.columns
assert not "ab" in a.df.columns
def test_filtering2(self):
counts = collections.Counter()
class A(Annotator):
cache_name = "A"
columns = ["aa"]
def calc(self, df):
counts["A"] += 1
return pd.DataFrame({self.columns[0]: "a"}, index=df.index)
class B(Annotator):
cache_name = "B"
columns = ["ab"]
def calc(self, df):
counts["B"] += 1
return df["aa"] + "b"
def dep_annos(self):
return [A()]
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
b = a.filter("sha", lambda df: df["A"] == 1)
b += B()
assert "aa" in b.df.columns
assert "ab" in b.df.columns
assert not "aa" in a.df.columns
assert not "ab" in a.df.columns
assert counts["A"] == 1
a += A()
assert "aa" in a.df.columns
assert counts["A"] == 2 # no two recalcs
assert not "ab" in a.df.columns
a += B()
assert "ab" in a.df.columns
assert counts["A"] == 2 # no two recalcs
assert counts["B"] == 2 # no two recalcs
def test_filtering_result_dir(self):
counts = collections.Counter()
class A(Annotator):
cache_name = "A"
columns = ["aa"]
def calc(self, df):
counts["A"] += 1
return pd.DataFrame({self.columns[0]: "a"}, index=df.index)
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
b = a.filter("sha", lambda df: df["A"] == 1, result_dir="shu2")
assert b.result_dir.absolute() == Path("shu2").absolute()
def test_filtering_on_annotator(self):
class A(Annotator):
cache_name = "A"
columns = ["aa"]
def calc(self, df):
return pd.DataFrame(
{self.columns[0]: (["a", "b"] * int(len(df) / 2 + 1))[: len(df)]},
index=df.index,
)
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
with pytest.raises(KeyError):
b = a.filter("sha", lambda df: df["aa"] == "a")
b = a.filter("sha", lambda df: df["aa"] == "a", [A()])
canno = Constant("C", "c")
a += canno
b += canno
assert (b.df["A"] == [1]).all()
def test_multi_level(self):
a = DelayedDataFrame(
"shu",
lambda: pd.DataFrame(
{"A": [1, 2, 3], "B": ["a", "b", "c"], "idx": ["x", "y", "z"]}
).set_index("idx"),
)
b = a.filter("sha", lambda df: df["C"] == 4, Constant("C", 4))
a1 = LenAnno("count")
b += a1
c = b.filter("shc", lambda df: df["A"] >= 2)
a2 = LenAnno("count2")
c += a2
c.annotate()
print(c.df)
assert len(c.df) == 2
assert (c.df["A"] == [2, 3]).all()
assert (c.df["count"] == "count3").all()
assert (c.df["count2"] == "count22").all()
def test_anno_not_returning_enough_rows_and_no_index_range_index_on_df(self):
class BrokenAnno(Annotator):
columns = ["X"]
def calc(self, df):
return pd.DataFrame({"X": [1]})
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2, 3], "B": ["a", "b", "c"]})
)
with pytest.raises(ValueError) as excinfo:
a += BrokenAnno()
print(str(excinfo))
assert "Length and index mismatch " in str(excinfo.value)
def test_anno_returning_series(self):
a = DelayedDataFrame(
"shu",
lambda: pd.DataFrame(
{"A": [1, 2, 3], "B": ["a", "b", "c"], "idx": ["x", "y", "z"]}
).set_index("idx"),
)
class SeriesAnno(Annotator):
columns = ["C"]
def calc(self, df):
return pd.Series(list(range(len(df))))
a += SeriesAnno()
assert (a.df["C"] == [0, 1, 2]).all()
def test_anno_returning_series_but_defined_two_columns(self):
a = DelayedDataFrame(
"shu",
lambda: pd.DataFrame(
{"A": [1, 2, 3], "B": ["a", "b", "c"], "idx": ["x", "y", "z"]}
).set_index("idx"),
)
class SeriesAnno(Annotator):
columns = ["C", "D"]
def calc(self, df):
return pd.Series(list(range(len(df))))
with pytest.raises(ValueError) as excinfo:
a += SeriesAnno()
assert "result was no dataframe" in str(excinfo)
def test_anno_returning_string(self):
a = DelayedDataFrame(
"shu",
lambda: pd.DataFrame(
{"A": [1, 2, 3], "B": ["a", "b", "c"], "idx": ["x", "y", "z"]}
).set_index("idx"),
)
class SeriesAnno(Annotator):
columns = ["C", "D"]
def calc(self, df):
return "abc"
with pytest.raises(ValueError) as excinfo:
a += SeriesAnno()
assert "return non DataFrame" in str(excinfo)
def test_anno_returing_right_length_but_wrong_start_range_index(self):
a = DelayedDataFrame("shu", lambda: pd.DataFrame({"A": [1, 2, 3]}))
class BadAnno(Annotator):
columns = ["X"]
def calc(self, df):
return pd.Series(["a", "b", "c"], index=pd.RangeIndex(5, 5 + 3))
with pytest.raises(ValueError) as excinfo:
a += BadAnno()
assert "Index mismatch" in str(excinfo)
def test_lying_about_columns(self):
a = DelayedDataFrame(
"shu",
lambda: pd.DataFrame(
{"A": [1, 2, 3], "B": ["a", "b", "c"], "idx": ["x", "y", "z"]}
).set_index("idx"),
)
class SeriesAnno(Annotator):
columns = ["C"]
def calc(self, df):
return pd.DataFrame({"D": [0, 1, 2]})
with pytest.raises(ValueError) as excinfo:
a += SeriesAnno()
assert "declared different" in str(excinfo)
def test_filtering_by_definition_operators(self):
a = DelayedDataFrame("shu", pd.DataFrame({"A": [-1, 0, 1, 2, 3, 4]}))
assert (a.filter("a1", [("A", "==", 0)]).df["A"] == [0]).all()
assert (a.filter("a2", [("A", ">=", 3)]).df["A"] == [3, 4]).all()
assert (a.filter("a3", [("A", "<=", 0)]).df["A"] == [-1, 0]).all()
assert (a.filter("a4", [("A", ">", 3)]).df["A"] == [4]).all()
assert (a.filter("a5", [("A", "<", 0)]).df["A"] == [-1]).all()
assert (a.filter("a6", [("A", "|>", 0)]).df["A"] == [-1, 1, 2, 3, 4]).all()
assert (a.filter("a7", [("A", "|>=", 1)]).df["A"] == [-1, 1, 2, 3, 4]).all()
assert (a.filter("a8", [("A", "|<", 2)]).df["A"] == [-1, 0, 1]).all()
assert (a.filter("a9", [("A", "|<=", 2)]).df["A"] == [-1, 0, 1, 2]).all()
with pytest.raises(ValueError):
a.filter("a10", [("A", "xx", 2)])
class XAnno(Annotator):
def __init__(self, column_name, values):
self.columns = [column_name]
self.values = values
def calc(self, df):
return pd.DataFrame({self.columns[0]: self.values}, index=df.index)
@pytest.mark.usefixtures("both_ppg_and_no_ppg")
@pytest.mark.usefixtures("clear_annotators")
class Test_DelayedDataFrameBoth:
def test_filtering_by_definition(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
c = XAnno("C", [1, 2])
a += c
d = XAnno("D", [4, 5])
# native column
a1 = a.filter("a1", ("A", "==", 1))
# search for the anno
a2 = a.filter("a2", ("C", "==", 2))
# extract the column name from the anno - anno already added
a4 = a.filter("a4", (d, "==", 5))
# extract the column name from the anno - anno not already added
a3 = a.filter("a3", (c, "==", 1))
# lookup column to name
a6 = a.filter("a6", ("X", "==", 2), column_lookup={"X": "C"})
# lookup column to anno
a7 = a.filter("a7", ("X", "==", 2), column_lookup={"X": c})
if not ppg.inside_ppg():
e1 = XAnno("E", [6, 7])
e2 = XAnno("E", [6, 8])
assert find_annos_from_column("E") == [e1, e2]
# column name to longer unique
with pytest.raises(KeyError):
a.filter("a5", ("E", "==", 5))
with pytest.raises(KeyError):
a.filter("a5", ((c, "D"), "==", 5))
force_load(a1.annotate())
force_load(a2.annotate())
force_load(a3.annotate())
force_load(a4.annotate())
force_load(a6.annotate())
force_load(a7.annotate())
run_pipegraph()
assert (a1.df["A"] == [1]).all()
assert (a2.df["A"] == [2]).all()
assert (a3.df["A"] == [1]).all()
assert (a4.df["A"] == [2]).all()
assert (a6.df["A"] == [2]).all()
assert (a7.df["A"] == [2]).all()
@pytest.mark.usefixtures("new_pipegraph")
class Test_DelayedDataFramePPG:
def test_create(self):
test_df = pd.DataFrame({"A": [1, 2]})
def load():
return test_df
a = DelayedDataFrame("shu", load)
assert not hasattr(a, "df")
print("load is", a.load())
force_load(a.load(), False)
ppg.run_pipegraph()
assert_frame_equal(a.df, test_df)
assert a.non_annotator_columns == "A"
def test_write(self):
test_df = pd.DataFrame({"A": [1, 2]})
def load():
return test_df
a = DelayedDataFrame("shu", load)
fn = a.write()[0]
ppg.run_pipegraph()
assert Path(fn.filenames[0]).exists()
assert_frame_equal(pd.read_csv(fn.filenames[0], sep="\t"), test_df)
def test_write_mixed_manglers(self):
test_df = pd.DataFrame({"A": [1, 2]})
def load():
return test_df
a = DelayedDataFrame("shu", load)
a.write(mangler_function=lambda df: df)
def b(df):
return df.head()
ok = False
try:
a.write(mangler_function=b)
except Exception as e:
se = str(type(e))
if "JobContractError" in se: # ppg
ok = True
elif "JobRedefinitionError" in se: # ppg2
ok = True
if not ok:
raise ValueError("Did not raise the expected exception")
def test_annotator_basic(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
a += Constant("aa", "aa")
force_load(a.annotate())
ppg.run_pipegraph()
assert (a.df["aa"] == "aa").all()
def test_annotator_raising(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
class RaiseAnno(Annotator):
columns = ["aa"]
cache_name = "empty"
def calc(self, df):
raise ValueError("hello")
anno1 = RaiseAnno()
a += anno1
force_load(a.annotate())
with pytest.raises(ppg.RuntimeError):
ppg.run_pipegraph()
anno_job = a.anno_jobs[RaiseAnno().get_cache_name()]
assert "hello" in str(anno_job.lfg.exception)
def test_annotator_columns_not_list(self):
class BrokenAnno(Annotator):
def __init__(
self,
):
self.columns = "shu"
def calc(self, df):
return pd.DataFrame(
{self.columns[0]: ["%s%i" % (self.columns[0], len(df))] * len(df)}
)
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
a += BrokenAnno()
lg = a.anno_jobs[BrokenAnno().get_cache_name()]
force_load(a.annotate())
with pytest.raises(ppg.RuntimeError):
ppg.run_pipegraph()
assert "list" in str(lg().lfg.exception)
def test_annotator_empty_columns(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
class EmptyColumnNames(Annotator):
columns = []
cache_name = "empty"
def calc(self, df):
return pd.DataFrame({"shu": [1, 2]})
def __repr__(self):
return "EmptyColumNames()"
a += EmptyColumnNames()
force_load(a.annotate())
anno_job_cb = a.anno_jobs[EmptyColumnNames().get_cache_name()]
with pytest.raises(ppg.RuntimeError):
ppg.run_pipegraph()
assert anno_job_cb() is anno_job_cb()
assert "anno.columns was empty" in repr(anno_job_cb().exception)
def test_annotator_missing_columns(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
class MissingColumnNames(Annotator):
cache_name = "MissingColumnNames"
def calc(self, df):
return pd.DataFrame({})
def __repr__(self):
return "MissingColumnNames()"
a += MissingColumnNames()
lg = a.anno_jobs["MissingColumnNames"]
force_load(a.annotate())
with pytest.raises(ppg.RuntimeError):
ppg.run_pipegraph()
assert "AttributeError" in repr(lg().lfg.exception)
def test_DynamicColumNames(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
class Dynamic(Annotator):
@property
def columns(self):
return ["a"]
def calc(self, df):
return pd.DataFrame({"a": ["x", "y"]})
a += Dynamic()
a.anno_jobs[Dynamic().get_cache_name()]
force_load(a.annotate())
ppg.run_pipegraph()
assert_frame_equal(
a.df, | pd.DataFrame({"A": [1, 2], "B": ["c", "d"], "a": ["x", "y"]}) | pandas.DataFrame |
import pytest
import moto
import boto3
@pytest.fixture(scope="function")
def moto_s3():
with moto.mock_s3():
s3 = boto3.resource("s3", region_name="us-east-1")
s3.create_bucket(
Bucket="bucket",
)
yield s3
@pytest.fixture(scope="module")
def moto_glue():
import os
with moto.mock_glue():
region_name = "us-east-1"
os.environ["AWS_DEFAULT_REGION"] = region_name
glue = boto3.client("glue", region_name=region_name)
yield glue
def test_glue(moto_glue, moto_s3):
from flytelineage.glue import GlueCatalogTarget
target = GlueCatalogTarget(bucket_path="bucket/prefix", kms_key_arn="bogus")
from flytelineage.interface import Pipeline
pipeline = Pipeline(
id="1",
name="a.b.c",
)
from flytelineage.dataset import DatasetSchema
import numpy as np
import pandas as pd
ds = DatasetSchema("foo")
a = np.array([[5, "hello", True], [2, "goodbye", False]])
df = pd.DataFrame(a, columns=list("ABC"))
schema = ds.infer(df)
dataset = (ds, schema, df)
result = target.ingest(pipeline, [dataset])
assert len(result) == 1
assert result[0].get("paths")[0].startswith("s3://bucket/prefix/flyte_a_b/foo/")
def test_glue_with_db(moto_glue, moto_s3):
database_name = "mydb"
import awswrangler as wr
wr.catalog.create_database(name=database_name)
from flytelineage.glue import GlueCatalogTarget
target = GlueCatalogTarget(
bucket_path="bucket/prefix", kms_key_arn="bogus", db_name=database_name
)
from flytelineage.interface import Pipeline
pipeline = Pipeline(
id="1",
name="a.b.c",
)
from flytelineage.dataset import DatasetSchema
import numpy as np
import pandas as pd
ds = DatasetSchema("foo")
a = np.array([[5, "hello", True], [2, "goodbye", False]])
df = pd.DataFrame(a, columns=list("ABC"))
schema = ds.infer(df)
dataset = (ds, schema, df)
result = target.ingest(pipeline, [dataset])
assert len(result) == 1
assert result[0].get("paths")[0].startswith("s3://bucket/prefix/mydb/foo")
def test_glue_error(moto_glue, moto_s3):
from flytelineage.glue import GlueCatalogTarget
target = GlueCatalogTarget(bucket_path="bucket/prefix", kms_key_arn="bogus")
from flytelineage.interface import Pipeline
pipeline = Pipeline(
id="1",
name="a.b.c",
)
from flytelineage.dataset import DatasetSchema
import numpy as np
import pandas as pd
ds = DatasetSchema("foo")
a = np.array([[5, "hello", True], [2, "goodbye", False]])
df = | pd.DataFrame(a) | pandas.DataFrame |
from collections import ChainMap
from datetime import datetime
import pandas as pd
from dbnd._core.tracking.schemas.column_stats import ColumnStatsArgs
from targets.value_meta import ValueMetaConf
from targets.values.pandas_histograms import PandasHistograms
# fmt: off
diverse_df = pd.DataFrame({
'int_column': [6, 7, None, 1, 9, None, 3, 7, 5, 1, 1, 6, 7, 3, 7, 4, 5, 4, 3, 7, 3,
2, None, 6, 6, 2, 4, None, 7, 2, 2, 6, 9, 6, 1, 9, 2, 4, 0, 5, 3, 8,
9, 6, 7, 5, None, 1, 1, 2, None, 5, 6, 8, 6, 9, 1, 9, 5, 9, 6, 5, 6,
8, 9, 1, 9, 4, None, 3, 1, 6, 1, 4, 9, 3, 1, 2, None, 7, 3, 1, 9, 2,
4, 5, 2, 8, 7, 8, 1, 7, 7, 6, 3, 0, 6, 8, 6, 9],
'float_column': [9.0, 4.0, 6.0, 6.0, 7.0, 2.0, 5.0, 1.0, 8.0, 4.0, 3.0, 4.0, 2.0,
7.0, 3.0, 9.0, 7.0, 5.0, 3.0, 9.0, 4.0, 9.0, None, 5.0, 5.0, 2.0,
4.0, 4.0, 7.0, 5.0, 1.0, 8.0, 7.0, 4.0, 1.0, 0.0, 6.0, 2.0, 1.0,
2.0, 7.0, 3.0, 0.0, 8.0, 3.0, 2.0, None, 0.0, 8.0, None, 9.0, 2.0,
2.0, 9.0, 1.0, 6.0, 6.0, 1.0, 0.0, 8.0, 7.0, 9.0, 2.0, 9.0, 9.0,
2.0, 0.0, 7.0, 5.0, 7.0, 3.0, 5.0, 1.0, 2.0, 4.0, 3.0, 1.0, 0.0,
3.0, 1.0, 4.0, 8.0, 2.0, None, 2.0, 9.0, 7.0, 7.0, 8.0, 5.0, 7.0,
None, 7.0, 4.0, 8.0, 7.0, 9.0, 7.0, 6.0, None],
'bool_column': [None, True, None, True, None, None, None, True, True, None, None,
True, None, True, None, None, False, False, None, False, None,
True, False, False, True, None, True, None, False, False, None,
True, False, True, None, None, None, None, None, True, True, None,
None, None, False, None, True, None, True, False, True, True,
False, False, None, False, False, True, True, None, None, True,
True, True, False, None, False, True, False, False, False, None,
False, False, None, True, True, False, None, True, False, False,
True, True, False, None, None, True, False, False, False, False,
False, True, False, False, None, False, True, True],
'str_column': ['baz', 'baz', 'bar', None, '', '', 'baz', 'foo', None, '', 'bar',
None, 'bar', 'baz', '', None, 'foo', None, 'bar', None, 'bar',
'bar', '', None, 'foo', '', 'bar', 'foo', 'baz', None, '', 'bar',
'foo', 'foo', 'foo', 'foo', 'bar', None, None, 'foo', '', '', '',
'bar', 'foo', '', 'bar', '', '', 'baz', 'baz', 'bar', 'baz', 'baz',
None, '', 'foo', '', None, 'baz', 'baz', 'baz', 'foo', 'foo', 'baz',
None, 'foo', None, 'foo', None, 'bar', None, 'bar', 'baz', 'foo',
'foo', None, 'foo', '', 'baz', 'baz', 'baz', None, 'bar', None,
None, 'bar', '', 'foo', 'baz', 'baz', '', 'foo', 'baz', 'foo', '',
'bar', None, 'foo', ''],
"multi_data_types": [
"string_type","another_one",datetime(2020, 1, 1),None,pd.DataFrame({"...": [1]}),42,"42",24,"foo","foo",
"string_type","another_one",datetime(2020, 1, 1),None,pd.DataFrame({"...": [1]}),42,"42",24,"24","foo",
"string_type","another_one",datetime(2020, 1, 1),None,pd.DataFrame({"...": [1]}),42,"42",24,"24","foo",
"string_type","another_one",datetime(2020, 1, 1),None, | pd.DataFrame({"...": [1]}) | pandas.DataFrame |
"""
This module implements plotting functions useful to report analysis results.
Author: <NAME>, <NAME>, 2017
"""
import warnings
from string import ascii_lowercase
import numpy as np
import pandas as pd
import nibabel as nib
from scipy import ndimage
from nilearn.image import threshold_img
from nilearn.image.resampling import coord_transform
from nilearn._utils import check_niimg_3d
from nilearn._utils.niimg import _safe_get_data
def _local_max(data, affine, min_distance):
"""Find all local maxima of the array, separated by at least min_distance.
Adapted from https://stackoverflow.com/a/22631583/2589328
Parameters
----------
data : array_like
3D array of with masked values for cluster.
affine : np.ndarray
Square matrix specifying the position of the image array data
in a reference space.
min_distance : int
Minimum distance between local maxima in ``data``, in terms of mm.
Returns
-------
ijk : `numpy.ndarray`
(n_foci, 3) array of local maxima indices for cluster.
vals : `numpy.ndarray`
(n_foci,) array of values from data at ijk.
"""
ijk, vals = _identify_subpeaks(data)
xyz, ijk, vals = _sort_subpeaks(ijk, vals, affine)
ijk, vals = _pare_subpeaks(xyz, ijk, vals, min_distance)
return ijk, vals
def _identify_subpeaks(data):
"""Identify cluster peak and subpeaks based on minimum distance.
Parameters
----------
data : `numpy.ndarray`
3D array of with masked values for cluster.
Returns
-------
ijk : `numpy.ndarray`
(n_foci, 3) array of local maximum indices for cluster.
vals : `numpy.ndarray`
(n_foci,) array of values from data at ijk.
Notes
-----
When a cluster's local maximum corresponds to contiguous voxels with the
same values (as in a binary cluster), this function determines the center
of mass for those voxels.
"""
# Initial identification of subpeaks with minimal minimum distance
data_max = ndimage.filters.maximum_filter(data, 3)
maxima = data == data_max
data_min = ndimage.filters.minimum_filter(data, 3)
diff = (data_max - data_min) > 0
maxima[diff == 0] = 0
labeled, n_subpeaks = ndimage.label(maxima)
labels_index = range(1, n_subpeaks + 1)
ijk = np.array(ndimage.center_of_mass(data, labeled, labels_index))
ijk = np.round(ijk).astype(int)
vals = np.apply_along_axis(
arr=ijk, axis=1, func1d=_get_val, input_arr=data
)
# Determine if all subpeaks are within the cluster
# They may not be if the cluster is binary and has a shape where the COM is
# outside the cluster, like a donut.
cluster_idx = np.vstack(np.where(labeled)).T.tolist()
subpeaks_outside_cluster = [
i
for i, peak_idx in enumerate(ijk.tolist())
if peak_idx not in cluster_idx
]
vals[subpeaks_outside_cluster] = np.nan
if subpeaks_outside_cluster:
warnings.warn(
"Attention: At least one of the (sub)peaks falls outside of the "
"cluster body."
)
return ijk, vals
def _sort_subpeaks(ijk, vals, affine):
"""Sort subpeaks in cluster in descending order of stat value.
Parameters
----------
ijk : 2D numpy.ndarray
The matrix indices of subpeaks to sort.
vals : 1D numpy.ndarray
The statistical value associated with each subpeak in ``ijk``.
affine : (4x4) numpy.ndarray
The affine of the img from which the subpeaks were extracted.
Used to convert IJK indices to XYZ coordinates.
Returns
-------
xyz : 2D numpy.ndarray
The sorted coordinates of the subpeaks.
ijk : 2D numpy.ndarray
The sorted matrix indices of subpeaks.
vals : 1D numpy.ndarray
The sorted statistical value associated with each subpeak in ``ijk``.
"""
order = (-vals).argsort()
vals = vals[order]
ijk = ijk[order, :]
xyz = nib.affines.apply_affine(affine, ijk) # Convert to xyz in mm
return xyz, ijk, vals
def _pare_subpeaks(xyz, ijk, vals, min_distance):
"""Reduce list of subpeaks based on distance.
Parameters
----------
xyz : 2D numpy.ndarray
Subpeak coordinates to reduce. Rows correspond to peaks, columns
correspond to x, y, and z dimensions.
ijk : 2D numpy.ndarray
The subpeak coordinates in ``xyz``, but converted to matrix indices.
vals : 1D numpy.ndarray
The statistical value associated with each subpeak in ``xyz``/``ijk``.
min_distance : float
The minimum distance between subpeaks, in millimeters.
Returns
-------
ijk : 2D numpy.ndarray
The reduced index of subpeaks.
vals : 1D numpy.ndarray
The statistical values associated with the reduced set of subpeaks.
"""
keep_idx = np.ones(xyz.shape[0]).astype(bool)
for i in range(xyz.shape[0]):
for j in range(i + 1, xyz.shape[0]):
if keep_idx[i] == 1:
dist = np.linalg.norm(xyz[i, :] - xyz[j, :])
keep_idx[j] = dist > min_distance
ijk = ijk[keep_idx, :]
vals = vals[keep_idx]
return ijk, vals
def _get_val(row, input_arr):
"""Extract values from array based on index.
Parameters
----------
row : :obj:`tuple` of length 3
3-length index into ``input_arr``.
input_arr : 3D :obj:`numpy.ndarray`
Array from which to extract value.
Returns
-------
:obj:`float` or :obj:`int`
The value from ``input_arr`` at the row index.
"""
i, j, k = row
return input_arr[i, j, k]
def get_clusters_table(stat_img, stat_threshold, cluster_threshold=None,
two_sided=False, min_distance=8.):
"""Creates pandas dataframe with img cluster statistics.
This function should work on any statistical maps where more extreme values
indicate greater statistical significance.
For example, z-statistic or -log10(p) maps are valid inputs, but a p-value
map is not.
.. important::
For binary clusters (clusters comprised of only one value),
the table reports the center of mass of the cluster,
rather than any peaks/subpeaks.
This center of mass may, in some cases, appear outside of the cluster.
Parameters
----------
stat_img : Niimg-like object
Statistical image to threshold and summarize.
stat_threshold : :obj:`float`
Cluster forming threshold. This value must be in the same scale as
``stat_img``.
cluster_threshold : :obj:`int` or None, optional
Cluster size threshold, in :term:`voxels<voxel>`.
If None, then no cluster size threshold will be applied. Default=None.
two_sided : :obj:`bool`, optional
Whether to employ two-sided thresholding or to evaluate positive values
only. Default=False.
min_distance : :obj:`float`, optional
Minimum distance between subpeaks, in millimeters. Default=8.
.. note::
If two different clusters are closer than ``min_distance``, it can
result in peaks closer than ``min_distance``.
Returns
-------
df : :obj:`pandas.DataFrame`
Table with peaks and subpeaks from thresholded ``stat_img``.
The columns in this table include:
================== ====================================================
Cluster ID The cluster number. Subpeaks have letters after the
number.
X/Y/Z The coordinate for the peak, in millimeters.
Peak Stat The statistical value associated with the peak.
The statistic type is dependent on the type of the
statistical image.
Cluster Size (mm3) The size of the cluster, in millimeters cubed.
Rows corresponding to subpeaks will not have a value
in this column.
================== ====================================================
"""
cols = ['Cluster ID', 'X', 'Y', 'Z', 'Peak Stat', 'Cluster Size (mm3)']
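    # Example usage (sketch): for a z-statistic image `zmap`,
    #   get_clusters_table(zmap, stat_threshold=3.1, cluster_threshold=10)
    # returns one row per peak/subpeak of the thresholded map.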
# Replace None with 0
cluster_threshold = 0 if cluster_threshold is None else cluster_threshold
# check that stat_img is niimg-like object and 3D
stat_img = check_niimg_3d(stat_img)
# Apply threshold(s) to image
stat_img = threshold_img(
img=stat_img,
threshold=stat_threshold,
cluster_threshold=cluster_threshold,
two_sided=two_sided,
mask_img=None,
copy=True,
)
# If cluster threshold is used, there is chance that stat_map will be
# modified, therefore copy is needed
stat_map = _safe_get_data(stat_img, ensure_finite=True,
copy_data=(cluster_threshold is not None))
# Define array for 6-connectivity, aka NN1 or "faces"
conn_mat = ndimage.generate_binary_structure(rank=3, connectivity=1)
voxel_size = np.prod(stat_img.header.get_zooms())
signs = [1, -1] if two_sided else [1]
no_clusters_found = True
rows = []
for sign in signs:
# Flip map if necessary
temp_stat_map = stat_map * sign
# Binarize using cluster-defining threshold
binarized = temp_stat_map > stat_threshold
binarized = binarized.astype(int)
# If the stat threshold is too high simply return an empty dataframe
if np.sum(binarized) == 0:
warnings.warn(
'Attention: No clusters with stat {0} than {1}'.format(
'higher' if sign == 1 else 'lower',
stat_threshold * sign,
)
)
continue
# Now re-label and create table
label_map = ndimage.measurements.label(binarized, conn_mat)[0]
clust_ids = sorted(list(np.unique(label_map)[1:]))
peak_vals = np.array(
[np.max(temp_stat_map * (label_map == c)) for c in clust_ids])
# Sort by descending max value
clust_ids = [clust_ids[c] for c in (-peak_vals).argsort()]
for c_id, c_val in enumerate(clust_ids):
cluster_mask = label_map == c_val
masked_data = temp_stat_map * cluster_mask
cluster_size_mm = int(np.sum(cluster_mask) * voxel_size)
# Get peaks, subpeaks and associated statistics
subpeak_ijk, subpeak_vals = _local_max(
masked_data,
stat_img.affine,
min_distance=min_distance,
)
subpeak_vals *= sign # flip signs if necessary
subpeak_xyz = np.asarray(
coord_transform(
subpeak_ijk[:, 0],
subpeak_ijk[:, 1],
subpeak_ijk[:, 2],
stat_img.affine,
)
).tolist()
subpeak_xyz = np.array(subpeak_xyz).T
# Only report peak and, at most, top 3 subpeaks.
n_subpeaks = np.min((len(subpeak_vals), 4))
for subpeak in range(n_subpeaks):
if subpeak == 0:
row = [
c_id + 1,
subpeak_xyz[subpeak, 0],
subpeak_xyz[subpeak, 1],
subpeak_xyz[subpeak, 2],
subpeak_vals[subpeak],
cluster_size_mm,
]
else:
# Subpeak naming convention is cluster num+letter:
# 1a, 1b, etc
sp_id = '{0}{1}'.format(
c_id + 1,
ascii_lowercase[subpeak - 1],
)
row = [
sp_id,
subpeak_xyz[subpeak, 0],
subpeak_xyz[subpeak, 1],
subpeak_xyz[subpeak, 2],
subpeak_vals[subpeak],
'',
]
rows += [row]
# If we reach this point, there are clusters in this sign
no_clusters_found = False
if no_clusters_found:
df = | pd.DataFrame(columns=cols) | pandas.DataFrame |
# coding: utf-8
# Import libraries
import pandas as pd
from pandas import ExcelWriter
import numpy as np
import pickle
def create_m1():
"""
The CREATE_M1 operation builds the first data matrix for each gene of interest, collecting the current gene expression and methylation values, along with the expression values of all the genes in the same gene set. One data matrix for each target gene is created and exported locally in as many Excel files as the considered genes; while the whole set of M1 matrixes is returned as a Python dictionary (dict_model_v1.p), where each target gene (set as key) is associated to a Pandas dataframe containing M1 data of interest (set as value).
:return: a Python dictionary
Example::
import genereg as gr
m1_dict = gr.DataMatrixes.create_m1()
"""
# Load input data:
# Genes of interest
EntrezConversion_df = pd.read_excel('./Genes_of_Interest.xlsx',sheetname='Sheet1',header=0,converters={'GENE_SYMBOL':str,'ENTREZ_GENE_ID':str,'GENE_SET':str})
# Methylation values for genes of interest
methyl_df = pd.read_excel('./3_TCGA_Data/Methylation/Methylation_Values.xlsx',sheetname='Sheet1',header=0)
# Gene expression values for genes of interest
expr_interest_df = pd.read_excel('./3_TCGA_Data/Gene_Expression/Gene_Expression-InterestGenes.xlsx',sheetname='Sheet1',header=0)
# Create a list containing the Gene Symbols of the genes of interest
gene_interest_SYMs = []
for i, r in EntrezConversion_df.iterrows():
sym = r['GENE_SYMBOL']
if sym not in gene_interest_SYMs:
gene_interest_SYMs.append(sym)
# Get the TCGA aliquots
aliquots = []
for i, r in methyl_df.iterrows():
if i != 'ENTREZ_GENE_ID':
aliquots.append(i)
# Create a dictionary where, for each gene of interest set as key (the model gene), we have a dataframe representing the model (matrix of data) of that gene.
    # This model contains the expression and methylation values of the model gene in the first and second columns, and the expression of all the genes that belong to the
# model gene set in the other columns, while the different TCGA aliquots are the indexes of the rows.
dict_model_v1 = {}
# Define the variables we need for the computation
model_gene_pathways = [] # list of the gene sets the model gene belongs to
same_pathway_genes = [] # list of the symbols of the genes belonging to the same gene sets as the model gene
    df_columns = [] # list of the model column names
# Execute the following code for each gene of interest
for gene in gene_interest_SYMs:
model_gene_SYM = gene # get the Gene Symbol of the current gene
# Get the gene sets of the model gene
for i, r in EntrezConversion_df.iterrows():
sym = r['GENE_SYMBOL']
if sym == model_gene_SYM:
p = r['GENE_SET']
model_gene_pathways.append(p)
# Get the genes of interest belonging to the model gene set
for i, r in EntrezConversion_df.iterrows():
path = r['GENE_SET']
if path in model_gene_pathways:
symbol = r['GENE_SYMBOL']
if symbol != model_gene_SYM:
same_pathway_genes.append(symbol)
# Define the columns of the model gene matrix of data
        df_columns.append('EXPRESSION ('+model_gene_SYM+')') # the first model column contains the expression of the model gene
        df_columns.append('METHYLATION ('+model_gene_SYM+')') # the second model column contains the methylation of the model gene
for g in same_pathway_genes:
df_columns.append(g) # we have a column for each gene in the same gene set of the model gene
# In correspondence of the model gene key in the dictionary,
# set its model as value, with the proper indexes and column names
dict_model_v1[model_gene_SYM] = pd.DataFrame(index = aliquots, columns = df_columns)
# Reset the variables for the next iteration on the next gene of interest
model_gene_pathways = []
same_pathway_genes = []
df_columns = []
# Fill the models for each gene of interest
for gene, matrix in dict_model_v1.items():
first_col = 'EXPRESSION ('+gene+')'
second_col = 'METHYLATION ('+gene+')'
# Add the expression and methylation values of each model gene and for each TCGA aliquot
for index, row in matrix.iterrows():
model_expr = expr_interest_df.get_value(index,gene) # get the expression
            model_methyl = methyl_df.get_value(index,gene) # get the methylation value
# set the two values in the correct cell of the matrix
matrix.set_value(index,first_col,model_expr)
matrix.set_value(index,second_col,model_methyl)
# Add the expression values for all the other genes belonging to the same gene set of the model gene
for index, row in matrix.iterrows():
for column_name, values in matrix.iteritems(): # iterate along the columns of the dataframe
# skip the first two columns and add the proper values
if (column_name != first_col) and (column_name != second_col):
expr = expr_interest_df.get_value(index,column_name)
matrix.set_value(index,column_name,expr)
# Export the dictionary into a pickle file in order to be able to import it back and use it to progressively build the next models for the genes of interest, adding further information.
pickle.dump(dict_model_v1, open('./4_Data_Matrix_Construction/Model1/dict_model_v1.p', 'wb'))
# Export the models as .xlsx files
for gene in gene_interest_SYMs:
model_gene_SYM = gene
pathway = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == model_gene_SYM, 'GENE_SET'].iloc[0]
gene_ID = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == model_gene_SYM, 'ENTREZ_GENE_ID'].iloc[0]
file_name = 'Gene_'+gene_ID+'_['+model_gene_SYM+']'+'_('+pathway+')-Model_v1.xlsx'
writer = ExcelWriter('./4_Data_Matrix_Construction/Model1/'+file_name)
output_df = dict_model_v1[model_gene_SYM]
output_df.to_excel(writer,'Sheet1')
writer.save()
# Handle genes belonging to multiple gene sets
multiple_pathway_genes = []
n = EntrezConversion_df['GENE_SYMBOL'].value_counts()
for i, v in n.items():
if v > 1 :
multiple_pathway_genes.append(i)
for g in multiple_pathway_genes:
filtered_df = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == g]
pathways = (filtered_df.GENE_SET.unique()).tolist()
gene_ID = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == g, 'ENTREZ_GENE_ID'].iloc[0]
for p in pathways:
current_pathway_model = dict_model_v1[g].copy()
# Extract the genes of interest in the current gene set
current_pathway_genes = []
for i, r in EntrezConversion_df.iterrows():
sym = r['GENE_SYMBOL']
path = r['GENE_SET']
if path == p:
current_pathway_genes.append(sym)
# Extract list of columns in the full model
all_columns = []
for column_name, values in current_pathway_model.iteritems():
if (column_name != 'EXPRESSION ('+g+')') and (column_name != 'METHYLATION ('+g+')'):
all_columns.append(column_name)
# Extract the columns to remove from the model
other_pathway_genes = list(set(all_columns) - set(current_pathway_genes))
for i in other_pathway_genes:
if (i != g):
current_pathway_model.drop(i, axis=1, inplace=True)
writer = ExcelWriter('./4_Data_Matrix_Construction/Model1/Gene_'+gene_ID+'_['+g+']_('+p+')-Model_v1.xlsx')
current_pathway_model.to_excel(writer,'Sheet1')
writer.save()
return dict_model_v1
def create_m2():
"""
The CREATE_M2 operation builds the second data matrix for each gene of interest, adding to the first matrix the expression of the candidate regulatory genes of each gene of interest. One data matrix per target gene is created and exported locally as an Excel file (one file per considered gene), while the whole set of M2 matrices is returned as a Python dictionary (dict_model_v2.p), where each target gene (set as key) is associated with a Pandas dataframe containing its M2 data (set as value).
:return: a Python dictionary
Example::
import genereg as gr
m2_dict = gr.DataMatrixes.create_m2()
"""
# Load input data:
# Genes of interest
EntrezConversion_df = pd.read_excel('./Genes_of_Interest.xlsx',sheetname='Sheet1',header=0,converters={'GENE_SYMBOL':str,'ENTREZ_GENE_ID':str,'GENE_SET':str})
# Models_v1 of genes of interest
dict_model_v1 = pickle.load(open('./4_Data_Matrix_Construction/Model1/dict_model_v1.p', 'rb'))
# Distinct regulatory genes for each gene of interest
dict_RegulGenes = pickle.load(open('./2_Regulatory_Genes/dict_RegulGenes.p', 'rb'))
# Gene expression values for regulatory genes
expr_regulatory_df = pd.read_excel('./3_TCGA_Data/Gene_Expression/Gene_Expression-RegulatoryGenes.xlsx',sheetname='Sheet1',header=0)
# Create a list containing the Gene Symbols of the genes of interest
gene_interest_SYMs = []
for i, r in EntrezConversion_df.iterrows():
sym = r['GENE_SYMBOL']
if sym not in gene_interest_SYMs:
gene_interest_SYMs.append(sym)
# Get the TCGA aliquots
aliquots = []
for i, r in expr_regulatory_df.iterrows():
if i != 'ENTREZ_GENE_ID':
aliquots.append(i)
# Create a dictionary where, for each gene of interest set as key (the model gene), we have a dataframe representing the model (matrix of data) of that gene.
# This model contains all the information in the first model, plus additional columns with the expression of the regulatory genes for each model gene,
# while the different TCGA aliquots are the indexes of the rows
dict_model_v2 = {}
# Define the variables we need for the computation
model_gene_RegulGenes_SYM = [] # list of gene symbols for the regulatory genes of the model gene
new_columns = [] # list of the new columns names to be added to the model
# Execute the following code for each gene of interest
for gene in gene_interest_SYMs:
model_gene_SYM = gene # get the Gene Symbol of the current gene
# Get the list of regulatory genes for the model gene
model_gene_RegulGenes_SYM = dict_RegulGenes[model_gene_SYM]
# Get the first model for the current gene (model_v1)
model_1_df = dict_model_v1[model_gene_SYM]
# Identify the new columns to be added to the matrix:
# in this case they are the columns corresponding to regulatory genes of the model gene
# (be careful not to have duplicated columns, so add only the symbols of the genes
# that are not already contained in the previous model)
old_columns = list(model_1_df.columns.values)
for g in model_gene_RegulGenes_SYM:
if g not in old_columns:
new_columns.append(g)
# Create the new part of the model to add
new_df = pd.DataFrame(index = aliquots, columns = new_columns)
# Add the expression values for all the new regulatory genes and for each TCGA aliquot
for index, row in new_df.iterrows():
for column_name, values in new_df.iteritems(): # iterate along the columns of the dataframe
expr = expr_regulatory_df.get_value(index,column_name)
new_df.set_value(index,column_name,expr)
# Join the two dataframes and create the new model (model_v2)
model_2_df = model_1_df.join(new_df)
# Store the new model under the corresponding model gene key in the new dictionary
dict_model_v2[model_gene_SYM] = model_2_df
# Reset the variables for the next iteration on the next gene of interest
model_gene_RegulGenes_SYM = []
new_columns = []
# Check if some genes of interest have their own as candidate regulatory genes. If so, remove that column from the matrix
for gene in gene_interest_SYMs:
data_matrix = dict_model_v2[gene]
matrix_cols = list(data_matrix.columns.values)
if gene in matrix_cols:
data_matrix.drop(gene, axis=1, inplace=True)
# Export the dictionary into a pickle file in order to be able to import it back and use it to progressively build the next models for the genes of interest, adding further information
pickle.dump(dict_model_v2, open('./4_Data_Matrix_Construction/Model2/dict_model_v2.p', 'wb'))
# Export the models as .xlsx files
for gene in gene_interest_SYMs:
model_gene_SYM = gene
pathway = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == model_gene_SYM, 'GENE_SET'].iloc[0]
gene_ID = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == model_gene_SYM, 'ENTREZ_GENE_ID'].iloc[0]
file_name = 'Gene_'+gene_ID+'_['+model_gene_SYM+']'+'_('+pathway+')-Model_v2.xlsx'
writer = ExcelWriter('./4_Data_Matrix_Construction/Model2/'+file_name)
output_df = dict_model_v2[model_gene_SYM]
output_df.to_excel(writer,'Sheet1')
writer.save()
# Handle genes belonging to multiple gene sets
multiple_pathway_genes = []
n = EntrezConversion_df['GENE_SYMBOL'].value_counts()
for i, v in n.items():
if v > 1 :
multiple_pathway_genes.append(i)
for g in multiple_pathway_genes:
filtered_df = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == g]
pathways = (filtered_df.GENE_SET.unique()).tolist()
gene_ID = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == g, 'ENTREZ_GENE_ID'].iloc[0]
for p in pathways:
# Import the 'model_v1' matrix for the current gene
current_pathway_model = pd.read_excel('./4_Data_Matrix_Construction/Model1/Gene_'+gene_ID+'_['+g+']_('+p+')-Model_v1.xlsx',sheetname='Sheet1',header=0)
# Get the list of regulatory genes for the model gene
current_gene_RegulGenes_SYM = dict_RegulGenes[g]
# Create the M2 model for the current gene in the current gene set, identifying the new columns to be added to the matrix
current_pathway_new_columns = []
current_pathway_old_columns = list(current_pathway_model.columns.values)
for gene in current_gene_RegulGenes_SYM:
if gene not in current_pathway_old_columns:
current_pathway_new_columns.append(gene)
# Create the new part of the model to add
current_pathway_new_df = pd.DataFrame(index = aliquots, columns = current_pathway_new_columns)
# Add the expression values for all the new regulatory genes and for each TCGA aliquot
for index, row in current_pathway_new_df.iterrows():
for column_name, values in current_pathway_new_df.iteritems(): # iterate along the columns of the dataframe
expr = expr_regulatory_df.get_value(index,column_name)
current_pathway_new_df.set_value(index,column_name,expr)
# Join the two dataframes and create the new model (model_v2)
current_pathway_model_2_df = current_pathway_model.join(current_pathway_new_df)
# Check if some genes of interest have their own as candidate regulatory genes. If so, remove that column from the matrix
current_pathway_matrix_cols = list(current_pathway_model_2_df.columns.values)
if g in current_pathway_matrix_cols:
current_pathway_model_2_df.drop(g, axis=1, inplace=True)
writer = ExcelWriter('./4_Data_Matrix_Construction/Model2/Gene_'+gene_ID+'_['+g+']_('+p+')-Model_v2.xlsx')
current_pathway_model_2_df.to_excel(writer,'Sheet1')
writer.save()
return dict_model_v2
def create_m3():
"""
The CREATE_M3 operation builds the third data matrix for each gene of interest, adding to the second matrix the expression of the candidate regulatory genes of the genes of interest belonging to the same gene sets as the model gene. One data matrix per target gene is created and exported locally as an Excel file (one file per considered gene), while the whole set of M3 matrices is returned as a Python dictionary (dict_model_v3.p), where each target gene (set as key) is associated with a Pandas dataframe containing its M3 data (set as value).
:return: a Python dictionary
Example::
import genereg as gr
m3_dict = gr.DataMatrixes.create_m3()
"""
# Load input data:
# Genes of interest
EntrezConversion_df = pd.read_excel('./Genes_of_Interest.xlsx',sheetname='Sheet1',header=0,converters={'GENE_SYMBOL':str,'ENTREZ_GENE_ID':str,'GENE_SET':str})
# Models_v2 of genes of interest
dict_model_v2 = pickle.load(open('./4_Data_Matrix_Construction/Model2/dict_model_v2.p', 'rb'))
# Distinct regulatory genes for each gene of interest
dict_RegulGenes = pickle.load(open('./2_Regulatory_Genes/dict_RegulGenes.p', 'rb'))
# Gene expression values for regulatory genes
expr_regulatory_df = pd.read_excel('./3_TCGA_Data/Gene_Expression/Gene_Expression-RegulatoryGenes.xlsx',sheetname='Sheet1',header=0)
# Create a list containing the Gene Symbols of the genes of interest
gene_interest_SYMs = []
for i, r in EntrezConversion_df.iterrows():
sym = r['GENE_SYMBOL']
if sym not in gene_interest_SYMs:
gene_interest_SYMs.append(sym)
# Get the TCGA aliquots
aliquots = []
for i, r in expr_regulatory_df.iterrows():
if i != 'ENTREZ_GENE_ID':
aliquots.append(i)
# Create a dictionary where, for each gene of interest set as key (the model gene), we have a dataframe representing the model (matrix of data) of that gene.
# This model contains all the information in the second model, plus additional columns with the expression of the regulatory genes for each one of the genes belonging to the model gene set,
# while the different TCGA aliquots are the indexes of the rows
dict_model_v3 = {}
# Define the variables we need for the computation
model_gene_pathways = [] # list of the gene sets the model gene belongs to
same_pathway_genes = [] # list of the symbols of the genes belonging to the same gene sets as the model gene
same_pathway_genes_RegulGenes_SYM = [] # list of gene symbols for the regulatory genes of the genes in the same gene set
new_columns = [] # list of the new columns names to be added to the model
# Execute the following code for each gene of interest
for gene in gene_interest_SYMs:
model_gene_SYM = gene # get the Gene Symbol of the current gene
# Get the gene sets of the model gene
for i, r in EntrezConversion_df.iterrows():
sym = r['GENE_SYMBOL']
if sym == model_gene_SYM:
p = r['GENE_SET']
model_gene_pathways.append(p)
# Get the genes of interest belonging to the model gene sets
for i, r in EntrezConversion_df.iterrows():
path = r['GENE_SET']
if path in model_gene_pathways:
symbol = r['GENE_SYMBOL']
if symbol != model_gene_SYM:
same_pathway_genes.append(symbol)
# Get the list of regulatory genes for each one of the genes belonging to the same gene sets of the model gene
for elem in same_pathway_genes:
elem_regulatory_genes = dict_RegulGenes[elem]
same_pathway_genes_RegulGenes_SYM = same_pathway_genes_RegulGenes_SYM + elem_regulatory_genes
same_pathway_genes_RegulGenes_SYM = list(set(same_pathway_genes_RegulGenes_SYM)) # keep only distinct regulatory genes
# Get the second model for the current gene (model_v2)
model_2_df = dict_model_v2[model_gene_SYM]
# Identify the new columns to be added to the matrix:
# in this case they are the columns corresponding to regulatory genes of genes in the
# same gene sets of our model gene
# (be careful not to have duplicated columns, so add only the symbols of the genes
# that are not already contained in the previous model)
old_columns = list(model_2_df.columns.values)
for g in same_pathway_genes_RegulGenes_SYM:
if g not in old_columns:
new_columns.append(g)
# Create the new part of the model to add
new_df = pd.DataFrame(index = aliquots, columns = new_columns)
# Add the expression values for all the new regulatory genes and for each TCGA aliquot
for index, row in new_df.iterrows():
for column_name, values in new_df.iteritems(): # iterate along the columns of the dataframe
expr = expr_regulatory_df.get_value(index,column_name)
new_df.set_value(index,column_name,expr)
# Join the two dataframes and create the new model (model_v3)
model_3_df = model_2_df.join(new_df)
# Store the new model under the corresponding model gene key in the new dictionary
dict_model_v3[model_gene_SYM] = model_3_df
# Reset the variables for the next iteration on the next gene of interest
model_gene_pathways = []
same_pathway_genes = []
same_pathway_genes_RegulGenes_SYM = []
new_columns = []
# Remove duplicate columns of the model gene
for gene in gene_interest_SYMs:
data_matrix = dict_model_v3[gene]
matrix_cols = list(data_matrix.columns.values)
if gene in matrix_cols:
data_matrix.drop(gene, axis=1, inplace=True)
# Export the dictionary into a pickle file in order to be able to import it back and use it to progressively build the next models for the genes of interest, adding further information
pickle.dump(dict_model_v3, open('./4_Data_Matrix_Construction/Model3/dict_model_v3.p', 'wb'))
# Export the models as .xlsx files
for gene in gene_interest_SYMs:
model_gene_SYM = gene
pathway = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == model_gene_SYM, 'GENE_SET'].iloc[0]
gene_ID = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == model_gene_SYM, 'ENTREZ_GENE_ID'].iloc[0]
file_name = 'Gene_'+gene_ID+'_['+model_gene_SYM+']'+'_('+pathway+')-Model_v3.xlsx'
writer = ExcelWriter('./4_Data_Matrix_Construction/Model3/'+file_name)
output_df = dict_model_v3[model_gene_SYM]
output_df.to_excel(writer,'Sheet1')
writer.save()
# Handle genes belonging to multiple gene sets
multiple_pathway_genes = []
n = EntrezConversion_df['GENE_SYMBOL'].value_counts()
for i, v in n.items():
if v > 1 :
multiple_pathway_genes.append(i)
for g in multiple_pathway_genes:
filtered_df = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == g]
pathways = (filtered_df.GENE_SET.unique()).tolist()
gene_ID = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == g, 'ENTREZ_GENE_ID'].iloc[0]
for p in pathways:
# Import the 'model_v2' matrix for the current gene
current_pathway_model = pd.read_excel('./4_Data_Matrix_Construction/Model2/Gene_'+gene_ID+'_['+g+']_('+p+')-Model_v2.xlsx',sheetname='Sheet1',header=0)
# Current gene set model
current_pathway_genes = []
current_pathway_RegulGenes_SYM = []
current_pathway_new_columns = []
# Get the genes of interest belonging to the model gene set
for i, r in EntrezConversion_df.iterrows():
path = r['GENE_SET']
if path == p:
sym = r['GENE_SYMBOL']
if sym != g:
current_pathway_genes.append(sym)
# Get the list of regulatory genes for each one of the genes belonging to the same gene sets of the model gene
for elem in current_pathway_genes:
elem_regulatory_genes = dict_RegulGenes[elem]
current_pathway_RegulGenes_SYM = current_pathway_RegulGenes_SYM + elem_regulatory_genes
current_pathway_RegulGenes_SYM = list(set(current_pathway_RegulGenes_SYM)) # keep only distinct regulatory genes
# Identify the new columns to be added to the matrix
current_pathway_old_columns = list(current_pathway_model.columns.values)
for gene in current_pathway_RegulGenes_SYM:
if gene not in current_pathway_old_columns:
current_pathway_new_columns.append(gene)
# Create the new part of the model to add
current_pathway_new_df = pd.DataFrame(index = aliquots, columns = current_pathway_new_columns)
# Add the expression values for all the new regulatory genes and for each TCGA aliquot
for index, row in current_pathway_new_df.iterrows():
for column_name, values in current_pathway_new_df.iteritems():
expr = expr_regulatory_df.get_value(index,column_name)
current_pathway_new_df.set_value(index,column_name,expr)
# Join the two dataframes and create the new model (model_v3)
current_pathway_model_3_df = current_pathway_model.join(current_pathway_new_df)
# Remove duplicate columns of the model gene
current_pathway_matrix_cols = list(current_pathway_model_3_df.columns.values)
if g in current_pathway_matrix_cols:
current_pathway_model_3_df.drop(g, axis=1, inplace=True)
writer = ExcelWriter('./4_Data_Matrix_Construction/Model3/Gene_'+gene_ID+'_['+g+']_('+p+')-Model_v3.xlsx')
current_pathway_model_3_df.to_excel(writer,'Sheet1')
writer.save()
return dict_model_v3
def create_m4():
"""
The CREATE_M4 operation builds the fourth data matrix for each gene of interest, adding to the third matrix the expression of the genes of interest belonging to gene sets other than those of the model gene. One data matrix per target gene is created and exported locally as an Excel file (one file per considered gene), while the whole set of M4 matrices is returned as a Python dictionary (dict_model_v4.p), where each target gene (set as key) is associated with a Pandas dataframe containing its M4 data (set as value).
:return: a Python dictionary
Example::
import genereg as gr
m4_dict = gr.DataMatrixes.create_m4()
"""
# Load input data:
# Genes of interest
EntrezConversion_df = pd.read_excel('./Genes_of_Interest.xlsx',sheetname='Sheet1',header=0,converters={'GENE_SYMBOL':str,'ENTREZ_GENE_ID':str,'GENE_SET':str})
# Models_v3 of genes of interest
dict_model_v3 = pickle.load(open('./4_Data_Matrix_Construction/Model3/dict_model_v3.p', 'rb'))
# Gene expression values for genes of interest
expr_interest_df = pd.read_excel('./3_TCGA_Data/Gene_Expression/Gene_Expression-InterestGenes.xlsx',sheetname='Sheet1',header=0)
# Create a list containing the Gene Symbols of the genes of interest
gene_interest_SYMs = []
for i, r in EntrezConversion_df.iterrows():
sym = r['GENE_SYMBOL']
if sym not in gene_interest_SYMs:
gene_interest_SYMs.append(sym)
# Get the TCGA aliquots
aliquots = []
for i, r in expr_interest_df.iterrows():
if i != 'ENTREZ_GENE_ID':
aliquots.append(i)
# Create a dictionary where, for each gene of interest set as key (the model gene), we have a dataframe representing the model (matrix of data) of that gene.
# This model contains all the information of the third model, plus additional columns with the expression of the genes of interest that belong to gene sets different from the ones of the model gene,
# while the different TCGA aliquots are the indexes of the rows
dict_model_v4 = {}
# Define the variables we need for the computation
model_gene_pathways = [] # list of the gene sets the model gene belongs to
other_pathway_genes = [] # list of the symbols of the genes belonging to different gene sets
new_columns = [] # list of the new columns names to be added to the model
# Execute the following code for each gene of interest
for gene in gene_interest_SYMs:
model_gene_SYM = gene # get the Gene Symbol of the current gene
# Get the gene sets of the model gene
for i, r in EntrezConversion_df.iterrows():
sym = r['GENE_SYMBOL']
if sym == model_gene_SYM:
p = r['GENE_SET']
model_gene_pathways.append(p)
# Get the genes of interest belonging to other gene sets
for i, r in EntrezConversion_df.iterrows():
path = r['GENE_SET']
if (path not in model_gene_pathways) and (path != 'GLUCOSE_METABOLISM'):
symbol = r['GENE_SYMBOL']
if symbol not in other_pathway_genes: # consider only once the genes belonging to multiple gene sets
other_pathway_genes.append(symbol)
# Get the third model for the current gene (model_v3)
model_3_df = dict_model_v3[model_gene_SYM]
# Identify the new columns to be added to the matrix:
# in this case they are the columns corresponding to genes of interest belonging to different
# gene sets with respect to our model gene
# (be careful not to have duplicated columns, so add only the symbols of the genes
# that are not already contained in the previous model)
old_columns = list(model_3_df.columns.values)
for g in other_pathway_genes:
if g not in old_columns:
new_columns.append(g)
# Create the new part of the model to add
new_df = pd.DataFrame(index = aliquots, columns = new_columns)
# Add the expression values for all these genes of interest belonging to other gene sets and for each TCGA aliquot
for index, row in new_df.iterrows():
for column_name, values in new_df.iteritems(): # iterate along the columns of the dataframe
expr = expr_interest_df.get_value(index,column_name)
new_df.set_value(index,column_name,expr)
# Join the two dataframes and create the new model (model_v4)
model_4_df = model_3_df.join(new_df)
# Store the new model under the corresponding model gene key in the new dictionary
dict_model_v4[model_gene_SYM] = model_4_df
# Reset the variables for the next iteration on the next gene of interest
model_gene_pathways = []
other_pathway_genes = []
new_columns = []
# Export the dictionary into a pickle file in order to be able to import it back and use it to progressively build the next models for the genes of interest, adding further information
pickle.dump(dict_model_v4, open('./4_Data_Matrix_Construction/Model4/dict_model_v4.p', 'wb'))
# Export the models as .xlsx files
for gene in gene_interest_SYMs:
model_gene_SYM = gene
pathway = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == model_gene_SYM, 'GENE_SET'].iloc[0]
gene_ID = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == model_gene_SYM, 'ENTREZ_GENE_ID'].iloc[0]
file_name = 'Gene_'+gene_ID+'_['+model_gene_SYM+']'+'_('+pathway+')-Model_v4.xlsx'
writer = ExcelWriter('./4_Data_Matrix_Construction/Model4/'+file_name)
output_df = dict_model_v4[model_gene_SYM]
output_df.to_excel(writer,'Sheet1')
writer.save()
# Handle genes belonging to multiple gene sets
multiple_pathway_genes = []
n = EntrezConversion_df['GENE_SYMBOL'].value_counts()
for i, v in n.items():
if v > 1 :
multiple_pathway_genes.append(i)
for g in multiple_pathway_genes:
filtered_df = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == g]
pathways = (filtered_df.GENE_SET.unique()).tolist()
gene_ID = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == g, 'ENTREZ_GENE_ID'].iloc[0]
for p in pathways:
# Import the 'model_v3' matrix for the current gene
current_pathway_model = pd.read_excel('./4_Data_Matrix_Construction/Model3/Gene_'+gene_ID+'_['+g+']_('+p+')-Model_v3.xlsx',sheetname='Sheet1',header=0)
# Current gene set model
current_pathway_other_genes = []
current_pathway_new_columns = []
# Get the genes of interest belonging to other gene sets
for i, r in EntrezConversion_df.iterrows():
path = r['GENE_SET']
if (path != p):
symbol = r['GENE_SYMBOL']
if symbol != g:
current_pathway_other_genes.append(symbol)
# Identify the new columns to be added to the matrix
current_pathway_old_columns = list(current_pathway_model.columns.values)
for gene in current_pathway_other_genes:
if gene not in current_pathway_old_columns:
current_pathway_new_columns.append(gene)
# Create the new part of the model to add
current_pathway_new_df = pd.DataFrame(index = aliquots, columns = current_pathway_new_columns)
# Add the expression values for all these genes of interest belonging to other gene sets and for each TCGA aliquot
for index, row in current_pathway_new_df.iterrows():
for column_name, values in current_pathway_new_df.iteritems():
expr = expr_interest_df.get_value(index,column_name)
current_pathway_new_df.set_value(index,column_name,expr)
# Join the two dataframes and create the new model (model_v4)
current_pathway_model_4_df = current_pathway_model.join(current_pathway_new_df)
writer = ExcelWriter('./4_Data_Matrix_Construction/Model4/Gene_'+gene_ID+'_['+g+']_('+p+')-Model_v4.xlsx')
current_pathway_model_4_df.to_excel(writer,'Sheet1')
writer.save()
return dict_model_v4
def create_m5():
"""
The CREATE_M5 operation builds the fifth data matrix for each gene of interest, adding to the fourth matrix the expression of the candidate regulatory genes of the genes of interest belonging to gene sets other than those of the model gene. One data matrix per target gene is created and exported locally as an Excel file (one file per considered gene), while the whole set of M5 matrices is returned as a Python dictionary (dict_model_v5.p), where each target gene (set as key) is associated with a Pandas dataframe containing its M5 data (set as value).
:return: a Python dictionary
Example::
import genereg as gr
m5_dict = gr.DataMatrixes.create_m5()
"""
# Load input data:
# Genes of interest
EntrezConversion_df = | pd.read_excel('./Genes_of_Interest.xlsx',sheetname='Sheet1',header=0,converters={'GENE_SYMBOL':str,'ENTREZ_GENE_ID':str,'GENE_SET':str}) | pandas.read_excel |
import json
import os
from functools import partial
import pandas as pd
import scipy.stats
import statsmodels.stats.multitest
import torch
from sklearn.experimental import enable_iterative_imputer # noqa
from sklearn.impute import IterativeImputer
from sklearn.neighbors import KernelDensity
import numpy as np
from joblib import load
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from models.mdn.mdn import marginal_mog_log_prob, marginal_mog, mog_log_prob
from models.mdn.ensemble import MDNEnsemble
from processing.loading import process_turk_files
from processing.mappings import short_question_names, question_names, factor_structure, eval_2_cond_names
from search.coverage import trajectory_features
from search.util import load_map
import pingouin as pg
from mpl_toolkits.axes_grid1 import make_axes_locatable
from processing.mappings import factor_names
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import LeaveOneOut
from matplotlib.collections import LineCollection
from matplotlib.colors import ListedColormap, BoundaryNorm
def ks_test(ens, observed_by_condition, factor_names):
results = []
for cond, data in observed_by_condition:
print(cond)
cond_feats = data.iloc[0]["features"]
def make_callable_cdf(ens, d):
cond_feats_t = torch.Tensor(cond_feats).reshape([1, -1])
mog_params = ens.forward(cond_feats_t)
def callable_cdf(eval_x):
n_points = 600
x = np.linspace(-3, 3, n_points)
x_t = torch.Tensor(x)
mog_d = marginal_mog(mog_params, d)
log_probs = mog_log_prob(*mog_d, x_t.reshape([-1,1,1]))
mean_marg_prob = torch.mean(torch.exp(log_probs), dim=1)
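# Approximate the CDF numerically: zero out the ensemble-averaged marginal
# density beyond each evaluation point and integrate over the fixed grid x
# with the trapezoidal rule.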
up_to = x.reshape(-1, 1) < eval_x
masked = mean_marg_prob.repeat((len(eval_x), 1)).t()
masked[~up_to] = 0
cdf_at_x = torch.trapz(masked.t(), x_t.reshape([1,-1]))
cdf_at_x = cdf_at_x.detach().numpy()
return cdf_at_x
return callable_cdf
for i, factor in enumerate(factor_names):
model_cdf = make_callable_cdf(ens, i)
obs = data[factor]
res = scipy.stats.ks_1samp(obs, model_cdf)
results.append((cond, factor, res.statistic, res.pvalue))
return pd.DataFrame(data=results, columns=["condition", "factor", "statistic", "p"])
def kendalls_test(observed, factor_names):
results = []
for factor in factor_names:
x = observed["id"].str.slice(0,-1).astype(int).to_numpy()
y = observed[factor].to_numpy()
sort = np.lexsort((y,x))
x = x[sort]
y = y[sort]
res = scipy.stats.kendalltau(x, y)
results.append((factor, res.correlation, res.pvalue))
return pd.DataFrame(data=results, columns=["factor", "correlation", "p"])
def pairwise_test(data, condition_names):
post_results = ""
scale_value_by_condition = []
anova_results = []
for scale in factor_names:
# To get full p values
pd.set_option('display.float_format', lambda x: '%.3f' % x)
anova_result = pg.rm_anova(data, scale, subject='WorkerId', within=['id'])
post_results += "Scale: {}\n".format(scale)
post_results += "RM ANOVA\n"
post_results += anova_result.to_string() + "\n\n"
anova_results.append(anova_result)
if anova_result["p-unc"][0] > 0.05:
post_results += "-------------- \n\n\n"
continue
# Run all tests and apply post-hoc corrections
res = pg.pairwise_ttests(data, scale, subject='WorkerId', within=['num_id'], tail="two-sided",
padjust="holm", return_desc=True)
res["A"] = res["A"].map(condition_names)
res["B"] = res["B"].map(condition_names)
post_results += res.to_string() + "\n"
pd.set_option('display.float_format', lambda x: '%.2f' % x)
post_results += res.to_latex(
columns=["A", "B", "mean(A)", "std(A)", "mean(B)", "std(B)", "T", "p-corr", "hedges"], index=False)
post_results += "--------------\n\n\n"
return post_results
def compose_qualitative(ratings, comparison, condition_names):
out = ""
cond_qual = {}
for cond in condition_names.values():
cond_qual[cond] = []
by_condition = ratings.groupby("id")
for name, group in by_condition:
out += "------------\n"
out += name + "\n"
for _, (describe, explain, wid) in group[["describe", "explain", "WorkerId"]].iterrows():
out += describe + "\n"
out += explain + "\n"
out += "\n"
cond_qual[name].append(describe + "---" + explain + "--" + wid)
out += "**********************\n"
out += "MOST\n"
by_condition = comparison.groupby("most_id")
for name, group in by_condition:
out += "------------\n"
out += name + "\n"
for explain in group["most_explain"]:
out += explain + "\n"
out += "LEAST\n"
by_condition = comparison.groupby("least_id")
for name, group in by_condition:
out += "------------\n"
out += name + "\n"
for explain in group["least_explain"]:
out += explain + "\n"
out += "**********************\n"
# By worker
by_worker = ratings.groupby("WorkerId")
for worker, group in by_worker:
out += worker + "\n"
for _, (describe, explain, wid) in group[["describe", "explain", "WorkerId"]].iterrows():
out += describe + "\n"
out += explain + "\n"
out += "\n"
return out, cond_qual
def analyze_experiment(exp_name):
traj_file = "in.json"
env_name = "house.tmx"
condition_names = eval_2_cond_names
if "test" in exp_name:
traj_file = "test.json"
env_name = "house_test.tmx"
import sys
grid, bedroom = load_map(f"../interface/assets/{env_name}")
featurizer = partial(trajectory_features, bedroom, grid)
old_stdout = sys.stdout
sys.stdout = open(f"material/{exp_name}_data.txt", 'w')
condition_ratings = None
comparison = None
other_data = None
for base in [exp_name]:
cr, _, o, comp = process_turk_files(base + ".csv", traj_file=traj_file, featurizer=featurizer)
cr["experiment"] = base
q_names = [q_name for q_name in question_names if q_name in cr.columns]
# Fill in missing values
cr[cr[q_names] == 6] = np.nan
imp = IterativeImputer(missing_values=np.nan, max_iter=200, random_state=0, min_value=1, max_value=5)
to_impute = cr[q_names].to_numpy()
cr[q_names] = np.rint(imp.fit_transform(to_impute)).astype(int)
assert not cr[q_names].isnull().any().any() # no missing values should remain after imputation
assert not (cr[q_names] == 6).any().any()
comp["experiment"] = base
if condition_ratings is not None:
condition_ratings = pd.concat([condition_ratings, cr])
comparison = pd.concat([comparison, comp])
other_data = pd.concat([other_data, o])
else:
condition_ratings = cr
comparison = comp
other_data = o
pd.set_option('display.float_format', lambda x: '%.2f' % x)
condition_ratings["num_id"] = condition_ratings["id"]
condition_ratings["id"] = condition_ratings["id"].map(condition_names)
comparison["most_id"] = comparison["most_id"].map(condition_names)
comparison["least_id"] = comparison["least_id"].map(condition_names)
workers = other_data.groupby("WorkerId").first()
genders = workers["Answer.gender"]
print(f"{(genders.str.slice(0, 1) == 'm').sum()} male, {(genders.str.slice(0, 1) == 'f').sum()} female")
print(genders[~genders.str.contains("ale")].to_string())
print("N", len(workers), "min age", workers["Answer.age"].min(), "max age", workers["Answer.age"].max(), "M",
workers["Answer.age"].mean(), "SD", workers["Answer.age"].std())
alpha = []
for factor_name, components in factor_structure.items():
alpha.append(pg.cronbach_alpha(condition_ratings[components])[0])
print("Cronbach's alpha by factor:", alpha)
# Create factor loadings
exp_transformer = load("factor_model.pickle")
condition_ratings[factor_names] = exp_transformer.transform(condition_ratings[short_question_names].to_numpy())
condition_ratings["features"] = condition_ratings["trajectories"].apply(lambda x: featurizer(x))
ens = MDNEnsemble.load_ensemble(os.getcwd() + "/final_new_feats")
out = -torch.log(ens.mean_prob(torch.Tensor(np.vstack(condition_ratings["features"])),
torch.Tensor(condition_ratings[factor_names].to_numpy())))
condition_ratings["logprob"] = out.detach().numpy()
breakdown = condition_ratings.melt(id_vars=["id", "experiment"], value_vars="logprob", value_name="logprob").drop(
columns=["variable"]).groupby(["id", "experiment"])["logprob"].describe()
by_condition = condition_ratings.groupby("id")
print("AVG NLL\n", by_condition.describe()["logprob"][["mean", "std"]])
print("AVG NRL OVERALL\n", condition_ratings["logprob"].describe()[["mean", "std"]])
cond_names = by_condition.first().index
cond_batch = torch.Tensor(np.vstack(by_condition.first()["features"]))
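# Evaluate the model's marginal factor densities for each condition on a fixed
# grid of factor scores, averaging the mixture-of-Gaussians densities over the
# ensemble; these model densities are compared against the observed ratings below.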
n_points = 600
x = np.linspace(-3, 3, n_points)
x_b = torch.Tensor(x)
marg_log_prob = marginal_mog_log_prob(*ens.forward(cond_batch), x_b.reshape([-1, 1, 1]))
mean_marg_prob = torch.mean(torch.exp(marg_log_prob), dim=1)
mean_marg_prob = mean_marg_prob.detach().numpy()
# On why we can't do straight LLR test https://stats.stackexchange.com/questions/137557/comparison-of-log-likelihood-of-two-non-nested-models
new_data = []
for i, cond_name in enumerate(cond_names):
for j, factor in enumerate(factor_names):
new_data.append((cond_name, factor, mean_marg_prob[i, j]))
model_density = pd.DataFrame(data=new_data, columns=["condition", "factor", "density"])
pd.set_option('display.float_format', lambda x: '%.6f' % x)
print("**********************")
# This is not an equivalence test, but rather a test of difference
# two one-sided tests have been suggested as an equivalence testing procedure
# https://stats.stackexchange.com/questions/97556/is-there-a-simple-equivalence-test-version-of-the-kolmogorov-smirnov-test?rq=1
# https://stats.stackexchange.com/questions/174024/can-you-use-the-kolmogorov-smirnov-test-to-directly-test-for-equivalence-of-two?rq=1
ks_results = ks_test(ens, by_condition, factor_names)
for group, data in ks_results.groupby("factor"):
print(group)
print(data.reset_index())
#pvals = [0.039883, 0.001205, 0.310183, 0.043085, 0.179424, 0.026431, 0.344007, 0.127182, 0.267323, 0.125909, 0.837506, 0.652114]
#adj = statsmodels.stats.multitest.multipletests(pvals)
#pvals = [0.091473, 0.005065, 0.015585, 0.360311, 0.205270, 0.089199, 0.594448, 0.071204, 0.685286, 0.013982, 0.025368, 0.085334]
#adj2 = statsmodels.stats.multitest.multipletests(pvals)
pd.set_option('display.float_format', lambda x: '%.3f' % x)
print("*************************")
print("**********************")
post_results = pairwise_test(condition_ratings, condition_names)
print(post_results)
out, cond_qual = compose_qualitative(condition_ratings, comparison, condition_names)
print(out)
data = {"summary": cond_qual}
with open(f"material/{exp_name}_qual.json", 'w') as f:
json.dump(data, f)
turker_performance = pd.DataFrame()
turker_performance["HITTime"] = other_data.groupby("WorkerId")["WorkTimeInSeconds"].mean()
turker_performance["Comment"] = other_data.groupby("WorkerId")["Answer.comment"].apply(list)
# turker_performance.to_csv("turker_stats.txt", index=True)
sys.stdout = old_stdout
return condition_ratings, comparison, model_density, ks_results, other_data
exp_names = ["in_competence", "in_brokenness", "in_curiosity"]
test_names = ["test_competence", "test_brokenness",
"test_curiosity"]
n_points = 600
x = np.linspace(-3, 3, n_points)
plt.rcParams["font.family"] = "Times New Roman"
SMALL_SIZE = 7
MEDIUM_SIZE = 10
BIGGER_SIZE = 12
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=SMALL_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
fig, axs = plt.subplots(ncols=6, nrows=4, sharex=True, figsize=(5.5, 2.5))
plt.rcParams["font.family"] = "Times New Roman"
all_dat = []
ind = np.arange(4) # the x locations for the groups
width = 1 # the width of the bars
fig_ch, axs_ch = plt.subplots(ncols=6, nrows=1, sharex=True, sharey=True, figsize=(5.5, .75))
colors = plt.get_cmap("Dark2").colors
for exp_num, names in enumerate([exp_names, test_names]):
print(str(exp_num) + "------------------------------------------------------------")
all_log_prob = []
all_cond_ratings = []
all_ks_results = []
for dim_i, dim in enumerate(names):
condition_ratings, comparison, model_density, ks_results, other_data = analyze_experiment(dim)
all_cond_ratings.append(condition_ratings)
all_dat.append(other_data)
ks_results["experiment"] = dim
all_ks_results.append(ks_results)
all_log_prob.append(condition_ratings[["experiment", "id", "logprob"]])
focus_factor = dim.split("_")[1]
for factor_name in factor_names:
if focus_factor not in factor_name:
continue
condition_ratings.drop(columns=[factor_name], axis=1, inplace=True)
else:
focus_factor = factor_name
most_counts = comparison["most_id"].value_counts()
least_counts = comparison["least_id"].value_counts()
for name in ["1x", "2x", "4x", "12x"]:
if name not in most_counts:
most_counts[name] = 0
if name not in least_counts:
least_counts[name] = 0
ax = axs_ch[dim_i + 3 * exp_num]
ax.set_title(focus_factor.capitalize())
ax.bar(ind - width * 1. / 5, least_counts[["1x", "2x", "4x", "12x"]], color=colors[3], width=width * 2. / 5., label="Least")
ax.bar(ind + width * 1. / 5, most_counts[["1x", "2x", "4x", "12x"]], color=colors[4], width=width * 2. / 5., label="Most")
ax.set_xticks(ind)
ax.set_yticks([0, 10, 20])
ax.set_xticklabels(["1x", "2x", "4x", "12x"])
if exp_num == 1 and dim_i == 2:
ax.legend(prop={'size': 4})
for i, condition in enumerate(["1x", "2x", "4x", "12x"]):
ax = axs[i][dim_i + 3 * exp_num]
focus_density = model_density[model_density["factor"] == focus_factor]
focus_density_condition = focus_density[focus_density["condition"] == condition]["density"].to_numpy()[0]
ax.plot(x, focus_density_condition, color="grey", zorder=3)
focus_ratings = condition_ratings[condition_ratings["id"] == condition]
just_scores = focus_ratings[focus_factor].to_numpy().reshape(-1, 1)
ax.set_xlim((-3, 3))
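# Fit a Gaussian KDE to the observed factor scores, selecting the bandwidth by
# maximizing the leave-one-out cross-validated likelihood over a grid of
# candidate bandwidths.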
bandwidths = np.linspace(0.01, 1, 100)
grid = GridSearchCV(KernelDensity(kernel='gaussian'),
{'bandwidth': bandwidths},
cv=LeaveOneOut())
grid.fit(just_scores)
kde = KernelDensity(kernel='gaussian', bandwidth=grid.best_params_["bandwidth"]).fit(just_scores)
kde_dens = np.exp(kde.score_samples(x.reshape(-1, 1)))
points = np.array([x, kde_dens]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
kl_curve = np.log(kde_dens / focus_density_condition)
kl_curve = np.clip(kl_curve, -1, 1)
#norm = plt.Normalize(ratio.min(), ratio.max())
norm = plt.Normalize(-1, 1)
#lc = LineCollection(segments, cmap='PiYG', norm=norm)
lc = LineCollection(segments, cmap='Spectral', norm=norm, zorder=2)
# Set the values used for colormapping
lc.set_array(kl_curve)
lc.set_linewidth(2)
line = ax.add_collection(lc)
ax.plot(x, scipy.stats.norm.pdf(x, loc=1.5, scale=0.3), "--", lw=1, color="lightgray", zorder=1)
#ax.plot(x, log_dens, color="blue")
max_density = focus_density_condition.max()
max_kde = kde_dens.max()
ax.set_ylim((-.05, max(max_density, max_kde) + .05))
ax.set_xlabel(None)
ax.set_xticks([-3, -2, -1, 0, 1, 2, 3])
ax.set_yticks([])
#divider = make_axes_locatable(ax)
#cax = divider.append_axes("right", size="5%", pad=0.05)
#plt.colorbar(lc, ax=cax)
if i == 0:
ax.set_title(focus_factor.capitalize())
if dim_i == 0 and exp_num == 0:
ax.set_ylabel(condition, rotation=0, labelpad=8)
else:
ax.set_ylabel("")
# NLL TABLE
all_log_prob = pd.concat(all_log_prob)
# We're only addressing a single experiment here
all_log_prob["experiment"] = all_log_prob["experiment"].str.replace("in_", "", regex=False,)
all_log_prob["experiment"] = all_log_prob["experiment"].str.replace("test_", "", regex=False)
groups = all_log_prob.groupby(["experiment", "id"])
table = all_log_prob.groupby(["id", "experiment"]).describe()["logprob"][["mean", "std"]].unstack(1)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
fmted = table.to_latex(columns=[("mean", "competence"), ("std","competence"), ("mean", "brokenness"), ("std","brokenness"), ("mean", "curiosity"), ("std","curiosity")])
print(fmted)
print("By cond", all_log_prob.groupby("experiment").describe()["logprob"][["mean", "std"]].to_latex())
print(f"ALL in {exp_num}", all_log_prob.describe()["logprob"][["mean", "std"]])
print("------------")
cis = []
for (experiment, id), data in all_log_prob.groupby(["experiment", "id"]):
series = data["logprob"].to_numpy()
bs_mean = [np.random.choice(series, size=(24), replace=True).mean() for _ in range(1000)]
lower_mean, upper_mean = np.percentile(bs_mean, [2.5, 97.5])
mean_error = (upper_mean - lower_mean) / 2.0
bs_std = [np.random.choice(series, size=(24), replace=True).std() for _ in range(1000)]
lower_std, upper_std = np.percentile(bs_std, [2.5, 97.5])
std_error = (upper_std - lower_std) / 2.0
cis.append((experiment, id, series.mean(), series.std(), lower_mean, upper_mean, mean_error, lower_std, upper_std, std_error))
cis_data = pd.DataFrame(cis, columns=["experiment", "id", "mean", "std", "lower_mean", "upper_mean", "mean_error", "lower_std", "upper_std", "std_error"])
cis_by_exp = []
series = all_log_prob["logprob"].to_numpy()
n = len(series)
bs_mean = [np.random.choice(series, size=(n), replace=True).mean() for _ in range(1000)]
lower_mean, upper_mean = np.percentile(bs_mean, [2.5, 97.5])
mean_error = (upper_mean - lower_mean) / 2.0
bs_std = [np.random.choice(series, size=(n), replace=True).std() for _ in range(1000)]
lower_std, upper_std = np.percentile(bs_std, [2.5, 97.5])
std_error = (upper_std - lower_std) / 2.0
cis_by_exp.append((series.mean(), series.std(), lower_mean, upper_mean, mean_error, lower_std, upper_std, std_error))
cis_data_by_exp = pd.DataFrame(cis_by_exp, columns=["mean", "std", "lower_mean", "upper_mean", "mean_error", "lower_std", "upper_std", "std_error"])
| pd.set_option('display.float_format', lambda x: '%0.2f' % x) | pandas.set_option |
import pandas as pd
import os
class readBoundaryData():
"""
Reads in data placed in ./constant/boundaryData. This format is useful for
timeVaryingMappedFixedValue boundary conditions or the turbulentDFSEMInlet
boundary condition.
The data on a boundary patch can be exported by including a 'sample' file in
the system folder with this format:
/*--------------------------------*- C++ -*----------------------------------*\
| ========= | |
| \\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
| \\ / O peration | Version: 4.0 |
| \\ / A nd | Web: www.OpenFOAM.org |
| \\/ M anipulation | |
\*---------------------------------------------------------------------------*/
FoamFile
{
version 2.0;
format ascii;
class dictionary;
object sampleDict;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
type surfaces;
functionObjectLibs ("libsampling.so");
writeControl timeStep;
writeInterval 1;
enabled true;
surfaceFormat boundaryData;
interpolationScheme none;
interpolate false;
triangulate false;
fields
(
U k omega R
);
surfaces
(
inletSurface
{
type patch;
patches (nInlet);
}
outletSurface
{
type patch;
patches (nOutlet);
}
);
Calling this from the commandline with:
pisoFoam -postProcess -func R -latestTime (run first to get R)
postProcess -func sample -latestTime
will export the U, k, omega and Reynolds stress tensor (R) to the postProcessing directory.
Other fields can also be exported in a similar fashion.
This data can then be accessed in the dictionaries of the 'data' attribute of an instance of this class.
Perform the necessary operations on the fields in your python script and then export them to file.
This was useful for calculating the turbulent length scale using k and omega.
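For instance, that length-scale computation can be sketched roughly as follows
(the case path and patch key below are placeholders, C_mu = 0.09 is the usual
two-equation turbulence model constant, and the raw values are read in as
strings, so they must be cast to float first -- adapt to your own case):
import numpy as np
bd = readBoundaryData('path/to/case')
k = bd.data['inlet']['k']['k'].astype(float)
omega = bd.data['inlet']['omega']['omega'].astype(float)
length_scale = np.sqrt(k) / (0.09**0.25 * omega)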
"""
def __init__(self,path):
self.path=path
self.boundaryDataPath=self.path+'/'+'constant/'+'boundaryData'
self.getPatchDirs()
self.readData()
def getPatchDirs(self):
self.patchSubfolders = [ f.path for f in os.scandir(self.boundaryDataPath) if f.is_dir() ]
def readData(self):
self.data={}
for folder in self.patchSubfolders:
patchName=os.path.basename(folder)
print("Extracting boundary data for patch %s" % patchName)
self.data[patchName]={}
pointPath= folder + "/points"
self.data[patchName]['points'], self.data[patchName]['nmbPoints'] = self.readInData(pointPath)
self.data[patchName]['points'].columns = ['x','y','z']
times=os.listdir(folder)
for time in times:
if time == 'points':
continue
else:
for field in os.listdir(folder+'/'+time):
self.data[patchName][field],self.data[patchName]['nmbPoints']=self.readInData(folder+'/'+time+'/'+field)
if field == "U":
self.data[patchName][field].columns = ['u','v','w']
if field == "R":
self.data[patchName][field].columns = ['xx','xy','xz','yy','yz','zz']
if field == "k":
self.data[patchName][field].columns = ['k']
if field == "omega":
self.data[patchName][field].columns = ['omega']
def readInData(self,path):
with open(path, 'r') as f:
nmbPoints=None
data=[]
for line in f:
line=line.replace("(","")
line=line.replace(")","")
#skip empty lines
if not line.strip():
continue
#get number of points
if len(line.split()) == 1:
if nmbPoints is None:
nmbPoints=line
continue
data.append(line.split())
data= | pd.DataFrame(data) | pandas.DataFrame |
"""\
Data structures for expt.
The "Experiment" data is structured like a 4D array, i.e.
Experiment := [hypothesis_name, run_index, index, column]
The data is structured in the following ways (from higher to lower level):
Experiment (~= Dict[str, List[DataFrame]]):
An experiment consists of one or multiple Hypotheses (e.g. different
hyperparameters or algorithms) that can be compared with one another.
Hypothesis (~= List[DataFrame], or RunGroup):
A Hypothesis consists of several `Run`s that share an
identical experimental setups (e.g. hyperparameters).
This usually corresponds to one single curve for each model.
It may also contain additional metadata of the experiment.
Run (~= DataFrame == [index, column]):
Contains a pandas DataFrame (a table-like structure, str -> Series)
as well as more metadata (e.g. path, seed, etc.)
Note that one can also manage a collection of Experiments (e.g. the same set
of hypotheses or algorithms applied over different environments or datasets).
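A minimal usage sketch (paths, names and column values are illustrative only):
>>> df = pd.DataFrame({"step": [0, 1, 2], "return": [0.0, 0.5, 0.9]})
>>> run = Run(path="./logs/ppo-seed0", df=df)
>>> h = Hypothesis.of([run], name="ppo")
>>> ex = Experiment("cartpole", [h])
>>> ex.hypotheses # -> (Hypothesis('ppo', <1 runs>),)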
"""
import collections
import fnmatch
import itertools
import os.path
import re
import sys
import types
from dataclasses import dataclass # for python 3.6, backport needed
from multiprocessing.pool import Pool as MultiprocessPool
from multiprocessing.pool import ThreadPool
from typing import (Any, Callable, Dict, Generator, Iterable, Iterator, List,
Mapping, MutableMapping, Optional, Sequence, Set, Tuple,
TypeVar, Union)
import numpy as np
import pandas as pd
from pandas.core.accessor import CachedAccessor
from pandas.core.groupby.generic import DataFrameGroupBy
from typeguard import typechecked
from . import plot as _plot
from . import util
from .path_util import exists, glob, isdir, open
T = TypeVar('T')
try:
from tqdm.auto import tqdm
except:
tqdm = util.NoopTqdm
#########################################################################
# Data Classes
#########################################################################
@dataclass
class Run:
"""Represents a single run, containing one pd.DataFrame object
as well as other metadata (path, etc.)
"""
path: str
df: pd.DataFrame
@classmethod
def of(cls, o):
"""A static factory method."""
if isinstance(o, Run):
return Run(path=o.path, df=o.df)
elif isinstance(o, pd.DataFrame):
return cls.from_dataframe(o)
raise TypeError("Unknown type {}".format(type(o)))
@classmethod
@typechecked
def from_dataframe(cls, df: pd.DataFrame):
run = cls(path='', df=df)
if hasattr(df, 'path'):
run.path = df.path
return run
def __repr__(self):
return 'Run({path!r}, df with {rows} rows)'.format(
path=self.path, rows=len(self.df))
@property
def columns(self) -> Sequence[str]:
"""Returns all column names."""
return list(self.df.columns) # type: ignore
@property
def name(self) -> str:
"""Returns the last segment of the path."""
path = self.path.rstrip('/')
return os.path.basename(path)
def to_hypothesis(self) -> 'Hypothesis':
"""Create a new `Hypothesis` consisting of only this run."""
return Hypothesis.of(self)
def plot(self, *args, subplots=True, **kwargs):
return self.to_hypothesis().plot(*args, subplots=subplots, **kwargs)
def hvplot(self, *args, subplots=True, **kwargs):
return self.to_hypothesis().hvplot(*args, subplots=subplots, **kwargs)
class RunList(Sequence[Run]):
"""A (immutable) list of Run objects, but with some useful utility
methods such as filtering, searching, and handy format conversion."""
def __init__(self, runs: Iterable[Run]):
runs = self._validate_type(runs)
self._runs = list(runs)
@classmethod
def of(cls, runs: Iterable[Run]):
if isinstance(runs, cls):
return runs # do not make a copy
else:
return cls(runs) # RunList(runs)
def _validate_type(self, runs) -> List[Run]:
if not isinstance(runs, Iterable):
raise TypeError(f"`runs` must be a Iterable, but given {type(runs)}")
if isinstance(runs, Mapping):
raise TypeError(f"`runs` should not be a dictionary, given {type(runs)} "
" (forgot to wrap with pd.DataFrame?)")
runs = list(runs)
if not all(isinstance(r, Run) for r in runs):
raise TypeError("`runs` must be a iterable of Run, "
"but given {}".format([type(r) for r in runs]))
return runs
def __getitem__(self, index_or_slice):
o = self._runs[index_or_slice]
if isinstance(index_or_slice, slice):
o = RunList(o)
return o
def __next__(self):
# This is a hack to prevent pandas' pprint_thing() from converting
# this object into a sequence of Runs.
raise TypeError("'RunList' object is not an iterator.")
def __len__(self):
return len(self._runs)
def __repr__(self):
return "RunList([\n " + "\n ".join(repr(r) for r in self._runs) + "\n]"
def extend(self, more_runs: Iterable[Run]):
self._runs.extend(more_runs)
def to_list(self) -> List[Run]:
"""Create a new copy of list containing all the runs."""
return list(self._runs)
def to_dataframe(self) -> pd.DataFrame:
"""Return a DataFrame consisting of columns `name` and `run`."""
return pd.DataFrame({
'name': [r.name for r in self._runs],
'run': self._runs,
})
def filter(self, fn: Union[Callable[[Run], bool], str]) -> 'RunList':
"""Apply a filter function (Run -> bool) and return the filtered runs
as another RunList. If a string is given, we convert it as a matcher
function (see fnmatch) that matches `run.name`."""
if isinstance(fn, str):
pat = str(fn)
fn = lambda run: fnmatch.fnmatch(run.name, pat)
return RunList(filter(fn, self._runs))
def grep(self, regex: Union[str, 're.Pattern'], flags=0):
"""Apply a regex-based filter on the path of `Run`, and return the
matched `Run`s as a RunList."""
if isinstance(regex, str):
regex = re.compile(regex, flags=flags)
return self.filter(lambda r: bool(regex.search(r.path)))
def map(self, func: Callable[[Run], Any]) -> List:
"""Apply func for each of the runs. Return the transformation
as a plain list."""
return list(map(func, self._runs))
def to_hypothesis(self, name: str) -> 'Hypothesis':
"""Create a new Hypothesis instance containing all the runs
as the current RunList instance."""
return Hypothesis.of(self, name=name)
def groupby(
self,
by: Callable[[Run], T],
*,
name: Callable[[T], str] = str,
) -> Iterator[Tuple[T, 'Hypothesis']]:
r"""Group runs into hypotheses with the key function `by` (Run -> key).
This will enumerate tuples (`group_key`, Hypothesis) where `group_key`
is the result of the key function for each group, and a Hypothesis
object (with name `name(group_key)`) will consist of all the runs
mapped to the same group.
Args:
by: a key function for groupby operation. (Run -> Key)
name: a function that maps the group (Key) into Hypothesis name (str).
Example:
>>> key_func = lambda run: re.search("algo=(\w+),lr=([.0-9]+)", run.name).group(1, 2)
>>> for group_name, hypothesis in runs.groupby(key_func):
>>> ...
"""
series = pd.Series(self._runs)
groupby = series.groupby(lambda i: by(series[i]))
group: T
for group, runs_in_group in groupby:
yield group, Hypothesis.of(runs_in_group, name=name(group))
def extract(self, pat: str, flags: int = 0) -> pd.DataFrame:
r"""Extract capture groups in the regex pattern `pat` as columns.
Example:
>>> runs[0].name
"ppo-halfcheetah-seed0"
>>> df = runs.extract(r"(?P<algo>[\w]+)-(?P<env_id>[\w]+)-seed(?P<seed>[\d]+)")
>>> assert list(df.columns) == ['algo', 'env_id', 'seed', 'run']
"""
df: pd.DataFrame = self.to_dataframe()
df = df['name'].str.extract(pat, flags=flags)
df['run'] = list(self._runs)
return df
@dataclass
class Hypothesis(Iterable[Run]):
name: str
runs: RunList
def __init__(self, name: str, runs: Union[Run, Iterable[Run]]):
if isinstance(runs, Run) or isinstance(runs, pd.DataFrame):
if not isinstance(runs, Run):
runs = Run.of(runs)
runs = [runs] # type: ignore
self.name = name
self.runs = RunList(runs)
def __iter__(self) -> Iterator[Run]:
return iter(self.runs)
@classmethod
def of(cls,
runs: Union[Run, Iterable[Run]],
*,
name: Optional[str] = None) -> 'Hypothesis':
"""A static factory method."""
if isinstance(runs, Run):
name = name or runs.path
return cls(name=name or '', runs=runs)
def __getitem__(self, k):
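# An integer key returns the k-th run; a column name returns a DataFrame whose
# columns are the per-run series for that column (one column per run, keyed by
# the run's path).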
if isinstance(k, int):
return self.runs[k]
if k not in self.columns:
raise KeyError(k)
return pd.DataFrame({r.path: r.df[k] for r in self.runs})
def __repr__(self) -> str:
return f"Hypothesis({self.name!r}, <{len(self.runs)} runs>)"
def __len__(self) -> int:
return len(self.runs)
def __hash__(self):
return hash(id(self))
def __next__(self):
# This is a hack to prevent pandas' pprint_thing() from converting
# this object into a sequence of Runs.
raise TypeError("'Hypothesis' object is not an iterator.")
def describe(self) -> pd.DataFrame:
"""Report a descriptive statistics as a DataFrame,
after aggregating all runs (e.g., mean)."""
return self.mean().describe()
def summary(self) -> pd.DataFrame:
"""Return a DataFrame that summarizes the current hypothesis."""
return Experiment(self.name, [self]).summary()
# see module expt.plot
plot = CachedAccessor("plot", _plot.HypothesisPlotter)
plot.__doc__ = _plot.HypothesisPlotter.__doc__
hvplot = CachedAccessor("hvplot", _plot.HypothesisHvPlotter)
hvplot.__doc__ = _plot.HypothesisHvPlotter.__doc__
@property
def grouped(self) -> DataFrameGroupBy:
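# Concatenate the runs' dataframes and group rows by their shared index
# (level 0), so that aggregations such as mean()/std() operate across runs
# at each index value.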
return pd.concat(self._dataframes, sort=False).groupby(level=0)
def empty(self) -> bool:
sentinel = object()
return next(iter(self.grouped), sentinel) is sentinel # O(1)
@property
def _dataframes(self) -> List[pd.DataFrame]:
"""Get all dataframes associated with all the runs."""
def _get_df(o):
if isinstance(o, pd.DataFrame):
return o
else:
return o.df
return [_get_df(r) for r in self.runs]
@property
def columns(self) -> Iterable[str]:
return util.merge_list(*[df.columns for df in self._dataframes])
def rolling(self, *args, **kwargs):
return self.grouped.rolling(*args, **kwargs)
def mean(self, *args, **kwargs) -> pd.DataFrame:
g = self.grouped
return g.mean(*args, **kwargs)
def std(self, *args, **kwargs) -> pd.DataFrame:
g = self.grouped
return g.std(*args, **kwargs)
def min(self, *args, **kwargs) -> pd.DataFrame:
g = self.grouped
return g.min(*args, **kwargs)
def max(self, *args, **kwargs) -> pd.DataFrame:
g = self.grouped
return g.max(*args, **kwargs)
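# Illustrative sketch of aggregating a Hypothesis across its runs (the column
# name 'loss' is hypothetical):
#
#   >>> h.mean()['loss']   # mean curve over runs, aligned by index
#   >>> h.std()['loss']    # per-step standard deviation across runs
#   >>> h['loss']          # raw per-run columns as a pd.DataFrame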
class Experiment(Iterable[Hypothesis]):
@typechecked
def __init__(
self,
name: Optional[str] = None,
hypotheses: Optional[Iterable[Hypothesis]] = None,
):
self._name = name if name is not None else ""
self._hypotheses: MutableMapping[str, Hypothesis]
self._hypotheses = collections.OrderedDict()
if isinstance(hypotheses, np.ndarray):
hypotheses = list(hypotheses)
for h in (hypotheses or []):
if not isinstance(h, Hypothesis):
raise TypeError("An element of hypotheses contains a wrong type: "
"expected {}, but given {} ".format(
Hypothesis, type(h)))
if h.name in self._hypotheses:
raise ValueError(f"Duplicate hypothesis name: `{h.name}`")
self._hypotheses[h.name] = h
@classmethod
def from_dataframe(
cls,
df: pd.DataFrame,
by: Optional[Union[str, List[str]]] = None,
*,
run_column: str = 'run',
hypothesis_namer: Callable[..., str] = str,
name: Optional[str] = None,
) -> 'Experiment':
"""Construct a new Experiment object from a DataFrame instance,
structured as per the convention of `RunList.to_dataframe()`.
Args:
by (str, List[str]): The column name to group by. If None (default),
it will try to automatically determine from the dataframe if there
is only one column other than `run_column`.
run_column (str): The column name that contains `Run` objects.
See also `RunList.to_dataframe()`.
hypothesis_namer: A function that transforms the group key
(a str or tuple) that pandas groupby produces into a hypothesis name.
This function should take one positional argument for the group key.
name: The name for the produced `Experiment`.
"""
if by is None:
# Automatically determine the column from df.
by_columns = list(sorted(set(df.columns).difference([run_column])))
if len(by_columns) != 1:
raise ValueError("Cannot automatically determine the column to "
"group by. Candidates: {}".format(by_columns))
by = next(iter(by_columns))
ex = Experiment(name=name)
for hypothesis_key, runs_df in df.groupby(by):
hypothesis_name = hypothesis_namer(hypothesis_key)
runs = RunList(runs_df[run_column])
h = runs.to_hypothesis(name=hypothesis_name)
ex.add_hypothesis(h)
return ex
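# Illustrative usage of `Experiment.from_dataframe` (a sketch): starting from
# the DataFrame produced by `RunList.extract` above, whose columns are assumed
# to be ['algo', 'env_id', 'seed', 'run']; the experiment name is made up.
#
#   >>> df = runs.extract(r"(?P<algo>[\w]+)-(?P<env_id>[\w]+)-seed(?P<seed>[\d]+)")
#   >>> ex = Experiment.from_dataframe(df, by="algo", name="ppo-vs-sac")
#   >>> ex.keys()   # one hypothesis per distinct value of 'algo'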
def add_runs(
self,
hypothesis_name: str,
runs: List[Union[Run, Tuple[str, pd.DataFrame], pd.DataFrame]],
*,
color=None,
linestyle=None,
) -> Hypothesis:
def check_runs_type(runs) -> List[Run]:
if isinstance(runs, types.GeneratorType):
runs = list(runs)
if runs == []:
return []
if isinstance(runs, Run):
runs = [runs]
return [Run.of(r) for r in runs]
_runs = check_runs_type(runs)
d = Hypothesis.of(name=hypothesis_name, runs=_runs)
return self.add_hypothesis(d, extend_if_conflict=True)
@typechecked
def add_hypothesis(
self,
h: Hypothesis,
extend_if_conflict=False,
) -> Hypothesis:
if h.name in self._hypotheses:
if not extend_if_conflict:
raise ValueError(f"Hypothesis named {h.name} already exists!")
d: Hypothesis = self._hypotheses[h.name]
d.runs.extend(h.runs)
else:
self._hypotheses[h.name] = h
return self._hypotheses[h.name]
@property
def name(self) -> str:
return self._name
@property
def title(self) -> str:
return self._name
def keys(self) -> Iterable[str]:
"""Return all hypothesis names."""
return self._hypotheses.keys()
@property
def hypotheses(self) -> Sequence[Hypothesis]:
return tuple(self._hypotheses.values())
def select_top(
self,
key,
k=None,
descending=True,
) -> Union[Hypothesis, Sequence[Hypothesis]]:
"""Choose the hypothesis that has the largest (or smallest) value of the specified key.
Args:
key: str (y_name) or Callable(Hypothesis -> number).
k: If None, only the top-1 hypothesis will be returned. Otherwise (integer),
the top-k hypotheses will be returned as a list.
descending: If True, the hypothesis with the largest value of key will be
chosen. If False, the hypothesis with the smallest value will be chosen.
Returns: the top-1 hypothesis (if `k` is None) or a list of the top-k
hypotheses, in the order specified by `key`.
"""
if k is not None and k <= 0:
raise ValueError("k must be greater than 0.")
if k is not None and k > len(self._hypotheses):
raise ValueError("k must be smaller than the number of "
"hypotheses ({})".format(len(self._hypotheses)))
if isinstance(key, str):
y = str(key) # make a copy for closure
if descending:
key = lambda h: h.mean()[y].max()
else:
key = lambda h: h.mean()[y].min()
elif callable(key):
pass # key: Hypothesis -> scalar.
else:
raise TypeError(
f"`key` must be a str or a callable, but got: {type(key)}")
candidates = sorted(self.hypotheses, key=key, reverse=descending)
assert isinstance(candidates, list)
if k is None:
return candidates[0]
else:
return candidates[:k]
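# Illustrative usage of `select_top` (a sketch; the column name 'return' is
# hypothetical):
#
#   >>> best = ex.select_top('return')                     # top-1 Hypothesis
#   >>> top3 = ex.select_top('return', k=3)                # top-3, as a list
#   >>> worst = ex.select_top('return', descending=False)  # smallest value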
def __iter__(self) -> Iterator[Hypothesis]:
return iter(self._hypotheses.values())
def __repr__(self) -> str:
return (
f"Experiment('{self.name}', {len(self._hypotheses)} hypotheses: [ \n " +
'\n '.join([repr(exp) for exp in self.hypotheses]) + "\n])")
def __getitem__(
self,
key: Union[str, Tuple],
) -> Union[Hypothesis, np.ndarray, Run, pd.DataFrame]:
"""Return self[key].
`key` can be one of the following:
- str: The hypothesis's name to retrieve.
- int: An index in [0, len(self)) over all hypotheses. Numpy-style fancy
indexing is also supported.
- Tuple(hypo_key: str|int, column: str):
- The first axis is the same as previous (hypothesis' name or index)
- The second one is the column name. The return value will be the same
as self[hypo_key][column].
"""
if isinstance(key, str):
name = key
return self._hypotheses[name]
elif isinstance(key, int):
try:
_keys = self._hypotheses.keys()
name = next(itertools.islice(_keys, key, None))
except StopIteration:
raise IndexError("out of range: {} (should be < {})".format(
key, len(self._hypotheses)))
return self._hypotheses[name]
elif isinstance(key, tuple):
hypo_key, column = key
hypos = self[hypo_key]
if isinstance(hypos, list):
raise NotImplementedError("2-dim fancy indexing is not implemented") # yapf: disable
return hypos[column] # type: ignore
elif isinstance(key, Iterable):
key = list(key)
if all(isinstance(k, bool) for k in key):
# fancy indexing through bool
if len(key) != len(self._hypotheses):
raise IndexError("boolean index did not match indexed array along"
" dimension 0; dimension is {} but corresponding"
" boolean dimension is {}".format(
len(self._hypotheses), len(key)))
r = np.empty(len(key), dtype=object)
r[:] = list(self._hypotheses.values())
return r[key]
else:
# fancy indexing through int? # TODO: support str
hypo_keys = list(self._hypotheses.keys())
to_key = lambda k: k if isinstance(k, str) else hypo_keys[k]
return [self._hypotheses[to_key(k)] for k in key]
else:
raise ValueError("Unsupported index: {}".format(key))
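# Illustrative indexing examples (a sketch; the hypothesis name "ppo" and the
# column "loss" are hypothetical):
#
#   >>> ex["ppo"]          # by hypothesis name -> Hypothesis
#   >>> ex[0]              # by position        -> Hypothesis
#   >>> ex["ppo", "loss"]  # (name, column)     -> pd.DataFrame
#   >>> ex[[0, 2]]         # int fancy indexing -> list of Hypothesis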
def __setitem__(
self,
name: str,
hypothesis_or_runs: Union[Hypothesis, List[Run]],
) -> Hypothesis:
"""A dict-like method for adding a hypothesis or runs."""
if isinstance(hypothesis_or_runs, Hypothesis):
if name in self._hypotheses:
raise ValueError(f"A hypothesis named {name} already exists")
self._hypotheses[name] = hypothesis_or_runs
else:
# TODO metadata (e.g. color)
self.add_runs(name, hypothesis_or_runs) # type: ignore
return self._hypotheses[name]
@property
def columns(self) -> Iterable[str]:
# merge and uniquify all columns while preserving the order.
return util.merge_list(*[h.columns for h in self._hypotheses.values()])
@staticmethod
def AGGREGATE_MEAN_LAST(portion: float):
return (lambda series: series.rolling(max(1, int(len(series) * portion))
).mean().iloc[-1]) # yapf: disable
def summary(self, columns=None, aggregate=None) -> pd.DataFrame:
"""Return a DataFrame that summarizes the current experiment,
with one row per hypothesis.
Args:
columns: The list of columns to show. Defaults to `self.columns` plus
`"index"`.
aggregate: A function or a dict of functions ({column_name: ...})
specifying a strategy to aggregate a `Series`. Defaults to take the
average of the last 10% of the series.
Example Usage:
>>> pd.set_option('display.max_colwidth', 2000) # hypothesis name can be long!
>>> df = ex.summary(columns=['index', 'loss', 'return'])
>>> df.style.background_gradient(cmap='viridis')
"""
columns = columns or (['index'] + list(self.columns))
aggregate = aggregate or self.AGGREGATE_MEAN_LAST(0.1)
df = pd.DataFrame({'hypothesis': [h.name for h in self.hypotheses]})
hypo_means = [
(h.mean() if not all(len(df) == 0 for df in h._dataframes) \
else pd.DataFrame())
for h in self.hypotheses
]
for column in columns:
def df_series(df: pd.DataFrame):
if column == 'index':
return df.index
if column not in df:
return []
else:
return df[column].dropna()
def aggregate_h(series):
if len(series) == 0:
# after dropna, no numeric types to aggregate?
return np.nan
if column == 'index':
# the 'index' column is always summarized by its maximum value
return series.max()
aggregate_fn = aggregate
if not callable(aggregate_fn):
# a dict maps each (non-index) column to its aggregation function
aggregate_fn = aggregate[column]
return aggregate_fn(series)
df[column] = [aggregate_h(df_series(hm)) for hm in hypo_means]
return df
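# Illustrative sketch of a per-column `aggregate` for `summary()`; the column
# names 'loss' and 'return' are hypothetical:
#
#   >>> ex.summary(columns=['index', 'loss', 'return'],
#   ...            aggregate={'loss': lambda s: s.min(),
#   ...                       'return': Experiment.AGGREGATE_MEAN_LAST(0.2)})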
def hvplot(self, *args, **kwargs):
plot = None
for i, (name, hypo) in enumerate(self._hypotheses.items()):
p = hypo.hvplot(*args, label=name, **kwargs)
plot = (plot * p) if plot else p
return plot
plot = CachedAccessor("plot", _plot.ExperimentPlotter)
plot.__doc__ = _plot.ExperimentPlotter.__doc__
#########################################################################
# Data Parsing Functions
#########################################################################
def parse_run(run_folder, fillna=False, verbose=False) -> pd.DataFrame:
"""Create a pd.DataFrame object from a single directory."""
if verbose:
# TODO Use python logging
print(f"Reading {run_folder} ...", file=sys.stderr, flush=True)
# make it more general (rather than being specific to progress.csv)
# and support tensorboard eventlog files, etc.
sources = [
parse_run_progresscsv,
parse_run_tensorboard,
]
for fn in sources:
try:
df = fn(run_folder, fillna=fillna, verbose=verbose)
if df is not None:
break
except (FileNotFoundError, IOError) as e:
if verbose:
print(f"{fn.__name__} -> {e}\n", file=sys.stderr, flush=True)
else:
raise pd.errors.EmptyDataError(f"Cannot handle dir: {run_folder}")
# add some optional metadata... (might not be preserved afterwards)
if df is not None:
df.path = run_folder
return df
def parse_run_progresscsv(run_folder,
fillna=False,
verbose=False) -> pd.DataFrame:
"""Create a pandas DataFrame object from progress.csv per convention."""
# Try progress.csv or log.csv from folder
detected_csv = None
for fname in ('progress.csv', 'log.csv'):
p = os.path.join(run_folder, fname)
if exists(p):
detected_csv = p
break
# maybe a direct file path was given instead of a directory
if detected_csv is None:
if exists(run_folder) and not isdir(run_folder):
detected_csv = run_folder
if detected_csv is None:
raise FileNotFoundError(os.path.join(run_folder, "*.csv"))
# Read the detected file `p`
if verbose:
print(f"parse_run (csv): Reading {detected_csv}",
file=sys.stderr, flush=True) # yapf: disable
with open(detected_csv, mode='r') as f:
df = pd.read_csv(f)
if fillna:
df = df.fillna(0)
return df
def parse_run_tensorboard(run_folder,
fillna=False,
verbose=False) -> pd.DataFrame:
"""Create a pandas DataFrame from tensorboard eventfile or run directory."""
event_file = list(
sorted(glob(os.path.join(run_folder, '*events.out.tfevents.*'))))
if not event_file: # no event file detected
raise pd.errors.EmptyDataError(f"No event file detected in {run_folder}")
# Copyright (c) 2021 ipyradiant contributors.
# Distributed under the terms of the Modified BSD License.
import logging
import re
from pandas import DataFrame
from rdflib import Graph, URIRef
from rdflib.plugins.sparql import prepareQuery
# pattern used to identify bindings in a sparql string
BINDING_PATTERN = re.compile(r"\?([\w]*)")
def build_values(string: str, values: dict) -> str:
"""
:param string: the query string to format (must have two format slots)
:param values: a dictionary of values to assign, with the following structure::
values = {
"var_1": [value_1, ..., value_n],
...
"var_n": [value_1, ..., value_n]
}
Note: values can be strings, rdflib.URIRefs, or preformatted SPARQL IRIs, e.g. '<IRI>'.
:return: the formatted string with the given values
TODO should values be a NamedTuple with different structure to improve readability?
"""
assert values, "Input values cannot be empty."
assert (
len(set([len(_) for _ in values.values()])) == 1
), "All values must have equal length."
# TODO assert keys are valid for var assignment
# Convert any values that are necessary (e.g. URIRefs)
for var, values_list in values.items():
for ii, value in enumerate(values_list):
if isinstance(value, str):
if value.startswith("<") and value.endswith(">"):
continue
values[var][ii] = f"<{URIRef(value)}>"
elif isinstance(value, URIRef):
values[var][ii] = f"<{value}>"
# Rotate the values dict so that each row holds one value per variable,
# as required by the SPARQL VALUES block.
value_vars = " ".join([f"?{var}" for var in values.keys()])
values_transposed = [list(row) for row in zip(*values.values())]
values_block = "\n\t ".join(
f"({' '.join([i for i in row])})" for row in values_transposed
)
return string.format(
value_vars,
values_block,
)
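# Illustrative sketch of `build_values`; the query template and IRI below are
# made up. The template must contain exactly two positional format slots:
# one for the variable list and one for the rows of the VALUES block.
#
#   >>> tmpl = """
#   ...     SELECT ?s ?p ?o WHERE {{
#   ...         ?s ?p ?o .
#   ...         VALUES ({}) {{ {} }}
#   ...     }}
#   ... """
#   >>> build_values(tmpl, {"s": ["https://www.example.org/thing"]})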
class SPARQLQueryFramer:
"""A generic Class for building and running SPARQL queries with rdflib.
TODO possible to static property sparql and load from file or overload string?
TODO lexer method for sparql string
:param initNs: a dict of namespaces {term: namespace} to use in rdflib's prepareQuery
:param classBindings: a dict of bindings to set at the class level
(independent of initBindings).
:param sparql: a SPARQL parse-able string to use during query
:param index: an index list to use when building a query result DataFrame
:param columns: a list of strings to use as column headers for
the query result DataFrame
:param query: a valid rdflib Query object
"""
initNs = {}
classBindings = {}
sparql = ""
index = []
columns = None
query = None
# low cost trait (previous cls.sparql state)
p_sparql = ""
@classmethod
def print_vars(cls) -> None:
"""Utility function to print variables that may be used as bindings"""
logging.info("Only variables in the SELECT line are printed.")
tmp_graph = Graph()
# Run fake query to print vars
if not cls.query:
tmp_query = prepareQuery(cls.sparql, initNs=cls.initNs)
tmp_res = tmp_graph.query(tmp_query)
else:
tmp_res = tmp_graph.query(cls.query)
print("Vars:\n", sorted([str(var) for var in tmp_res.vars]))
@classmethod
def print_potential_bindings(cls) -> None:
"""Utility function to print bindings in the sparql string.
Note, this method is regex-based, and may not be 100% accurate.
"""
if not cls.sparql:
print("No sparql string set in class.")
return None
logging.warning("Bindings are not guaranteed to be 100% accurate")
potential_bindings = [
str(binding) for binding in set(BINDING_PATTERN.findall(cls.sparql))
]
print("Potential bindings:\n", sorted(potential_bindings))
return None
@classmethod
def run_query(
cls,
graph: Graph,
initBindings: dict = None,
**initBindingsKwarg,
) -> DataFrame:
"""Runs a query with optional initBindings, and returns the results as a
pandas.DataFrame.
:param graph: the rdflib.graph.Graph to be queried
:param initBindings: a dictionary of bindings where the key is the variable in
the sparql string, and the value is the URI/Literal to BIND to the variable.
:param initBindingsKwarg: kwarg version of initBindings
:return: pandas.DataFrame containing the contents of the SPARQL query
result from rdflib
"""
assert (
cls.query or cls.sparql
), "No rdflib Query or SPARQL string has been set for the class."
# Check if query should be updated due to stale sparql string
update_query = cls.p_sparql != cls.sparql
if not cls.query or update_query:
cls.query = prepareQuery(cls.sparql, initNs=cls.initNs)
# note: merge method kwargs with default class bindings
if initBindings:
all_bindings = {**cls.classBindings, **initBindings, **initBindingsKwarg}
else:
all_bindings = {**cls.classBindings, **initBindingsKwarg}
result = graph.query(cls.query, initBindings=all_bindings)
if cls.columns is None:
# Try to infer from query vars
try:
cls.columns = [str(var) for var in result.vars]
except TypeError:
# no columns. Probably an ASK or CONSTRUCT query
logging.debug(
"No columns passed, and unable to infer. "
"Therefore, no columns were assigned to the DataFrame."
)
df = DataFrame(result, columns=cls.columns)
# cache the sparql string used for this query (cf. the staleness check above)
cls.p_sparql = cls.sparql
if cls.index:
return df.set_index(cls.index)
return df
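# Illustrative sketch of subclassing SPARQLQueryFramer; the query, class name,
# and `graph` (an rdflib.Graph supplied by the caller) are hypothetical:
#
#   >>> class AllTypes(SPARQLQueryFramer):
#   ...     sparql = "SELECT DISTINCT ?type WHERE { ?s a ?type . }"
#   >>> AllTypes.run_query(graph)   # -> pandas.DataFrame with a 'type' column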
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over, so nothing gets defined and the sentinel
# values should remain unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace along with a compiled
# regex are not allowed and will raise a ValueError
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# single_series_name_is_preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
subject_list = [
'dave@google.com',
'tdhock5@gmail.com',
'maudelaperriere@gmail.com',
'rob@gmail.com some text steve@gmail.com',
'a@b.com some text c@d.com and e@f.com',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0,
"second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
# an Index should return the same result as the default index without a name,
# i.e. index.name doesn't affect the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has an MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
# 0xFF13: 3 FULLWIDTH DIGIT THREE
values = ['A', '3', u'¼', u'★', u'፸', u'3', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'3', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
values = ['A', np.nan, u'¼', u'★', np.nan, u'3', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = Series(['a;b', 'a', 7])
result = s.str.get_dummies(';')
expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]],
columns=list('7ab'))
tm.assert_frame_equal(result, expected)
# GH9980, GH8028
idx = Index(['a|b', 'a|c', 'b|c'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0), (1, 0, 1),
(0, 1, 1)], names=('a', 'b', 'c'))
tm.assert_index_equal(result, expected)
def test_get_dummies_with_name_dummy(self):
# GH 12180
# Dummies named 'name' should work as expected
s = Series(['a', 'b,name', 'b'])
result = s.str.get_dummies(',')
expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]],
columns=['a', 'b', 'name'])
tm.assert_frame_equal(result, expected)
idx = Index(['a|b', 'name|c', 'b|name'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0, 0), (0, 0, 1, 1),
(0, 1, 0, 1)],
names=('a', 'b', 'c', 'name'))
tm.assert_index_equal(result, expected)
def test_join(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
def test_len(self):
values = Series(['foo', 'fooo', 'fooooo', np.nan, 'fooooooo'])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.len()
xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('foo'), u('fooo'), u('fooooo'), np.nan, u(
'fooooooo')])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
def test_findall(self):
values = Series(['fooBAD__barBAD', NA, 'foo', 'BAD'])
result = values.str.findall('BAD[_]*')
exp = Series([['BAD__', 'BAD'], NA, [], ['BAD']])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['fooBAD__barBAD', NA, 'foo', True, datetime.today(),
'BAD', None, 1, 2.])
rs = Series(mixed).str.findall('BAD[_]*')
xp = Series([['BAD__', 'BAD'], NA, [], NA, NA, ['BAD'], NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo'), u('BAD')])
result = values.str.findall('BAD[_]*')
exp = Series([[u('BAD__'), u('BAD')], NA, [], [u('BAD')]])
tm.assert_almost_equal(result, exp)
def test_find(self):
values = Series(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF', 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, 3, 1, 0, -1]))
expected = np.array([v.find('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, 3, 7, 4, -1]))
expected = np.array([v.find('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.find('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.rfind('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.find(0)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.rfind(0)
def test_find_nan(self):
values = Series(['ABCDEFG', np.nan, 'DEFGHIJEF', np.nan, 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, np.nan, 1, np.nan, -1]))
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
def test_index(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF'])
result = s.str.index('EF')
_check(result, klass([4, 3, 1, 0]))
expected = np.array([v.index('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF')
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('EF', 3)
_check(result, klass([4, 3, 7, 4]))
expected = np.array([v.index('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF', 3)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('E', 4, 8)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.index('E', 4, 8) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('E', 0, 5)
_check(result, klass([4, 3, 1, 4]))
expected = np.array([v.rindex('E', 0, 5) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(ValueError,
"substring not found"):
result = s.str.index('DE')
with tm.assert_raises_regex(TypeError,
"expected a string "
"object, not int"):
result = s.str.index(0)
# test with nan
s = Series(['abcb', 'ab', 'bcbe', np.nan])
result = s.str.index('b')
tm.assert_series_equal(result, Series([1, 1, 0, np.nan]))
result = s.str.rindex('b')
tm.assert_series_equal(result, Series([3, 1, 2, np.nan]))
def test_pad(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left')
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='left')
xp = Series([' a', NA, ' b', NA, NA, ' ee', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='right')
xp = Series(['a ', NA, 'b ', NA, NA, 'ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='both')
xp = Series([' a ', NA, ' b ', NA, NA, ' ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.pad(5, side='left')
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])  # target API: pandas.compat.u
"""
Utility functions that will be used by the sequence data
generators
IGNORE_FOR_SPHINX_DOCS:
List of functions:
getChromPositions - returns two column dataframe of chromosome
positions spanning the entire chromosome at
a) regular intervals or b) random locations
getPeakPositions - returns two column dataframe of chromosome
positions
getInputTasks - when input data is fed as a path to a directory,
that contains files (single task) or sub-directories (multi
task) that follow a strict naming convention, this function
returns a nested python dictionary of tasks, specifying the
'signal' and/or 'control' bigWigs, 'peaks' file, 'task_id'
& 'strand'
roundToMultiple - Return the largest multiple of y < x
one_hot_encode - returns a 3-dimension numpy array of one hot
encoding of a list of DNA sequences
reverse_complement_of_sequences - returns the reverse
complement of a list of sequences
reverse_complement_of_profiles - returns the reverse
complement of the assay signal
License:
MIT License
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
IGNORE_FOR_SPHINX_DOCS
"""
import glob
import logging
import numpy as np
import os
import pandas as pd
from collections import OrderedDict
from mseqgen.exceptionhandler import NoTracebackException
def getChromPositions(chroms, chrom_sizes, flank, mode='sequential',
num_positions=-1, step=50):
"""
Chromosome positions spanning the entire chromosome at
a) regular intervals or b) random locations
Args:
chroms (list): The list of required chromosomes
chrom_sizes (pandas.Dataframe): dataframe of chromosome
sizes with 'chrom' and 'size' columns
flank (int): Buffer size before & after the position to
ensure we don't fetch values at index < 0 or > chrom size
mode (str): mode of returned position 'sequential' (from
the beginning) or 'random'
num_positions (int): number of chromosome positions
to return on each chromosome, use -1 to return
positions across the entire chromosome for all given
chromosomes in `chroms`. mode='random' cannot be used
with num_positions=-1
step (int): the interval between consecutive chromosome
positions in 'sequential' mode
Returns:
pandas.DataFrame:
two column dataframe of chromosome positions (chrom, pos)
"""
if mode == 'random' and num_positions == -1:
raise NoTracebackException(
"Incompatible parameter pairing: 'mode' = random, "
"'num_positions' = -1")
# check if chrom_sizes has a column called 'chrom'
if 'chrom' not in chrom_sizes.columns:
logging.error("Expected column 'chrom' not found in chrom_sizes")
return None
chrom_sizes = chrom_sizes.set_index('chrom')
# initialize an empty dataframe with 'chrom' and 'pos' columns
positions = pd.DataFrame(columns=['chrom', 'pos'])
# for each chromosome in the list
for i in range(len(chroms)):
chrom_size = chrom_sizes.at[chroms[i], 'size']
# keep start & end within bounds
start = flank
end = chrom_size - flank + 1
if mode == 'random':
# randomly sample positions
pos_array = np.random.randint(start, end, num_positions)
if mode == 'sequential':
_end = end
if num_positions != -1:
# change the last position based on the number of
# required positions
_end = start + step * num_positions
# if the newly computed 'end' goes beyond the
# chromosome end (we could throw an error here)
if _end > end:
_end = end
# positions at regular intervals
pos_array = list(range(start, _end, step))
# construct a dataframe for this chromosome
chrom_df = pd.DataFrame({'chrom': [chroms[i]] * len(pos_array),
'pos': pos_array})
# concatenate to existing df
positions = pd.concat([positions, chrom_df])
return positions
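# Illustrative usage of getChromPositions (the chromosome name and size below
# are made up, not taken from any real assembly):
#
#     sizes = pd.DataFrame({'chrom': ['chr1'], 'size': [1000]})
#     getChromPositions(['chr1'], sizes, flank=100, mode='sequential',
#                       num_positions=5, step=50)
#
# returns a two column dataframe with chrom == 'chr1' and
# pos == [100, 150, 200, 250, 300].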
def getPeakPositions(tasks, chroms, chrom_sizes, flank, drop_duplicates=False):
"""
Peak positions for all the tasks filtered based on required
chromosomes and other qc filters. Since 'task' here refers to
one strand of input/output, if the data is stranded the peaks
will be duplicated for the plus and minus strand.
Args:
tasks (dict): A python dictionary containing the task
information. Each task in tasks should have the
key 'peaks' that has the path to the peaks file
chroms (list): The list of required test chromosomes
chrom_sizes (pandas.Dataframe): dataframe of chromosome
sizes with 'chrom' and 'size' columns
flank (int): Buffer size before & after the position to
ensure we don't fetch values at index < 0 or > chrom size
drop_duplicates (boolean): True if duplicates should be
dropped from returned dataframe.
Returns:
pandas.DataFrame:
two column dataframe of peak positions (chrom, pos)
"""
# necessary for dataframe apply operation below --->>>
chrom_size_dict = dict(chrom_sizes.to_records(index=False))
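# e.g. a chrom_sizes frame with rows ('chr1', 1000) and ('chr2', 800)
# becomes {'chr1': 1000, 'chr2': 800}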
# initialize an empty dataframe
allPeaks = pd.DataFrame()  # target API: pandas.DataFrame
import csv
import json
import os
import re
from io import StringIO
import pandas as pd
import requests
from django.forms import model_to_dict
from va_explorer.va_data_management.models import CauseCodingIssue
from va_explorer.va_data_management.models import CauseOfDeath
from va_explorer.va_data_management.models import VerbalAutopsy
PYCROSS_HOST = os.environ.get('PYCROSS_HOST', 'http://127.0.0.1:5001')
INTERVA_HOST = os.environ.get('INTERVA_HOST', 'http://127.0.0.1:5002')
# TODO: settings need to be configurable
ALGORITHM_SETTINGS = {'HIV': 'l', 'Malaria': 'l'}
def _run_pycross_and_interva5(verbal_autopsies):
# Get into CSV format, also prefixing keys with - as expected by pyCrossVA (e.g. Id10424 becomes -Id10424)
va_data = [model_to_dict(va) for va in verbal_autopsies]
va_data = [dict([(f'-{k}', v) for k, v in d.items()]) for d in va_data]
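# e.g. a record {'Id10424': 'yes'} becomes {'-Id10424': 'yes'} before it is
# serialized to CSV for pyCrossVA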
va_data_csv = pd.DataFrame.from_records(va_data)  # target API: pandas.DataFrame.from_records
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
__author__ = 'maxim'
import os
import time
import pandas as pd
from . import api
from util import *
COLUMNS = ['date', 'high', 'low', 'open', 'close', 'volume', 'quoteVolume', 'weightedAverage']
PERIODS = ['5m', '15m', '30m', '2h', '4h', 'day']
def get_all_tickers_list():
df = api.get_24h_volume()
return [ticker for ticker in df.columns if not ticker.startswith('total')]
def update_ticker(ticker, period, dest_dir):
path = os.path.join(dest_dir, '%s_%s.csv' % (ticker, api.period_to_human(period)))
if os.path.exists(path):
existing_df = pd.read_csv(path)  # target API: pandas.read_csv
from geographnet.model.wdatasampling import DataSamplingDSited
import pandas as pd
import numpy as np
from geographnet.model.wsampler import WNeighborSampler
import torch
from geographnet.traintest_pm import train, test
from geographnet.model.geographpnet import GeoGraphPNet
import gc
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import urllib
import tarfile
def selectSites(datain):
sitesDF = datain.drop_duplicates('id').copy()
sgrp = sitesDF['stratified_flag'].value_counts()
sitesDF['stratified_flag_cnt'] = sgrp.loc[sitesDF['stratified_flag']].values
pos1_index = np.where(sitesDF['stratified_flag_cnt'] < 5)[0]
posT_index = np.where(sitesDF['stratified_flag_cnt'] >= 5)[0]
np.random.seed()
trainsiteIndex, testsiteIndex = train_test_split(posT_index, stratify=sitesDF.iloc[posT_index]['stratified_flag'],
test_size=0.15)
selsites = sitesDF.iloc[testsiteIndex]['id']
trainsitesIndex = np.where(~datain['id'].isin(selsites))[0]
indTestsitesIndex = np.where(datain['id'].isin(selsites))[0]
return trainsitesIndex,indTestsitesIndex
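# Illustrative usage (assuming `datain` has 'id' and 'stratified_flag' columns):
#     trainsitesIndex, indTestsitesIndex = selectSites(datatar)
# About 15% of the monitoring sites (stratified by 'stratified_flag') become an
# independent site-level test set; sites whose stratum has fewer than 5 members
# always stay in the training split.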
def untar(fname, dirs):
t = tarfile.open(fname)
t.extractall(path = dirs)
url = 'https://github.com/lspatial/geographnetdata/raw/master/pmdatain.pkl.tar.gz'
tarfl='./pmdatain.pkl.tar.gz'
print("Downloading from "+url+' ... ...')
urllib.request.urlretrieve(url, tarfl)
target='./test'
untar(tarfl,target)
targetFl=target+'/pmdatain.pkl'
datatar = pd.read_pickle(targetFl)  # target API: pandas.read_pickle
import numpy as np
import pandas as pd
import logging
import json
import pickle
import random
import altair as alt
import datetime
import yaml
from itertools import chain
import narrowing_ai_research
from narrowing_ai_research.utils.altair_utils import (
altair_visualisation_setup,
save_altair,
)
from narrowing_ai_research.utils.read_utils import (
read_papers,
read_topic_mix,
read_topic_long,
read_arxiv_cat_lookup,
read_arxiv_categories,
)
project_dir = narrowing_ai_research.project_dir
with open(f"{project_dir}/paper_config.yaml", "r") as infile:
params = yaml.safe_load(infile)["section_1"]
# Functions
def load_process_data():
"""Loads AI paper data for analysis in section 1."""
logging.info("Reading data")
arxiv_cat_lookup = read_arxiv_cat_lookup()
papers = read_papers()
topic_long = read_topic_long()
topic_mix = read_topic_mix()
cats = read_arxiv_categories()
logging.info("Reading tokenised abstracts")
with open(f"{project_dir}/data/interim/arxiv_tokenised.json", "r") as infile:
arxiv_tokenised = json.load(infile)
logging.info("Reading AI labelling outputs")
with open(f"{project_dir}/data/interim/find_ai_outputs.p", "rb") as infile:
ai_indices, term_counts = pickle.load(infile)
logging.info("Processing")
papers["tokenised"] = papers["article_id"].map(arxiv_tokenised)
# Create category sets to identify papers in different categories
ai_cats = ["cs.AI", "cs.NE", "stat.ML", "cs.LG"]
cat_sets = cats.groupby("category_id")["article_id"].apply(lambda x: set(x))
# Create one hot encodings for AI categories
ai_binary = pd.DataFrame(index=set(cats["article_id"]), columns=ai_cats)
for c in ai_binary.columns:
ai_binary[c] = [x in cat_sets[c] for x in ai_binary.index]
# Create arxiv dataset
papers.set_index("article_id", inplace=True)
# We remove papers without abstracts and arXiv categories
arx = pd.concat([ai_binary, papers], axis=1, sort=True).dropna(
axis=0, subset=["abstract", "cs.AI"]
)
return arx, ai_indices, term_counts, arxiv_cat_lookup, cat_sets, cats, ai_cats
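# Typical call, unpacking everything returned above:
#     (arx, ai_indices, term_counts,
#      arxiv_cat_lookup, cat_sets, cats, ai_cats) = load_process_data()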
def make_agg_trend(arx, save=True):
"""Makes first plot"""
# First chart: trends
ai_bool_lookup = {False: "Other categories", True: "AI"}
# Totals
ai_trends = (
arx.groupby(["date", "is_ai"]).size().reset_index(name="Number of papers")
)
ai_trends["is_ai"] = ai_trends["is_ai"].map(ai_bool_lookup)
# Shares
ai_shares = (
ai_trends.pivot_table(index="date", columns="is_ai", values="Number of papers")
.fillna(0)
.reset_index(drop=False)
)
ai_shares["share"] = ai_shares["AI"] / ai_shares.sum(axis=1)
# Make chart
at_ch = (
alt.Chart(ai_trends)
.transform_window(
roll="mean(Number of papers)", frame=[-5, 5], groupby=["is_ai"]
)
.mark_line()
.encode(
x=alt.X("date:T", title="", axis=alt.Axis(labels=False, ticks=False)),
y=alt.Y("roll:Q", title=["Number", "of papers"]),
color=alt.Color("is_ai:N", title="Category"),
)
.properties(width=350, height=120)
)
as_ch = (
alt.Chart(ai_shares)
.transform_window(roll="mean(share)", frame=[-5, 5])
.mark_line()
.encode(
x=alt.X("date:T", title=""),
y=alt.Y("roll:Q", title=["AI as share", "of all arXiv"]),
)
).properties(width=350, height=120)
ai_trends_chart = alt.vconcat(at_ch, as_ch, spacing=0)
if save is True:
save_altair(ai_trends_chart, "fig_1_ai_trends", driver=driv)
return ai_trends_chart, ai_trends
def make_cumulative_results(trends, years):
"""Creates cumulative results"""
cumulative = (
trends.pivot_table(index="date", columns="is_ai", values="Number of papers")
.fillna(0)
.apply(lambda x: x / x.sum())
.cumsum()
)
datetimes = [datetime.datetime(y, 1, 1) for y in years]
paper_shares = cumulative.loc[
[x.to_pydatetime() in datetimes for x in cumulative.index]
]
return paper_shares
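# Illustrative usage with the trends frame returned by make_agg_trend
# (the years are arbitrary here):
#     paper_shares = make_cumulative_results(ai_trends, years=[2012, 2016, 2020])
# giving the cumulative share of AI / non-AI papers published up to 1 January
# of each listed year.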
def make_category_distr_time(
ai_indices, arx, cats, cat_sets, arxiv_cat_lookup, get_examples=False, example_n=4
):
"""Makes timecharts by category
Args:
ai_indices: dict where keys are categories and items paper ids in category
arx: arxiv dataframe, used for the temporal analysis
cats: lookup between categories and papers
cat_sets: paper indices per category
get_examples: whether we want to print examples
example_n: number of examples to print
"""
time_charts = []
cat_charts = []
logging.info("Trend analysis by category")
# For each AI category and index
for k, ind in ai_indices.items():
logging.info(k)
ind = set(ind)
logging.info("Extracting category distribution")
# Find all papers in the category
# cat_subs = cats.loc[[x in ind for x in cats['article_id']]]
cat_subs = cats.loc[cats["article_id"].isin(ind)]
# Number of papers in category (without time)
cat_distr = cat_subs["category_id"].value_counts().reset_index(name="n")
cat_distr["category"] = k
cat_charts.append(cat_distr)
# Now get the year stuff
rel_papers = arx.loc[ind]
print(len(rel_papers))
logging.info("Extracting trends")
# Create timeline
exp_year = rel_papers["date"].value_counts().reset_index(name="n")
exp_year["category"] = k
exp_year["type"] = "expanded"
# Create timeline for core
core_year = arx.loc[cat_sets[k]]["date"].value_counts().reset_index(name="n")
core_year["category"] = k
core_year["type"] = "core"
# Combine
combined_year = pd.concat([exp_year, core_year])
time_charts.append(combined_year)
linech = pd.concat(time_charts)  # target API: pandas.concat
import json
import os
from functools import partial
from logging_config import get_logger
from sounds import sounds_config
from sounds.audio_sound_pre_processing import prepare_audio_sound
from sounds.ground_truth_processor import GroundtruthReader
from sounds.model_labeler import ModelLabelEncoder
from sounds.model_predictor import ModelPredictor
import pandas as pd
import numpy as np
from sounds.model_structures import *
from sounds.model_trainer import AudioFeaturesModel, train_and_test_model
from pandas import DataFrame
from utils.file_utils import return_from_path, save_object, load_object
logger = get_logger(__name__)
def save_features(groundtruth, path, dataset_name, filter_label=None):
gtp = GroundtruthReader(f'{sounds_config.sounds_data_dir}/{groundtruth}')
prepare_audio_sound_groundtruth = partial(prepare_audio_sound,
gtp, filter_label)
if not os.path.isdir(f'{sounds_config.sounds_data_dir}/{dataset_name}'):
os.mkdir(f'{sounds_config.sounds_data_dir}/{dataset_name}')
ftrs = return_from_path(prepare_audio_sound_groundtruth,
path,
sounds_config.extension)
audio_sound_df = DataFrame(ftrs)
save_object(audio_sound_df, f'{sounds_config.sounds_data_dir}/{dataset_name}/{os.path.basename(path)}.data')
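# Illustrative call (file and directory names are assumptions, not project
# defaults):
#     save_features('groundtruth.csv', '/data/audio/train', 'train_set')
# which writes <sounds_data_dir>/train_set/train.data with one row of
# extracted features per audio file found under /data/audio/train.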
def train_sounds(model, paths):
"""Load the data and process it before training and testing"""
dataframes = []
for path in paths:
dataframes.extend(return_from_path(load_object, path, '.data'))
features_and_labels = pd.concat(dataframes)  # target API: pandas.concat
# -*- coding: utf-8 -*-
import random
from pandasqt.compat import Qt, QtCore, QtGui
import pytest
import pytestqt
import decimal
import numpy
import pandas
from pandasqt.models.DataFrameModel import DataFrameModel, DATAFRAME_ROLE
from pandasqt.models.DataSearch import DataSearch
from pandasqt.models.SupportedDtypes import SupportedDtypes
def test_initDataFrame():
model = DataFrameModel()
assert model.dataFrame().empty
def test_initDataFrameWithDataFrame():
dataFrame = pandas.DataFrame([0], columns=['A'])
model = DataFrameModel(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
def test_setDataFrame():
dataFrame = pandas.DataFrame([0], columns=['A'])
model = DataFrameModel()
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
with pytest.raises(TypeError) as excinfo:
model.setDataFrame(None)
assert "pandas.core.frame.DataFrame" in unicode(excinfo.value)
@pytest.mark.parametrize(
"copy, operator",
[
(True, numpy.not_equal),
(False, numpy.equal)
]
)
def test_copyDataFrame(copy, operator):
dataFrame = pandas.DataFrame([0], columns=['A'])
model = DataFrameModel(dataFrame, copyDataFrame=copy)
assert operator(id(model.dataFrame()), id(dataFrame))
model.setDataFrame(dataFrame, copyDataFrame=copy)
assert operator(id(model.dataFrame()), id(dataFrame))
def test_TimestampFormat():
model = DataFrameModel()
assert model.timestampFormat == Qt.ISODate
newFormat = u"yy-MM-dd hh:mm"
model.timestampFormat = newFormat
assert model.timestampFormat == newFormat
with pytest.raises(TypeError) as excinfo:
model.timestampFormat = "yy-MM-dd hh:mm"
assert "unicode" in unicode(excinfo.value)
#def test_signalUpdate(qtbot):
#model = DataFrameModel()
#with qtbot.waitSignal(model.layoutAboutToBeChanged) as layoutAboutToBeChanged:
#model.signalUpdate()
#assert layoutAboutToBeChanged.signal_triggered
#with qtbot.waitSignal(model.layoutChanged) as blocker:
#model.signalUpdate()
#assert blocker.signal_triggered
@pytest.mark.parametrize(
"orientation, role, index, expectedHeader",
[
(Qt.Horizontal, Qt.EditRole, 0, None),
(Qt.Vertical, Qt.EditRole, 0, None),
(Qt.Horizontal, Qt.DisplayRole, 0, 'A'),
(Qt.Horizontal, Qt.DisplayRole, 1, None), # run into IndexError
(Qt.Vertical, Qt.DisplayRole, 0, 0),
(Qt.Vertical, Qt.DisplayRole, 1, 1)
]
)
def test_headerData(orientation, role, index, expectedHeader):
model = DataFrameModel(pandas.DataFrame([0], columns=['A']))
assert model.headerData(index, orientation, role) == expectedHeader
def test_flags():
model = DataFrameModel(pandas.DataFrame([0], columns=['A']))
index = model.index(0, 0)
assert index.isValid()
assert model.flags(index) == Qt.ItemIsSelectable | Qt.ItemIsEnabled
model.enableEditing(True)
assert model.flags(index) == Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsEditable
model.setDataFrame(pandas.DataFrame([True], columns=['A']))
index = model.index(0, 0)
model.enableEditing(True)
assert model.flags(index) != Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsEditable
assert model.flags(index) == Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsUserCheckable
def test_rowCount():
model = DataFrameModel(pandas.DataFrame([0], columns=['A']))
assert model.rowCount() == 1
model = DataFrameModel(pandas.DataFrame(numpy.arange(100), columns=['A']))
assert model.rowCount() == 100
def test_columnCount():
model = DataFrameModel(pandas.DataFrame([0], columns=['A']))  # target API: pandas.DataFrame
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime
import itertools
import numpy as np
import pytest
from pandas.compat import u
import pandas as pd
from pandas import (
DataFrame, Index, MultiIndex, Period, Series, Timedelta, date_range)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
class TestDataFrameReshape(TestData):
def test_pivot(self):
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data)
pivoted = frame.pivot(
index='index', columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
tm.assert_frame_equal(pivoted, expected)
# name tracking
assert pivoted.index.name == 'index'
assert pivoted.columns.name == 'columns'
# don't specify values
pivoted = frame.pivot(index='index', columns='columns')
assert pivoted.index.name == 'index'
assert pivoted.columns.names == (None, 'columns')
def test_pivot_duplicates(self):
data = DataFrame({'a': ['bar', 'bar', 'foo', 'foo', 'foo'],
'b': ['one', 'two', 'one', 'one', 'two'],
'c': [1., 2., 3., 3., 4.]})
with pytest.raises(ValueError, match='duplicate entries'):
data.pivot('a', 'b', 'c')
def test_pivot_empty(self):
df = DataFrame({}, columns=['a', 'b', 'c'])
result = df.pivot('a', 'b', 'c')
expected = DataFrame()
tm.assert_frame_equal(result, expected, check_names=False)
def test_pivot_integer_bug(self):
df = DataFrame(data=[("A", "1", "A1"), ("B", "2", "B2")])
result = df.pivot(index=1, columns=0, values=2)
repr(result)
tm.assert_index_equal(result.columns, Index(['A', 'B'], name=0))
def test_pivot_index_none(self):
# gh-3962
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data).set_index('index')
result = frame.pivot(columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
assert_frame_equal(result, expected)
# omit values
result = frame.pivot(columns='columns')
expected.columns = pd.MultiIndex.from_tuples([('values', 'One'),
('values', 'Two')],
names=[None, 'columns'])
expected.index.name = 'index'
tm.assert_frame_equal(result, expected, check_names=False)
assert result.index.name == 'index'
assert result.columns.names == (None, 'columns')
expected.columns = expected.columns.droplevel(0)
result = frame.pivot(columns='columns', values='values')
expected.columns.name = 'columns'
tm.assert_frame_equal(result, expected)
def test_stack_unstack(self):
df = self.frame.copy()
df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)
stacked = df.stack()
stacked_df = DataFrame({'foo': stacked, 'bar': stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
assert_frame_equal(unstacked, df)
assert_frame_equal(unstacked_df['bar'], df)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
assert_frame_equal(unstacked_cols.T, df)
assert_frame_equal(unstacked_cols_df['bar'].T, df)
def test_stack_mixed_level(self):
# GH 18310
levels = [range(3), [3, 'a', 'b'], [1, 2]]
# flat columns:
df = DataFrame(1, index=levels[0], columns=levels[1])
result = df.stack()
expected = Series(1, index=MultiIndex.from_product(levels[:2]))
assert_series_equal(result, expected)
# MultiIndex columns:
df = DataFrame(1, index=levels[0],
columns=MultiIndex.from_product(levels[1:]))
result = df.stack(1)
expected = DataFrame(1, index=MultiIndex.from_product([levels[0],
levels[2]]),
columns=levels[1])
assert_frame_equal(result, expected)
# as above, but used labels in level are actually of homogeneous type
result = df[['a', 'b']].stack(1)
expected = expected[['a', 'b']]
assert_frame_equal(result, expected)
def test_unstack_fill(self):
# GH #9746: fill_value keyword argument for Series
# and DataFrame unstack
# From a series
data = Series([1, 2, 4, 5], dtype=np.int16)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack(fill_value=-1)
expected = DataFrame({'a': [1, -1, 5], 'b': [2, 4, -1]},
index=['x', 'y', 'z'], dtype=np.int16)
assert_frame_equal(result, expected)
# From a series with incorrect data type for fill_value
result = data.unstack(fill_value=0.5)
expected = DataFrame({'a': [1, 0.5, 5], 'b': [2, 4, 0.5]},
index=['x', 'y', 'z'], dtype=np.float)
assert_frame_equal(result, expected)
# GH #13971: fill_value when unstacking multiple levels:
df = DataFrame({'x': ['a', 'a', 'b'],
'y': ['j', 'k', 'j'],
'z': [0, 1, 2],
'w': [0, 1, 2]}).set_index(['x', 'y', 'z'])
unstacked = df.unstack(['x', 'y'], fill_value=0)
key = ('<KEY>')
expected = unstacked[key]
result = pd.Series([0, 0, 2], index=unstacked.index, name=key)
assert_series_equal(result, expected)
stacked = unstacked.stack(['x', 'y'])
stacked.index = stacked.index.reorder_levels(df.index.names)
# Workaround for GH #17886 (unnecessarily casts to float):
stacked = stacked.astype(np.int64)
result = stacked.loc[df.index]
assert_frame_equal(result, df)
# From a series
s = df['w']
result = s.unstack(['x', 'y'], fill_value=0)
expected = unstacked['w']
assert_frame_equal(result, expected)
def test_unstack_fill_frame(self):
# From a dataframe
rows = [[1, 2], [3, 4], [5, 6], [7, 8]]
df = DataFrame(rows, columns=list('AB'), dtype=np.int32)
df.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = df.unstack(fill_value=-1)
rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.int32)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
# From a mixed type dataframe
df['A'] = df['A'].astype(np.int16)
df['B'] = df['B'].astype(np.float64)
result = df.unstack(fill_value=-1)
expected['A'] = expected['A'].astype(np.int16)
expected['B'] = expected['B'].astype(np.float64)
assert_frame_equal(result, expected)
# From a dataframe with incorrect data type for fill_value
result = df.unstack(fill_value=0.5)
rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.float)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_datetime(self):
# Test unstacking with date times
dv = pd.date_range('2012-01-01', periods=4).values
data = Series(dv)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [dv[0], pd.NaT, dv[3]],
'b': [dv[1], dv[2], pd.NaT]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=dv[0])
expected = DataFrame({'a': [dv[0], dv[0], dv[3]],
'b': [dv[1], dv[2], dv[0]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_timedelta(self):
# Test unstacking with time deltas
td = [Timedelta(days=i) for i in range(4)]
data = Series(td)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [td[0], pd.NaT, td[3]],
'b': [td[1], td[2], pd.NaT]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=td[1])
expected = DataFrame({'a': [td[0], td[1], td[3]],
'b': [td[1], td[2], td[1]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_period(self):
# Test unstacking with period
periods = [Period('2012-01'), Period('2012-02'), Period('2012-03'),
Period('2012-04')]
data = Series(periods)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [periods[0], None, periods[3]],
'b': [periods[1], periods[2], None]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=periods[1])
expected = DataFrame({'a': [periods[0], periods[1], periods[3]],
'b': [periods[1], periods[2], periods[1]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_categorical(self):
# Test unstacking with categorical
data = pd.Series(['a', 'b', 'c', 'a'], dtype='category')
data.index = pd.MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')],
)
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame({'a': pd.Categorical(list('axa'),
categories=list('abc')),
'b': pd.Categorical(list('bcx'),
categories=list('abc'))},
index=list('xyz'))
assert_frame_equal(result, expected)
# Fill with non-category results in a TypeError
msg = r"'fill_value' \('d'\) is not in"
with pytest.raises(TypeError, match=msg):
data.unstack(fill_value='d')
# Fill with category value replaces missing values as expected
result = data.unstack(fill_value='c')
expected = DataFrame({'a': pd.Categorical(list('aca'),
categories=list('abc')),
'b': pd.Categorical(list('bcc'),
categories=list('abc'))},
index=list('xyz'))
assert_frame_equal(result, expected)
def test_unstack_preserve_dtypes(self):
# Checks fix for #11847
df = pd.DataFrame(dict(state=['IL', 'MI', 'NC'],
index=['a', 'b', 'c'],
some_categories=pd.Series(['a', 'b', 'c']
).astype('category'),
A=np.random.rand(3),
B=1,
C='foo',
D=pd.Timestamp('20010102'),
E=pd.Series([1.0, 50.0, 100.0]
).astype('float32'),
F=pd.Series([3.0, 4.0, 5.0]).astype('float64'),
G=False,
H=pd.Series([1, 200, 923442], dtype='int8')))
def unstack_and_compare(df, column_name):
unstacked1 = df.unstack([column_name])
unstacked2 = df.unstack(column_name)
assert_frame_equal(unstacked1, unstacked2)
df1 = df.set_index(['state', 'index'])
unstack_and_compare(df1, 'index')
df1 = df.set_index(['state', 'some_categories'])
unstack_and_compare(df1, 'some_categories')
df1 = df.set_index(['F', 'C'])
unstack_and_compare(df1, 'F')
df1 = df.set_index(['G', 'B', 'state'])
unstack_and_compare(df1, 'B')
df1 = df.set_index(['E', 'A'])
unstack_and_compare(df1, 'E')
df1 = df.set_index(['state', 'index'])
s = df1['A']
unstack_and_compare(s, 'index')
def test_stack_ints(self):
columns = MultiIndex.from_tuples(list(itertools.product(range(3),
repeat=3)))
df = DataFrame(np.random.randn(30, 27), columns=columns)
assert_frame_equal(df.stack(level=[1, 2]),
df.stack(level=1).stack(level=1))
assert_frame_equal(df.stack(level=[-2, -1]),
df.stack(level=1).stack(level=1))
df_named = df.copy()
df_named.columns.set_names(range(3), inplace=True)
assert_frame_equal(df_named.stack(level=[1, 2]),
df_named.stack(level=1).stack(level=1))
def test_stack_mixed_levels(self):
columns = MultiIndex.from_tuples(
[('A', 'cat', 'long'), ('B', 'cat', 'long'),
('A', 'dog', 'short'), ('B', 'dog', 'short')],
names=['exp', 'animal', 'hair_length']
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
# GH #8584: Need to check that stacking works when a number
# is passed that is both a level name and in the range of
# the level numbers
df2 = df.copy()
df2.columns.names = ['exp', 'animal', 1]
assert_frame_equal(df2.stack(level=['animal', 1]),
animal_hair_stacked, check_names=False)
assert_frame_equal(df2.stack(level=['exp', 1]),
exp_hair_stacked, check_names=False)
# When mixed types are passed and the ints are not level
# names, raise
msg = ("level should contain all level names or all level numbers, not"
" a mixture of the two")
with pytest.raises(ValueError, match=msg):
df2.stack(level=['animal', 0])
# GH #8584: Having 0 in the level names could raise a
# strange error about lexsort depth
df3 = df.copy()
df3.columns.names = ['exp', 'animal', 0]
assert_frame_equal(df3.stack(level=['animal', 0]),
animal_hair_stacked, check_names=False)
def test_stack_int_level_names(self):
columns = MultiIndex.from_tuples(
[('A', 'cat', 'long'), ('B', 'cat', 'long'),
('A', 'dog', 'short'), ('B', 'dog', 'short')],
names=['exp', 'animal', 'hair_length']
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
exp_animal_stacked = df.stack(level=['exp', 'animal'])
animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
df2 = df.copy()
df2.columns.names = [0, 1, 2]
assert_frame_equal(df2.stack(level=[1, 2]), animal_hair_stacked,
check_names=False)
assert_frame_equal(df2.stack(level=[0, 1]), exp_animal_stacked,
check_names=False)
assert_frame_equal(df2.stack(level=[0, 2]), exp_hair_stacked,
check_names=False)
# Out-of-order int column names
df3 = df.copy()
df3.columns.names = [2, 0, 1]
assert_frame_equal(df3.stack(level=[0, 1]), animal_hair_stacked,
check_names=False)
assert_frame_equal(df3.stack(level=[2, 0]), exp_animal_stacked,
check_names=False)
assert_frame_equal(df3.stack(level=[2, 1]), exp_hair_stacked,
check_names=False)
def test_unstack_bool(self):
df = DataFrame([False, False],
index=MultiIndex.from_arrays([['a', 'b'], ['c', 'l']]),
columns=['col'])
rs = df.unstack()
xp = DataFrame(np.array([[False, np.nan], [np.nan, False]],
dtype=object),
index=['a', 'b'],
columns=MultiIndex.from_arrays([['col', 'col'],
['c', 'l']]))
assert_frame_equal(rs, xp)
def test_unstack_level_binding(self):
# GH9856
mi = pd.MultiIndex(
levels=[[u('foo'), u('bar')], [u('one'), u('two')],
[u('a'), u('b')]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]],
names=[u('first'), u('second'), u('third')])
s = pd.Series(0, index=mi)
result = s.unstack([1, 2]).stack(0)
expected_mi = pd.MultiIndex(
levels=[['foo', 'bar'], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=['first', 'second'])
expected = pd.DataFrame(np.array([[np.nan, 0],
[0, np.nan],
[np.nan, 0],
[0, np.nan]],
dtype=np.float64),
index=expected_mi,
columns=pd.Index(['a', 'b'], name='third'))
assert_frame_equal(result, expected)
def test_unstack_to_series(self):
# check reversibility
data = self.frame.unstack()
assert isinstance(data, Series)
undo = data.unstack().T
assert_frame_equal(undo, self.frame)
# check NA handling
data = DataFrame({'x': [1, 2, np.NaN], 'y': [3.0, 4, np.NaN]})
data.index = Index(['a', 'b', 'c'])
result = data.unstack()
midx = MultiIndex(levels=[['x', 'y'], ['a', 'b', 'c']],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = Series([1, 2, np.NaN, 3, 4, np.NaN], index=midx)
assert_series_equal(result, expected)
# check composability of unstack
old_data = data.copy()
for _ in range(4):
data = data.unstack()
assert_frame_equal(old_data, data)
def test_unstack_dtypes(self):
# GH 2929
rows = [[1, 1, 3, 4],
[1, 2, 3, 4],
[2, 1, 3, 4],
[2, 2, 3, 4]]
df = DataFrame(rows, columns=list('ABCD'))
result = df.get_dtype_counts()
expected = Series({'int64': 4})
assert_series_equal(result, expected)
# single dtype
df2 = df.set_index(['A', 'B'])
df3 = df2.unstack('B')
result = df3.get_dtype_counts()
expected = Series({'int64': 4})
assert_series_equal(result, expected)
# mixed
df2 = df.set_index(['A', 'B'])
df2['C'] = 3.
df3 = df2.unstack('B')
result = df3.get_dtype_counts()
expected = Series({'int64': 2, 'float64': 2})
assert_series_equal(result, expected)
df2['D'] = 'foo'
df3 = df2.unstack('B')
result = df3.get_dtype_counts()
expected = Series({'float64': 2, 'object': 2})
assert_series_equal(result, expected)
# GH7405
for c, d in (np.zeros(5), np.zeros(5)), \
(np.arange(5, dtype='f8'), np.arange(5, 10, dtype='f8')):
df = DataFrame({'A': ['a'] * 5, 'C': c, 'D': d,
'B': pd.date_range('2012-01-01', periods=5)})
right = df.iloc[:3].copy(deep=True)
df = df.set_index(['A', 'B'])
df['D'] = df['D'].astype('int64')
left = df.iloc[:3].unstack(0)
right = right.set_index(['A', 'B']).unstack(0)
right[('D', 'a')] = right[('D', 'a')].astype('int64')
assert left.shape == (3, 2)
tm.assert_frame_equal(left, right)
def test_unstack_non_unique_index_names(self):
idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')],
names=['c1', 'c1'])
df = DataFrame([1, 2], index=idx)
with pytest.raises(ValueError):
df.unstack('c1')
with pytest.raises(ValueError):
df.T.stack('c1')
def test_unstack_unused_levels(self):
# GH 17845: unused codes in index make unstack() cast int to float
idx = pd.MultiIndex.from_product([['a'], ['A', 'B', 'C', 'D']])[:-1]
df = pd.DataFrame([[1, 0]] * 3, index=idx)
result = df.unstack()
exp_col = pd.MultiIndex.from_product([[0, 1], ['A', 'B', 'C']])
expected = pd.DataFrame([[1, 1, 1, 0, 0, 0]], index=['a'],
columns=exp_col)
tm.assert_frame_equal(result, expected)
assert((result.columns.levels[1] == idx.levels[1]).all())
# Unused items on both levels
levels = [[0, 1, 7], [0, 1, 2, 3]]
codes = [[0, 0, 1, 1], [0, 2, 0, 2]]
idx = pd.MultiIndex(levels, codes)
block = np.arange(4).reshape(2, 2)
df = pd.DataFrame(np.concatenate([block, block + 4]), index=idx)
result = df.unstack()
expected = pd.DataFrame(np.concatenate([block * 2, block * 2 + 1],
axis=1),
columns=idx)
tm.assert_frame_equal(result, expected)
assert((result.columns.levels[1] == idx.levels[1]).all())
# With mixed dtype and NaN
levels = [['a', 2, 'c'], [1, 3, 5, 7]]
codes = [[0, -1, 1, 1], [0, 2, -1, 2]]
idx = pd.MultiIndex(levels, codes)
data = np.arange(8)
df = pd.DataFrame(data.reshape(4, 2), index=idx)
cases = ((0, [13, 16, 6, 9, 2, 5, 8, 11],
[np.nan, 'a', 2], [np.nan, 5, 1]),
(1, [8, 11, 1, 4, 12, 15, 13, 16],
[np.nan, 5, 1], [np.nan, 'a', 2]))
for level, idces, col_level, idx_level in cases:
result = df.unstack(level=level)
exp_data = np.zeros(18) * np.nan
exp_data[idces] = data
cols = pd.MultiIndex.from_product([[0, 1], col_level])
expected = pd.DataFrame(exp_data.reshape(3, 6),
index=idx_level, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("cols", [['A', 'C'], slice(None)])
def test_unstack_unused_level(self, cols):
# GH 18562 : unused codes on the unstacked level
df = pd.DataFrame([[2010, 'a', 'I'],
[2011, 'b', 'II']],
columns=['A', 'B', 'C'])
ind = df.set_index(['A', 'B', 'C'], drop=False)
selection = ind.loc[(slice(None), slice(None), 'I'), cols]
result = selection.unstack()
expected = ind.iloc[[0]][cols]
expected.columns = MultiIndex.from_product([expected.columns, ['I']],
names=[None, 'C'])
expected.index = expected.index.droplevel('C')
tm.assert_frame_equal(result, expected)
def test_unstack_nan_index(self): # GH7466
cast = lambda val: '{0:1}'.format('' if val != val else val)
def verify(df):
mk_list = lambda a: list(a) if isinstance(a, tuple) else [a]
rows, cols = df.notna().values.nonzero()
for i, j in zip(rows, cols):
left = sorted(df.iloc[i, j].split('.'))
right = mk_list(df.index[i]) + mk_list(df.columns[j])
right = sorted(list(map(cast, right)))
assert left == right
df = DataFrame({'jim': ['a', 'b', np.nan, 'd'],
'joe': ['w', 'x', 'y', 'z'],
'jolie': ['a.w', 'b.x', ' .y', 'd.z']})
left = df.set_index(['jim', 'joe']).unstack()['jolie']
right = df.set_index(['joe', 'jim']).unstack()['jolie'].T
assert_frame_equal(left, right)
for idx in itertools.permutations(df.columns[:2]):
mi = df.set_index(list(idx))
for lev in range(2):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == len(df)
verify(udf['jolie'])
df = DataFrame({'1st': ['d'] * 3 + [np.nan] * 5 + ['a'] * 2 +
['c'] * 3 + ['e'] * 2 + ['b'] * 5,
'2nd': ['y'] * 2 + ['w'] * 3 + [np.nan] * 3 +
['z'] * 4 + [np.nan] * 3 + ['x'] * 3 + [np.nan] * 2,
'3rd': [67, 39, 53, 72, 57, 80, 31, 18, 11, 30, 59,
50, 62, 59, 76, 52, 14, 53, 60, 51]})
df['4th'], df['5th'] = \
df.apply(lambda r: '.'.join(map(cast, r)), axis=1), \
df.apply(lambda r: '.'.join(map(cast, r.iloc[::-1])), axis=1)
for idx in itertools.permutations(['1st', '2nd', '3rd']):
mi = df.set_index(list(idx))
for lev in range(3):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == 2 * len(df)
for col in ['4th', '5th']:
verify(udf[col])
# GH7403
df = pd.DataFrame(
{'A': list('aaaabbbb'), 'B': range(8), 'C': range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack(0)
vals = [[3, 0, 1, 2, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, 4, 5, 6, 7]]
vals = list(map(list, zip(*vals)))
idx = Index([np.nan, 0, 1, 2, 4, 5, 6, 7], name='B')
cols = MultiIndex(levels=[['C'], ['a', 'b']],
codes=[[0, 0], [0, 1]],
names=[None, 'A'])
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
df = DataFrame({'A': list('aaaabbbb'), 'B': list(range(4)) * 2,
'C': range(8)})
df.iloc[2, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack(0)
vals = [[2, np.nan], [0, 4], [1, 5], [np.nan, 6], [3, 7]]
cols = MultiIndex(levels=[['C'], ['a', 'b']],
codes=[[0, 0], [0, 1]],
names=[None, 'A'])
idx = Index([np.nan, 0, 1, 2, 3], name='B')
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
df = pd.DataFrame({'A': list('aaaabbbb'), 'B': list(range(4)) * 2,
'C': range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack(0)
vals = [[3, np.nan], [0, 4], [1, 5], [2, 6], [np.nan, 7]]
cols = MultiIndex(levels=[['C'], ['a', 'b']],
codes=[[0, 0], [0, 1]],
names=[None, 'A'])
idx = Index([np.nan, 0, 1, 2, 3], name='B')
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
# GH7401
df = pd.DataFrame({'A': list('aaaaabbbbb'),
'B': (date_range('2012-01-01', periods=5)
.tolist() * 2),
'C': np.arange(10)})
df.iloc[3, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack()
vals = np.array([[3, 0, 1, 2, np.nan, 4], [np.nan, 5, 6, 7, 8, 9]])
idx = Index(['a', 'b'], name='A')
cols = MultiIndex(levels=[['C'], date_range('2012-01-01', periods=5)],
codes=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
# GH4862
vals = [['Hg', np.nan, np.nan, 680585148],
['U', 0.0, np.nan, 680585148],
['Pb', 7.07e-06, np.nan, 680585148],
['Sn', 2.3614e-05, 0.0133, 680607017],
['Ag', 0.0, 0.0133, 680607017],
['Hg', -0.00015, 0.0133, 680607017]]
df = DataFrame(vals, columns=['agent', 'change', 'dosage', 's_id'],
index=[17263, 17264, 17265, 17266, 17267, 17268])
left = df.copy().set_index(['s_id', 'dosage', 'agent']).unstack()
vals = [[np.nan, np.nan, 7.07e-06, np.nan, 0.0],
[0.0, -0.00015, np.nan, 2.3614e-05, np.nan]]
idx = MultiIndex(levels=[[680585148, 680607017], [0.0133]],
codes=[[0, 1], [-1, 0]],
names=['s_id', 'dosage'])
cols = MultiIndex(levels=[['change'], ['Ag', 'Hg', 'Pb', 'Sn', 'U']],
codes=[[0, 0, 0, 0, 0], [0, 1, 2, 3, 4]],
names=[None, 'agent'])
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
left = df.loc[17264:].copy().set_index(['s_id', 'dosage', 'agent'])
assert_frame_equal(left.unstack(), right)
# GH9497 - multiple unstack with nulls
df = DataFrame({'1st': [1, 2, 1, 2, 1, 2],
'2nd': pd.date_range('2014-02-01', periods=6,
freq='D'),
'jim': 100 + np.arange(6),
'joe': (np.random.randn(6) * 10).round(2)})
df['3rd'] = df['2nd'] - pd.Timestamp('2014-02-02')
df.loc[1, '2nd'] = df.loc[3, '2nd'] = np.nan
df.loc[1, '3rd'] = df.loc[4, '3rd'] = np.nan
left = df.set_index(['1st', '2nd', '3rd']).unstack(['2nd', '3rd'])
assert left.notna().values.sum() == 2 * len(df)
for col in ['jim', 'joe']:
for _, r in df.iterrows():
key = r['1st'], (col, r['2nd'], r['3rd'])
assert r[col] == left.loc[key]
def test_stack_datetime_column_multiIndex(self):
# GH 8039
t = datetime(2014, 1, 1)
df = DataFrame(
[1, 2, 3, 4], columns=MultiIndex.from_tuples([(t, 'A', 'B')]))
result = df.stack()
eidx = MultiIndex.from_product([(0, 1, 2, 3), ('B',)])
ecols = MultiIndex.from_tuples([(t, 'A')])
expected = DataFrame([1, 2, 3, 4], index=eidx, columns=ecols)
assert_frame_equal(result, expected)
def test_stack_partial_multiIndex(self):
# GH 8844
def _test_stack_with_multiindex(multiindex):
df = DataFrame(np.arange(3 * len(multiindex))
.reshape(3, len(multiindex)),
columns=multiindex)
for level in (-1, 0, 1, [0, 1], [1, 0]):
result = df.stack(level=level, dropna=False)
if isinstance(level, int):
# Stacking a single level should not make any all-NaN rows,
# so df.stack(level=level, dropna=False) should be the same
# as df.stack(level=level, dropna=True).
expected = df.stack(level=level, dropna=True)
if isinstance(expected, Series):
assert_series_equal(result, expected)
else:
assert_frame_equal(result, expected)  # target API: pandas.util.testing.assert_frame_equal
from binance.client import Client
import keys
from pandas import DataFrame as df
from datetime import datetime
import trading_key
client=Client(api_key=keys.Pkeys, api_secret=keys.Skeys)
#get candle data
def candle_data(symbols, intervals):
candles=client.get_klines(symbol=symbols, interval=intervals)
#create (date) dataframe
candles_data_frame=df(candles)
candles_data_frame_date=candles_data_frame[0]
#create the empty date list
final_date=[]
#convert timestamp to readable date and append it to the list
for time in candles_data_frame_date.unique():
readable=datetime.fromtimestamp(int(time/1000))
final_date.append(readable)
#drop the first and last columns of the dataframe
candles_data_frame.pop(0)
candles_data_frame.pop(len(candles_data_frame.columns))
dataframe_final_date=df(final_date)
dataframe_final_date.columns=['date']
final_data_frame=candles_data_frame.join(dataframe_final_date)
#index by date
final_data_frame.set_index('date', inplace=True)
final_data_frame.columns=["open", "high", "low", "close", "volume", "close time", "quote asset volume", "number of trades", "taker buy base asset volume", "taker buy quote asset volume"]
final_data_frame.to_csv(r'C:\Yiru Xiong-Professional\实习\CryptoAlgoWheel\S1\task2\S1_task2_candle data', index=True)
#candle_data('BTCUSDT',Client.KLINE_INTERVAL_30MINUTE)
#get transaction/trades data
def trades_data(symbols):
trades=client.get_recent_trades(symbol=symbols)
trades_df = df(trades)  # target API: pandas.DataFrame
import pandas as pd
import numpy as np
from pathlib import Path
from datetime import datetime as dt
def mergeManagers(managers, gameLogs):
#Sum up doubled data
managers = managers.groupby(['yearID','playerID'], as_index=False)[['Games','Wins','Losses']].sum()
#Get visiting managers
visitingManagers = gameLogs[['row','Date','Visiting team manager ID']]
visitingManagers['yearID'] = pd.DatetimeIndex(pd.to_datetime(visitingManagers['Date'])).year-1
visitingManagers = pd.merge(visitingManagers, managers, left_on=['yearID','Visiting team manager ID'], right_on=['yearID','playerID'], how="left")
#Get home managers
homeManagers = gameLogs[['row','Date','Home team manager ID']]
homeManagers['yearID'] = pd.DatetimeIndex(pd.to_datetime(homeManagers['Date'])).year-1
homeManagers = pd.merge(homeManagers, managers, left_on=['yearID','Home team manager ID'], right_on=['yearID','playerID'], how="left")
#Merge managers
homes = homeManagers[['row','Games','Wins','Losses']]
visitings = visitingManagers[['row','Games','Wins','Losses']]
return pd.merge(homes, visitings, on='row', suffixes=(' home manager',' visiting manager'))
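# The merged frame has one row per game ('row') and carries the managers'
# previous-season totals for both dugouts, e.g. 'Wins home manager' and
# 'Wins visiting manager' (yearID is the game year minus one).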
def mergePitchings(pitchers, gameLogs):
#Get aggregators for doubled data
aggregators = {}
for column in pitchers.drop(columns=['yearID','playerID']).columns:
if column.find("average")>-1:
aggregators[column] = 'mean'
else:
aggregators[column] = 'sum'
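# e.g. (illustrative column names) {'ERA average': 'mean', 'Strikeouts': 'sum'}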
#Aggregate doubled data
pitchers = pitchers.groupby(['yearID','playerID'], as_index=False).agg(aggregators)
#Get visiting pitchers
visitingPitchers = gameLogs[['row','Date','Visiting starting pitcher ID']]
visitingPitchers['yearID'] = pd.DatetimeIndex(pd.to_datetime(visitingPitchers['Date'])).year-1
visitingPitchers = pd.merge(visitingPitchers, pitchers, left_on=['yearID','Visiting starting pitcher ID'], right_on=['yearID','playerID'], how="left")
#Get home pitchers
homePitchers = gameLogs[['row','Date','Home starting pitcher ID']]
homePitchers['yearID'] = pd.DatetimeIndex(pd.to_datetime(homePitchers['Date'])).year-1
homePitchers = pd.merge(homePitchers, pitchers, left_on=['yearID','Home starting pitcher ID'], right_on=['yearID','playerID'], how="left")
#Merge pitchers
homes = homePitchers.drop(columns=['yearID','Home starting pitcher ID','playerID','Date'])
visitings = visitingPitchers.drop(columns=['yearID','Visiting starting pitcher ID','playerID','Date'])
return pd.merge(homes, visitings, on='row', suffixes=(' home pitcher',' visiting pitcher'))
def mergePeople(people, gameLogs):
#Encode people
people['bats right'] = (people['bats']=="R") | (people['bats']=="B")
people['bats left'] = (people['bats']=="L") | (people['bats']=="B")
people['throws right'] = people['throws']=="R"
people = people.drop(columns=['bats','throws'])
#Merge people
allPeople = []
for IDColumn in gameLogs.columns:
if IDColumn.find("starting")>-1:
merged = pd.merge(gameLogs[['row','Date',IDColumn]], people, how="left", left_on=[IDColumn], right_on=['playerID'])
merged['age'] = (pd.to_datetime(merged['Date']) - pd.to_datetime(merged['birthdate'])) / np.timedelta64(1, 'Y')
            newColumns = {"age":IDColumn.replace(" ID"," age")}
for column in people.drop(columns=['playerID','birthdate']).columns:
newColumns[column] = IDColumn.replace(" ID"," "+str(column))
merged = merged.rename(columns=newColumns)
allPeople.append(merged[['row']+list(newColumns.values())])
mergedPeople = gameLogs['row']
for merSal in allPeople:
mergedPeople = | pd.merge(mergedPeople, merSal, how="left", on='row') | pandas.merge |
import pandas as pd
class RecHash:
def __init__(self):
# Combinations of header labels
self.base = ['Rk', 'Date', 'G#', 'Age', 'Tm', 'Home', 'Opp', 'Result', 'GS']
self.receiving = ['Rec_Tgt', 'Rec_Rec', 'Rec_Yds', 'Rec_Y/R', 'Rec_TD', 'Rec_Ctch%', 'Rec_Y/Tgt']
self.rushing = ['rush_att', 'rush_yds', 'rush_Y/A', 'rush_TD']
self.passing = ['pass_cmp', 'pass_att', 'Cmp%', 'pass_yds', 'pass_td', 'Int', 'Rate', 'Sk', 'Sk-Yds',
'pass_Y/A', 'AY/A']
self.rush_sk = ['rush_sk', 'tkl', 'Ast']
self.scoring2p = ['2pt']
self.scoring = ['Any_TD', 'Any_Pts']
self.punting = ['Pnt', 'Pnt_Yds', 'Y/P', 'Blck']
self.kick_rt = ['Kick_Rt', 'Kick_RtYds', 'Y/Rt', 'Kick_TD']
self.punt_rt = ['Pnt_rt', 'Pnt_Yds', 'Y/Pnt', 'Pnt_TD']
def md5b3c4237d9a10de8cfaad61852cb552c4(self, df):
# Rename columns
df.columns = self.base + self.receiving + self.rushing + self.kick_rt + self.punt_rt + self.scoring + self.rush_sk
# add missing cols
df = pd.concat([df, pd.DataFrame(columns=self.scoring2p)], axis=1)
# set all the new columns to zero
df.loc[:, self.scoring2p] = 0
return df
def md5bcb96297b50fb2120f475e8e05fbabcd(self,df):
# Rename columns
df.columns = self.base + self.receiving + self.rushing + self.passing + self.kick_rt + self.punt_rt + self.scoring2p + self.scoring
# add missing cols
df = pd.concat([df, | pd.DataFrame(columns=self.rush_sk) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from pathlib import Path
from time import time
from typing import List, Tuple
import numpy as np
from pandas import DataFrame, Series, concat
from sklearn.base import BaseEstimator
from sklearn.model_selection import StratifiedKFold, cross_validate
from sklearn.pipeline import Pipeline
from src.metrics_helpers import my_eval_metric
from src.visualization_helpers import show_evaluation_plots
def model_cross_validator(
pipe: BaseEstimator,
model_name: str,
num_feats_list: List,
X: DataFrame,
y: Series,
scoring_metric: str,
cv: StratifiedKFold = StratifiedKFold(
n_splits=5, shuffle=True, random_state=1000
),
) -> Tuple:
"""Perform KFCV for a single model or pipeline"""
d = {}
cv_results = cross_validate(
estimator=pipe,
X=X,
y=y,
cv=cv,
scoring=scoring_metric,
return_train_score=True,
return_estimator=False,
n_jobs=-1,
)
# Get validation and testing scores
d["CV Train"] = np.mean(cv_results["train_score"])
d["CV Validation"] = np.mean(cv_results["test_score"])
# Append validation and testing scores to DataFrame
df_scores = DataFrame.from_dict(d, orient="index").T
d_all_scores = {"validation_scores": cv_results["test_score"]}
df_scores_all = DataFrame.from_dict(d_all_scores, orient="index").T
df_scores_all["model_name"] = model_name
df_scores["model"] = model_name
# print(cv_results["estimator"][0].named_steps["clf"].coef_)
# print(len(cv_results["estimator"][0].named_steps["clf"].coef_))
# print(len(cols))
# display(cv_results["estimator"])
return (df_scores, df_scores_all)
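# Minimal usage sketch for model_cross_validator (the estimator, data and metric below are
# placeholders, not something this module defines):
#
#   from sklearn.linear_model import LogisticRegression
#   pipe = Pipeline(steps=[("clf", LogisticRegression(max_iter=1000))])
#   df_scores, df_scores_all = model_cross_validator(
#       pipe=pipe, model_name="LogisticRegression", num_feats_list=[],
#       X=X_train, y=y_train, scoring_metric="roc_auc",
#   )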
def configuration_assesser(
X: DataFrame,
y: Series,
preprocessor,
nums: List,
scoring_metric: str,
models: List,
model_names: List,
cv: StratifiedKFold,
) -> Tuple:
"""
Perform KFCV on model(s) and return (a) mean and (b) all CV scores
"""
df_scores = []
df_sc_all = []
full_models = []
for model, model_name in zip(models, model_names):
start = time()
# Apply pre-processing or skip, depending on model
if "Dummy" in model_name:
if not df_scores:
dstr = "most_frequent"
elif len(df_scores) == 1:
dstr = "uniform"
elif len(df_scores) == 2:
dstr = "stratified"
print(f"Cross-validation on dummy classifier with strategy={dstr}")
else:
print(f"Cross-Validation on {model_name} model")
if "RandomForest" in model_name or "DummyClassifier" in model_name:
pipe = Pipeline(steps=[("clf", model)])
print(
f"Using pipeline with no pre-processing step for {model_name}"
)
else:
pipe = Pipeline(
steps=[("preprocessor", preprocessor), ("clf", model)]
)
# Append validation and testing scores to DataFrame
df_cv_scores, df_scores_all = model_cross_validator(
pipe=pipe,
model_name=model_name,
num_feats_list=nums,
X=X,
y=y,
scoring_metric=scoring_metric,
cv=cv,
)
df_scores.append(df_cv_scores)
df_sc_all.append(df_scores_all)
full_models.append(pipe)
time_reqd = time() - start
if "Dummy" in model_name:
print(
f"Time for dummy classifier with {dstr} strategy = "
f"{time_reqd:.2f} seconds\n"
)
else:
print(f"Time for {model_name} model = {time_reqd:.2f} seconds\n")
df_sc = | concat(df_scores, axis=0) | pandas.concat |
"""
@ProjectName: DXY-2019-nCoV-Crawler
@FileName: script.py
@Author: <NAME>
@Date: 2020/1/31
"""
from git import Repo
from pymongo import MongoClient
import os
import json
import time
import logging
import datetime
import requests
import pandas as pd
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')
logger = logging.getLogger(__name__)
uri = '**Confidential**'
client = MongoClient(uri)
db = client['2019-nCoV']
collections = {
'DXYOverall': 'overall',
'DXYArea': 'area',
'DXYNews': 'news',
'DXYRumors': 'rumors'
}
time_types = ('pubDate', 'createTime', 'modifyTime', 'dataInfoTime', 'crawlTime', 'updateTime')
def git_manager(changed_files):
repo = Repo(path=os.path.split(os.path.realpath(__file__))[0])
repo.index.add(changed_files)
repo.index.commit(message='{datetime} - Change detected!'.format(datetime=datetime.datetime.now()))
origin = repo.remote('origin')
origin.push()
logger.info('Pushing to GitHub successfully!')
class DB:
def __init__(self):
self.db = db
def count(self, collection):
return self.db[collection].count_documents(filter={})
def dump(self, collection):
return self.db[collection].aggregate(
pipeline=[
{
'$sort': {
'updateTime': -1,
'crawlTime': -1
}
}
]
)
class Listener:
def __init__(self):
self.db = DB()
def run(self):
while True:
self.listener()
time.sleep(3600)
def listener(self):
changed_files = list()
for collection in collections:
json_file = open(
os.path.join(
os.path.split(os.path.realpath(__file__))[0], 'json', collection + '.json'),
'r', encoding='utf-8'
)
static_data = json.load(json_file)
json_file.close()
while True:
request = requests.get(url='https://lab.isaaclin.cn/nCoV/api/' + collections.get(collection))
if request.status_code == 200:
current_data = request.json()
break
else:
continue
if static_data != current_data:
self.json_dumper(collection=collection, content=current_data)
changed_files.append('json/' + collection + '.json')
self.csv_dumper(collection=collection)
changed_files.append('csv/' + collection + '.csv')
logger.info('{collection} updated!'.format(collection=collection))
if changed_files:
git_manager(changed_files=changed_files)
def json_dumper(self, collection, content):
json_file = open(
os.path.join(
os.path.split(os.path.realpath(__file__))[0], 'json', collection + '.json'),
'w', encoding='utf-8'
)
json.dump(content, json_file, ensure_ascii=False, indent=4)
json_file.close()
def csv_dumper(self, collection):
if collection == 'DXYArea':
structured_results = list()
results = self.db.dump(collection=collection)
for province_dict in results:
if province_dict.get('cities', None):
for city_counter in range(len(province_dict['cities'])):
city_dict = province_dict['cities'][city_counter]
result = dict()
result['provinceName'] = province_dict['provinceName']
result['provinceEnglishName'] = province_dict.get('provinceEnglishName')
result['province_zipCode'] = province_dict.get('locationId')
result['cityName'] = city_dict['cityName']
result['cityEnglishName'] = city_dict.get('cityEnglishName')
result['city_zipCode'] = city_dict.get('locationId')
result['province_confirmedCount'] = province_dict['confirmedCount']
result['province_suspectedCount'] = province_dict['suspectedCount']
result['province_curedCount'] = province_dict['curedCount']
result['province_deadCount'] = province_dict['deadCount']
result['city_confirmedCount'] = city_dict['confirmedCount']
result['city_suspectedCount'] = city_dict['suspectedCount']
result['city_curedCount'] = city_dict['curedCount']
result['city_deadCount'] = city_dict['deadCount']
result['updateTime'] = datetime.datetime.fromtimestamp(province_dict['updateTime']/1000)
structured_results.append(result)
df = | pd.DataFrame(structured_results) | pandas.DataFrame |
import numpy as np
import pandas as pd
import random
import os
import policyValueNet as net
import dataTools
import sheepEscapingEnv as env
import trainTools
import visualize as VI
class ApplyFunction:
def __init__(self, saveModelDir=None, saveGraphDir=None):
self.saveModelDir = saveModelDir
self.saveGraphDir = saveGraphDir
def __call__(self, df, dataSet, criticFunction, tfseed):
trainDataSize = df.index.get_level_values('trainingDataSize')[0]
trainData = [list(varData) for varData in zip(*dataSet[:trainDataSize])]
testDataSize = df.index.get_level_values('testDataSize')[0]
testData = [list(varData) for varData in zip(*dataSet[-testDataSize:])]
numStateSpace = df.index.get_level_values('numStateSpace')[0]
numActionSpace = df.index.get_level_values('numActionSpace')[0]
learningRate = df.index.get_level_values('learningRate')[0]
regularizationFactor = df.index.get_level_values('regularizationFactor')[0]
valueRelativeErrBound = df.index.get_level_values('valueRelativeErrBound')[0]
maxStepNum = df.index.get_level_values('maxStepNum')[0]
batchSize = df.index.get_level_values('batchSize')[0]
lossChangeThreshold = df.index.get_level_values('lossChangeThreshold')[0]
lossHistorySize = df.index.get_level_values('lossHistorySize')[0]
initActionCoefficient = df.index.get_level_values('initActionCoefficient')[0]
initValueCoefficient = df.index.get_level_values('initValueCoefficient')[0]
netNeurons = df.index.get_level_values('netNeurons')[0]
netLayers = df.index.get_level_values('netLayers')[0]
neuronsPerLayer = int(round(netNeurons/netLayers))
reportInterval = df.index.get_level_values('reportInterval')[0]
trainTerminalController = trainTools.TrainTerminalController(lossHistorySize, lossChangeThreshold)
coefficientController = trainTools.coefficientCotroller(initActionCoefficient, initValueCoefficient)
trainReporter = trainTools.TrainReporter(maxStepNum, reportInterval)
train = net.Train(maxStepNum, batchSize, trainTerminalController, coefficientController, trainReporter)
generateModel = net.GenerateModelSeparateLastLayer(numStateSpace, numActionSpace, learningRate, regularizationFactor, valueRelativeErrBound=valueRelativeErrBound, seed=tfseed)
model = generateModel([neuronsPerLayer] * netLayers)
trainedModel = train(model, trainData)
modelName = "{}data_{}x{}_minibatch_{}kIter_contState_actionDist".format(trainData, neuronsPerLayer, netLayers,
maxStepNum / 1000)
if self.saveModelDir is not None:
savePath = os.path.join(os.getcwd(), self.saveModelDir, modelName)
net.saveVariables(trainedModel, savePath)
evalTest = net.evaluate(trainedModel, testData)
return pd.Series({"testActionLoss": evalTest['actionLoss']})
def main(seed=128, tfseed=128):
random.seed(seed)
np.random.seed(4027)
dataSetPath = "72640steps_1000trajs_sheepEscapingEnv_data_actionDist.pkl"
dataSet = dataTools.loadData(dataSetPath)
random.shuffle(dataSet)
trainingDataSizes = [100,200] # [5000, 15000, 30000, 45000, 60000]
testDataSize = [100]
numStateSpace = [env.numStateSpace]
numActionSpace = [env.numActionSpace]
learningRate = [1e-4]
regularizationFactor = [0]
valueRelativeErrBound = [0.1]
maxStepNum = [100000]
batchSize = [100]
reportInterval = [1000]
lossChangeThreshold = [1e-8]
lossHistorySize = [10]
initActionCoefficient = [50]
initValueCoefficient = [1]
netNeurons = [256]
netLayers = [4]
levelNames = ["trainingDataSize", "testDataSize", "numStateSpace", "numActionSpace", "learningRate",
"regularizationFactor", "valueRelativeErrBound", "maxStepNum", "batchSize", "reportInterval",
"lossChangeThreshold", "lossHistorySize", "initActionCoefficient", "initValueCoefficient",
"netNeurons", "netLayers"]
levelValues = [trainingDataSizes, testDataSize, numStateSpace, numActionSpace, learningRate, regularizationFactor,
valueRelativeErrBound, maxStepNum, batchSize, reportInterval, lossChangeThreshold, lossHistorySize,
initActionCoefficient, initValueCoefficient, netNeurons, netLayers]
levelIndex = pd.MultiIndex.from_product(levelValues, names=levelNames)
toSplitFrame = | pd.DataFrame(index=levelIndex) | pandas.DataFrame |
import numpy as np
import pandas as pd
import random
import tensorflow.keras as keras
from sklearn.model_selection import train_test_split
def read_data(random_state=42,
otu_filename='../../Datasets/otu_table_all_80.csv',
metadata_filename='../../Datasets/metadata_table_all_80.csv'):
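    """Read OTU counts and sample metadata, one-hot encode INBREDS and Maize_Line,
    and return a 90/10 train/test split of the microbiome and domain matrices
    together with their column labels."""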
otu = pd.read_csv(otu_filename, index_col=0, header=None, sep='\t').T
otu = otu.set_index('otuids')
otu = otu.astype('int32')
metadata = pd.read_csv(metadata_filename, sep='\t')
metadata = metadata.set_index('X.SampleID')
domain = metadata[['age',
'Temperature',
'Precipitation3Days',
'INBREDS',
'Maize_Line']]
domain = pd.concat([domain, pd.get_dummies(domain['INBREDS'], prefix='INBREDS')], axis=1)
domain = pd.concat([domain, pd.get_dummies(domain['Maize_Line'], prefix='Maize_Line')], axis=1)
domain = domain.drop(['INBREDS', 'Maize_Line'], axis=1)
df = pd.concat([otu, domain], axis=1, sort=True, join='outer')
data_microbioma = df[otu.columns].to_numpy(dtype=np.float32)
data_domain = df[domain.columns].to_numpy(dtype=np.float32)
data_microbioma_train, data_microbioma_test, data_domain_train, data_domain_test = \
train_test_split(data_microbioma, data_domain, test_size=0.1, random_state=random_state)
return data_microbioma_train, data_microbioma_test, data_domain_train, data_domain_test, otu.columns, domain.columns
def read_df_with_transfer_learning_subset_fewerDomainFeatures(
metadata_names=['age','Temperature','Precipitation3Days'],
random_state=42,
otu_filename='../Datasets/otu_table_all_80.csv',
metadata_filename='../Datasets/metadata_table_all_80.csv'):
otu = pd.read_csv(otu_filename, index_col=0, header=None, sep='\t').T
otu = otu.set_index('otuids')
otu = otu.astype('int32')
metadata = pd.read_csv(metadata_filename, sep='\t')
metadata = metadata.set_index('X.SampleID')
domain = metadata[metadata_names]
if 'INBREDS' in metadata_names:
domain = pd.concat([domain, pd.get_dummies(domain['INBREDS'], prefix='INBREDS')], axis=1)
domain = domain.drop(['INBREDS'], axis=1)
elif 'Maize_Line' in metadata_names:
domain = pd.concat([domain, pd.get_dummies(domain['Maize_Line'], prefix='Maize_Line')], axis=1)
domain = domain.drop(['Maize_Line'], axis=1)
df = pd.concat([otu, domain], axis=1, sort=True, join='outer')
#data_microbioma = df[otu.columns].to_numpy(dtype=np.float32)
#data_domain = df[domain.columns].to_numpy(dtype=np.float32)
df_microbioma = df[otu.columns]
df_domain = df[domain.columns]
df_microbioma_train, df_microbioma_no_train, df_domain_train, df_domain_no_train = \
train_test_split(df_microbioma, df_domain, test_size=0.1, random_state=random_state)
# Transfer learning subset
df_microbioma_test, df_microbioma_transfer_learning, df_domain_test, df_domain_transfer_learning = \
train_test_split(df_microbioma_no_train, df_domain_no_train, test_size=100, random_state=random_state)
df_microbioma_transfer_learning_train, df_microbioma_transfer_learning_test, df_domain_transfer_learning_train, df_domain_transfer_learning_test = \
train_test_split(df_microbioma_transfer_learning, df_domain_transfer_learning, test_size=0.3, random_state=random_state)
return df_microbioma_train, df_microbioma_test, df_microbioma_transfer_learning_train, df_microbioma_transfer_learning_test, df_domain_train, df_domain_test, df_domain_transfer_learning_train, df_domain_transfer_learning_test, otu.columns, domain.columns
def read_df_with_transfer_learning_subset(random_state=42,
otu_filename='../Datasets/otu_table_all_80.csv',
metadata_filename='../Datasets/metadata_table_all_80.csv'):
otu = pd.read_csv(otu_filename, index_col=0, header=None, sep='\t').T
otu = otu.set_index('otuids')
otu = otu.astype('int32')
metadata = pd.read_csv(metadata_filename, sep='\t')
metadata = metadata.set_index('X.SampleID')
domain = metadata[['age',
'Temperature',
'Precipitation3Days',
'INBREDS',
'Maize_Line']]
domain = pd.concat([domain, pd.get_dummies(domain['INBREDS'], prefix='INBREDS')], axis=1)
domain = pd.concat([domain, pd.get_dummies(domain['Maize_Line'], prefix='Maize_Line')], axis=1)
domain = domain.drop(['INBREDS', 'Maize_Line'], axis=1)
df = pd.concat([otu, domain], axis=1, sort=True, join='outer')
#data_microbioma = df[otu.columns].to_numpy(dtype=np.float32)
#data_domain = df[domain.columns].to_numpy(dtype=np.float32)
df_microbioma = df[otu.columns]
df_domain = df[domain.columns]
df_microbioma_train, df_microbioma_no_train, df_domain_train, df_domain_no_train = \
train_test_split(df_microbioma, df_domain, test_size=0.1, random_state=random_state)
df_microbioma_test, df_microbioma_transfer_learning, df_domain_test, df_domain_transfer_learning = \
train_test_split(df_microbioma_no_train, df_domain_no_train, test_size=100, random_state=random_state)
df_microbioma_transfer_learning_train, df_microbioma_transfer_learning_test, df_domain_transfer_learning_train, df_domain_transfer_learning_test = \
train_test_split(df_microbioma_transfer_learning, df_domain_transfer_learning, test_size=0.3, random_state=random_state)
return df_microbioma_train, df_microbioma_test, df_microbioma_transfer_learning_train, df_microbioma_transfer_learning_test, df_domain_train, df_domain_test, df_domain_transfer_learning_train, df_domain_transfer_learning_test, otu.columns, domain.columns
def read_df_with_transfer_learning_subset_stratified_by_maize_line(random_state=42,
otu_filename='../Datasets/otu_table_all_80.csv',
metadata_filename='../Datasets/metadata_table_all_80.csv'):
otu = pd.read_csv(otu_filename, index_col=0, header=None, sep='\t').T
otu = otu.set_index('otuids')
otu = otu.astype('int32')
metadata = pd.read_csv(metadata_filename, sep='\t')
metadata = metadata.set_index('X.SampleID')
domain = metadata[['age',
'Temperature',
'Precipitation3Days',
'INBREDS',
'Maize_Line']]
domain = pd.concat([domain, pd.get_dummies(domain['INBREDS'], prefix='INBREDS')], axis=1)
domain = pd.concat([domain, pd.get_dummies(domain['Maize_Line'], prefix='Maize_Line')], axis=1)
domain = domain.drop(['INBREDS', 'Maize_Line'], axis=1)
df = pd.concat([otu, domain], axis=1, sort=True, join='outer')
#data_microbioma = df[otu.columns].to_numpy(dtype=np.float32)
#data_domain = df[domain.columns].to_numpy(dtype=np.float32)
df_microbioma = df[otu.columns]
df_domain = df[domain.columns]
df_microbioma_train, df_microbioma_no_train, df_domain_train, df_domain_no_train = \
train_test_split(df_microbioma, df_domain, test_size=0.1, random_state=random_state)
df_microbioma_test, df_microbioma_transfer_learning, df_domain_test, df_domain_transfer_learning = \
train_test_split(df_microbioma_no_train, df_domain_no_train, test_size=100, random_state=random_state)
    df_temp = df_domain_transfer_learning
    col_stratify = df_temp.iloc[:, 30:36][df_temp.iloc[:, 30:36] == 1].stack().reset_index().loc[:, 'level_1']
df_microbioma_transfer_learning_train, df_microbioma_transfer_learning_test, df_domain_transfer_learning_train, df_domain_transfer_learning_test = \
train_test_split(df_microbioma_transfer_learning, df_domain_transfer_learning, test_size=0.3, random_state=random_state, stratify = col_stratify)
return df_microbioma_train, df_microbioma_test, df_microbioma_transfer_learning_train, df_microbioma_transfer_learning_test, df_domain_train, df_domain_test, df_domain_transfer_learning_train, df_domain_transfer_learning_test, otu.columns, domain.columns
def read_df_with_transfer_learning_2otufiles_fewerDomainFeatures(
metadata_names=['age','Temperature','Precipitation3Days'],
random_state=42,
otu_filename='../Datasets/otu_table_all_80.csv',
metadata_filename='../Datasets/metadata_table_all_80.csv',
otu_transfer_filename='../Datasets/Walters5yearsLater/otu_table_Walters5yearsLater.csv',
metadata_transfer_filename='../Datasets/Walters5yearsLater/metadata_table_Walters5yearsLater.csv'):
otu = pd.read_csv(otu_filename, index_col=0, header=None, sep='\t').T
otu = otu.set_index('otuids')
otu = otu.astype('int32')
metadata = pd.read_csv(metadata_filename, sep='\t')
metadata = metadata.set_index('X.SampleID')
domain = metadata[metadata_names]
if 'INBREDS' in metadata_names:
domain = pd.concat([domain, pd.get_dummies(domain['INBREDS'], prefix='INBREDS')], axis=1)
domain = domain.drop(['INBREDS'], axis=1)
elif 'Maize_Line' in metadata_names:
domain = pd.concat([domain, pd.get_dummies(domain['Maize_Line'], prefix='Maize_Line')], axis=1)
domain = domain.drop(['Maize_Line'], axis=1)
df = pd.concat([otu, domain], axis=1, sort=True, join='outer')
df_microbioma = df[otu.columns]
df_domain = df[domain.columns]
df_microbioma_train, df_microbioma_no_train, df_domain_train, df_domain_no_train = \
train_test_split(df_microbioma, df_domain, test_size=0.1, random_state=random_state)
df_microbioma_test, _, df_domain_test, _ = \
train_test_split(df_microbioma_no_train, df_domain_no_train, test_size=100, random_state=random_state)
otu_columns = otu.columns
domain_columns = domain.columns
# TRANSFER LEARNING SUBSETS
otu = pd.read_csv(otu_transfer_filename, index_col=0, header=None, sep='\t').T
otu = otu.set_index('otuids')
otu = otu.astype('int32')
metadata = pd.read_csv(metadata_transfer_filename, sep='\t')
metadata = metadata.set_index('X.SampleID')
domain = metadata[metadata_names]
if 'INBREDS' in metadata_names:
domain = pd.concat([domain, pd.get_dummies(domain['INBREDS'], prefix='INBREDS')], axis=1)
domain = domain.drop(['INBREDS'], axis=1)
elif 'Maize_Line' in metadata_names:
domain = pd.concat([domain, pd.get_dummies(domain['Maize_Line'], prefix='Maize_Line')], axis=1)
domain = domain.drop(['Maize_Line'], axis=1)
df = pd.concat([otu, domain], axis=1, sort=True, join='outer')
df_microbioma = df[otu.columns]
df_domain = df[domain.columns]
df_microbioma_transfer_learning_train, df_microbioma_transfer_learning_test, df_domain_transfer_learning_train, df_domain_transfer_learning_test = \
train_test_split(df_microbioma, df_domain, test_size=0.3, random_state=random_state)
return df_microbioma_train, df_microbioma_test, df_microbioma_transfer_learning_train, df_microbioma_transfer_learning_test, df_domain_train, df_domain_test, df_domain_transfer_learning_train, df_domain_transfer_learning_test, otu_columns, domain_columns
def read_df_with_transfer_learning_2otufiles_differentDomainFeatures(
metadata_names=['age','Temperature','Precipitation3Days'],
random_state=42,
otu_filename='../Datasets/otu_table_all_80.csv',
metadata_filename='../Datasets/metadata_table_all_80.csv',
metadata_names_transfer=['pH', 'Nmin', 'N', 'C', 'C.N', 'Corg', 'soil_type', 'clay_fration', 'water_holding_capacity'],
otu_transfer_filename='../Datasets/Maarastawi2018/otu_table_Order_Maarastawi2018.csv',
metadata_transfer_filename='../Datasets/Maarastawi2018/metadata_table_Maarastawi2018.csv'):
otu = pd.read_csv(otu_filename, index_col=0, header=None, sep='\t').T
otu = otu.set_index('otuids')
otu = otu.astype('int32')
metadata = pd.read_csv(metadata_filename, sep='\t')
metadata = metadata.set_index('X.SampleID')
domain = metadata[metadata_names]
if 'INBREDS' in metadata_names:
domain = pd.concat([domain, pd.get_dummies(domain['INBREDS'], prefix='INBREDS')], axis=1)
domain = domain.drop(['INBREDS'], axis=1)
elif 'Maize_Line' in metadata_names:
domain = pd.concat([domain, pd.get_dummies(domain['Maize_Line'], prefix='Maize_Line')], axis=1)
domain = domain.drop(['Maize_Line'], axis=1)
df = pd.concat([otu, domain], axis=1, sort=True, join='outer')
df_microbioma = df[otu.columns]
df_domain = df[domain.columns]
df_microbioma_train, df_microbioma_no_train, df_domain_train, df_domain_no_train = \
train_test_split(df_microbioma, df_domain, test_size=0.1, random_state=random_state)
df_microbioma_test, _, df_domain_test, _ = \
train_test_split(df_microbioma_no_train, df_domain_no_train, test_size=100, random_state=random_state)
otu_columns = otu.columns
domain_columns = domain.columns
# TRANSFER LEARNING SUBSETS
otu = pd.read_csv(otu_transfer_filename, index_col=0, header=None, sep='\t').T
#otu = otu.set_index('otuids')
otu = otu.reset_index()
otu = otu.drop(['otuids','index'],axis=1)
otu = otu.astype('int32')
metadata = pd.read_csv(metadata_transfer_filename, sep='\t')
metadata = metadata.set_index('X.SampleID')
domain = metadata[metadata_names_transfer]
if 'soil_type' in metadata_names_transfer:
domain = pd.concat([domain, pd.get_dummies(domain['soil_type'], prefix='soil_type')], axis=1)
domain = domain.drop(['soil_type'], axis=1)
domain = domain.reset_index()
domain = domain.drop(['X.SampleID'], axis=1)
df = pd.concat([otu, domain], axis=1, sort=True, join='outer')
    df = df.dropna(subset=[col for col in metadata_names_transfer if col in df.columns])
df_microbioma = df[otu.columns]
df_domain = df[domain.columns]
df_microbioma_transfer_learning_train, df_microbioma_transfer_learning_test, df_domain_transfer_learning_train, df_domain_transfer_learning_test = \
train_test_split(df_microbioma, df_domain, test_size=0.3, random_state=random_state)
return df_microbioma_train, df_microbioma_test, df_microbioma_transfer_learning_train, df_microbioma_transfer_learning_test, df_domain_train, df_domain_test, df_domain_transfer_learning_train, df_domain_transfer_learning_test, otu_columns, domain_columns
def read_df_with_transfer_learning_subset_3domainFeatures(random_state=42,
otu_filename='../Datasets/otu_table_all_80.csv',
metadata_filename='../Datasets/metadata_table_all_80.csv'):
otu = pd.read_csv(otu_filename, index_col=0, header=None, sep='\t').T
otu = otu.set_index('otuids')
otu = otu.astype('int32')
metadata = | pd.read_csv(metadata_filename, sep='\t') | pandas.read_csv |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/10_shared.ipynb (unless otherwise specified).
__all__ = ['load_standard_holidays', 'cov2corr', 'load_sales_example', 'load_sales_example2',
'load_dcmm_latent_factor_example', 'load_dbcm_latent_factor_example', 'load_dlmm_example',
'load_us_inflation', 'load_us_inflation_forecasts']
# Internal Cell
#exporti
import numpy as np
import pandas as pd
import scipy as sc
import pickle
from scipy.special import digamma
from pandas.tseries.holiday import AbstractHolidayCalendar, USMartinLutherKingJr, USMemorialDay, Holiday, USLaborDay, \
USThanksgivingDay
import os
import pickle
import zlib
# Internal Cell
def load_interpolators():
pkg_data_dir = os.path.dirname(os.path.abspath(__file__)) + '/pkg_data'
#pkg_data_dir = os.getcwd().split('pybats')[0] + 'pybats/pybats/pkg_data'
#pkg_data_dir = globals()['_dh'][0] + '/pkg_data'
try:
with open(pkg_data_dir + '/interp_beta.pickle.gzip', 'rb') as fl:
interp_beta = pickle.loads(zlib.decompress(fl.read()))
with open(pkg_data_dir + '/interp_gamma.pickle.gzip', 'rb') as fl:
interp_gamma = pickle.loads(zlib.decompress(fl.read()))
except:
print('WARNING: Unable to load interpolator. Code will run slower.')
interp_beta, interp_gamma = None, None
return interp_beta, interp_gamma
# Internal Cell
# I need this helper in a module file for pickle reasons ...
def transformer(ft, qt, fn1, fn2):
return np.exp(np.ravel(fn1(ft, np.sqrt(qt), grid=False))), \
np.exp(np.ravel(fn2(ft, np.sqrt(qt), grid=False)))
# Internal Cell
def gamma_transformer(ft, qt, fn):
alpha = np.ravel(np.exp(fn(np.sqrt(qt))))
beta = np.exp(digamma(alpha) - ft)
return alpha, beta
# Internal Cell
def trigamma(x):
return sc.special.polygamma(x=x, n=1)
# Internal Cell
def save(obj, filename):
with open(filename, "wb") as file:
pickle.dump(obj, file=file)
# Internal Cell
def load(filename):
with open(filename, "rb") as file:
tmp = pickle.load(file)
return tmp
# Internal Cell
def define_holiday_regressors(X, dates, holidays=None):
"""
Add columns to the predictor matrix X for a specified list of holidays
:param X: (Array) Predictor matrix without columns for the holidays
    :param dates: (DatetimeIndex) Dates to check against each holiday calendar
:param holidays: (List) holidays
:return: Updated predictor matrix
"""
if holidays is not None:
if len(holidays) > 0:
if X is None:
n = len(dates)
else:
n = X.shape[0]
for holiday in holidays:
cal = AbstractHolidayCalendar()
cal.rules = [holiday]
x = np.zeros(n)
x[dates.isin(cal.holidays())] = 1
if X is None:
X = x
else:
X = np.c_[X, x]
return X
else:
return X
else:
return X
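# Sketch of how this is typically combined with load_standard_holidays defined below
# (the date range is an arbitrary illustrative choice):
#
#   dates = pd.date_range('2020-12-20', '2021-01-05', freq='D')
#   X = define_holiday_regressors(None, dates, holidays=load_standard_holidays())
#   # X gains one 0/1 indicator per holiday rule that falls inside `dates`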
# Cell
def load_standard_holidays():
"""
Load in a list of standard holidays
"""
holidays = [USMartinLutherKingJr,
USMemorialDay,
Holiday('July4', month=7, day=4),
USLaborDay,
# Holiday('Thanksgiving_1DB', month=11, day=1, offset=pd.DateOffset(weekday=WE(4))),
USThanksgivingDay,
# Holiday('Christmas_1DB', month=12, day=24),
Holiday('Christmas', month=12, day=25),
Holiday('New_Years_Eve', month=12, day=31),
]
return holidays
# Cell
def cov2corr(cov):
"""
Transform a covariance matrix into a correlation matrix. Useful for understanding coefficient correlations
"""
D = np.sqrt(cov.diagonal()).reshape(-1, 1)
return cov / D / D.T
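# Quick illustration on a small synthetic covariance matrix (values are made up):
#
#   cov = np.array([[4.0, 1.2], [1.2, 9.0]])
#   cov2corr(cov)
#   # -> array([[1. , 0.2], [0.2, 1. ]])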
# Cell
def load_sales_example():
"""
Read data for the first sales forecasting example
"""
data_dir = os.path.dirname(os.path.abspath(__file__)) + '/pkg_data/'
return pd.read_csv(data_dir + 'sales.csv', index_col=0)[['Sales', 'Advertising']]
# Cell
def load_sales_example2():
"""
Read data for the second sales forecasting example
"""
data_dir = os.path.dirname(os.path.abspath(__file__)) + '/pkg_data/'
data = pd.read_pickle(data_dir + 'sim_sales_data')
data = data.set_index('Date')
return data
# Cell
def load_dcmm_latent_factor_example():
"""
Read data for the DCMM latent factor example
"""
data_dir = os.path.dirname(os.path.abspath(__file__)) + '/pkg_data/'
data = load(data_dir + 'dcmm_latent_factor_data')
return data
# Cell
def load_dbcm_latent_factor_example():
"""
Read data for the DBCM latent factor example
"""
data_dir = os.path.dirname(os.path.abspath(__file__)) + '/pkg_data/'
data = load(data_dir + 'dbcm_latent_factor_data')
return data
# Cell
def load_dlmm_example():
"""
    Read data for the DLMM example
"""
data_dir = os.path.dirname(os.path.abspath(__file__)) + '/pkg_data/'
data = pd.read_csv(data_dir + 'dlmm_example_data.csv')
data.DATE = pd.to_datetime(data.DATE)
data = data.set_index('DATE')
return data
# Cell
def load_us_inflation():
"""
Read in quarterly US inflation data
"""
data_dir = os.path.dirname(os.path.abspath(__file__)) + '/pkg_data/'
data = pd.read_csv(data_dir + 'us_inflation.csv')
return data
# Cell
def load_us_inflation_forecasts():
"""
Read in quarterly US inflation data along with forecasts from 4 models
"""
data_dir = os.path.dirname(os.path.abspath(__file__)) + '/pkg_data/'
data = pd.read_csv(data_dir + 'bps_inflation.csv')
dates = data.values[:,0]
agent_mean = pd.read_csv(data_dir + 'bps_agent_mean.csv')
agent_mean.columns = ['Dates', '1', '2', '3', '4']
agent_mean.set_index('Dates', inplace=True)
agent_var = pd.read_csv(data_dir + 'bps_agent_var.csv').values
agent_dof = pd.read_csv(data_dir + 'bps_agent_dof.csv').values
agent_var[:,1:] = agent_var[:,1:] * agent_dof[:,1:] / (agent_dof[:,1:]-2) # Adjust the agent variance for d.o.f. b/c they're t-distributed
agent_var = | pd.DataFrame(agent_var) | pandas.DataFrame |
# SPDX-License-Identifier: Apache-2.0
# Licensed to the Ed-Fi Alliance under one or more agreements.
# The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0.
# See the LICENSE and NOTICES files in the project root for more information.
from datetime import datetime
import pytest
from pandas import read_sql_query, DataFrame
from edfi_google_classroom_extractor.api.teachers import _sync_without_cleanup
from edfi_lms_extractor_lib.api.resource_sync import (
SYNC_COLUMNS_SQL,
SYNC_COLUMNS,
add_hash_and_json_to,
add_sourceid_to,
)
from tests.api.api_helper import prep_expected_sync_df, prep_from_sync_db_df
IDENTITY_COLUMNS = ["courseId", "userId"]
COLUMNS = [
"courseId",
"userId",
"profile.id",
"profile.name.givenName",
"profile.name.familyName",
"profile.name.fullName",
"profile.emailAddress",
"profile.permissions",
"profile.photoUrl",
"profile.verifiedTeacher",
]
CHANGED_TEACHER_BEFORE = [
"1",
"11",
"111",
"givenName1",
"familyName1",
"fullName1",
"<EMAIL>",
"1111",
"http://111",
"False",
]
CHANGED_TEACHER_AFTER = [
"1",
"11",
"111",
"*CHANGED*",
"familyName1",
"fullName1",
"<EMAIL>",
"1111",
"http://111",
"False",
]
UNCHANGED_TEACHER = [
"2",
"22",
"222",
"givenName2",
"familyName2",
"fullName2",
"<EMAIL>",
"2222",
"http://222",
"False",
]
OMITTED_FROM_SYNC_TEACHER = [
"3",
"33",
"333",
"givenName3",
"familyName3",
"fullName3",
"<EMAIL>",
"3333",
"http://333",
"False",
]
NEW_TEACHER = [
"4",
"44",
"444",
"givenName4",
"familyName4",
"fullName4",
"<EMAIL>",
"4444",
"http://444",
"False",
]
SYNC_DATA = [CHANGED_TEACHER_AFTER, UNCHANGED_TEACHER, NEW_TEACHER]
def describe_when_testing_sync_with_new_and_missing_and_updated_rows():
@pytest.fixture
def test_db_after_sync(test_db_fixture):
# arrange
INITIAL_TEACHER_DATA = [
CHANGED_TEACHER_BEFORE,
UNCHANGED_TEACHER,
OMITTED_FROM_SYNC_TEACHER,
]
teachers_initial_df = DataFrame(INITIAL_TEACHER_DATA, columns=COLUMNS)
teachers_initial_df = add_hash_and_json_to(teachers_initial_df)
add_sourceid_to(teachers_initial_df, IDENTITY_COLUMNS)
dateToUse = datetime(2020, 9, 14, 12, 0, 0)
teachers_initial_df["SyncNeeded"] = 0
teachers_initial_df["CreateDate"] = dateToUse
teachers_initial_df["LastModifiedDate"] = dateToUse
teachers_initial_df = teachers_initial_df[SYNC_COLUMNS]
teachers_sync_df = DataFrame(SYNC_DATA, columns=COLUMNS)
with test_db_fixture.connect() as con:
con.execute("DROP TABLE IF EXISTS Teachers")
con.execute(
f"""
CREATE TABLE IF NOT EXISTS Teachers (
{SYNC_COLUMNS_SQL}
)
"""
)
teachers_initial_df.to_sql(
"Teachers", test_db_fixture, if_exists="append", index=False, chunksize=1000
)
# act
_sync_without_cleanup(teachers_sync_df, test_db_fixture)
return test_db_fixture
def it_should_have_teachers_table_with_updated_row_and_added_new_row(
test_db_after_sync,
):
EXPECTED_TEACHER_DATA_AFTER_SYNC = [
UNCHANGED_TEACHER,
OMITTED_FROM_SYNC_TEACHER,
CHANGED_TEACHER_AFTER,
NEW_TEACHER,
]
with test_db_after_sync.connect() as con:
expected_teachers_df = prep_expected_sync_df(
DataFrame(EXPECTED_TEACHER_DATA_AFTER_SYNC, columns=COLUMNS).astype(
"string"
),
IDENTITY_COLUMNS,
)
teachers_from_db_df = prep_from_sync_db_df(
read_sql_query("SELECT * from Teachers", con).astype("string"),
IDENTITY_COLUMNS,
)
assert expected_teachers_df.to_csv() == teachers_from_db_df.to_csv()
def it_should_have_temporary_sync_table_unchanged(test_db_after_sync):
EXPECTED_SYNC_DATA_AFTER_SYNC = SYNC_DATA
with test_db_after_sync.connect() as con:
expected_sync_teachers_df = prep_expected_sync_df(
| DataFrame(EXPECTED_SYNC_DATA_AFTER_SYNC, columns=COLUMNS) | pandas.DataFrame |
#!/usr/bin/env python3
import json
import multiprocessing as mp
import os.path
import re
import sys
from datetime import datetime
from itertools import islice
from multiprocessing.dummy.connection import Connection
from typing import Dict, List, Optional, Generator, Tuple, Callable
import numpy as np
import pandas as pd
import common
from common import logger
def load_json_file(path: str):
"""
Loads and parses the content of a json file.
:param path:
:return:
"""
if not os.path.isfile(path):
return None
with open(path, 'r') as file:
try:
return json.load(file)
except json.JSONDecodeError as err:
if err.msg != "Extra data":
logger.exception("Failed to load json file '%s'" % path)
return None
# Read only first object from file, ignore extra data
file.seek(0)
json_str = file.read(err.pos)
try:
return json.loads(json_str)
except json.JSONDecodeError:
logger.exception("Failed to read json file '%s'" % path)
return None
def bps_factor(prefix: str):
factor = {'K': 10 ** 3, 'M': 10 ** 6, 'G': 10 ** 9, 'T': 10 ** 12, 'P': 10 ** 15, 'E': 10 ** 18, 'Z': 10 ** 21,
'Y': 10 ** 24}
prefix = prefix.upper()
return factor[prefix] if prefix in factor else 1
def extend_df(df: pd.DataFrame, by: pd.DataFrame, **kwargs) -> pd.DataFrame:
"""
Extends the dataframe containing the data of a single file (by) with the information given in the kwargs so that it
can be appended to the main dataframe (df)
:param df: The main dataframe
:param by: The dataframe to extend by
:param kwargs: Values to use for new columns in by
:return: The extended df
"""
aliases = {
'sat': ['delay', 'orbit'],
'queue': ['queue_overhead_factor'],
}
missing_cols = set(df.columns).difference(set(by.columns))
for col_name in missing_cols:
col_value = np.nan
if col_name in kwargs:
col_value = kwargs[col_name]
elif col_name in aliases:
for alias_col in aliases[col_name]:
if alias_col in kwargs:
col_value = kwargs[alias_col]
break
by[col_name] = col_value
return df.append(by, ignore_index=True)
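# Illustrative call (df and df_run are placeholders; it shows how missing columns of `by`
# are filled from kwargs, directly or via the alias table above):
#
#   df = extend_df(df, df_run, protocol='quic', pep=False, delay=250, rate=20)
#   # 'sat' is filled from the 'delay' alias, 'rate' directly from the keyword argument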
def fix_dtypes(df: pd.DataFrame) -> pd.DataFrame:
"""
Fix the data types of the columns in a data frame.
:param df: The dataframe to fix
:return:
"""
# Cleanup values
if 'rate' in df:
df['rate'] = df['rate'].apply(
lambda x: np.nan if str(x) == 'nan' else ''.join(c for c in str(x) if c.isdigit() or c == '.'))
if 'loss' in df:
df['loss'] = df['loss'].apply(
lambda x: np.nan if str(x) == 'nan' else float(''.join(c for c in str(x) if c.isdigit() or c == '.')) / 100)
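        # e.g. a raw loss value of "1.5%" is reduced to "1.5" and scaled to 0.015 (illustrative)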
defaults = {
np.int32: -1,
np.str: "",
np.bool: False,
}
dtypes = {
'protocol': np.str,
'pep': np.bool,
'sat': np.str,
'rate': np.int32,
'loss': float,
'queue': np.int32,
'run': np.int32,
'second': np.float32,
'bps': np.float64,
'bytes': np.int32,
'packets_received': np.int32,
'cwnd': np.int32,
'packets_sent': np.int32,
'packets_lost': np.int32,
'con_est': np.float64,
'ttfb': np.float64,
'omitted': np.bool,
'rtt': np.int32,
'seq': np.int32,
'ttl': np.int32,
'rtt_min': np.float32,
'rtt_avg': np.float32,
'rtt_max': np.float32,
'rtt_mdev': np.float32,
'name': np.str,
'cpu_load': np.float32,
'ram_usage': np.float32,
'attenuation': np.int32,
'tbs': np.str,
'qbs': np.str,
'ubs': np.str,
'prime': np.float32,
}
# Set defaults
df = df.fillna({col: defaults.get(dtypes[col], np.nan) for col in dtypes.keys()})
cols = set(df.columns).intersection(dtypes.keys())
return df.astype({col_name: dtypes[col_name] for col_name in cols})
def __mp_function_wrapper(parse_func: Callable[..., any], conn: Connection, *args, **kwargs) -> None:
result = parse_func(*args, **kwargs)
conn.send(result)
conn.close()
def __parse_slice(parse_func: Callable[..., pd.DataFrame], in_dir: str, scenarios: List[Tuple[str, Dict]],
df_cols: List[str], protocol: str, entity: str) -> pd.DataFrame:
"""
Parse a slice of the protocol entity results using the given function.
:param parse_func: The function to parse a single scenario.
:param in_dir: The directory containing the measurement results.
:param scenarios: The scenarios to parse within the in_dir.
:param df_cols: The column names for columns in the resulting dataframe.
:param protocol: The name of the protocol that is being parsed.
    :param entity: The name of the entity that is being parsed.
:return: A dataframe containing the combined results of the specified scenarios.
"""
df_slice = pd.DataFrame(columns=df_cols)
for folder, config in scenarios:
for pep in (False, True):
df = parse_func(in_dir, folder, pep=pep)
if df is not None:
df_slice = extend_df(df_slice, df, protocol=protocol, pep=pep, **config)
else:
logger.warning("No data %s%s %s data in %s", protocol, " (pep)" if pep else "", entity, folder)
return df_slice
def __mp_parse_slices(num_procs: int, parse_func: Callable[..., pd.DataFrame], in_dir: str,
scenarios: Dict[str, Dict], df_cols: List[str], protocol: str, entity: str) -> pd.DataFrame:
"""
Parse all protocol entity results using the given function in multiple processes.
:param num_procs: The number of processes to spawn.
:param parse_func: The function to parse a single scenario.
:param in_dir: The directory containing the measurement results.
:param scenarios: The scenarios to parse within the in_dir.
:param df_cols: The column names for columns in the resulting dataframe.
:param protocol: The name of the protocol that is being parsed.
    :param entity: The name of the entity that is being parsed.
    :return: A dataframe containing the combined results of all specified scenarios.
"""
tasks = [
(
"%s_%s_%d" % (protocol, entity, i),
list(islice(scenarios.items(), i, sys.maxsize, num_procs)),
mp.Pipe()
)
for i in range(num_procs)
]
processes = [
mp.Process(target=__mp_function_wrapper, name=name,
args=(__parse_slice, child_con, parse_func, in_dir, s_slice, df_cols, protocol, entity))
for name, s_slice, (_, child_con) in tasks
]
# Start processes
for p in processes:
p.start()
# Collect results
slice_dfs = [
parent_con.recv()
for _, _, (parent_con, _) in tasks
]
# Wait for processes to finish
for p in processes:
p.join()
return pd.concat(slice_dfs, axis=0, ignore_index=True)
def parse_quic_client(in_dir: str, out_dir: str, scenarios: Dict[str, Dict], config_cols: List[str],
multi_process: bool = False) -> pd.DataFrame:
"""
Parse all quic client results.
:param in_dir: The directory containing the measurement results.
:param out_dir: The directory to save the parsed results to.
:param scenarios: The scenarios to parse within the in_dir.
:param config_cols: The column names for columns taken from the scenario configuration.
:param multi_process: Whether to allow multiprocessing.
:return: A dataframe containing the combined results from all scenarios.
"""
logger.info("Parsing quic client results")
df_cols = [*config_cols, 'run', 'second', 'bps', 'bytes', 'packets_received']
if multi_process:
df_quic_client = __mp_parse_slices(2, __parse_quic_client_from_scenario, in_dir, scenarios,
df_cols, 'quic', 'client')
else:
df_quic_client = __parse_slice(__parse_quic_client_from_scenario, in_dir, [*scenarios.items()],
df_cols, 'quic', 'client')
logger.debug("Fixing quic client data types")
df_quic_client = fix_dtypes(df_quic_client)
logger.info("Saving quic client data")
df_quic_client.to_pickle(os.path.join(out_dir, 'quic_client.pkl'))
with open(os.path.join(out_dir, 'quic_client.csv'), 'w+') as out_file:
df_quic_client.to_csv(out_file)
return df_quic_client
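# Hypothetical invocation (directory layout and scenario configuration are assumptions
# about the surrounding measurement pipeline, not defined in this module):
#
#   df = parse_quic_client(in_dir='results/', out_dir='parsed/',
#                          scenarios={'sat_250ms': {'delay': 250, 'rate': 20}},
#                          config_cols=['protocol', 'pep', 'sat', 'rate', 'loss', 'queue'],
#                          multi_process=True)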
def __parse_quic_client_from_scenario(in_dir: str, scenario_name: str, pep: bool = False) -> pd.DataFrame:
"""
Parse the quic client results in the given scenario.
:param in_dir: The directory containing all measurement results
:param scenario_name: The name of the scenario to parse
:param pep: Whether to parse QUIC or QUIC (PEP) files
:return: A dataframe containing the parsed results of the specified scenario.
"""
logger.debug("Parsing quic%s client files in %s", " (pep)" if pep else "", scenario_name)
df = pd.DataFrame(columns=['run', 'second', 'bps', 'bytes', 'packets_received'])
for file_name in os.listdir(os.path.join(in_dir, scenario_name)):
file_path = os.path.join(in_dir, scenario_name, file_name)
if not os.path.isfile(file_path):
continue
match = re.search(r"^quic%s_(\d+)_client\.txt$" % ("_pep" if pep else "",), file_name)
if not match:
continue
logger.debug("%s: Parsing '%s'", scenario_name, file_name)
run = int(match.group(1))
with open(file_path) as file:
for line in file:
line_match = re.search(
r"^second (\d+(?:\.\d+)?): (\d+(?:\.\d+)?) ([a-zA-Z]?)bit/s, bytes received: (\d+), packets received: (\d+)$",
line.strip()
)
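                # matches lines such as "second 1.0: 2.50 Mbit/s, bytes received: 312500, packets received: 250" (illustrative)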
if not line_match:
continue
df = df.append({
'run': run,
'second': float(line_match.group(1)),
'bps': float(line_match.group(2)) * bps_factor(line_match.group(3)),
'bytes': int(line_match.group(4)),
'packets_received': int(line_match.group(5))
}, ignore_index=True)
with_na = len(df.index)
df.dropna(subset=['bps', 'bytes', 'packets_received'], inplace=True)
without_na = len(df.index)
if with_na != without_na:
logger.warning("%s: Dropped %d lines with NaN values", scenario_name, with_na - without_na)
if df.empty:
logger.warning("%s: No quic%s client data found", scenario_name, " (pep)" if pep else "")
return df
def parse_quic_server(in_dir: str, out_dir: str, scenarios: Dict[str, Dict], config_cols: List[str],
multi_process: bool = False) -> pd.DataFrame:
"""
Parse all quic server results.
:param in_dir: The directory containing the measurement results.
:param out_dir: The directory to save the parsed results to.
:param scenarios: The scenarios to parse within the in_dir.
:param config_cols: The column names for columns taken from the scenario configuration.
:param multi_process: Whether to allow multiprocessing.
:return: A dataframe containing the combined results from all scenarios.
"""
logger.info("Parsing quic server results")
df_cols = [*config_cols, 'run', 'second', 'cwnd', 'packets_sent', 'packets_lost']
if multi_process:
df_quic_server = __mp_parse_slices(2, __parse_quic_server_from_scenario, in_dir, scenarios,
df_cols, 'quic', 'server')
else:
df_quic_server = __parse_slice(__parse_quic_server_from_scenario, in_dir, [*scenarios.items()],
df_cols, 'quic', 'server')
logger.debug("Fixing quic server data types")
df_quic_server = fix_dtypes(df_quic_server)
logger.info("Saving quic server data")
df_quic_server.to_pickle(os.path.join(out_dir, 'quic_server.pkl'))
with open(os.path.join(out_dir, 'quic_server.csv'), 'w+') as out_file:
df_quic_server.to_csv(out_file)
return df_quic_server
def __parse_quic_server_from_scenario(in_dir: str, scenario_name: str, pep: bool = False) -> pd.DataFrame:
"""
Parse the quic server results in the given scenario.
:param in_dir: The directory containing all measurement results
:param scenario_name: The name of the scenario to parse
:param pep: Whether to parse QUIC or QUIC (PEP) files
:return: A dataframe containing the parsed results of the specified scenario.
"""
logger.debug("Parsing quic%s server files in %s", " (pep)" if pep else "", scenario_name)
df = | pd.DataFrame(columns=['run', 'second', 'cwnd', 'packets_sent', 'packets_lost']) | pandas.DataFrame |
from datetime import date, datetime, timedelta
from dateutil import tz
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, Series, Timestamp, date_range
import pandas._testing as tm
class TestDatetimeIndex:
def test_setitem_with_datetime_tz(self):
# 16889
# support .loc with alignment and tz-aware DatetimeIndex
mask = np.array([True, False, True, False])
idx = date_range("20010101", periods=4, tz="UTC")
df = DataFrame({"a": np.arange(4)}, index=idx).astype("float64")
result = df.copy()
result.loc[mask, :] = df.loc[mask, :]
tm.assert_frame_equal(result, df)
result = df.copy()
result.loc[mask] = df.loc[mask]
tm.assert_frame_equal(result, df)
idx = date_range("20010101", periods=4)
df = DataFrame({"a": np.arange(4)}, index=idx).astype("float64")
result = df.copy()
result.loc[mask, :] = df.loc[mask, :]
tm.assert_frame_equal(result, df)
result = df.copy()
result.loc[mask] = df.loc[mask]
tm.assert_frame_equal(result, df)
def test_indexing_with_datetime_tz(self):
# GH#8260
# support datetime64 with tz
idx = Index(date_range("20130101", periods=3, tz="US/Eastern"), name="foo")
dr = date_range("20130110", periods=3)
df = DataFrame({"A": idx, "B": dr})
df["C"] = idx
df.iloc[1, 1] = pd.NaT
df.iloc[1, 2] = pd.NaT
# indexing
result = df.iloc[1]
expected = Series(
[Timestamp("2013-01-02 00:00:00-0500", tz="US/Eastern"), pd.NaT, pd.NaT],
index=list("ABC"),
dtype="object",
name=1,
)
tm.assert_series_equal(result, expected)
result = df.loc[1]
expected = Series(
[Timestamp("2013-01-02 00:00:00-0500", tz="US/Eastern"), pd.NaT, pd.NaT],
index=list("ABC"),
dtype="object",
name=1,
)
tm.assert_series_equal(result, expected)
# indexing - fast_xs
df = DataFrame({"a": date_range("2014-01-01", periods=10, tz="UTC")})
result = df.iloc[5]
expected = Series(
[Timestamp("2014-01-06 00:00:00+0000", tz="UTC")], index=["a"], name=5
)
tm.assert_series_equal(result, expected)
result = df.loc[5]
tm.assert_series_equal(result, expected)
# indexing - boolean
result = df[df.a > df.a[3]]
expected = df.iloc[4:]
tm.assert_frame_equal(result, expected)
# indexing - setting an element
df = DataFrame(
data=pd.to_datetime(["2015-03-30 20:12:32", "2015-03-12 00:11:11"]),
columns=["time"],
)
df["new_col"] = ["new", "old"]
df.time = df.set_index("time").index.tz_localize("UTC")
v = df[df.new_col == "new"].set_index("time").index.tz_convert("US/Pacific")
# trying to set a single element on a part of a different timezone
# this converts to object
df2 = df.copy()
df2.loc[df2.new_col == "new", "time"] = v
expected = Series([v[0], df.loc[1, "time"]], name="time")
tm.assert_series_equal(df2.time, expected)
v = df.loc[df.new_col == "new", "time"] + pd.Timedelta("1s")
df.loc[df.new_col == "new", "time"] = v
tm.assert_series_equal(df.loc[df.new_col == "new", "time"], v)
def test_consistency_with_tz_aware_scalar(self):
        # xref gh-12938
# various ways of indexing the same tz-aware scalar
df = Series([Timestamp("2016-03-30 14:35:25", tz="Europe/Brussels")]).to_frame()
df = pd.concat([df, df]).reset_index(drop=True)
expected = Timestamp("2016-03-30 14:35:25+0200", tz="Europe/Brussels")
result = df[0][0]
assert result == expected
result = df.iloc[0, 0]
assert result == expected
result = df.loc[0, 0]
assert result == expected
result = df.iat[0, 0]
assert result == expected
result = df.at[0, 0]
assert result == expected
result = df[0].loc[0]
assert result == expected
result = df[0].at[0]
assert result == expected
def test_indexing_with_datetimeindex_tz(self):
# GH 12050
# indexing on a series with a datetimeindex with tz
index = date_range("2015-01-01", periods=2, tz="utc")
ser = Series(range(2), index=index, dtype="int64")
# list-like indexing
for sel in (index, list(index)):
# getitem
tm.assert_series_equal(ser[sel], ser)
# setitem
result = ser.copy()
result[sel] = 1
expected = Series(1, index=index)
tm.assert_series_equal(result, expected)
# .loc getitem
tm.assert_series_equal(ser.loc[sel], ser)
# .loc setitem
result = ser.copy()
result.loc[sel] = 1
expected = Series(1, index=index)
tm.assert_series_equal(result, expected)
# single element indexing
# getitem
assert ser[index[1]] == 1
# setitem
result = ser.copy()
result[index[1]] = 5
expected = Series([0, 5], index=index)
tm.assert_series_equal(result, expected)
# .loc getitem
assert ser.loc[index[1]] == 1
# .loc setitem
result = ser.copy()
result.loc[index[1]] = 5
expected = Series([0, 5], index=index)
tm.assert_series_equal(result, expected)
def test_partial_setting_with_datetimelike_dtype(self):
# GH9478
# a datetimeindex alignment issue with partial setting
df = DataFrame(
np.arange(6.0).reshape(3, 2),
columns=list("AB"),
index=date_range("1/1/2000", periods=3, freq="1H"),
)
expected = df.copy()
expected["C"] = [expected.index[0]] + [pd.NaT, pd.NaT]
mask = df.A < 1
df.loc[mask, "C"] = df.loc[mask].index
tm.assert_frame_equal(df, expected)
def test_loc_setitem_datetime(self):
# GH 9516
dt1 = Timestamp("20130101 09:00:00")
dt2 = Timestamp("20130101 10:00:00")
for conv in [
lambda x: x,
lambda x: x.to_datetime64(),
lambda x: x.to_pydatetime(),
lambda x: np.datetime64(x),
]:
df = DataFrame()
df.loc[conv(dt1), "one"] = 100
df.loc[conv(dt2), "one"] = 200
expected = DataFrame({"one": [100.0, 200.0]}, index=[dt1, dt2])
tm.assert_frame_equal(df, expected)
def test_series_partial_set_datetime(self):
# GH 11497
idx = date_range("2011-01-01", "2011-01-02", freq="D", name="idx")
ser = Series([0.1, 0.2], index=idx, name="s")
result = ser.loc[[Timestamp("2011-01-01"), Timestamp("2011-01-02")]]
exp = Series([0.1, 0.2], index=idx, name="s")
tm.assert_series_equal(result, exp, check_index_type=True)
keys = [
Timestamp("2011-01-02"),
Timestamp("2011-01-02"),
Timestamp("2011-01-01"),
]
exp = Series(
[0.2, 0.2, 0.1], index=pd.DatetimeIndex(keys, name="idx"), name="s"
)
tm.assert_series_equal(ser.loc[keys], exp, check_index_type=True)
keys = [
Timestamp("2011-01-03"),
Timestamp("2011-01-02"),
Timestamp("2011-01-03"),
]
with pytest.raises(KeyError, match="with any missing labels"):
ser.loc[keys]
def test_series_partial_set_period(self):
# GH 11497
idx = pd.period_range("2011-01-01", "2011-01-02", freq="D", name="idx")
ser = Series([0.1, 0.2], index=idx, name="s")
result = ser.loc[
[pd.Period("2011-01-01", freq="D"), pd.Period("2011-01-02", freq="D")]
]
exp = Series([0.1, 0.2], index=idx, name="s")
tm.assert_series_equal(result, exp, check_index_type=True)
keys = [
pd.Period("2011-01-02", freq="D"),
pd.Period("2011-01-02", freq="D"),
pd.Period("2011-01-01", freq="D"),
]
exp = Series([0.2, 0.2, 0.1], index=pd.PeriodIndex(keys, name="idx"), name="s")
tm.assert_series_equal(ser.loc[keys], exp, check_index_type=True)
keys = [
pd.Period("2011-01-03", freq="D"),
| pd.Period("2011-01-02", freq="D") | pandas.Period |
import pandas as pd
import numpy as np
from ogusa.utils import save_return_table
from ogusa.constants import VAR_LABELS, PARAM_LABELS, DEFAULT_START_YEAR
def tax_rate_table(base_TxFuncEst, base_params, reform_TxFuncEst=None,
reform_params=None, rate_type='ETR',
start_year=DEFAULT_START_YEAR, num_years=10,
table_format='tex', path=None):
'''
Table of average tax rates over several years.
Args:
base_TxFuncEst(dictionary): Baseline tax function parameter
estimates
base_params (OG-USA Specifications class): baseline parameters
object
reform_TxFuncEst (dictionary): Reform tax function parameter
estimates
reform_params (OG-USA Specifications class): reform parameters
object
rate_type (string): Tax rate to include in table
start_year (integer): year to start table
num_years (integer): number of years to include in table
table_format (string): format to save/return table as
path (string): path to save table to
Returns:
table_str (string or DataFrame): table of tax rates
'''
assert isinstance(start_year, (int, np.integer))
assert isinstance(num_years, (int, np.integer))
# Make sure both runs cover same time period
if reform_TxFuncEst is not None:
assert (base_params.start_year == reform_params.start_year)
start_index = start_year - base_params.start_year
years = list(np.arange(start_year, start_year + num_years, 1))
if reform_TxFuncEst is None:
if rate_type == 'ETR':
rates = base_TxFuncEst['tfunc_avg_etr'] * 100
elif rate_type == 'MTRx':
rates = base_TxFuncEst['tfunc_avg_mtrx'] * 100
elif rate_type == 'MTRy':
rates = base_TxFuncEst['tfunc_avg_mtry'] * 100
elif rate_type == 'all':
etr_rates = base_TxFuncEst['tfunc_avg_etr'] * 100
mtrx_rates = base_TxFuncEst['tfunc_avg_mtrx'] * 100
mtry_rates = base_TxFuncEst['tfunc_avg_mtry'] * 100
else:
raise ValueError(
'Value {!r} is not a valid rate_type'.format(rate_type))
if rate_type == 'all':
# In case num_years is greater than number of years
# tax function estimates are for
len_rates = len(etr_rates[start_index: start_index +
num_years])
table = {'Year': years[:len_rates],
VAR_LABELS['ETR']:
etr_rates[start_index: start_index + num_years],
VAR_LABELS['MTRx']:
mtrx_rates[start_index: start_index + num_years],
VAR_LABELS['MTRy']:
mtry_rates[start_index: start_index + num_years]}
else:
len_rates = len(rates[start_index: start_index + num_years])
table = {'Year': years[:len_rates],
VAR_LABELS[rate_type]:
rates[start_index: start_index + num_years]}
else:
if rate_type == 'ETR':
base_rates = base_TxFuncEst['tfunc_avg_etr'] * 100
reform_rates = reform_TxFuncEst['tfunc_avg_etr'] * 100
elif rate_type == 'MTRx':
base_rates = base_TxFuncEst['tfunc_avg_mtrx'] * 100
reform_rates = reform_TxFuncEst['tfunc_avg_mtrx'] * 100
elif rate_type == 'MTRy':
base_rates = base_TxFuncEst['tfunc_avg_mtry'] * 100
reform_rates = reform_TxFuncEst['tfunc_avg_mtry'] * 100
elif rate_type == 'all':
base_etr_rates = base_TxFuncEst['tfunc_avg_etr'] * 100
base_mtrx_rates = base_TxFuncEst['tfunc_avg_mtrx'] * 100
base_mtry_rates = base_TxFuncEst['tfunc_avg_mtry'] * 100
reform_etr_rates = reform_TxFuncEst['tfunc_avg_etr'] * 100
reform_mtrx_rates = reform_TxFuncEst['tfunc_avg_mtrx'] * 100
reform_mtry_rates = reform_TxFuncEst['tfunc_avg_mtry'] * 100
else:
raise ValueError(
'Value {!r} is not a valid rate_type'.format(rate_type))
if rate_type == 'all':
len_rates = len(base_etr_rates[start_index: start_index +
num_years])
table = {
'Year': years[:len_rates],
'Baseline ' + VAR_LABELS['ETR']:
base_etr_rates[start_index: start_index + num_years],
'Reform ' + VAR_LABELS['ETR']:
reform_etr_rates[start_index: start_index + num_years],
'Differences in ' + VAR_LABELS['ETR']:
reform_etr_rates[start_index: start_index + num_years]
- base_etr_rates[start_index: start_index + num_years],
'Baseline ' + VAR_LABELS['MTRx']:
base_mtrx_rates[start_index: start_index + num_years],
'Reform ' + VAR_LABELS['MTRx']:
reform_mtrx_rates[start_index: start_index + num_years],
'Differences in ' + VAR_LABELS['MTRx']:
reform_mtrx_rates[start_index: start_index + num_years]
- base_mtrx_rates[start_index: start_index + num_years],
'Baseline ' + VAR_LABELS['MTRy']:
base_mtry_rates[start_index: start_index + num_years],
'Reform ' + VAR_LABELS['MTRy']:
reform_mtry_rates[start_index: start_index + num_years],
'Differences in ' + VAR_LABELS['MTRy']:
reform_mtry_rates[start_index: start_index + num_years]
- base_mtry_rates[start_index: start_index + num_years]}
else:
len_rates = len(base_rates[start_index: start_index +
num_years])
table = {
'Year': years[:len_rates],
'Baseline ' + VAR_LABELS[rate_type]:
base_rates[start_index: start_index + num_years],
'Reform ' + VAR_LABELS[rate_type]:
reform_rates[start_index: start_index + num_years],
'Difference':
reform_rates[start_index: start_index + num_years]
- base_rates[start_index: start_index + num_years]}
table_df = (pd.DataFrame.from_dict(table, orient='columns')).transpose()
table_df.columns = table_df.iloc[0].astype('int').astype('str')
table_df.drop('Year', inplace=True)
table_df.reset_index(inplace=True)
table_df.rename(columns={'index': 'Variable'}, inplace=True)
table_str = save_return_table(table_df, table_format, path,
precision=2)
return table_str
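# Hedged usage sketch (illustration only, not part of the original module):
# build a minimal fake estimate dictionary with the same keys the function
# reads above and a stub object exposing `start_year`, then request a
# baseline-only table. `_StubParams` and `fake_est` are hypothetical
# stand-ins; `save_return_table` is assumed to return the table as a string
# when `path` is None.
def _example_tax_rate_table():  # pragma: no cover
    class _StubParams:
        start_year = DEFAULT_START_YEAR

    fake_est = {
        'tfunc_avg_etr': np.linspace(0.20, 0.22, 10),
        'tfunc_avg_mtrx': np.linspace(0.25, 0.27, 10),
        'tfunc_avg_mtry': np.linspace(0.30, 0.32, 10),
    }
    return tax_rate_table(fake_est, _StubParams(), rate_type='all',
                          start_year=DEFAULT_START_YEAR, num_years=5,
                          table_format='tex')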
def param_table(p, table_format='tex', path=None):
'''
This function creates a table of model parameters for publication.
Args:
p (OG-USA Specifications class): baseline parameters
object
table_format (string): format to save/return table as
path (string): path to save table to
Returns:
table (string or DataFrame): table of model parameters
'''
table = {'Symbol': [], 'Description': [], 'Value': []}
for k, v in PARAM_LABELS.items():
table['Symbol'].append(v[1])
table['Description'].append(v[0])
value = getattr(p, k)
if hasattr(value, '__len__') and not isinstance(value, str):
if value.ndim > 1:
report = 'See elsewhere'
else:
report = (
'[' + '{0:1.3f}'.format(value[0]) + '...' +
'{0:1.3f}'.format(value[-1]) + ']')
else:
if isinstance(value, int) or isinstance(value, np.int64):
report = str(value)
elif isinstance(value, str):
report = value
else:
if value < 0.0001:
report = "{:.2E}".format(value)
else:
report = '{0:1.3f}'.format(value)
table['Value'].append(report)
table_df = pd.DataFrame.from_dict(table)
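# NOTE: the original snippet is cut off at this point. The two lines below
# are an assumed completion (mirroring tax_rate_table above): hand the
# assembled DataFrame to save_return_table and return whatever it gives back.
table = save_return_table(table_df, table_format, path)
return table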
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import pickle
import numpy as np
import pandas as pd
import json
import sqlalchemy as sql
from sqlalchemy import create_engine
from tqdm import tqdm
import requests
from bs4 import BeautifulSoup
from io import StringIO
from urllib.parse import quote
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import Pool
import time
import random
# In[ ]:
with open('../tools/credentials.json') as file:
credentials = json.load(file)
username = credentials["dblogin"]["username"]
password = credentials["dblogin"]["password"]
# In[ ]:
db_string = f"postgresql://{username}:{password}@localhost:5432/animeplanet"
db = create_engine(db_string)
# In[ ]:
def chunker(seq, size):
return (seq[pos:pos + size] for pos in range(0, len(seq), size))
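# For example: list(chunker(['a', 'b', 'c', 'd', 'e'], 2)) yields
# [['a', 'b'], ['c', 'd'], ['e']] -- handy for batching the usernames below.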
# ### Scrape User Watch List (Page 1)
# In[ ]:
usernames = pd.read_sql('SELECT * FROM "user" WHERE num_anime_pages IS NULL;', db)['username'].to_list()
# In[ ]:
def getUserFirstPage(username, attempt=1):
url = f'https://www.anime-planet.com/users/{username}/anime?sort=title&mylist_view=list'
if attempt == 4:
return (username, url, '')
try:
resp = requests.get(f'http://192.168.0.3:5000/special-requests?url={quote(url)}')
if resp.text != '':
return (username, url, resp.text)
else:
return getUserFirstPage(username, attempt+1)
except:
return getUserFirstPage(username, attempt+1)
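# Hedged sketch of the driver loop that is not shown in this excerpt: given
# the ThreadPoolExecutor and chunker imports above, the first pages are
# presumably fetched in parallel over batches of usernames, roughly:
#
# for chunk in chunker(usernames, 100):
#     with ThreadPoolExecutor(max_workers=8) as executor:
#         list_of_tups = list(executor.map(getUserFirstPage, chunk))
#     saveData()
#
# The batch size and worker count are placeholders, not values from the
# original script.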
# In[ ]:
def findNumAnimePages(uname_url_html_tup):
try:
html_text = uname_url_html_tup[2]
soup = BeautifulSoup(html_text, 'html.parser')
if soup.find('table') is None:
result_tup = (*uname_url_html_tup, 0)
return result_tup
ul = soup.find('ul', attrs={'class':'nav'})
page_nums = []
for tag in ul.find_all('a'):
try:
page_nums.append(int(tag.text))
except:
continue
num_anime_pages = max(page_nums)
result_tup = (*uname_url_html_tup, num_anime_pages)
return result_tup
except:
result_tup = (*uname_url_html_tup, 1)
return result_tup
# In[ ]:
def saveData():
global list_of_tups, result_dict
with Pool(4) as p:
list_of_tups = p.map(findNumAnimePages, list_of_tups)
for tup in list_of_tups:
result_dict['username'].append(tup[0])
result_dict['url'].append(tup[1])
result_dict['html_text'].append(tup[2])
result_dict['num_anime_pages'].append(tup[3])
list_of_tups = []
df = pd.DataFrame(result_dict)
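# The snippet is cut off here. A plausible continuation (assumption) would
# persist each batch and reset the accumulators, e.g.:
#
# df.to_sql('user_page', db, if_exists='append', index=False)
# result_dict = {'username': [], 'url': [], 'html_text': [], 'num_anime_pages': []}
#
# The table name 'user_page' is a hypothetical placeholder.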
import numpy as np
import pandas as pd
from numba import njit, typeof
from numba.typed import List
from datetime import datetime, timedelta
import pytest
from copy import deepcopy
import vectorbt as vbt
from vectorbt.portfolio.enums import *
from vectorbt.generic.enums import drawdown_dt
from vectorbt.utils.random_ import set_seed
from vectorbt.portfolio import nb
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
price = pd.Series([1., 2., 3., 4., 5.], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]))
price_wide = price.vbt.tile(3, keys=['a', 'b', 'c'])
big_price = pd.DataFrame(np.random.uniform(size=(1000,)))
big_price.index = [datetime(2018, 1, 1) + timedelta(days=i) for i in range(1000)]
big_price_wide = big_price.vbt.tile(1000)
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.portfolio['attach_call_seq'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# nb ############# #
def assert_same_tuple(tup1, tup2):
for i in range(len(tup1)):
assert tup1[i] == tup2[i] or np.isnan(tup1[i]) and np.isnan(tup2[i])
def test_execute_order_nb():
# Errors, ignored and rejected orders
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(-100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.nan, 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.inf, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.nan, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., np.nan, 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., -10., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., np.nan, 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., -100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, -10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=0))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=-10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=np.nan))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., np.nan, 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=3))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., -10., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=4))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(0, 10))
assert exec_state == ExecuteOrderState(cash=100.0, position=10.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=5))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(15, 10, max_size=10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=9))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=1.))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=10))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100, 0., np.inf, np.nan, 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(100, 10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-200, 10, direction=Direction.LongOnly, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, fixed_fees=1000))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
# Calculations
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=180.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=909.0, position=-100.0, debt=900.0, free_cash=-891.0)
assert_same_tuple(order_result, OrderResult(
size=100.0, price=9.0, fees=91.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=7.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=125.0, position=-2.5, debt=25.0, free_cash=75.0)
assert_same_tuple(order_result, OrderResult(
size=7.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-2.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=-2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-7.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-10.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(150., -5., 0., 150., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=300.0, position=-20.0, debt=150.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=50.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(1000., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 17.5, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=850.0, position=3.571428571428571, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.571428571428571, price=17.5, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 100, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=37.5, position=-4.375, debt=43.75, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=0.625, price=100.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 10., 0., -50., 10., 100., 0, 0),
nb.order_nb(-20, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=150.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 1., 0., -50., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=10.0, position=0.0, debt=0.0, free_cash=-40.0)
assert_same_tuple(order_result, OrderResult(
size=1.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., -100., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=-100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-20, 10, fees=0.1, slippage=0.1, fixed_fees=1., lock_cash=True))
assert exec_state == ExecuteOrderState(cash=80.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
def test_build_call_seq_nb():
group_lens = np.array([1, 2, 3, 4])
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Default),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Default)
)
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Reversed),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Reversed)
)
set_seed(seed)
out1 = nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Random)
set_seed(seed)
out2 = nb.build_call_seq((10, 10), group_lens, CallSeqType.Random)
np.testing.assert_array_equal(out1, out2)
# ############# from_orders ############# #
order_size = pd.Series([np.inf, -np.inf, np.nan, np.inf, -np.inf], index=price.index)
order_size_wide = order_size.vbt.tile(3, keys=['a', 'b', 'c'])
order_size_one = pd.Series([1, -1, np.nan, 1, -1], index=price.index)
def from_orders_both(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='both', **kwargs)
def from_orders_longonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='longonly', **kwargs)
def from_orders_shortonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='shortonly', **kwargs)
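# The three wrappers above simply pin `direction` ('both', 'longonly',
# 'shortonly') and forward everything else to vbt.Portfolio.from_orders, so
# the tests below can write e.g. from_orders_both(size=order_size_one,
# fees=0.01) and inspect .order_records.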
class TestFromOrders:
def test_one_column(self):
record_arrays_close(
from_orders_both().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_multiple_columns(self):
record_arrays_close(
from_orders_both(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1), (8, 2, 0, 100.0, 1.0, 0.0, 0),
(9, 2, 1, 100.0, 2.0, 0.0, 1), (10, 2, 3, 50.0, 4.0, 0.0, 0), (11, 2, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0), (4, 2, 0, 100.0, 1.0, 0.0, 1), (5, 2, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both(close=price_wide)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_size_inf(self):
record_arrays_close(
from_orders_both(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_orders_both(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 198.01980198019803, 2.02, 0.0, 1),
(2, 0, 3, 99.00990099009901, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 1),
(2, 0, 3, 49.504950495049506, 4.04, 0.0, 0), (3, 0, 4, 49.504950495049506, 5.05, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1),
(2, 0, 3, 50.0, 4.0, 0.0, 0), (3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 1), (1, 0, 3, 66.66666666666667, 3.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 0), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 1), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_val_price(self):
price_nan = pd.Series([1, 2, np.nan, 4, 5], index=price.index)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
shift_price = price_nan.ffill().shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
shift_price_nan = price_nan.shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
def test_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.2, 0),
(6, 1, 3, 1.0, 4.0, 0.4, 1), (7, 1, 4, 1.0, 5.0, 0.5, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 2.0, 0), (10, 2, 3, 1.0, 4.0, 4.0, 1), (11, 2, 4, 1.0, 5.0, 5.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.1, 0),
(6, 1, 3, 1.0, 4.0, 0.1, 1), (7, 1, 4, 1.0, 5.0, 0.1, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 1.0, 0), (10, 2, 3, 1.0, 4.0, 1.0, 1), (11, 2, 4, 1.0, 5.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_orders_both(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 0.9, 0.0, 1), (5, 1, 1, 1.0, 2.2, 0.0, 0),
(6, 1, 3, 1.0, 3.6, 0.0, 1), (7, 1, 4, 1.0, 5.5, 0.0, 0), (8, 2, 0, 1.0, 0.0, 0.0, 1),
(9, 2, 1, 1.0, 4.0, 0.0, 0), (10, 2, 3, 1.0, 0.0, 0.0, 1), (11, 2, 4, 1.0, 10.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0), (2, 0, 3, 0.5, 4.0, 0.0, 1),
(3, 0, 4, 0.5, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0), (8, 2, 0, 1.0, 1.0, 0.0, 1),
(9, 2, 1, 1.0, 2.0, 0.0, 0), (10, 2, 3, 1.0, 4.0, 0.0, 1), (11, 2, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_orders_both(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 1, 1.0, 2.0, 0.0, 1), (5, 1, 3, 1.0, 4.0, 0.0, 0),
(6, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 3, 1.0, 4.0, 0.0, 0), (5, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 3, 1.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_lock_cash(self):
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[143.12812469365747, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-49.5, -49.5]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=True, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[94.6034702480149, 47.54435839623566]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[49.5, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[0.0, 0.0]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 100]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[1.4312812469365748, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-96.16606313106556, -96.16606313106556]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 100]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=True, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[0.4699090272918124, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[98.06958012596222, 98.06958012596222]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[0.0, 0.0]
])
)
pf = from_orders_both(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 1000., 2., 0., 1),
(2, 0, 3, 500., 4., 0., 0), (3, 0, 4, 1000., 5., 0., 1),
(4, 1, 0, 100., 1., 0., 0), (5, 1, 1, 200., 2., 0., 1),
(6, 1, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[0.0, 0.0],
[-1600.0, 0.0],
[-1600.0, 0.0],
[-1600.0, 0.0],
[-6600.0, 0.0]
])
)
pf = from_orders_longonly(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 100., 2., 0., 1),
(2, 0, 3, 50., 4., 0., 0), (3, 0, 4, 50., 5., 0., 1),
(4, 1, 0, 100., 1., 0., 0), (5, 1, 1, 100., 2., 0., 1),
(6, 1, 3, 50., 4., 0., 0), (7, 1, 4, 50., 5., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[0.0, 0.0],
[200.0, 200.0],
[200.0, 200.0],
[0.0, 0.0],
[250.0, 250.0]
])
)
pf = from_orders_shortonly(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 1000., 1., 0., 1), (1, 0, 1, 550., 2., 0., 0),
(2, 0, 3, 1000., 4., 0., 1), (3, 0, 4, 800., 5., 0., 0),
(4, 1, 0, 100., 1., 0., 1), (5, 1, 1, 100., 2., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[-900.0, 0.0],
[-900.0, 0.0],
[-900.0, 0.0],
[-4900.0, 0.0],
[-3989.6551724137926, 0.0]
])
)
def test_allow_partial(self):
record_arrays_close(
from_orders_both(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 1000.0, 2.0, 0.0, 1), (2, 0, 3, 500.0, 4.0, 0.0, 0),
(3, 0, 4, 1000.0, 5.0, 0.0, 1), (4, 1, 1, 1000.0, 2.0, 0.0, 1), (5, 1, 4, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 1, 550.0, 2.0, 0.0, 0), (2, 0, 3, 1000.0, 4.0, 0.0, 1),
(3, 0, 4, 800.0, 5.0, 0.0, 0), (4, 1, 0, 1000.0, 1.0, 0.0, 1), (5, 1, 3, 1000.0, 4.0, 0.0, 1),
(6, 1, 4, 1000.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_orders_both(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 1000.0, 2.0, 0.0, 1), (2, 0, 3, 500.0, 4.0, 0.0, 0),
(3, 0, 4, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 1, 550.0, 2.0, 0.0, 0), (2, 0, 3, 1000.0, 4.0, 0.0, 1),
(3, 0, 4, 800.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_orders_both(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_orders_longonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_orders_shortonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
def test_log(self):
record_arrays_close(
from_orders_both(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, np.inf, 1.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0,
100.0, 0.0, 0.0, 1.0, 100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 0, 0, 1, 0.0, 100.0, 0.0, 0.0, 2.0, 200.0, -np.inf, 2.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
-100.0, 200.0, 0.0, 2.0, 200.0, 200.0, 2.0, 0.0, 1, 0, -1, 1),
(2, 0, 0, 2, 400.0, -100.0, 200.0, 0.0, 3.0, 100.0, np.nan, 3.0, 0,
2, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
-100.0, 200.0, 0.0, 3.0, 100.0, np.nan, np.nan, np.nan, -1, 1, 0, -1),
(3, 0, 0, 3, 400.0, -100.0, 200.0, 0.0, 4.0, 0.0, np.inf, 4.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
0.0, 0.0, 4.0, 0.0, 100.0, 4.0, 0.0, 0, 0, -1, 2),
(4, 0, 0, 4, 0.0, 0.0, 0.0, 0.0, 5.0, 0.0, -np.inf, 5.0, 0, 2, 0.0,
0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
0.0, 0.0, 5.0, 0.0, np.nan, np.nan, np.nan, -1, 2, 6, -1)
], dtype=log_dt)
)
def test_group_by(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
def test_cash_sharing(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
with pytest.raises(Exception):
_ = pf.regroup(group_by=False)
def test_call_seq(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = from_orders_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = from_orders_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
close=1.,
size=pd.DataFrame([
[0., 0., np.inf],
[0., np.inf, -np.inf],
[np.inf, -np.inf, 0.],
[-np.inf, 0., np.inf],
[0., np.inf, -np.inf],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
pf = from_orders_both(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 200., 1., 0., 1),
(2, 1, 1, 200., 1., 0., 0), (3, 1, 2, 200., 1., 0., 1),
(4, 0, 2, 200., 1., 0., 0), (5, 0, 3, 200., 1., 0., 1),
(6, 2, 3, 200., 1., 0., 0), (7, 2, 4, 200., 1., 0., 1),
(8, 1, 4, 200., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_orders_longonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 100., 1., 0., 1),
(2, 1, 1, 100., 1., 0., 0), (3, 1, 2, 100., 1., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 0, 3, 100., 1., 0., 1),
(6, 2, 3, 100., 1., 0., 0), (7, 2, 4, 100., 1., 0., 1),
(8, 1, 4, 100., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_orders_shortonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 1), (1, 2, 1, 100., 1., 0., 0),
(2, 0, 2, 100., 1., 0., 1), (3, 0, 3, 100., 1., 0., 0),
(4, 1, 4, 100., 1., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 2, 1],
[2, 1, 0],
[1, 0, 2]
])
)
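    # size_type variants: 'value' treats size as order value, 'targetamount'/'targetvalue'/'targetpercent'
    # as the desired position in shares, value, or fraction of (group) value, and 'percent' as a fraction
    # of the resources available at that bar.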
def test_value(self):
record_arrays_close(
from_orders_both(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1),
(2, 0, 3, 0.25, 4.0, 0.0, 0), (3, 0, 4, 0.2, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1),
(2, 0, 3, 0.25, 4.0, 0.0, 0), (3, 0, 4, 0.2, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0),
(2, 0, 3, 0.25, 4.0, 0.0, 1), (3, 0, 4, 0.2, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_target_amount(self):
record_arrays_close(
from_orders_both(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 1, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=75., size_type='targetamount',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 1, 0, 25.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_target_value(self):
record_arrays_close(
from_orders_both(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 2.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 1),
(4, 0, 4, 2.5, 5.0, 0.0, 1), (5, 1, 0, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 25.0, 2.0, 0.0, 0), (7, 1, 2, 8.333333333333332, 3.0, 0.0, 0),
(8, 1, 3, 4.166666666666668, 4.0, 0.0, 0), (9, 1, 4, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 2.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 1),
(4, 0, 4, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 0, 1, 25.0, 2.0, 0.0, 0),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 0), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 0),
(4, 0, 4, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=50., size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 50.0, 1.0, 0.0, 0),
(2, 0, 1, 25.0, 2.0, 0.0, 1), (3, 1, 1, 25.0, 2.0, 0.0, 1),
(4, 2, 1, 25.0, 2.0, 0.0, 0), (5, 0, 2, 8.333333333333332, 3.0, 0.0, 1),
(6, 1, 2, 8.333333333333332, 3.0, 0.0, 1), (7, 2, 2, 8.333333333333332, 3.0, 0.0, 1),
(8, 0, 3, 4.166666666666668, 4.0, 0.0, 1), (9, 1, 3, 4.166666666666668, 4.0, 0.0, 1),
(10, 2, 3, 4.166666666666668, 4.0, 0.0, 1), (11, 0, 4, 2.5, 5.0, 0.0, 1),
(12, 1, 4, 2.5, 5.0, 0.0, 1), (13, 2, 4, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
def test_target_percent(self):
record_arrays_close(
from_orders_both(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 1), (2, 0, 2, 6.25, 3.0, 0.0, 1),
(3, 0, 3, 3.90625, 4.0, 0.0, 1), (4, 0, 4, 2.734375, 5.0, 0.0, 1), (5, 1, 0, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 37.5, 2.0, 0.0, 0), (7, 1, 2, 6.25, 3.0, 0.0, 0), (8, 1, 3, 2.34375, 4.0, 0.0, 0),
(9, 1, 4, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 1), (2, 0, 2, 6.25, 3.0, 0.0, 1),
(3, 0, 3, 3.90625, 4.0, 0.0, 1), (4, 0, 4, 2.734375, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 0, 1, 37.5, 2.0, 0.0, 0), (2, 0, 2, 6.25, 3.0, 0.0, 0),
(3, 0, 3, 2.34375, 4.0, 0.0, 0), (4, 0, 4, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 50.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
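    # update_value=True revalues the group after each filled order, so later calls in the same bar size against the updated value.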
def test_update_value(self):
record_arrays_close(
from_orders_both(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
update_value=False).order_records,
from_orders_both(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
update_value=True).order_records
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=False).order_records,
np.array([
(0, 0, 0, 50.0, 1.01, 0.505, 0),
(1, 1, 0, 48.02960494069208, 1.01, 0.485099009900992, 0),
(2, 0, 1, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
(3, 1, 1, 0.9465661198057499, 2.02, 0.019120635620076154, 0),
(4, 0, 2, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
(5, 1, 2, 0.018558300554959377, 3.0300000000000002, 0.0005623165068152705, 0),
(6, 0, 3, 0.00037870218456959037, 3.96, 1.4996606508955778e-05, 1),
(7, 1, 3, 0.0003638525743521767, 4.04, 1.4699644003827875e-05, 0),
(8, 0, 4, 7.424805112066224e-06, 4.95, 3.675278530472781e-07, 1),
(9, 1, 4, 7.133664827307231e-06, 5.05, 3.6025007377901643e-07, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.01, 0.505, 0),
(1, 1, 0, 48.02960494069208, 1.01, 0.485099009900992, 0),
(2, 0, 1, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
(3, 1, 1, 0.7303208018821721, 2.02, 0.014752480198019875, 0),
(4, 2, 1, 0.21624531792357785, 2.02, 0.0043681554220562635, 0),
(5, 0, 2, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
(6, 1, 2, 0.009608602243410758, 2.9699999999999998, 0.00028537548662929945, 1),
(7, 2, 2, 0.02779013180558861, 3.0300000000000002, 0.0008420409937093393, 0),
(8, 0, 3, 0.0005670876809631409, 3.96, 2.2456672166140378e-05, 1),
(9, 1, 3, 0.00037770350099464167, 3.96, 1.4957058639387809e-05, 1),
(10, 2, 3, 0.0009077441794302741, 4.04, 3.6672864848982974e-05, 0),
(11, 0, 4, 1.8523501267964093e-05, 4.95, 9.169133127642227e-07, 1),
(12, 1, 4, 1.2972670177191503e-05, 4.95, 6.421471737709794e-07, 1),
(13, 2, 4, 3.0261148547590434e-05, 5.05, 1.5281880016533242e-06, 0)
], dtype=order_dt)
)
def test_percent(self):
record_arrays_close(
from_orders_both(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 12.5, 2., 0., 0),
(2, 0, 2, 4.16666667, 3., 0., 0), (3, 0, 3, 1.5625, 4., 0., 0),
(4, 0, 4, 0.625, 5., 0., 0), (5, 1, 0, 50., 1., 0., 1),
(6, 1, 1, 12.5, 2., 0., 1), (7, 1, 2, 4.16666667, 3., 0., 1),
(8, 1, 3, 1.5625, 4., 0., 1), (9, 1, 4, 0.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 12.5, 2., 0., 0),
(2, 0, 2, 4.16666667, 3., 0., 0), (3, 0, 3, 1.5625, 4., 0., 0),
(4, 0, 4, 0.625, 5., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 1), (1, 0, 1, 12.5, 2., 0., 1),
(2, 0, 2, 4.16666667, 3., 0., 1), (3, 0, 3, 1.5625, 4., 0., 1),
(4, 0, 4, 0.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 5.00000000e+01, 1., 0., 0), (1, 1, 0, 2.50000000e+01, 1., 0., 0),
(2, 2, 0, 1.25000000e+01, 1., 0., 0), (3, 0, 1, 3.12500000e+00, 2., 0., 0),
(4, 1, 1, 1.56250000e+00, 2., 0., 0), (5, 2, 1, 7.81250000e-01, 2., 0., 0),
(6, 0, 2, 2.60416667e-01, 3., 0., 0), (7, 1, 2, 1.30208333e-01, 3., 0., 0),
(8, 2, 2, 6.51041667e-02, 3., 0., 0), (9, 0, 3, 2.44140625e-02, 4., 0., 0),
(10, 1, 3, 1.22070312e-02, 4., 0., 0), (11, 2, 3, 6.10351562e-03, 4., 0., 0),
(12, 0, 4, 2.44140625e-03, 5., 0., 0), (13, 1, 4, 1.22070312e-03, 5., 0., 0),
(14, 2, 4, 6.10351562e-04, 5., 0., 0)
], dtype=order_dt)
)
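    # call_seq='auto' sorts calls within each bar so that orders freeing up cash are executed before those that require it.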
def test_auto_seq(self):
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
pd.testing.assert_frame_equal(
from_orders_both(
close=1., size=target_hold_value, size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
pd.testing.assert_frame_equal(
from_orders_both(
close=1., size=target_hold_value / 100, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
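    # max_orders and max_logs set the capacity of the preallocated record arrays; exceeding either raises an exception.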
def test_max_orders(self):
_ = from_orders_both(close=price_wide)
_ = from_orders_both(close=price_wide, max_orders=9)
with pytest.raises(Exception):
_ = from_orders_both(close=price_wide, max_orders=8)
def test_max_logs(self):
_ = from_orders_both(close=price_wide, log=True)
_ = from_orders_both(close=price_wide, log=True, max_logs=15)
with pytest.raises(Exception):
_ = from_orders_both(close=price_wide, log=True, max_logs=14)
# ############# from_signals ############# #
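# Shared signal fixtures: entries fire on the first three bars, exits on the last three,
# tiled into three columns ('a', 'b', 'c') for the wide tests.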
entries = pd.Series([True, True, True, False, False], index=price.index)
entries_wide = entries.vbt.tile(3, keys=['a', 'b', 'c'])
exits = pd.Series([False, False, True, True, True], index=price.index)
exits_wide = exits.vbt.tile(3, keys=['a', 'b', 'c'])
def from_signals_both(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='both', **kwargs)
def from_signals_longonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='longonly', **kwargs)
def from_signals_shortonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='shortonly', **kwargs)
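# Variants that drive from_signals with separate long and short entry/exit arrays instead of the direction argument.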
def from_ls_signals_both(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, False, exits, False, **kwargs)
def from_ls_signals_longonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, False, False, **kwargs)
def from_ls_signals_shortonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, False, False, entries, exits, **kwargs)
class TestFromSignals:
@pytest.mark.parametrize(
"test_ls",
[False, True],
)
def test_one_column(self, test_ls):
_from_signals_both = from_ls_signals_both if test_ls else from_signals_both
_from_signals_longonly = from_ls_signals_longonly if test_ls else from_signals_longonly
_from_signals_shortonly = from_ls_signals_shortonly if test_ls else from_signals_shortonly
record_arrays_close(
_from_signals_both().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_longonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_shortonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 0, 3, 50., 4., 0., 0)
], dtype=order_dt)
)
pf = _from_signals_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
@pytest.mark.parametrize(
"test_ls",
[False, True],
)
def test_multiple_columns(self, test_ls):
_from_signals_both = from_ls_signals_both if test_ls else from_signals_both
_from_signals_longonly = from_ls_signals_longonly if test_ls else from_signals_longonly
_from_signals_shortonly = from_ls_signals_shortonly if test_ls else from_signals_shortonly
record_arrays_close(
_from_signals_both(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 1, 0, 100., 1., 0., 0), (3, 1, 3, 200., 4., 0., 1),
(4, 2, 0, 100., 1., 0., 0), (5, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_longonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 100., 4., 0., 1),
(2, 1, 0, 100., 1., 0., 0), (3, 1, 3, 100., 4., 0., 1),
(4, 2, 0, 100., 1., 0., 0), (5, 2, 3, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_shortonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 0, 3, 50., 4., 0., 0),
(2, 1, 0, 100., 1., 0., 1), (3, 1, 3, 50., 4., 0., 0),
(4, 2, 0, 100., 1., 0., 1), (5, 2, 3, 50., 4., 0., 0)
], dtype=order_dt)
)
pf = _from_signals_both(close=price_wide)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
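    # A custom signal_func_nb receives the context plus signal_args and returns the four flags (long entry/exit, short entry/exit) per element.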
def test_custom_signal_func(self):
@njit
def signal_func_nb(c, long_num_arr, short_num_arr):
long_num = nb.get_elem_nb(c, long_num_arr)
short_num = nb.get_elem_nb(c, short_num_arr)
is_long_entry = long_num > 0
is_long_exit = long_num < 0
is_short_entry = short_num > 0
is_short_exit = short_num < 0
return is_long_entry, is_long_exit, is_short_entry, is_short_exit
pf_base = vbt.Portfolio.from_signals(
pd.Series([1, 2, 3, 4, 5]),
entries=pd.Series([True, False, False, False, False]),
exits=pd.Series([False, False, True, False, False]),
short_entries=pd.Series([False, True, False, True, False]),
short_exits=pd.Series([False, False, False, False, True]),
size=1,
upon_opposite_entry='ignore'
)
pf = vbt.Portfolio.from_signals(
pd.Series([1, 2, 3, 4, 5]),
signal_func_nb=signal_func_nb,
signal_args=(vbt.Rep('long_num_arr'), vbt.Rep('short_num_arr')),
broadcast_named_args=dict(
long_num_arr=pd.Series([1, 0, -1, 0, 0]),
short_num_arr=pd.Series([0, 1, 0, 1, -1])
),
size=1,
upon_opposite_entry='ignore'
)
record_arrays_close(
pf_base.order_records,
pf.order_records
)
def test_amount(self):
record_arrays_close(
from_signals_both(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 2.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 1), (1, 1, 3, 1.0, 4.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 1), (3, 2, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_value(self):
record_arrays_close(
from_signals_both(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 0.3125, 4.0, 0.0, 1),
(2, 1, 4, 0.1775, 5.0, 0.0, 1), (3, 2, 0, 100.0, 1.0, 0.0, 0),
(4, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 1), (1, 1, 3, 1.0, 4.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 1), (3, 2, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_percent(self):
with pytest.raises(Exception):
_ = from_signals_both(size=0.5, size_type='percent')
record_arrays_close(
from_signals_both(size=0.5, size_type='percent', upon_opposite_entry='close').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 3, 50., 4., 0., 1), (2, 0, 4, 25., 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(size=0.5, size_type='percent', upon_opposite_entry='close',
accumulate=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 0),
(2, 0, 3, 62.5, 4.0, 0.0, 1), (3, 0, 4, 27.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 3, 50., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 1), (1, 0, 3, 37.5, 4., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 25., 1., 0., 0),
(2, 2, 0, 12.5, 1., 0., 0), (3, 0, 3, 50., 4., 0., 1),
(4, 1, 3, 25., 4., 0., 1), (5, 2, 3, 12.5, 4., 0., 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_signals_both(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 3, 198.01980198019803, 4.04, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099, 1.01, 0., 0), (1, 0, 3, 99.00990099, 4.04, 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 0, 3, 49.504950495049506, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 1), (1, 0, 3, 66.66666666666667, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_val_price(self):
price_nan = pd.Series([1, 2, np.nan, 4, 5], index=price.index)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_both(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_longonly(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
shift_price = price_nan.ffill().shift(1)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_both(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_longonly(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_both(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_longonly(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
shift_price_nan = price_nan.shift(1)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_both(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_longonly(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
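    # fees is charged as a fraction of order value, fixed_fees as a flat amount per order,
    # and slippage moves the fill price against the order by the given fraction.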
def test_fees(self):
record_arrays_close(
from_signals_both(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 2.0, 4.0, 0.8, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 2.0, 4.0, 8.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 1.0, 4.0, 0.4, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 1.0, 4.0, 4.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.1, 1),
(3, 1, 3, 1.0, 4.0, 0.4, 0), (4, 2, 0, 1.0, 1.0, 1.0, 1), (5, 2, 3, 1.0, 4.0, 4.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_signals_both(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 2.0, 4.0, 0.1, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 2.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 1.0, 4.0, 0.1, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 1.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.1, 1),
(3, 1, 3, 1.0, 4.0, 0.1, 0), (4, 2, 0, 1.0, 1.0, 1.0, 1), (5, 2, 3, 1.0, 4.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_signals_both(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.1, 0.0, 0),
(3, 1, 3, 2.0, 3.6, 0.0, 1), (4, 2, 0, 1.0, 2.0, 0.0, 0), (5, 2, 3, 2.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.1, 0.0, 0),
(3, 1, 3, 1.0, 3.6, 0.0, 1), (4, 2, 0, 1.0, 2.0, 0.0, 0), (5, 2, 3, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 0.9, 0.0, 1),
(3, 1, 3, 1.0, 4.4, 0.0, 0), (4, 2, 0, 1.0, 0.0, 0.0, 1), (5, 2, 3, 1.0, 8.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_signals_both(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_signals_both(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 3, 0.5, 4.0, 0.0, 1), (2, 0, 4, 0.5, 5.0, 0.0, 1),
(3, 1, 0, 1.0, 1.0, 0.0, 0), (4, 1, 3, 1.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 1),
(6, 2, 0, 1.0, 1.0, 0.0, 0), (7, 2, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 3, 0.5, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1), (4, 2, 0, 1.0, 1.0, 0.0, 0), (5, 2, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 0, 3, 0.5, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0), (4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_signals_both(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 1, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_allow_partial(self):
record_arrays_close(
from_signals_both(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 1100.0, 4.0, 0.0, 1), (2, 1, 3, 1000.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 3, 275.0, 4.0, 0.0, 0), (2, 1, 0, 1000.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 3, 50.0, 4.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_signals_both(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 1100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_signals_shortonly(size=1000, allow_partial=True, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_both(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_longonly(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_shortonly(size=1000, allow_partial=False, raise_reject=True).order_records
def test_log(self):
record_arrays_close(
from_signals_both(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, np.inf, 1.0, 0, 2, 0.0, 0.0,
0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 100.0, 0.0, 0.0, 1.0,
100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 0, 0, 3, 0.0, 100.0, 0.0, 0.0, 4.0, 400.0, -np.inf, 4.0, 0, 2, 0.0,
0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 800.0, -100.0,
400.0, 0.0, 4.0, 400.0, 200.0, 4.0, 0.0, 1, 0, -1, 1)
], dtype=log_dt)
)
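    # accumulate controls whether repeated signals add to or remove from an open position ('addonly', 'removeonly', 'both') or are ignored ('disabled').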
def test_accumulate(self):
record_arrays_close(
from_signals_both(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 1, 1.0, 2.0, 0.0, 0), (4, 1, 3, 3.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 1),
(6, 2, 0, 1.0, 1.0, 0.0, 0), (7, 2, 3, 1.0, 4.0, 0.0, 1), (8, 2, 4, 1.0, 5.0, 0.0, 1),
(9, 3, 0, 1.0, 1.0, 0.0, 0), (10, 3, 1, 1.0, 2.0, 0.0, 0), (11, 3, 3, 1.0, 4.0, 0.0, 1),
(12, 3, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 1, 1.0, 2.0, 0.0, 0), (4, 1, 3, 2.0, 4.0, 0.0, 1), (5, 2, 0, 1.0, 1.0, 0.0, 0),
(6, 2, 3, 1.0, 4.0, 0.0, 1), (7, 3, 0, 1.0, 1.0, 0.0, 0), (8, 3, 1, 1.0, 2.0, 0.0, 0),
(9, 3, 3, 1.0, 4.0, 0.0, 1), (10, 3, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 1, 1.0, 2.0, 0.0, 1), (4, 1, 3, 2.0, 4.0, 0.0, 0), (5, 2, 0, 1.0, 1.0, 0.0, 1),
(6, 2, 3, 1.0, 4.0, 0.0, 0), (7, 3, 0, 1.0, 1.0, 0.0, 1), (8, 3, 1, 1.0, 2.0, 0.0, 1),
(9, 3, 3, 1.0, 4.0, 0.0, 0), (10, 3, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
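    # The upon_*_conflict and upon_opposite_entry options select how simultaneous or opposing signals are resolved.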
def test_upon_long_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_long_conflict=[[
'ignore',
'entry',
'exit',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_longonly(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 0), (3, 1, 2, 1.0, 3.0, 0.0, 0),
(4, 2, 1, 1.0, 2.0, 0.0, 0), (5, 2, 2, 1.0, 3.0, 0.0, 1),
(6, 3, 1, 1.0, 2.0, 0.0, 0), (7, 3, 2, 1.0, 3.0, 0.0, 0),
(8, 5, 1, 1.0, 2.0, 0.0, 0), (9, 5, 2, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_upon_short_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_short_conflict=[[
'ignore',
'entry',
'exit',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_shortonly(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 1),
(1, 1, 0, 1.0, 1.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 1), (3, 1, 2, 1.0, 3.0, 0.0, 1),
(4, 2, 1, 1.0, 2.0, 0.0, 1), (5, 2, 2, 1.0, 3.0, 0.0, 0),
(6, 3, 1, 1.0, 2.0, 0.0, 1), (7, 3, 2, 1.0, 3.0, 0.0, 1),
(8, 5, 1, 1.0, 2.0, 0.0, 1), (9, 5, 2, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_dir_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_dir_conflict=[[
'ignore',
'long',
'short',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_both(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 0), (3, 1, 2, 1.0, 3.0, 0.0, 0),
(4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 1, 1.0, 2.0, 0.0, 0), (6, 2, 2, 1.0, 3.0, 0.0, 1),
(7, 3, 1, 1.0, 2.0, 0.0, 0), (8, 3, 2, 1.0, 3.0, 0.0, 0),
(9, 4, 1, 1.0, 2.0, 0.0, 1), (10, 4, 2, 1.0, 3.0, 0.0, 1),
(11, 5, 1, 1.0, 2.0, 0.0, 0), (12, 5, 2, 1.0, 3.0, 0.0, 1),
(13, 6, 1, 1.0, 2.0, 0.0, 1), (14, 6, 2, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_opposite_entry(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, False, True, False, True, False, True, False, True, False],
[False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, False, True, False, True, False]
]),
exits=pd.DataFrame([
[False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, False, True, False, True, False],
[False, True, False, True, False, True, False, True, False, True]
]),
size=1.,
upon_opposite_entry=[[
'ignore',
'ignore',
'close',
'close',
'closereduce',
'closereduce',
'reverse',
'reverse',
'reversereduce',
'reversereduce'
]]
)
record_arrays_close(
from_signals_both(**kwargs).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 1),
(2, 2, 0, 1.0, 1.0, 0.0, 0), (3, 2, 1, 1.0, 2.0, 0.0, 1), (4, 2, 2, 1.0, 3.0, 0.0, 0),
(5, 3, 0, 1.0, 1.0, 0.0, 1), (6, 3, 1, 1.0, 2.0, 0.0, 0), (7, 3, 2, 1.0, 3.0, 0.0, 1),
(8, 4, 0, 1.0, 1.0, 0.0, 0), (9, 4, 1, 1.0, 2.0, 0.0, 1), (10, 4, 2, 1.0, 3.0, 0.0, 0),
(11, 5, 0, 1.0, 1.0, 0.0, 1), (12, 5, 1, 1.0, 2.0, 0.0, 0), (13, 5, 2, 1.0, 3.0, 0.0, 1),
(14, 6, 0, 1.0, 1.0, 0.0, 0), (15, 6, 1, 2.0, 2.0, 0.0, 1), (16, 6, 2, 2.0, 3.0, 0.0, 0),
(17, 7, 0, 1.0, 1.0, 0.0, 1), (18, 7, 1, 2.0, 2.0, 0.0, 0), (19, 7, 2, 2.0, 3.0, 0.0, 1),
(20, 8, 0, 1.0, 1.0, 0.0, 0), (21, 8, 1, 2.0, 2.0, 0.0, 1), (22, 8, 2, 2.0, 3.0, 0.0, 0),
(23, 9, 0, 1.0, 1.0, 0.0, 1), (24, 9, 1, 2.0, 2.0, 0.0, 0), (25, 9, 2, 2.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(**kwargs, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 2, 1.0, 3.0, 0.0, 0),
(2, 1, 0, 1.0, 1.0, 0.0, 1), (3, 1, 2, 1.0, 3.0, 0.0, 1),
(4, 2, 0, 1.0, 1.0, 0.0, 0), (5, 2, 1, 1.0, 2.0, 0.0, 1), (6, 2, 2, 1.0, 3.0, 0.0, 0),
(7, 3, 0, 1.0, 1.0, 0.0, 1), (8, 3, 1, 1.0, 2.0, 0.0, 0), (9, 3, 2, 1.0, 3.0, 0.0, 1),
(10, 4, 0, 1.0, 1.0, 0.0, 0), (11, 4, 1, 1.0, 2.0, 0.0, 1), (12, 4, 2, 1.0, 3.0, 0.0, 0),
(13, 5, 0, 1.0, 1.0, 0.0, 1), (14, 5, 1, 1.0, 2.0, 0.0, 0), (15, 5, 2, 1.0, 3.0, 0.0, 1),
(16, 6, 0, 1.0, 1.0, 0.0, 0), (17, 6, 1, 2.0, 2.0, 0.0, 1), (18, 6, 2, 2.0, 3.0, 0.0, 0),
(19, 7, 0, 1.0, 1.0, 0.0, 1), (20, 7, 1, 2.0, 2.0, 0.0, 0), (21, 7, 2, 2.0, 3.0, 0.0, 1),
(22, 8, 0, 1.0, 1.0, 0.0, 0), (23, 8, 1, 1.0, 2.0, 0.0, 1), (24, 8, 2, 1.0, 3.0, 0.0, 0),
(25, 9, 0, 1.0, 1.0, 0.0, 1), (26, 9, 1, 1.0, 2.0, 0.0, 0), (27, 9, 2, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_init_cash(self):
record_arrays_close(
from_signals_both(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 3, 1.0, 4.0, 0.0, 1), (1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 3, 2.0, 4.0, 0.0, 1),
(3, 2, 0, 1.0, 1.0, 0.0, 0), (4, 2, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1), (2, 2, 0, 1.0, 1.0, 0.0, 0),
(3, 2, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 0.25, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 0.5, 4.0, 0.0, 0), (4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_signals_both(init_cash=np.inf).order_records
with pytest.raises(Exception):
_ = from_signals_longonly(init_cash=np.inf).order_records
with pytest.raises(Exception):
_ = from_signals_shortonly(init_cash=np.inf).order_records
def test_group_by(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 200.0, 4.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
def test_cash_sharing(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
with pytest.raises(Exception):
_ = pf.regroup(group_by=False)
def test_call_seq(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = from_signals_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = from_signals_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
close=1.,
entries=pd.DataFrame([
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
[False, True, False],
]),
exits=pd.DataFrame([
[False, False, False],
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
pf = from_signals_both(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 200., 1., 0., 1),
(2, 1, 1, 200., 1., 0., 0), (3, 1, 2, 200., 1., 0., 1),
(4, 0, 2, 200., 1., 0., 0), (5, 0, 3, 200., 1., 0., 1),
(6, 2, 3, 200., 1., 0., 0), (7, 2, 4, 200., 1., 0., 1),
(8, 1, 4, 200., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_signals_longonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 100., 1., 0., 1),
(2, 1, 1, 100., 1., 0., 0), (3, 1, 2, 100., 1., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 0, 3, 100., 1., 0., 1),
(6, 2, 3, 100., 1., 0., 0), (7, 2, 4, 100., 1., 0., 1),
(8, 1, 4, 100., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_signals_shortonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 1), (1, 2, 1, 100., 1., 0., 0),
(2, 0, 2, 100., 1., 0., 1), (3, 0, 3, 100., 1., 0., 0),
(4, 1, 4, 100., 1., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 1, 2],
[2, 1, 0],
[1, 0, 2]
])
)
pf = from_signals_longonly(**kwargs, size=1., size_type='percent')
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100.0, 1.0, 0.0, 0), (1, 2, 1, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
(3, 1, 2, 100.0, 1.0, 0.0, 1), (4, 0, 2, 100.0, 1.0, 0.0, 0), (5, 0, 3, 100.0, 1.0, 0.0, 1),
(6, 2, 3, 100.0, 1.0, 0.0, 0), (7, 2, 4, 100.0, 1.0, 0.0, 1), (8, 1, 4, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 0, 2],
[0, 1, 2],
[2, 0, 1]
])
)
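    # Stop tests: sl_stop and tp_stop are fractions of the entry price, sl_trail=True makes the stop-loss trailing,
    # np.nan disables a stop and np.inf makes it unreachable.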
def test_sl_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(sl_stop=-0.1)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0), (2, 1, 1, 20.0, 4.0, 0.0, 1),
(3, 2, 0, 20.0, 5.0, 0.0, 0), (4, 2, 3, 20.0, 2.0, 0.0, 1),
(5, 3, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1),
(2, 2, 0, 20.0, 5.0, 0.0, 1),
(3, 3, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0), (2, 1, 1, 20.0, 4.25, 0.0, 1),
(3, 2, 0, 20.0, 5.0, 0.0, 0), (4, 2, 1, 20.0, 4.25, 0.0, 1),
(5, 3, 0, 20.0, 5.0, 0.0, 0), (6, 3, 1, 20.0, 4.0, 0.0, 1),
(7, 4, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1),
(2, 2, 0, 20.0, 5.0, 0.0, 1),
(3, 3, 0, 20.0, 5.0, 0.0, 1),
(4, 4, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([1., 2., 3., 4., 5.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0),
(3, 3, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 2.0, 0.0, 0),
(3, 2, 0, 100.0, 1.0, 0.0, 1), (4, 2, 3, 50.0, 4.0, 0.0, 0),
(5, 3, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0),
(3, 3, 0, 100.0, 1.0, 0.0, 0),
(4, 4, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.75, 0.0, 0),
(3, 2, 0, 100.0, 1.0, 0.0, 1), (4, 2, 1, 100.0, 1.75, 0.0, 0),
(5, 3, 0, 100.0, 1.0, 0.0, 1), (6, 3, 1, 100.0, 2.0, 0.0, 0),
(7, 4, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_ts_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
            _ = from_signals_both(sl_stop=-0.1, sl_trail=True)
close = pd.Series([4., 5., 4., 3., 2.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 0),
(1, 1, 0, 25.0, 4.0, 0.0, 0), (2, 1, 2, 25.0, 4.0, 0.0, 1),
(3, 2, 0, 25.0, 4.0, 0.0, 0), (4, 2, 4, 25.0, 2.0, 0.0, 1),
(5, 3, 0, 25.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 1),
(1, 1, 0, 25.0, 4.0, 0.0, 1), (2, 1, 1, 25.0, 5.0, 0.0, 0),
(3, 2, 0, 25.0, 4.0, 0.0, 1),
(4, 3, 0, 25.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.15, 0.2, 0.25, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 0),
(1, 1, 0, 25.0, 4.0, 0.0, 0), (2, 1, 2, 25.0, 4.25, 0.0, 1),
(3, 2, 0, 25.0, 4.0, 0.0, 0), (4, 2, 2, 25.0, 4.25, 0.0, 1),
(5, 3, 0, 25.0, 4.0, 0.0, 0), (6, 3, 2, 25.0, 4.125, 0.0, 1),
(7, 4, 0, 25.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.15, 0.2, 0.25, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 1),
(1, 1, 0, 25.0, 4.0, 0.0, 1), (2, 1, 1, 25.0, 5.25, 0.0, 0),
(3, 2, 0, 25.0, 4.0, 0.0, 1), (4, 2, 1, 25.0, 5.25, 0.0, 0),
(5, 3, 0, 25.0, 4.0, 0.0, 1), (6, 3, 1, 25.0, 5.25, 0.0, 0),
(7, 4, 0, 25.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([2., 1., 2., 3., 4.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0),
(1, 1, 0, 50.0, 2.0, 0.0, 0), (2, 1, 1, 50.0, 1.0, 0.0, 1),
(3, 2, 0, 50.0, 2.0, 0.0, 0),
(4, 3, 0, 50.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 1),
(1, 1, 0, 50.0, 2.0, 0.0, 1), (2, 1, 2, 50.0, 2.0, 0.0, 0),
(3, 2, 0, 50.0, 2.0, 0.0, 1), (4, 2, 4, 50.0, 4.0, 0.0, 0),
(5, 3, 0, 50.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0),
(1, 1, 0, 50.0, 2.0, 0.0, 0), (2, 1, 1, 50.0, 0.75, 0.0, 1),
(3, 2, 0, 50.0, 2.0, 0.0, 0), (4, 2, 1, 50.0, 0.5, 0.0, 1),
(5, 3, 0, 50.0, 2.0, 0.0, 0),
(6, 4, 0, 50.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 1),
(1, 1, 0, 50.0, 2.0, 0.0, 1), (2, 1, 2, 50.0, 1.75, 0.0, 0),
(3, 2, 0, 50.0, 2.0, 0.0, 1), (4, 2, 2, 50.0, 1.75, 0.0, 0),
(5, 3, 0, 50.0, 2.0, 0.0, 1), (6, 3, 2, 50.0, 1.75, 0.0, 0),
(7, 4, 0, 50.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
def test_tp_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
            _ = from_signals_both(tp_stop=-0.1)  # a negative take-profit must be rejected
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0),
(2, 2, 0, 20.0, 5.0, 0.0, 0),
(3, 3, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1), (2, 1, 1, 20.0, 4.0, 0.0, 0),
(3, 2, 0, 20.0, 5.0, 0.0, 1), (4, 2, 3, 20.0, 2.0, 0.0, 0),
(5, 3, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0),
(2, 2, 0, 20.0, 5.0, 0.0, 0),
(3, 3, 0, 20.0, 5.0, 0.0, 0),
(4, 4, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1), (2, 1, 1, 20.0, 4.25, 0.0, 0),
(3, 2, 0, 20.0, 5.0, 0.0, 1), (4, 2, 1, 20.0, 4.25, 0.0, 0),
(5, 3, 0, 20.0, 5.0, 0.0, 1), (6, 3, 1, 20.0, 4.0, 0.0, 0),
(7, 4, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([1., 2., 3., 4., 5.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0), (2, 1, 1, 100.0, 2.0, 0.0, 1),
(3, 2, 0, 100.0, 1.0, 0.0, 0), (4, 2, 3, 100.0, 4.0, 0.0, 1),
(5, 3, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 1),
(3, 3, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0), (2, 1, 1, 100.0, 1.75, 0.0, 1),
(3, 2, 0, 100.0, 1.0, 0.0, 0), (4, 2, 1, 100.0, 1.75, 0.0, 1),
(5, 3, 0, 100.0, 1.0, 0.0, 0), (6, 3, 1, 100.0, 2.0, 0.0, 1),
(7, 4, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 1),
(3, 3, 0, 100.0, 1.0, 0.0, 1),
(4, 4, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_stop_entry_price(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='val_price',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.625, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='price',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.75, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='fillprice',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 3.0250000000000004, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 3, 16.52892561983471, 1.5125000000000002, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='close',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.5, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
def test_stop_exit_price(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 4.25, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.5, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='stopmarket', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.825, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.25, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 1.125, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='close', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.6, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.7, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 0.9, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='price', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.9600000000000004, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.97, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 0.9900000000000001, 0.0, 1)
], dtype=order_dt)
)
def test_upon_stop_exit(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits, size=1,
sl_stop=0.1, upon_stop_exit=[['close', 'closereduce', 'reverse', 'reversereduce']],
accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 1),
(2, 1, 0, 1.0, 5.0, 0.0, 0), (3, 1, 1, 1.0, 4.0, 0.0, 1),
(4, 2, 0, 1.0, 5.0, 0.0, 0), (5, 2, 1, 2.0, 4.0, 0.0, 1),
(6, 3, 0, 1.0, 5.0, 0.0, 0), (7, 3, 1, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits, size=1,
sl_stop=0.1, upon_stop_exit=[['close', 'closereduce', 'reverse', 'reversereduce']]).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 1),
(2, 1, 0, 1.0, 5.0, 0.0, 0), (3, 1, 1, 1.0, 4.0, 0.0, 1),
(4, 2, 0, 1.0, 5.0, 0.0, 0), (5, 2, 1, 2.0, 4.0, 0.0, 1),
(6, 3, 0, 1.0, 5.0, 0.0, 0), (7, 3, 1, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
def test_upon_stop_update(self):
entries = pd.Series([True, True, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
sl_stop = pd.Series([0.4, np.nan, np.nan, np.nan, np.nan])
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits, accumulate=True, size=1.,
sl_stop=sl_stop, upon_stop_update=[['keep', 'override', 'overridenan']]).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 0), (2, 0, 2, 2.0, 3.0, 0.0, 1),
(3, 1, 0, 1.0, 5.0, 0.0, 0), (4, 1, 1, 1.0, 4.0, 0.0, 0), (5, 1, 2, 2.0, 3.0, 0.0, 1),
(6, 2, 0, 1.0, 5.0, 0.0, 0), (7, 2, 1, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
sl_stop = pd.Series([0.4, 0.4, np.nan, np.nan, np.nan])
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits, accumulate=True, size=1.,
sl_stop=sl_stop, upon_stop_update=[['keep', 'override']]).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 0), (2, 0, 2, 2.0, 3.0, 0.0, 1),
(3, 1, 0, 1.0, 5.0, 0.0, 0), (4, 1, 1, 1.0, 4.0, 0.0, 0), (5, 1, 3, 2.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
def test_adjust_sl_func(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
@njit
def adjust_sl_func_nb(c, dur):
return 0. if c.i - c.init_i >= dur else c.curr_stop, c.curr_trail
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=np.inf, adjust_sl_func_nb=adjust_sl_func_nb, adjust_sl_args=(2,)).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0), (1, 0, 2, 20.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_adjust_ts_func(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([10., 11., 12., 11., 10.], index=price.index)
@njit
def adjust_sl_func_nb(c, dur):
return 0. if c.i - c.curr_i >= dur else c.curr_stop, c.curr_trail
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=np.inf, adjust_sl_func_nb=adjust_sl_func_nb, adjust_sl_args=(2,)).order_records,
np.array([
(0, 0, 0, 10.0, 10.0, 0.0, 0), (1, 0, 4, 10.0, 10.0, 0.0, 1)
], dtype=order_dt)
)
def test_adjust_tp_func(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([1., 2., 3., 4., 5.], index=price.index)
@njit
def adjust_tp_func_nb(c, dur):
return 0. if c.i - c.init_i >= dur else c.curr_stop
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=np.inf, adjust_tp_func_nb=adjust_tp_func_nb, adjust_tp_args=(2,)).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 2, 100.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_max_orders(self):
_ = from_signals_both(close=price_wide)
_ = from_signals_both(close=price_wide, max_orders=6)
with pytest.raises(Exception):
_ = from_signals_both(close=price_wide, max_orders=5)
def test_max_logs(self):
_ = from_signals_both(close=price_wide, log=True)
_ = from_signals_both(close=price_wide, log=True, max_logs=6)
with pytest.raises(Exception):
_ = from_signals_both(close=price_wide, log=True, max_logs=5)
# ############# from_holding ############# #
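# from_holding should be equivalent to from_signals with a single entry at the first bar and no exits.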
class TestFromHolding:
def test_from_holding(self):
record_arrays_close(
vbt.Portfolio.from_holding(price).order_records,
vbt.Portfolio.from_signals(price, True, False, accumulate=False).order_records
)
# ############# from_random_signals ############# #
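# With a fixed seed, randomly generated entries/exits should reproduce a known signal pattern,
# and the wrapper columns should reflect the n/prob parameters.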
class TestFromRandomSignals:
def test_from_random_n(self):
result = vbt.Portfolio.from_random_signals(price, n=2, seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[True, False, True, False, False],
[False, True, False, False, True]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
price.vbt.wrapper.index
)
pd.testing.assert_index_equal(
result.wrapper.columns,
price.vbt.wrapper.columns
)
result = vbt.Portfolio.from_random_signals(price, n=[1, 2], seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[[False, True], [True, False], [False, True], [False, False], [False, False]],
[[False, False], [False, True], [False, False], [False, True], [True, False]]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
pd.DatetimeIndex([
'2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
], dtype='datetime64[ns]', freq=None)
)
pd.testing.assert_index_equal(
result.wrapper.columns,
pd.Int64Index([1, 2], dtype='int64', name='randnx_n')
)
def test_from_random_prob(self):
result = vbt.Portfolio.from_random_signals(price, prob=0.5, seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[True, False, False, False, False],
[False, False, False, False, True]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
price.vbt.wrapper.index
)
pd.testing.assert_index_equal(
result.wrapper.columns,
price.vbt.wrapper.columns
)
result = vbt.Portfolio.from_random_signals(price, prob=[0.25, 0.5], seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[[False, True], [False, False], [False, False], [False, False], [True, False]],
[[False, False], [False, True], [False, False], [False, False], [False, False]]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
pd.DatetimeIndex([
'2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
], dtype='datetime64[ns]', freq=None)
)
pd.testing.assert_index_equal(
result.wrapper.columns,
pd.MultiIndex.from_tuples(
[(0.25, 0.25), (0.5, 0.5)],
names=['rprobnx_entry_prob', 'rprobnx_exit_prob'])
)
# ############# from_order_func ############# #
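# Order functions shared by TestFromOrderFunc: buy `size` on even bars and sell it on odd bars.
# The flexible variants iterate over the columns of the current group; the log variants also
# write log records.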
@njit
def order_func_nb(c, size):
_size = nb.get_elem_nb(c, size)
return nb.order_nb(_size if c.i % 2 == 0 else -_size)
@njit
def log_order_func_nb(c, size):
_size = nb.get_elem_nb(c, size)
return nb.order_nb(_size if c.i % 2 == 0 else -_size, log=True)
@njit
def flex_order_func_nb(c, size):
if c.call_idx < c.group_len:
_size = nb.get_col_elem_nb(c, c.from_col + c.call_idx, size)
return c.from_col + c.call_idx, nb.order_nb(_size if c.i % 2 == 0 else -_size)
return -1, nb.order_nothing_nb()
@njit
def log_flex_order_func_nb(c, size):
if c.call_idx < c.group_len:
_size = nb.get_col_elem_nb(c, c.from_col + c.call_idx, size)
return c.from_col + c.call_idx, nb.order_nb(_size if c.i % 2 == 0 else -_size, log=True)
return -1, nb.order_nothing_nb()
class TestFromOrderFunc:
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_one_column(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price.tolist(), order_func, np.asarray(np.inf), row_wise=test_row_wise, flexible=test_flexible)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1),
(2, 0, 2, 133.33333333333334, 3.0, 0.0, 0), (3, 0, 3, 66.66666666666669, 4.0, 0.0, 1),
(4, 0, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pf = vbt.Portfolio.from_order_func(
price, order_func, np.asarray(np.inf), row_wise=test_row_wise, flexible=test_flexible)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1),
(2, 0, 2, 133.33333333333334, 3.0, 0.0, 0), (3, 0, 3, 66.66666666666669, 4.0, 0.0, 1),
(4, 0, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
@pytest.mark.parametrize("test_use_numba", [False, True])
def test_multiple_columns(self, test_row_wise, test_flexible, test_use_numba):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, vbt.Rep('size'), broadcast_named_args=dict(size=[0, 1, np.inf]),
row_wise=test_row_wise, flexible=test_flexible, use_numba=test_use_numba)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 2, 0, 100.0, 1.0, 0.0, 0),
(2, 1, 1, 1.0, 2.0, 0.0, 1), (3, 2, 1, 200.0, 2.0, 0.0, 1),
(4, 1, 2, 1.0, 3.0, 0.0, 0), (5, 2, 2, 133.33333333333334, 3.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 2, 3, 66.66666666666669, 4.0, 0.0, 1),
(8, 1, 4, 1.0, 5.0, 0.0, 0), (9, 2, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 1, 1.0, 2.0, 0.0, 1),
(2, 1, 2, 1.0, 3.0, 0.0, 0), (3, 1, 3, 1.0, 4.0, 0.0, 1),
(4, 1, 4, 1.0, 5.0, 0.0, 0), (5, 2, 0, 100.0, 1.0, 0.0, 0),
(6, 2, 1, 200.0, 2.0, 0.0, 1), (7, 2, 2, 133.33333333333334, 3.0, 0.0, 0),
(8, 2, 3, 66.66666666666669, 4.0, 0.0, 1), (9, 2, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_group_by(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
group_by=np.array([0, 0, 1]), row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 0, 1, 200.0, 2.0, 0.0, 1),
(4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 2, 1, 200.0, 2.0, 0.0, 1),
(6, 0, 2, 133.33333333333334, 3.0, 0.0, 0), (7, 1, 2, 133.33333333333334, 3.0, 0.0, 0),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 0, 3, 66.66666666666669, 4.0, 0.0, 1),
(10, 1, 3, 66.66666666666669, 4.0, 0.0, 1), (11, 2, 3, 66.66666666666669, 4.0, 0.0, 1),
(12, 0, 4, 53.33333333333335, 5.0, 0.0, 0), (13, 1, 4, 53.33333333333335, 5.0, 0.0, 0),
(14, 2, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 0, 1, 200.0, 2.0, 0.0, 1), (3, 1, 1, 200.0, 2.0, 0.0, 1),
(4, 0, 2, 133.33333333333334, 3.0, 0.0, 0), (5, 1, 2, 133.33333333333334, 3.0, 0.0, 0),
(6, 0, 3, 66.66666666666669, 4.0, 0.0, 1), (7, 1, 3, 66.66666666666669, 4.0, 0.0, 1),
(8, 0, 4, 53.33333333333335, 5.0, 0.0, 0), (9, 1, 4, 53.33333333333335, 5.0, 0.0, 0),
(10, 2, 0, 100.0, 1.0, 0.0, 0), (11, 2, 1, 200.0, 2.0, 0.0, 1),
(12, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (13, 2, 3, 66.66666666666669, 4.0, 0.0, 1),
(14, 2, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_cash_sharing(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
group_by=np.array([0, 0, 1]), cash_sharing=True, row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 2, 0, 100., 1., 0., 0),
(2, 0, 1, 200., 2., 0., 1), (3, 2, 1, 200., 2., 0., 1),
(4, 0, 2, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
(6, 0, 3, 66.66666667, 4., 0., 1), (7, 2, 3, 66.66666667, 4., 0., 1),
(8, 0, 4, 53.33333333, 5., 0., 0), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 2, 133.33333333, 3., 0., 0), (3, 0, 3, 66.66666667, 4., 0., 1),
(4, 0, 4, 53.33333333, 5., 0., 0), (5, 2, 0, 100., 1., 0., 0),
(6, 2, 1, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
(8, 2, 3, 66.66666667, 4., 0., 1), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_call_seq(self, test_row_wise):
pf = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.asarray(np.inf), group_by=np.array([0, 0, 1]),
cash_sharing=True, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 2, 0, 100., 1., 0., 0),
(2, 0, 1, 200., 2., 0., 1), (3, 2, 1, 200., 2., 0., 1),
(4, 0, 2, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
(6, 0, 3, 66.66666667, 4., 0., 1), (7, 2, 3, 66.66666667, 4., 0., 1),
(8, 0, 4, 53.33333333, 5., 0., 0), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 2, 133.33333333, 3., 0., 0), (3, 0, 3, 66.66666667, 4., 0., 1),
(4, 0, 4, 53.33333333, 5., 0., 0), (5, 2, 0, 100., 1., 0., 0),
(6, 2, 1, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
(8, 2, 3, 66.66666667, 4., 0., 1), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.asarray(np.inf), group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed', row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 2, 0, 100., 1., 0., 0),
(2, 1, 1, 200., 2., 0., 1), (3, 2, 1, 200., 2., 0., 1),
(4, 1, 2, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
(6, 1, 3, 66.66666667, 4., 0., 1), (7, 2, 3, 66.66666667, 4., 0., 1),
(8, 1, 4, 53.33333333, 5., 0., 0), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 2, 133.33333333, 3., 0., 0), (3, 1, 3, 66.66666667, 4., 0., 1),
(4, 1, 4, 53.33333333, 5., 0., 0), (5, 2, 0, 100., 1., 0., 0),
(6, 2, 1, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
(8, 2, 3, 66.66666667, 4., 0., 1), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.asarray(np.inf), group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 2, 0, 100., 1., 0., 0),
(2, 1, 1, 200., 2., 0., 1), (3, 2, 1, 200., 2., 0., 1),
(4, 1, 2, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
(6, 1, 3, 66.66666667, 4., 0., 1), (7, 2, 3, 66.66666667, 4., 0., 1),
(8, 1, 4, 53.33333333, 5., 0., 0), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 2, 133.33333333, 3., 0., 0), (3, 1, 3, 66.66666667, 4., 0., 1),
(4, 1, 4, 53.33333333, 5., 0., 0), (5, 2, 0, 100., 1., 0., 0),
(6, 2, 1, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
(8, 2, 3, 66.66666667, 4., 0., 1), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
with pytest.raises(Exception):
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.asarray(np.inf), group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='auto', row_wise=test_row_wise
)
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
@njit
def pre_segment_func_nb(c, target_hold_value):
order_size = np.copy(target_hold_value[c.i, c.from_col:c.to_col])
order_size_type = np.full(c.group_len, SizeType.TargetValue)
direction = np.full(c.group_len, Direction.Both)
order_value_out = np.empty(c.group_len, dtype=np.float_)
c.last_val_price[c.from_col:c.to_col] = c.close[c.i, c.from_col:c.to_col]
nb.sort_call_seq_nb(c, order_size, order_size_type, direction, order_value_out)
return order_size, order_size_type, direction
@njit
def pct_order_func_nb(c, order_size, order_size_type, direction):
col_i = c.call_seq_now[c.call_idx]
return nb.order_nb(
order_size[col_i],
c.close[c.i, col_i],
size_type=order_size_type[col_i],
direction=direction[col_i]
)
pf = vbt.Portfolio.from_order_func(
price_wide * 0 + 1, pct_order_func_nb, group_by=np.array([0, 0, 0]),
cash_sharing=True, pre_segment_func_nb=pre_segment_func_nb,
pre_segment_args=(target_hold_value.values,), row_wise=test_row_wise)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 1, 0],
[0, 2, 1],
[1, 0, 2],
[2, 1, 0]
])
)
pd.testing.assert_frame_equal(
pf.asset_value(group_by=False),
target_hold_value
)
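    # TargetValue sizing: each bar targets a fixed position value of 50; the pre-segment
    # callback sets the valuation price to the previous close so sizing uses lagged prices.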
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_target_value(self, test_row_wise, test_flexible):
@njit
def target_val_pre_segment_func_nb(c, val_price):
c.last_val_price[c.from_col:c.to_col] = val_price[c.i]
return ()
if test_flexible:
@njit
def target_val_order_func_nb(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(50., nb.get_col_elem_nb(c, col, c.close), size_type=SizeType.TargetValue)
return -1, nb.order_nothing_nb()
else:
@njit
def target_val_order_func_nb(c):
return nb.order_nb(50., nb.get_elem_nb(c, c.close), size_type=SizeType.TargetValue)
pf = vbt.Portfolio.from_order_func(
price.iloc[1:], target_val_order_func_nb, row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 1, 25.0, 3.0, 0.0, 0), (1, 0, 2, 8.333333333333332, 4.0, 0.0, 1),
(2, 0, 3, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 1, 25.0, 3.0, 0.0, 0), (1, 0, 2, 8.333333333333332, 4.0, 0.0, 1),
(2, 0, 3, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
pf = vbt.Portfolio.from_order_func(
price.iloc[1:], target_val_order_func_nb,
pre_segment_func_nb=target_val_pre_segment_func_nb,
pre_segment_args=(price.iloc[:-1].values,), row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 0, 1, 25.0, 3.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 4.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 0, 1, 25.0, 3.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 4.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
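    # TargetPercent sizing: each bar targets 50% of the current group value, again with and
    # without a lagged valuation price supplied via the pre-segment callback.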
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_target_percent(self, test_row_wise, test_flexible):
@njit
def target_pct_pre_segment_func_nb(c, val_price):
c.last_val_price[c.from_col:c.to_col] = val_price[c.i]
return ()
if test_flexible:
@njit
def target_pct_order_func_nb(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(0.5, nb.get_col_elem_nb(c, col, c.close), size_type=SizeType.TargetPercent)
return -1, nb.order_nothing_nb()
else:
@njit
def target_pct_order_func_nb(c):
return nb.order_nb(0.5, nb.get_elem_nb(c, c.close), size_type=SizeType.TargetPercent)
pf = vbt.Portfolio.from_order_func(
price.iloc[1:], target_pct_order_func_nb, row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 1, 25.0, 3.0, 0.0, 0), (1, 0, 2, 8.333333333333332, 4.0, 0.0, 1),
(2, 0, 3, 1.0416666666666679, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 1, 25.0, 3.0, 0.0, 0), (1, 0, 2, 8.333333333333332, 4.0, 0.0, 1),
(2, 0, 3, 1.0416666666666679, 5.0, 0.0, 1)
], dtype=order_dt)
)
pf = vbt.Portfolio.from_order_func(
price.iloc[1:], target_pct_order_func_nb,
pre_segment_func_nb=target_pct_pre_segment_func_nb,
pre_segment_args=(price.iloc[:-1].values,), row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 0, 1, 25.0, 3.0, 0.0, 1),
(2, 0, 3, 3.125, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 0, 1, 25.0, 3.0, 0.0, 1),
(2, 0, 3, 3.125, 5.0, 0.0, 1)
], dtype=order_dt)
)
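    # With update_value=False the group value seen before and after an order is identical;
    # with update_value=True it is recomputed after every fill.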
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_update_value(self, test_row_wise, test_flexible):
if test_flexible:
@njit
def order_func_nb(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(
np.inf if c.i % 2 == 0 else -np.inf,
nb.get_col_elem_nb(c, col, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01
)
return -1, nb.order_nothing_nb()
else:
@njit
def order_func_nb(c):
return nb.order_nb(
np.inf if c.i % 2 == 0 else -np.inf,
nb.get_elem_nb(c, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01
)
@njit
def post_order_func_nb(c, value_before, value_now):
value_before[c.i, c.col] = c.value_before
value_now[c.i, c.col] = c.value_now
value_before = np.empty_like(price.values[:, None])
value_now = np.empty_like(price.values[:, None])
_ = vbt.Portfolio.from_order_func(
price,
order_func_nb,
post_order_func_nb=post_order_func_nb,
post_order_args=(value_before, value_now),
row_wise=test_row_wise,
update_value=False,
flexible=test_flexible)
np.testing.assert_array_equal(
value_before,
value_now
)
_ = vbt.Portfolio.from_order_func(
price,
order_func_nb,
post_order_func_nb=post_order_func_nb,
post_order_args=(value_before, value_now),
row_wise=test_row_wise,
update_value=True,
flexible=test_flexible)
np.testing.assert_array_equal(
value_before,
np.array([
[100.0],
[97.04930889128518],
[185.46988117104038],
[82.47853456223025],
[104.65775576218027]
])
)
np.testing.assert_array_equal(
value_now,
np.array([
[98.01980198019803],
[187.36243097890815],
[83.30331990785257],
[105.72569204546781],
[73.54075125567473]
])
)
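    # Capture running simulation state (value, return, position records) in the pre-segment,
    # order and post-order callbacks and compare it against the final portfolio metrics.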
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_states(self, test_row_wise, test_flexible):
close = np.array([
[1, 1, 1],
[np.nan, 2, 2],
[3, np.nan, 3],
[4, 4, np.nan],
[5, 5, 5]
])
size = np.array([
[1, 1, 1],
[-1, -1, -1],
[1, 1, 1],
[-1, -1, -1],
[1, 1, 1]
])
value_arr1 = np.empty((size.shape[0], 2), dtype=np.float_)
value_arr2 = np.empty(size.shape, dtype=np.float_)
value_arr3 = np.empty(size.shape, dtype=np.float_)
return_arr1 = np.empty((size.shape[0], 2), dtype=np.float_)
return_arr2 = np.empty(size.shape, dtype=np.float_)
return_arr3 = np.empty(size.shape, dtype=np.float_)
pos_record_arr1 = np.empty(size.shape, dtype=trade_dt)
pos_record_arr2 = np.empty(size.shape, dtype=trade_dt)
pos_record_arr3 = np.empty(size.shape, dtype=trade_dt)
def pre_segment_func_nb(c):
value_arr1[c.i, c.group] = c.last_value[c.group]
return_arr1[c.i, c.group] = c.last_return[c.group]
for col in range(c.from_col, c.to_col):
pos_record_arr1[c.i, col] = c.last_pos_record[col]
if c.i > 0:
c.last_val_price[c.from_col:c.to_col] = c.last_val_price[c.from_col:c.to_col] + 0.5
return ()
if test_flexible:
def order_func_nb(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
value_arr2[c.i, col] = c.last_value[c.group]
return_arr2[c.i, col] = c.last_return[c.group]
pos_record_arr2[c.i, col] = c.last_pos_record[col]
return col, nb.order_nb(size[c.i, col], fixed_fees=1.)
return -1, nb.order_nothing_nb()
else:
def order_func_nb(c):
value_arr2[c.i, c.col] = c.value_now
return_arr2[c.i, c.col] = c.return_now
pos_record_arr2[c.i, c.col] = c.pos_record_now
return nb.order_nb(size[c.i, c.col], fixed_fees=1.)
def post_order_func_nb(c):
value_arr3[c.i, c.col] = c.value_now
return_arr3[c.i, c.col] = c.return_now
pos_record_arr3[c.i, c.col] = c.pos_record_now
_ = vbt.Portfolio.from_order_func(
close,
order_func_nb,
pre_segment_func_nb=pre_segment_func_nb,
post_order_func_nb=post_order_func_nb,
use_numba=False,
row_wise=test_row_wise,
update_value=True,
ffill_val_price=True,
group_by=[0, 0, 1],
cash_sharing=True,
flexible=test_flexible
)
np.testing.assert_array_equal(
value_arr1,
np.array([
[100.0, 100.0],
[98.0, 99.0],
[98.5, 99.0],
[99.0, 98.0],
[99.0, 98.5]
])
)
np.testing.assert_array_equal(
value_arr2,
np.array([
[100.0, 99.0, 100.0],
[99.0, 99.0, 99.5],
[99.0, 99.0, 99.0],
[100.0, 100.0, 98.5],
[99.0, 98.5, 99.0]
])
)
np.testing.assert_array_equal(
value_arr3,
np.array([
[99.0, 98.0, 99.0],
[99.0, 98.5, 99.0],
[99.0, 99.0, 98.0],
[100.0, 99.0, 98.5],
[98.5, 97.0, 99.0]
])
)
np.testing.assert_array_equal(
return_arr1,
np.array([
[np.nan, np.nan],
[-0.02, -0.01],
[0.00510204081632653, 0.0],
[0.005076142131979695, -0.010101010101010102],
[0.0, 0.00510204081632653]
])
)
np.testing.assert_array_equal(
return_arr2,
np.array([
[0.0, -0.01, 0.0],
[-0.01, -0.01, -0.005],
[0.01020408163265306, 0.01020408163265306, 0.0],
[0.015228426395939087, 0.015228426395939087, -0.005050505050505051],
[0.0, -0.005050505050505051, 0.01020408163265306]
])
)
np.testing.assert_array_equal(
return_arr3,
np.array([
[-0.01, -0.02, -0.01],
[-0.01, -0.015, -0.01],
[0.01020408163265306, 0.01020408163265306, -0.010101010101010102],
[0.015228426395939087, 0.005076142131979695, -0.005050505050505051],
[-0.005050505050505051, -0.020202020202020204, 0.01020408163265306]
])
)
record_arrays_close(
pos_record_arr1.flatten()[3:],
np.array([
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 2, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 2, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 0, 2.0, 0, 2.0, 2.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -1.0, -0.3333333333333333, 0, 0, 1),
(0, 0, 2.0, 0, 2.0, 2.0, -1, 4.0, 1.0, 1.0, 0.25, 0, 0, 0),
(1, 1, 1.0, 3, 4.0, 1.0, -1, np.nan, 0.0, -1.0, -0.25, 1, 0, 1),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -0.5, -0.16666666666666666, 0, 0, 1)
], dtype=trade_dt)
)
record_arrays_close(
pos_record_arr2.flatten()[3:],
np.array([
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 2, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 2, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 0, 2.0, 0, 2.0, 2.0, -1, np.nan, 0.0, 1.0, 0.25, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -0.5, -0.16666666666666666, 0, 0, 1),
(0, 0, 2.0, 0, 2.0, 2.0, -1, 4.0, 1.0, 1.5, 0.375, 0, 0, 0),
(1, 1, 1.0, 3, 4.0, 1.0, -1, np.nan, 0.0, -1.5, -0.375, 1, 0, 1),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 1)
], dtype=trade_dt)
)
record_arrays_close(
pos_record_arr3.flatten(),
np.array([
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 2, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 2, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 0, 2.0, 0, 2.0, 2.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -1.0, -0.3333333333333333, 0, 0, 1),
(0, 0, 2.0, 0, 2.0, 2.0, -1, 4.0, 1.0, 1.0, 0.25, 0, 0, 0),
(1, 1, 1.0, 3, 4.0, 1.0, -1, np.nan, 0.0, -1.0, -0.25, 1, 0, 1),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -0.5, -0.16666666666666666, 0, 0, 1),
(0, 0, 3.0, 0, 3.0, 3.0, -1, 4.0, 1.0, 1.0, 0.1111111111111111, 0, 0, 0),
(1, 1, 1.0, 3, 4.0, 1.0, 4, 5.0, 1.0, -3.0, -0.75, 1, 1, 1),
(1, 2, 2.0, 2, 4.0, 2.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 1)
], dtype=trade_dt)
)
cash_arr = np.empty((size.shape[0], 2), dtype=np.float_)
position_arr = np.empty(size.shape, dtype=np.float_)
val_price_arr = np.empty(size.shape, dtype=np.float_)
value_arr = np.empty((size.shape[0], 2), dtype=np.float_)
return_arr = np.empty((size.shape[0], 2), dtype=np.float_)
sim_order_cash_arr = np.empty(size.shape, dtype=np.float_)
sim_order_value_arr = np.empty(size.shape, dtype=np.float_)
sim_order_return_arr = np.empty(size.shape, dtype=np.float_)
def post_order_func_nb(c):
sim_order_cash_arr[c.i, c.col] = c.cash_now
sim_order_value_arr[c.i, c.col] = c.value_now
sim_order_return_arr[c.i, c.col] = c.value_now
if c.i == 0 and c.call_idx == 0:
sim_order_return_arr[c.i, c.col] -= c.init_cash[c.group]
sim_order_return_arr[c.i, c.col] /= c.init_cash[c.group]
else:
if c.call_idx == 0:
prev_i = c.i - 1
prev_col = c.to_col - 1
else:
prev_i = c.i
prev_col = c.from_col + c.call_idx - 1
sim_order_return_arr[c.i, c.col] -= sim_order_value_arr[prev_i, prev_col]
sim_order_return_arr[c.i, c.col] /= sim_order_value_arr[prev_i, prev_col]
def post_segment_func_nb(c):
cash_arr[c.i, c.group] = c.last_cash[c.group]
for col in range(c.from_col, c.to_col):
position_arr[c.i, col] = c.last_position[col]
val_price_arr[c.i, col] = c.last_val_price[col]
value_arr[c.i, c.group] = c.last_value[c.group]
return_arr[c.i, c.group] = c.last_return[c.group]
pf = vbt.Portfolio.from_order_func(
close,
order_func_nb,
post_order_func_nb=post_order_func_nb,
post_segment_func_nb=post_segment_func_nb,
use_numba=False,
row_wise=test_row_wise,
update_value=True,
ffill_val_price=True,
group_by=[0, 0, 1],
cash_sharing=True,
flexible=test_flexible
)
np.testing.assert_array_equal(
cash_arr,
pf.cash().values
)
np.testing.assert_array_equal(
position_arr,
pf.assets().values
)
np.testing.assert_array_equal(
val_price_arr,
pf.get_filled_close().values
)
np.testing.assert_array_equal(
value_arr,
pf.value().values
)
np.testing.assert_array_equal(
return_arr,
pf.returns().values
)
if test_flexible:
with pytest.raises(Exception):
pf.cash(in_sim_order=True, group_by=False)
with pytest.raises(Exception):
pf.value(in_sim_order=True, group_by=False)
with pytest.raises(Exception):
pf.returns(in_sim_order=True, group_by=False)
else:
np.testing.assert_array_equal(
sim_order_cash_arr,
pf.cash(in_sim_order=True, group_by=False).values
)
np.testing.assert_array_equal(
sim_order_value_arr,
pf.value(in_sim_order=True, group_by=False).values
)
np.testing.assert_array_equal(
sim_order_return_arr,
pf.returns(in_sim_order=True, group_by=False).values
)
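    # The post-simulation callback receives the final simulation context; verify its arrays
    # (close, group_lens, order/log records, last_* state) after the run.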
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_post_sim_ctx(self, test_row_wise, test_flexible):
if test_flexible:
def order_func(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(
1.,
nb.get_col_elem_nb(c, col, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01,
log=True
)
return -1, nb.order_nothing_nb()
else:
def order_func(c):
return nb.order_nb(
1.,
nb.get_elem_nb(c, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01,
log=True
)
def post_sim_func(c, lst):
lst.append(deepcopy(c))
lst = []
_ = vbt.Portfolio.from_order_func(
price_wide,
order_func,
post_sim_func_nb=post_sim_func,
post_sim_args=(lst,),
row_wise=test_row_wise,
update_value=True,
max_logs=price_wide.shape[0] * price_wide.shape[1],
use_numba=False,
group_by=[0, 0, 1],
cash_sharing=True,
flexible=test_flexible
)
c = lst[-1]
assert c.target_shape == price_wide.shape
np.testing.assert_array_equal(
c.close,
price_wide.values
)
np.testing.assert_array_equal(
c.group_lens,
np.array([2, 1])
)
np.testing.assert_array_equal(
c.init_cash,
np.array([100., 100.])
)
assert c.cash_sharing
if test_flexible:
assert c.call_seq is None
else:
np.testing.assert_array_equal(
c.call_seq,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
np.testing.assert_array_equal(
c.segment_mask,
np.array([
[True, True],
[True, True],
[True, True],
[True, True],
[True, True]
])
)
assert c.ffill_val_price
assert c.update_value
if test_row_wise:
record_arrays_close(
c.order_records,
np.array([
(0, 0, 0, 1.0, 1.01, 1.0101, 0), (1, 1, 0, 1.0, 1.01, 1.0101, 0),
(2, 2, 0, 1.0, 1.01, 1.0101, 0), (3, 0, 1, 1.0, 2.02, 1.0202, 0),
(4, 1, 1, 1.0, 2.02, 1.0202, 0), (5, 2, 1, 1.0, 2.02, 1.0202, 0),
(6, 0, 2, 1.0, 3.0300000000000002, 1.0303, 0), (7, 1, 2, 1.0, 3.0300000000000002, 1.0303, 0),
(8, 2, 2, 1.0, 3.0300000000000002, 1.0303, 0), (9, 0, 3, 1.0, 4.04, 1.0404, 0),
(10, 1, 3, 1.0, 4.04, 1.0404, 0), (11, 2, 3, 1.0, 4.04, 1.0404, 0),
(12, 0, 4, 1.0, 5.05, 1.0505, 0), (13, 1, 4, 1.0, 5.05, 1.0505, 0),
(14, 2, 4, 1.0, 5.05, 1.0505, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
c.order_records,
np.array([
(0, 0, 0, 1.0, 1.01, 1.0101, 0), (1, 1, 0, 1.0, 1.01, 1.0101, 0),
(2, 0, 1, 1.0, 2.02, 1.0202, 0), (3, 1, 1, 1.0, 2.02, 1.0202, 0),
(4, 0, 2, 1.0, 3.0300000000000002, 1.0303, 0), (5, 1, 2, 1.0, 3.0300000000000002, 1.0303, 0),
(6, 0, 3, 1.0, 4.04, 1.0404, 0), (7, 1, 3, 1.0, 4.04, 1.0404, 0),
(8, 0, 4, 1.0, 5.05, 1.0505, 0), (9, 1, 4, 1.0, 5.05, 1.0505, 0),
(10, 2, 0, 1.0, 1.01, 1.0101, 0), (11, 2, 1, 1.0, 2.02, 1.0202, 0),
(12, 2, 2, 1.0, 3.0300000000000002, 1.0303, 0), (13, 2, 3, 1.0, 4.04, 1.0404, 0),
(14, 2, 4, 1.0, 5.05, 1.0505, 0)
], dtype=order_dt)
)
if test_row_wise:
record_arrays_close(
c.log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2, 0.01, 1.0,
0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799, 1.0, 0.0, 97.9799,
1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 0),
(1, 0, 1, 0, 97.9799, 0.0, 0.0, 97.9799, np.nan, 98.9899, 1.0, 1.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 95.9598, 1.0,
0.0, 95.9598, 1.01, 97.97980000000001, 1.0, 1.01, 1.0101, 0, 0, -1, 1),
(2, 1, 2, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2, 0.01,
1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799, 1.0, 0.0,
97.9799, 1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 2),
(3, 0, 0, 1, 95.9598, 1.0, 0.0, 95.9598, 1.0, 97.9598, 1.0, 2.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 92.9196,
2.0, 0.0, 92.9196, 2.02, 97.95960000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 3),
(4, 0, 1, 1, 92.9196, 1.0, 0.0, 92.9196, 1.0, 97.95960000000001, 1.0, 2.0,
0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 89.8794,
2.0, 0.0, 89.8794, 2.02, 97.95940000000002, 1.0, 2.02, 1.0202, 0, 0, -1, 4),
(5, 1, 2, 1, 97.9799, 1.0, 0.0, 97.9799, 1.0, 98.9799, 1.0, 2.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 94.9397, 2.0,
0.0, 94.9397, 2.02, 98.97970000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 5),
(6, 0, 0, 2, 89.8794, 2.0, 0.0, 89.8794, 2.0, 97.8794, 1.0, 3.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.8191, 3.0,
0.0, 85.8191, 3.0300000000000002, 98.90910000000001, 1.0,
3.0300000000000002, 1.0303, 0, 0, -1, 6),
(7, 0, 1, 2, 85.8191, 2.0, 0.0, 85.8191, 2.0, 98.90910000000001,
1.0, 3.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True,
81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0300000000000002,
99.93880000000001, 1.0, 3.0300000000000002, 1.0303, 0, 0, -1, 7),
(8, 1, 2, 2, 94.9397, 2.0, 0.0, 94.9397, 2.0, 98.9397, 1.0, 3.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 90.8794,
3.0, 0.0, 90.8794, 3.0300000000000002, 99.96940000000001, 1.0,
3.0300000000000002, 1.0303, 0, 0, -1, 8),
(9, 0, 0, 3, 81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0, 99.75880000000001,
1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True,
76.67840000000001, 4.0, 0.0, 76.67840000000001, 4.04, 101.83840000000001,
1.0, 4.04, 1.0404, 0, 0, -1, 9),
(10, 0, 1, 3, 76.67840000000001, 3.0, 0.0, 76.67840000000001, 3.0,
101.83840000000001, 1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 71.59800000000001, 4.0, 0.0, 71.59800000000001,
4.04, 103.918, 1.0, 4.04, 1.0404, 0, 0, -1, 10),
(11, 1, 2, 3, 90.8794, 3.0, 0.0, 90.8794, 3.0, 99.8794, 1.0, 4.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.799, 4.0,
0.0, 85.799, 4.04, 101.959, 1.0, 4.04, 1.0404, 0, 0, -1, 11),
(12, 0, 0, 4, 71.59800000000001, 4.0, 0.0, 71.59800000000001, 4.0,
103.59800000000001, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 65.49750000000002, 5.0, 0.0, 65.49750000000002,
5.05, 106.74750000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 12),
(13, 0, 1, 4, 65.49750000000002, 4.0, 0.0, 65.49750000000002, 4.0,
106.74750000000002, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 59.39700000000002, 5.0, 0.0, 59.39700000000002,
5.05, 109.89700000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 13),
(14, 1, 2, 4, 85.799, 4.0, 0.0, 85.799, 4.0, 101.799, 1.0, 5.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 79.69850000000001,
5.0, 0.0, 79.69850000000001, 5.05, 104.94850000000001, 1.0, 5.05, 1.0505, 0, 0, -1, 14)
], dtype=log_dt)
)
else:
record_arrays_close(
c.log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799,
1.0, 0.0, 97.9799, 1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 0),
(1, 0, 1, 0, 97.9799, 0.0, 0.0, 97.9799, np.nan, 98.9899, 1.0, 1.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 95.9598,
1.0, 0.0, 95.9598, 1.01, 97.97980000000001, 1.0, 1.01, 1.0101, 0, 0, -1, 1),
(2, 0, 0, 1, 95.9598, 1.0, 0.0, 95.9598, 1.0, 97.9598, 1.0, 2.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 92.9196, 2.0,
0.0, 92.9196, 2.02, 97.95960000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 2),
(3, 0, 1, 1, 92.9196, 1.0, 0.0, 92.9196, 1.0, 97.95960000000001, 1.0,
2.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 89.8794,
2.0, 0.0, 89.8794, 2.02, 97.95940000000002, 1.0, 2.02, 1.0202, 0, 0, -1, 3),
(4, 0, 0, 2, 89.8794, 2.0, 0.0, 89.8794, 2.0, 97.8794, 1.0, 3.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.8191,
3.0, 0.0, 85.8191, 3.0300000000000002, 98.90910000000001, 1.0,
3.0300000000000002, 1.0303, 0, 0, -1, 4),
(5, 0, 1, 2, 85.8191, 2.0, 0.0, 85.8191, 2.0, 98.90910000000001, 1.0,
3.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True,
81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0300000000000002,
99.93880000000001, 1.0, 3.0300000000000002, 1.0303, 0, 0, -1, 5),
(6, 0, 0, 3, 81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0,
99.75880000000001, 1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 76.67840000000001, 4.0, 0.0, 76.67840000000001,
4.04, 101.83840000000001, 1.0, 4.04, 1.0404, 0, 0, -1, 6),
(7, 0, 1, 3, 76.67840000000001, 3.0, 0.0, 76.67840000000001, 3.0,
101.83840000000001, 1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 71.59800000000001, 4.0, 0.0, 71.59800000000001,
4.04, 103.918, 1.0, 4.04, 1.0404, 0, 0, -1, 7),
(8, 0, 0, 4, 71.59800000000001, 4.0, 0.0, 71.59800000000001, 4.0,
103.59800000000001, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 65.49750000000002, 5.0, 0.0, 65.49750000000002,
5.05, 106.74750000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 8),
(9, 0, 1, 4, 65.49750000000002, 4.0, 0.0, 65.49750000000002, 4.0,
106.74750000000002, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 59.39700000000002, 5.0, 0.0, 59.39700000000002,
5.05, 109.89700000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 9),
(10, 1, 2, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2, 0.01,
1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799, 1.0, 0.0,
97.9799, 1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 10),
(11, 1, 2, 1, 97.9799, 1.0, 0.0, 97.9799, 1.0, 98.9799, 1.0, 2.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 94.9397,
2.0, 0.0, 94.9397, 2.02, 98.97970000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 11),
(12, 1, 2, 2, 94.9397, 2.0, 0.0, 94.9397, 2.0, 98.9397, 1.0, 3.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 90.8794,
3.0, 0.0, 90.8794, 3.0300000000000002, 99.96940000000001, 1.0,
3.0300000000000002, 1.0303, 0, 0, -1, 12),
(13, 1, 2, 3, 90.8794, 3.0, 0.0, 90.8794, 3.0, 99.8794, 1.0, 4.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.799, 4.0,
0.0, 85.799, 4.04, 101.959, 1.0, 4.04, 1.0404, 0, 0, -1, 13),
(14, 1, 2, 4, 85.799, 4.0, 0.0, 85.799, 4.0, 101.799, 1.0, 5.0, 0, 2, 0.01,
1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 79.69850000000001,
5.0, 0.0, 79.69850000000001, 5.05, 104.94850000000001, 1.0, 5.05, 1.0505, 0, 0, -1, 14)
], dtype=log_dt)
)
np.testing.assert_array_equal(
c.last_cash,
np.array([59.39700000000002, 79.69850000000001])
)
np.testing.assert_array_equal(
c.last_position,
np.array([5., 5., 5.])
)
np.testing.assert_array_equal(
c.last_val_price,
np.array([5.0, 5.0, 5.0])
)
np.testing.assert_array_equal(
c.last_value,
np.array([109.39700000000002, 104.69850000000001])
)
np.testing.assert_array_equal(
c.second_last_value,
np.array([103.59800000000001, 101.799])
)
np.testing.assert_array_equal(
c.last_return,
np.array([0.05597598409235705, 0.028482598060884715])
)
np.testing.assert_array_equal(
c.last_debt,
np.array([0., 0., 0.])
)
np.testing.assert_array_equal(
c.last_free_cash,
np.array([59.39700000000002, 79.69850000000001])
)
if test_row_wise:
np.testing.assert_array_equal(
c.last_oidx,
np.array([12, 13, 14])
)
np.testing.assert_array_equal(
c.last_lidx,
np.array([12, 13, 14])
)
else:
np.testing.assert_array_equal(
c.last_oidx,
np.array([8, 9, 14])
)
np.testing.assert_array_equal(
c.last_lidx,
np.array([8, 9, 14])
)
assert c.order_records[c.last_oidx[0]]['col'] == 0
assert c.order_records[c.last_oidx[1]]['col'] == 1
assert c.order_records[c.last_oidx[2]]['col'] == 2
assert c.log_records[c.last_lidx[0]]['col'] == 0
assert c.log_records[c.last_lidx[1]]['col'] == 1
assert c.log_records[c.last_lidx[2]]['col'] == 2
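    # Track debt and free cash while shorting; the free cash recorded in the callback must
    # match pf.cash(free=True) for per-column, reversed-price and grouped/cash-shared setups.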
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_free_cash(self, test_row_wise, test_flexible):
if test_flexible:
def order_func(c, size):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(
size[c.i, col],
nb.get_col_elem_nb(c, col, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01
)
return -1, nb.order_nothing_nb()
else:
def order_func(c, size):
return nb.order_nb(
size[c.i, c.col],
nb.get_elem_nb(c, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01
)
def post_order_func(c, debt, free_cash):
debt[c.i, c.col] = c.debt_now
if c.cash_sharing:
free_cash[c.i, c.group] = c.free_cash_now
else:
free_cash[c.i, c.col] = c.free_cash_now
size = np.array([
[5, -5, 5],
[5, -5, -10],
[-5, 5, 10],
[-5, 5, -10],
[-5, 5, 10]
])
debt = np.empty(price_wide.shape, dtype=np.float_)
free_cash = np.empty(price_wide.shape, dtype=np.float_)
pf = vbt.Portfolio.from_order_func(
price_wide,
order_func, size,
post_order_func_nb=post_order_func,
post_order_args=(debt, free_cash,),
row_wise=test_row_wise,
use_numba=False,
flexible=test_flexible
)
np.testing.assert_array_equal(
debt,
np.array([
[0.0, 4.95, 0.0],
[0.0, 14.850000000000001, 9.9],
[0.0, 7.425000000000001, 0.0],
[0.0, 0.0, 19.8],
[24.75, 0.0, 0.0]
])
)
np.testing.assert_array_equal(
free_cash,
np.array([
[93.8995, 94.0005, 93.8995],
[82.6985, 83.00150000000001, 92.70150000000001],
[96.39999999999999, 81.55000000000001, 80.8985],
[115.002, 74.998, 79.5025],
[89.0045, 48.49550000000001, 67.0975]
])
)
np.testing.assert_almost_equal(
free_cash,
pf.cash(free=True).values
)
debt = np.empty(price_wide.shape, dtype=np.float_)
free_cash = np.empty(price_wide.shape, dtype=np.float_)
pf = vbt.Portfolio.from_order_func(
price_wide.vbt.wrapper.wrap(price_wide.values[::-1]),
order_func, size,
post_order_func_nb=post_order_func,
post_order_args=(debt, free_cash,),
row_wise=test_row_wise,
use_numba=False,
flexible=test_flexible
)
np.testing.assert_array_equal(
debt,
np.array([
[0.0, 24.75, 0.0],
[0.0, 44.55, 19.8],
[0.0, 22.275, 0.0],
[0.0, 0.0, 9.9],
[4.95, 0.0, 0.0]
])
)
np.testing.assert_array_equal(
free_cash,
np.array([
[73.4975, 74.0025, 73.4975],
[52.0955, 53.00449999999999, 72.1015],
[65.797, 81.25299999999999, 80.0985],
[74.598, 114.60199999999998, 78.9005],
[68.5985, 108.50149999999998, 87.49949999999998]
])
)
np.testing.assert_almost_equal(
free_cash,
pf.cash(free=True).values
)
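# With group_by=[0, 0, 1] and cash_sharing=True, free cash is tracked per group, which
# is why free_cash is allocated with only two columns here (the callback writes to
# c.group instead of c.col when cash sharing is enabled).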
debt = np.empty(price_wide.shape, dtype=np.float_)
free_cash = np.empty((price_wide.shape[0], 2), dtype=np.float_)
pf = vbt.Portfolio.from_order_func(
price_wide,
order_func, size,
post_order_func_nb=post_order_func,
post_order_args=(debt, free_cash,),
row_wise=test_row_wise,
use_numba=False,
group_by=[0, 0, 1],
cash_sharing=True,
flexible=test_flexible
)
np.testing.assert_array_equal(
debt,
np.array([
[0.0, 4.95, 0.0],
[0.0, 14.850000000000001, 9.9],
[0.0, 7.425000000000001, 0.0],
[0.0, 0.0, 19.8],
[24.75, 0.0, 0.0]
])
)
np.testing.assert_array_equal(
free_cash,
np.array([
[87.9, 93.8995],
[65.70000000000002, 92.70150000000001],
[77.95000000000002, 80.8985],
[90.00000000000001, 79.5025],
[37.500000000000014, 67.0975]
])
)
np.testing.assert_almost_equal(
free_cash,
pf.cash(free=True).values
)
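# test_init_cash: a small per-column init_cash caps the fill size (only 1 share is
# affordable with init_cash=1 at the first price), while InitCashMode.Auto and
# InitCashMode.AutoAlign should reproduce the same orders as an unlimited-cash run.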
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_init_cash(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(10.), row_wise=test_row_wise,
init_cash=[1., 10., np.inf], flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 10.0, 1.0, 0.0, 0),
(2, 2, 0, 10.0, 1.0, 0.0, 0), (3, 0, 1, 10.0, 2.0, 0.0, 1),
(4, 1, 1, 10.0, 2.0, 0.0, 1), (5, 2, 1, 10.0, 2.0, 0.0, 1),
(6, 0, 2, 6.666666666666667, 3.0, 0.0, 0), (7, 1, 2, 6.666666666666667, 3.0, 0.0, 0),
(8, 2, 2, 10.0, 3.0, 0.0, 0), (9, 0, 3, 10.0, 4.0, 0.0, 1),
(10, 1, 3, 10.0, 4.0, 0.0, 1), (11, 2, 3, 10.0, 4.0, 0.0, 1),
(12, 0, 4, 8.0, 5.0, 0.0, 0), (13, 1, 4, 8.0, 5.0, 0.0, 0),
(14, 2, 4, 10.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 10.0, 2.0, 0.0, 1),
(2, 0, 2, 6.666666666666667, 3.0, 0.0, 0), (3, 0, 3, 10.0, 4.0, 0.0, 1),
(4, 0, 4, 8.0, 5.0, 0.0, 0), (5, 1, 0, 10.0, 1.0, 0.0, 0),
(6, 1, 1, 10.0, 2.0, 0.0, 1), (7, 1, 2, 6.666666666666667, 3.0, 0.0, 0),
(8, 1, 3, 10.0, 4.0, 0.0, 1), (9, 1, 4, 8.0, 5.0, 0.0, 0),
(10, 2, 0, 10.0, 1.0, 0.0, 0), (11, 2, 1, 10.0, 2.0, 0.0, 1),
(12, 2, 2, 10.0, 3.0, 0.0, 0), (13, 2, 3, 10.0, 4.0, 0.0, 1),
(14, 2, 4, 10.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
assert type(pf._init_cash) == np.ndarray
base_pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(10.), row_wise=test_row_wise,
init_cash=np.inf, flexible=test_flexible)
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(10.), row_wise=test_row_wise,
init_cash=InitCashMode.Auto, flexible=test_flexible)
record_arrays_close(
pf.order_records,
base_pf.orders.values
)
assert pf._init_cash == InitCashMode.Auto
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(10.), row_wise=test_row_wise,
init_cash=InitCashMode.AutoAlign, flexible=test_flexible)
record_arrays_close(
pf.order_records,
base_pf.orders.values
)
assert pf._init_cash == InitCashMode.AutoAlign
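# test_func_calls: every callback increments a shared counter and appends its call
# number to its own list, so the asserted lists encode the exact call order:
# pre_sim -> per group: pre_group -> per row: pre_segment -> (order -> post_order)*
# -> post_segment -> ... -> post_group -> post_sim. sub_arg is a RepEval template
# that resolves to 15 (5 rows x 3 columns) and is verified inside each callback.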
def test_func_calls(self):
@njit
def pre_sim_func_nb(c, call_i, pre_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_sim_lst.append(call_i[0])
return (call_i,)
@njit
def post_sim_func_nb(c, call_i, post_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_sim_lst.append(call_i[0])
return (call_i,)
@njit
def pre_group_func_nb(c, call_i, pre_group_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_group_lst.append(call_i[0])
return (call_i,)
@njit
def post_group_func_nb(c, call_i, post_group_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_group_lst.append(call_i[0])
return (call_i,)
@njit
def pre_segment_func_nb(c, call_i, pre_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_segment_lst.append(call_i[0])
return (call_i,)
@njit
def post_segment_func_nb(c, call_i, post_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_segment_lst.append(call_i[0])
return (call_i,)
@njit
def order_func_nb(c, call_i, order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
order_lst.append(call_i[0])
return NoOrder
@njit
def post_order_func_nb(c, call_i, post_order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_order_lst.append(call_i[0])
sub_arg = vbt.RepEval('np.prod([target_shape[0], target_shape[1]])')
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
row_wise=False, template_mapping=dict(np=np)
)
assert call_i[0] == 56
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [56]
assert list(pre_group_lst) == [2, 34]
assert list(post_group_lst) == [33, 55]
assert list(pre_segment_lst) == [3, 9, 15, 21, 27, 35, 39, 43, 47, 51]
assert list(post_segment_lst) == [8, 14, 20, 26, 32, 38, 42, 46, 50, 54]
assert list(order_lst) == [4, 6, 10, 12, 16, 18, 22, 24, 28, 30, 36, 40, 44, 48, 52]
assert list(post_order_lst) == [5, 7, 11, 13, 17, 19, 23, 25, 29, 31, 37, 41, 45, 49, 53]
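# With segment_mask only the active segments place orders; call_pre_segment and
# call_post_segment control whether the segment callbacks still run for inactive
# segments, which is why the totals shrink further once both flags are False below.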
segment_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=True, call_post_segment=True,
row_wise=False, template_mapping=dict(np=np)
)
assert call_i[0] == 38
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [38]
assert list(pre_group_lst) == [2, 22]
assert list(post_group_lst) == [21, 37]
assert list(pre_segment_lst) == [3, 5, 7, 13, 19, 23, 25, 29, 31, 35]
assert list(post_segment_lst) == [4, 6, 12, 18, 20, 24, 28, 30, 34, 36]
assert list(order_lst) == [8, 10, 14, 16, 26, 32]
assert list(post_order_lst) == [9, 11, 15, 17, 27, 33]
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=False, call_post_segment=False,
row_wise=False, template_mapping=dict(np=np)
)
assert call_i[0] == 26
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [26]
assert list(pre_group_lst) == [2, 16]
assert list(post_group_lst) == [15, 25]
assert list(pre_segment_lst) == [3, 9, 17, 21]
assert list(post_segment_lst) == [8, 14, 20, 24]
assert list(order_lst) == [4, 6, 10, 12, 18, 22]
assert list(post_order_lst) == [5, 7, 11, 13, 19, 23]
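# test_func_calls_flexible: in flexible mode the order function is called until it
# returns column -1, i.e. group_len + 1 times per active segment, which explains the
# higher call totals compared to test_func_calls above (66 vs 56).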
def test_func_calls_flexible(self):
@njit
def pre_sim_func_nb(c, call_i, pre_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_sim_lst.append(call_i[0])
return (call_i,)
@njit
def post_sim_func_nb(c, call_i, post_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_sim_lst.append(call_i[0])
return (call_i,)
@njit
def pre_group_func_nb(c, call_i, pre_group_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_group_lst.append(call_i[0])
return (call_i,)
@njit
def post_group_func_nb(c, call_i, post_group_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_group_lst.append(call_i[0])
return (call_i,)
@njit
def pre_segment_func_nb(c, call_i, pre_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_segment_lst.append(call_i[0])
return (call_i,)
@njit
def post_segment_func_nb(c, call_i, post_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_segment_lst.append(call_i[0])
return (call_i,)
@njit
def flex_order_func_nb(c, call_i, order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
order_lst.append(call_i[0])
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, NoOrder
return -1, NoOrder
@njit
def post_order_func_nb(c, call_i, post_order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_order_lst.append(call_i[0])
sub_arg = vbt.RepEval('np.prod([target_shape[0], target_shape[1]])')
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
row_wise=False, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 66
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [66]
assert list(pre_group_lst) == [2, 39]
assert list(post_group_lst) == [38, 65]
assert list(pre_segment_lst) == [3, 10, 17, 24, 31, 40, 45, 50, 55, 60]
assert list(post_segment_lst) == [9, 16, 23, 30, 37, 44, 49, 54, 59, 64]
assert list(order_lst) == [
4, 6, 8, 11, 13, 15, 18, 20, 22, 25, 27, 29, 32, 34,
36, 41, 43, 46, 48, 51, 53, 56, 58, 61, 63
]
assert list(post_order_lst) == [5, 7, 12, 14, 19, 21, 26, 28, 33, 35, 42, 47, 52, 57, 62]
segment_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=True, call_post_segment=True,
row_wise=False, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 42
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [42]
assert list(pre_group_lst) == [2, 24]
assert list(post_group_lst) == [23, 41]
assert list(pre_segment_lst) == [3, 5, 7, 14, 21, 25, 27, 32, 34, 39]
assert list(post_segment_lst) == [4, 6, 13, 20, 22, 26, 31, 33, 38, 40]
assert list(order_lst) == [8, 10, 12, 15, 17, 19, 28, 30, 35, 37]
assert list(post_order_lst) == [9, 11, 16, 18, 29, 36]
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=False, call_post_segment=False,
row_wise=False, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 30
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [30]
assert list(pre_group_lst) == [2, 18]
assert list(post_group_lst) == [17, 29]
assert list(pre_segment_lst) == [3, 10, 19, 24]
assert list(post_segment_lst) == [9, 16, 23, 28]
assert list(order_lst) == [4, 6, 8, 11, 13, 15, 20, 22, 25, 27]
assert list(post_order_lst) == [5, 7, 12, 14, 21, 26]
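# test_func_calls_row_wise: with row_wise=True the group callbacks are replaced by
# pre_row/post_row callbacks that wrap each row across all groups, so the outer loop
# runs over the 5 rows rather than the 2 groups. Note that sub_arg is defined but not
# passed to the callbacks in this variant.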
def test_func_calls_row_wise(self):
@njit
def pre_sim_func_nb(c, call_i, pre_sim_lst):
call_i[0] += 1
pre_sim_lst.append(call_i[0])
return (call_i,)
@njit
def post_sim_func_nb(c, call_i, post_sim_lst):
call_i[0] += 1
post_sim_lst.append(call_i[0])
return (call_i,)
@njit
def pre_row_func_nb(c, call_i, pre_row_lst):
call_i[0] += 1
pre_row_lst.append(call_i[0])
return (call_i,)
@njit
def post_row_func_nb(c, call_i, post_row_lst):
call_i[0] += 1
post_row_lst.append(call_i[0])
return (call_i,)
@njit
def pre_segment_func_nb(c, call_i, pre_segment_lst):
call_i[0] += 1
pre_segment_lst.append(call_i[0])
return (call_i,)
@njit
def post_segment_func_nb(c, call_i, post_segment_lst):
call_i[0] += 1
post_segment_lst.append(call_i[0])
return (call_i,)
@njit
def order_func_nb(c, call_i, order_lst):
call_i[0] += 1
order_lst.append(call_i[0])
return NoOrder
@njit
def post_order_func_nb(c, call_i, post_order_lst):
call_i[0] += 1
post_order_lst.append(call_i[0])
sub_arg = vbt.RepEval('np.prod([target_shape[0], target_shape[1]])')
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst,),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst,),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst,),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst,),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst,),
row_wise=True, template_mapping=dict(np=np)
)
assert call_i[0] == 62
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [62]
assert list(pre_row_lst) == [2, 14, 26, 38, 50]
assert list(post_row_lst) == [13, 25, 37, 49, 61]
assert list(pre_segment_lst) == [3, 9, 15, 21, 27, 33, 39, 45, 51, 57]
assert list(post_segment_lst) == [8, 12, 20, 24, 32, 36, 44, 48, 56, 60]
assert list(order_lst) == [4, 6, 10, 16, 18, 22, 28, 30, 34, 40, 42, 46, 52, 54, 58]
assert list(post_order_lst) == [5, 7, 11, 17, 19, 23, 29, 31, 35, 41, 43, 47, 53, 55, 59]
segment_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst,),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst,),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst,),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst,),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst,),
segment_mask=segment_mask, call_pre_segment=True, call_post_segment=True,
row_wise=True, template_mapping=dict(np=np)
)
assert call_i[0] == 44
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [44]
assert list(pre_row_lst) == [2, 8, 16, 26, 38]
assert list(post_row_lst) == [7, 15, 25, 37, 43]
assert list(pre_segment_lst) == [3, 5, 9, 11, 17, 23, 27, 33, 39, 41]
assert list(post_segment_lst) == [4, 6, 10, 14, 22, 24, 32, 36, 40, 42]
assert list(order_lst) == [12, 18, 20, 28, 30, 34]
assert list(post_order_lst) == [13, 19, 21, 29, 31, 35]
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst,),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst,),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst,),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst,),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst,),
segment_mask=segment_mask, call_pre_segment=False, call_post_segment=False,
row_wise=True, template_mapping=dict(np=np)
)
assert call_i[0] == 32
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [32]
assert list(pre_row_lst) == [2, 4, 10, 18, 30]
assert list(post_row_lst) == [3, 9, 17, 29, 31]
assert list(pre_segment_lst) == [5, 11, 19, 25]
assert list(post_segment_lst) == [8, 16, 24, 28]
assert list(order_lst) == [6, 12, 14, 20, 22, 26]
assert list(post_order_lst) == [7, 13, 15, 21, 23, 27]
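# test_func_calls_row_wise_flexible: combines row-wise iteration with the flexible
# order function, so each active segment again issues group_len + 1 order calls.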
def test_func_calls_row_wise_flexible(self):
@njit
def pre_sim_func_nb(c, call_i, pre_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_sim_lst.append(call_i[0])
return (call_i,)
@njit
def post_sim_func_nb(c, call_i, post_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_sim_lst.append(call_i[0])
return (call_i,)
@njit
def pre_row_func_nb(c, call_i, pre_row_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_row_lst.append(call_i[0])
return (call_i,)
@njit
def post_row_func_nb(c, call_i, post_row_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_row_lst.append(call_i[0])
return (call_i,)
@njit
def pre_segment_func_nb(c, call_i, pre_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_segment_lst.append(call_i[0])
return (call_i,)
@njit
def post_segment_func_nb(c, call_i, post_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_segment_lst.append(call_i[0])
return (call_i,)
@njit
def flex_order_func_nb(c, call_i, order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
order_lst.append(call_i[0])
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, NoOrder
return -1, NoOrder
@njit
def post_order_func_nb(c, call_i, post_order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_order_lst.append(call_i[0])
sub_arg = vbt.RepEval('np.prod([target_shape[0], target_shape[1]])')
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst, sub_arg),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
row_wise=True, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 72
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [72]
assert list(pre_row_lst) == [2, 16, 30, 44, 58]
assert list(post_row_lst) == [15, 29, 43, 57, 71]
assert list(pre_segment_lst) == [3, 10, 17, 24, 31, 38, 45, 52, 59, 66]
assert list(post_segment_lst) == [9, 14, 23, 28, 37, 42, 51, 56, 65, 70]
assert list(order_lst) == [
4, 6, 8, 11, 13, 18, 20, 22, 25, 27, 32, 34, 36,
39, 41, 46, 48, 50, 53, 55, 60, 62, 64, 67, 69
]
assert list(post_order_lst) == [5, 7, 12, 19, 21, 26, 33, 35, 40, 47, 49, 54, 61, 63, 68]
segment_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst, sub_arg),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=True, call_post_segment=True,
row_wise=True, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 48
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [48]
assert list(pre_row_lst) == [2, 8, 17, 28, 42]
assert list(post_row_lst) == [7, 16, 27, 41, 47]
assert list(pre_segment_lst) == [3, 5, 9, 11, 18, 25, 29, 36, 43, 45]
assert list(post_segment_lst) == [4, 6, 10, 15, 24, 26, 35, 40, 44, 46]
assert list(order_lst) == [12, 14, 19, 21, 23, 30, 32, 34, 37, 39]
assert list(post_order_lst) == [13, 20, 22, 31, 33, 38]
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst, sub_arg),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=False, call_post_segment=False,
row_wise=True, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 36
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [36]
assert list(pre_row_lst) == [2, 4, 11, 20, 34]
assert list(post_row_lst) == [3, 10, 19, 33, 35]
assert list(pre_segment_lst) == [5, 12, 21, 28]
assert list(post_segment_lst) == [9, 18, 27, 32]
assert list(order_lst) == [6, 8, 13, 15, 17, 22, 24, 26, 29, 31]
assert list(post_order_lst) == [7, 14, 16, 23, 25, 30]
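# The simulations below place one order (and one log record) per bar and column
# (5 x 3 = 15), so max_orders/max_logs of exactly 15 must pass while 14 must raise.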
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_max_orders(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
_ = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
row_wise=test_row_wise, flexible=test_flexible)
_ = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
row_wise=test_row_wise, max_orders=15, flexible=test_flexible)
with pytest.raises(Exception):
_ = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
row_wise=test_row_wise, max_orders=14, flexible=test_flexible)
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_max_logs(self, test_row_wise, test_flexible):
log_order_func = log_flex_order_func_nb if test_flexible else log_order_func_nb
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func, np.asarray(np.inf),
row_wise=test_row_wise, flexible=test_flexible)
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func, np.asarray(np.inf),
row_wise=test_row_wise, max_logs=15, flexible=test_flexible)
with pytest.raises(Exception):
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func, np.asarray(np.inf),
row_wise=test_row_wise, max_logs=14, flexible=test_flexible)
# ############# Portfolio ############# #
price_na = pd.DataFrame({
'a': [np.nan, 2., 3., 4., 5.],
'b': [1., 2., np.nan, 4., 5.],
'c': [1., 2., 3., 4., np.nan]
}, index=price.index)
order_size_new = pd.Series([1., 0.1, -1., -0.1, 1.])
directions = ['longonly', 'shortonly', 'both']
group_by = pd.Index(['first', 'first', 'second'], name='group')
pf = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='amount', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=None,
init_cash=[100., 100., 100.], freq='1D', attach_call_seq=True
) # independent
pf_grouped = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='amount', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=group_by, cash_sharing=False,
init_cash=[100., 100., 100.], freq='1D', attach_call_seq=True
) # grouped
pf_shared = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='amount', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=group_by, cash_sharing=True,
init_cash=[200., 100.], freq='1D', attach_call_seq=True
) # shared
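# The three module-level portfolios above share the same orders but differ in
# grouping: pf is ungrouped, pf_grouped groups the first two columns without cash
# sharing, and pf_shared shares cash within each group (hence init_cash per group).
# TestPortfolio compares their attributes both column-wise and group-wise.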
class TestPortfolio:
def test_config(self, tmp_path):
pf2 = pf.copy()
pf2._metrics = pf2._metrics.copy()
pf2.metrics['hello'] = 'world'
pf2._subplots = pf2.subplots.copy()
pf2.subplots['hello'] = 'world'
assert vbt.Portfolio.loads(pf2['a'].dumps()) == pf2['a']
assert vbt.Portfolio.loads(pf2.dumps()) == pf2
pf2.save(tmp_path / 'pf')
assert vbt.Portfolio.load(tmp_path / 'pf') == pf2
def test_wrapper(self):
pd.testing.assert_index_equal(
pf.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
price_na.columns
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.grouper.group_by is None
assert pf.wrapper.grouper.allow_enable
assert pf.wrapper.grouper.allow_disable
assert pf.wrapper.grouper.allow_modify
pd.testing.assert_index_equal(
pf_grouped.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
pf_grouped.wrapper.columns,
price_na.columns
)
assert pf_grouped.wrapper.ndim == 2
pd.testing.assert_index_equal(
pf_grouped.wrapper.grouper.group_by,
group_by
)
assert pf_grouped.wrapper.grouper.allow_enable
assert pf_grouped.wrapper.grouper.allow_disable
assert pf_grouped.wrapper.grouper.allow_modify
pd.testing.assert_index_equal(
pf_shared.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
pf_shared.wrapper.columns,
price_na.columns
)
assert pf_shared.wrapper.ndim == 2
pd.testing.assert_index_equal(
pf_shared.wrapper.grouper.group_by,
group_by
)
assert not pf_shared.wrapper.grouper.allow_enable
assert pf_shared.wrapper.grouper.allow_disable
assert not pf_shared.wrapper.grouper.allow_modify
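# test_indexing: selecting a column or group from a portfolio should index all of its
# artifacts (wrapper, orders, logs, init_cash, call_seq) consistently.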
def test_indexing(self):
assert pf['a'].wrapper == pf.wrapper['a']
assert pf['a'].orders == pf.orders['a']
assert pf['a'].logs == pf.logs['a']
assert pf['a'].init_cash == pf.init_cash['a']
pd.testing.assert_series_equal(pf['a'].call_seq, pf.call_seq['a'])
assert pf['c'].wrapper == pf.wrapper['c']
assert pf['c'].orders == pf.orders['c']
assert pf['c'].logs == pf.logs['c']
assert pf['c'].init_cash == pf.init_cash['c']
pd.testing.assert_series_equal(pf['c'].call_seq, pf.call_seq['c'])
assert pf[['c']].wrapper == pf.wrapper[['c']]
assert pf[['c']].orders == pf.orders[['c']]
assert pf[['c']].logs == pf.logs[['c']]
pd.testing.assert_series_equal(pf[['c']].init_cash, pf.init_cash[['c']])
pd.testing.assert_frame_equal(pf[['c']].call_seq, pf.call_seq[['c']])
assert pf_grouped['first'].wrapper == pf_grouped.wrapper['first']
assert pf_grouped['first'].orders == pf_grouped.orders['first']
assert pf_grouped['first'].logs == pf_grouped.logs['first']
assert pf_grouped['first'].init_cash == pf_grouped.init_cash['first']
pd.testing.assert_frame_equal(pf_grouped['first'].call_seq, pf_grouped.call_seq[['a', 'b']])
assert pf_grouped[['first']].wrapper == pf_grouped.wrapper[['first']]
assert pf_grouped[['first']].orders == pf_grouped.orders[['first']]
assert pf_grouped[['first']].logs == pf_grouped.logs[['first']]
pd.testing.assert_series_equal(
pf_grouped[['first']].init_cash,
pf_grouped.init_cash[['first']])
pd.testing.assert_frame_equal(pf_grouped[['first']].call_seq, pf_grouped.call_seq[['a', 'b']])
assert pf_grouped['second'].wrapper == pf_grouped.wrapper['second']
assert pf_grouped['second'].orders == pf_grouped.orders['second']
assert pf_grouped['second'].logs == pf_grouped.logs['second']
assert pf_grouped['second'].init_cash == pf_grouped.init_cash['second']
pd.testing.assert_series_equal(pf_grouped['second'].call_seq, pf_grouped.call_seq['c'])
assert pf_grouped[['second']].orders == pf_grouped.orders[['second']]

assert pf_grouped[['second']].wrapper == pf_grouped.wrapper[['second']]
assert pf_grouped[['second']].orders == pf_grouped.orders[['second']]
assert pf_grouped[['second']].logs == pf_grouped.logs[['second']]
pd.testing.assert_series_equal(
pf_grouped[['second']].init_cash,
pf_grouped.init_cash[['second']])
pd.testing.assert_frame_equal(pf_grouped[['second']].call_seq, pf_grouped.call_seq[['c']])
assert pf_shared['first'].wrapper == pf_shared.wrapper['first']
assert pf_shared['first'].orders == pf_shared.orders['first']
assert pf_shared['first'].logs == pf_shared.logs['first']
assert pf_shared['first'].init_cash == pf_shared.init_cash['first']
pd.testing.assert_frame_equal(pf_shared['first'].call_seq, pf_shared.call_seq[['a', 'b']])
assert pf_shared[['first']].orders == pf_shared.orders[['first']]

assert pf_shared[['first']].wrapper == pf_shared.wrapper[['first']]
assert pf_shared[['first']].orders == pf_shared.orders[['first']]
assert pf_shared[['first']].logs == pf_shared.logs[['first']]
pd.testing.assert_series_equal(
pf_shared[['first']].init_cash,
pf_shared.init_cash[['first']])
pd.testing.assert_frame_equal(pf_shared[['first']].call_seq, pf_shared.call_seq[['a', 'b']])
assert pf_shared['second'].wrapper == pf_shared.wrapper['second']
assert pf_shared['second'].orders == pf_shared.orders['second']
assert pf_shared['second'].logs == pf_shared.logs['second']
assert pf_shared['second'].init_cash == pf_shared.init_cash['second']
pd.testing.assert_series_equal(pf_shared['second'].call_seq, pf_shared.call_seq['c'])
assert pf_shared[['second']].wrapper == pf_shared.wrapper[['second']]
assert pf_shared[['second']].orders == pf_shared.orders[['second']]
assert pf_shared[['second']].logs == pf_shared.logs[['second']]
pd.testing.assert_series_equal(
pf_shared[['second']].init_cash,
pf_shared.init_cash[['second']])
pd.testing.assert_frame_equal(pf_shared[['second']].call_seq, pf_shared.call_seq[['c']])
def test_regroup(self):
assert pf.regroup(None) == pf
assert pf.regroup(False) == pf
assert pf.regroup(group_by) != pf
pd.testing.assert_index_equal(pf.regroup(group_by).wrapper.grouper.group_by, group_by)
assert pf_grouped.regroup(None) == pf_grouped
assert pf_grouped.regroup(False) != pf_grouped
assert pf_grouped.regroup(False).wrapper.grouper.group_by is None
assert pf_grouped.regroup(group_by) == pf_grouped
assert pf_shared.regroup(None) == pf_shared
with pytest.raises(Exception):
_ = pf_shared.regroup(False)
assert pf_shared.regroup(group_by) == pf_shared
def test_cash_sharing(self):
assert not pf.cash_sharing
assert not pf_grouped.cash_sharing
assert pf_shared.cash_sharing
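# call_seq='reversed' processes the columns of a group from last to first, so in the
# grouped portfolios column 'b' is called before 'a' within the 'first' group; the
# ungrouped portfolio has single-column groups and therefore all zeros.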
def test_call_seq(self):
pd.testing.assert_frame_equal(
pf.call_seq,
pd.DataFrame(
np.array([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_grouped.call_seq,
pd.DataFrame(
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_shared.call_seq,
pd.DataFrame(
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
def test_orders(self):
record_arrays_close(
pf.orders.values,
np.array([
(0, 0, 1, 0.1, 2.02, 0.10202, 0), (1, 0, 2, 0.1, 2.9699999999999998, 0.10297, 1),
(2, 0, 4, 1.0, 5.05, 0.1505, 0), (3, 1, 0, 1.0, 0.99, 0.10990000000000001, 1),
(4, 1, 1, 0.1, 1.98, 0.10198, 1), (5, 1, 3, 0.1, 4.04, 0.10404000000000001, 0),
(6, 1, 4, 1.0, 4.95, 0.14950000000000002, 1), (7, 2, 0, 1.0, 1.01, 0.1101, 0),
(8, 2, 1, 0.1, 2.02, 0.10202, 0), (9, 2, 2, 1.0, 2.9699999999999998, 0.1297, 1),
(10, 2, 3, 0.1, 3.96, 0.10396000000000001, 1)
], dtype=order_dt)
)
result = pd.Series(
np.array([3, 4, 4]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.orders.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_orders(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_orders(group_by=False).count(),
result
)
result = pd.Series(
np.array([7, 4]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_orders(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.orders.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.orders.count(),
result
)
def test_logs(self):
record_arrays_close(
pf.logs.values,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, np.nan, 0, 0, 0.01,
0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.0, 0.0, 0.0,
100.0, np.nan, 100.0, np.nan, np.nan, np.nan, -1, 1, 1, -1),
(1, 0, 0, 1, 100.0, 0.0, 0.0, 100.0, 2.0, 100.0, 0.1, 2.0, 0, 0, 0.01,
0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 99.69598, 0.1,
0.0, 99.69598, 2.0, 100.0, 0.1, 2.02, 0.10202, 0, 0, -1, 0),
(2, 0, 0, 2, 99.69598, 0.1, 0.0, 99.69598, 3.0, 99.99598, -1.0, 3.0,
0, 0, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 99.89001,
0.0, 0.0, 99.89001, 3.0, 99.99598, 0.1, 2.9699999999999998, 0.10297, 1, 0, -1, 1),
(3, 0, 0, 3, 99.89001, 0.0, 0.0, 99.89001, 4.0, 99.89001, -0.1, 4.0,
0, 0, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
99.89001, 0.0, 0.0, 99.89001, 4.0, 99.89001, np.nan, np.nan, np.nan, -1, 2, 8, -1),
(4, 0, 0, 4, 99.89001, 0.0, 0.0, 99.89001, 5.0, 99.89001, 1.0, 5.0, 0,
0, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 94.68951,
1.0, 0.0, 94.68951, 5.0, 99.89001, 1.0, 5.05, 0.1505, 0, 0, -1, 2),
(5, 1, 1, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, 1.0, 1.0, 0, 1, 0.01,
0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.8801, -1.0,
0.99, 98.9001, 1.0, 100.0, 1.0, 0.99, 0.10990000000000001, 1, 0, -1, 3),
(6, 1, 1, 1, 100.8801, -1.0, 0.99, 98.9001, 2.0, 98.8801, 0.1, 2.0, 0, 1,
0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.97612,
-1.1, 1.188, 98.60011999999999, 2.0, 98.8801, 0.1, 1.98, 0.10198, 1, 0, -1, 4),
(7, 1, 1, 2, 100.97612, -1.1, 1.188, 98.60011999999999, 2.0, 98.77611999999999,
-1.0, np.nan, 0, 1, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.97612,
-1.1, 1.188, 98.60011999999999, 2.0, 98.77611999999999, np.nan, np.nan, np.nan, -1, 1, 1, -1),
(8, 1, 1, 3, 100.97612, -1.1, 1.188, 98.60011999999999, 4.0, 96.57611999999999,
-0.1, 4.0, 0, 1, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
100.46808, -1.0, 1.08, 98.30807999999999, 4.0, 96.57611999999999, 0.1, 4.04,
0.10404000000000001, 0, 0, -1, 5),
(9, 1, 1, 4, 100.46808, -1.0, 1.08, 98.30807999999999, 5.0, 95.46808, 1.0, 5.0, 0, 1,
0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 105.26858, -2.0, 6.03,
93.20857999999998, 5.0, 95.46808, 1.0, 4.95, 0.14950000000000002, 1, 0, -1, 6),
(10, 2, 2, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, 1.0, 1.0, 0, 2, 0.01, 0.1,
0.01, 1e-08, np.inf, 0.0, False, True, False, True, 98.8799, 1.0, 0.0, 98.8799,
1.0, 100.0, 1.0, 1.01, 0.1101, 0, 0, -1, 7),
(11, 2, 2, 1, 98.8799, 1.0, 0.0, 98.8799, 2.0, 100.8799, 0.1, 2.0, 0, 2, 0.01,
0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 98.57588000000001, 1.1,
0.0, 98.57588000000001, 2.0, 100.8799, 0.1, 2.02, 0.10202, 0, 0, -1, 8),
(12, 2, 2, 2, 98.57588000000001, 1.1, 0.0, 98.57588000000001, 3.0, 101.87588000000001,
-1.0, 3.0, 0, 2, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
101.41618000000001, 0.10000000000000009, 0.0, 101.41618000000001, 3.0,
101.87588000000001, 1.0, 2.9699999999999998, 0.1297, 1, 0, -1, 9),
(13, 2, 2, 3, 101.41618000000001, 0.10000000000000009, 0.0, 101.41618000000001,
4.0, 101.81618000000002, -0.1, 4.0, 0, 2, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0,
False, True, False, True, 101.70822000000001, 0.0, 0.0, 101.70822000000001,
4.0, 101.81618000000002, 0.1, 3.96, 0.10396000000000001, 1, 0, -1, 10),
(14, 2, 2, 4, 101.70822000000001, 0.0, 0.0, 101.70822000000001, 4.0, 101.70822000000001,
1.0, np.nan, 0, 2, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
101.70822000000001, 0.0, 0.0, 101.70822000000001, 4.0, 101.70822000000001,
np.nan, np.nan, np.nan, -1, 1, 1, -1)
], dtype=log_dt)
)
result = pd.Series(
np.array([5, 5, 5]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.logs.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_logs(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_logs(group_by=False).count(),
result
)
result = pd.Series(
np.array([10, 5]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_logs(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.logs.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.logs.count(),
result
)
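# Roughly: entry trades attribute P&L to each entry order, exit trades to each exit
# order, and positions (tested further below) aggregate them into one record per
# position, which is reflected in the record counts.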
def test_entry_trades(self):
record_arrays_close(
pf.entry_trades.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998, 0.10297,
-0.10999000000000003, -0.5445049504950497, 0, 1, 0),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0, -0.20049999999999982, -0.03970297029702967, 0, 0, 1),
(2, 1, 1.0, 0, 0.99, 0.10990000000000001, 4, 4.954285714285714,
0.049542857142857145, -4.12372857142857, -4.165382395382394, 1, 0, 2),
(3, 1, 0.1, 1, 1.98, 0.10198, 4, 4.954285714285714, 0.004954285714285714,
-0.4043628571428571, -2.0422366522366517, 1, 0, 2),
(4, 1, 1.0, 4, 4.95, 0.14950000000000002, 4, 4.954285714285714,
0.049542857142857145, -0.20332857142857072, -0.04107647907647893, 1, 0, 2),
(5, 2, 1.0, 0, 1.01, 0.1101, 3, 3.0599999999999996, 0.21241818181818184,
1.727481818181818, 1.71037803780378, 0, 1, 3),
(6, 2, 0.1, 1, 2.02, 0.10202, 3, 3.0599999999999996, 0.021241818181818185,
-0.019261818181818203, -0.09535553555355546, 0, 1, 3)
], dtype=trade_dt)
)
result = pd.Series(
np.array([2, 3, 2]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.entry_trades.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_entry_trades(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_entry_trades(group_by=False).count(),
result
)
result = pd.Series(
np.array([5, 2]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_entry_trades(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.entry_trades.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.entry_trades.count(),
result
)
def test_exit_trades(self):
record_arrays_close(
pf.exit_trades.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998, 0.10297,
-0.10999000000000003, -0.5445049504950497, 0, 1, 0),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0,
-0.20049999999999982, -0.03970297029702967, 0, 0, 1),
(2, 1, 0.1, 0, 1.0799999999999998, 0.019261818181818182,
3, 4.04, 0.10404000000000001, -0.4193018181818182, -3.882424242424243, 1, 1, 2),
(3, 1, 2.0, 0, 3.015, 0.3421181818181819, 4, 5.0, 0.0,
-4.312118181818182, -0.7151108095884214, 1, 0, 2),
(4, 2, 1.0, 0, 1.1018181818181818, 0.19283636363636364, 2,
2.9699999999999998, 0.1297, 1.5456454545454543, 1.4028135313531351, 0, 1, 3),
(5, 2, 0.10000000000000009, 0, 1.1018181818181818, 0.019283636363636378,
3, 3.96, 0.10396000000000001, 0.1625745454545457, 1.4755115511551162, 0, 1, 3)
], dtype=trade_dt)
)
result = pd.Series(
np.array([2, 2, 2]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.exit_trades.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_exit_trades(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_exit_trades(group_by=False).count(),
result
)
result = pd.Series(
np.array([4, 2]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_exit_trades(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.exit_trades.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.exit_trades.count(),
result
)
def test_positions(self):
record_arrays_close(
pf.positions.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998,
0.10297, -0.10999000000000003, -0.5445049504950497, 0, 1, 0),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0,
-0.20049999999999982, -0.03970297029702967, 0, 0, 1),
(2, 1, 2.1, 0, 2.9228571428571426, 0.36138000000000003, 4, 4.954285714285714,
0.10404000000000001, -4.731420000000001, -0.7708406647116326, 1, 0, 2),
(3, 2, 1.1, 0, 1.1018181818181818, 0.21212000000000003, 3,
3.06, 0.23366000000000003, 1.7082200000000003, 1.4094224422442245, 0, 1, 3)
], dtype=trade_dt)
)
result = pd.Series(
np.array([2, 1, 1]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.positions.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_positions(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_positions(group_by=False).count(),
result
)
result = pd.Series(
np.array([3, 1]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_positions(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.positions.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.positions.count(),
result
)
def test_drawdowns(self):
record_arrays_close(
pf.drawdowns.values,
np.array([
(0, 0, 0, 1, 4, 4, 100.0, 99.68951, 99.68951, 0),
(1, 1, 0, 1, 4, 4, 99.8801, 95.26858, 95.26858, 0),
(2, 2, 2, 3, 3, 4, 101.71618000000001, 101.70822000000001, 101.70822000000001, 0)
], dtype=drawdown_dt)
)
result = pd.Series(
np.array([1, 1, 1]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.drawdowns.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_drawdowns(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_drawdowns(group_by=False).count(),
result
)
result = pd.Series(
np.array([1, 1]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_drawdowns(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.drawdowns.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.drawdowns.count(),
result
)
def test_close(self):
pd.testing.assert_frame_equal(pf.close, price_na)
pd.testing.assert_frame_equal(pf_grouped.close, price_na)
pd.testing.assert_frame_equal(pf_shared.close, price_na)
def test_get_filled_close(self):
pd.testing.assert_frame_equal(
pf.get_filled_close(),
price_na.ffill().bfill()
)
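# asset_flow is the per-bar change in the number of asset units held; the direction
# argument isolates the long-only or short-only component of that flow.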
def test_asset_flow(self):
pd.testing.assert_frame_equal(
pf.asset_flow(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.1, 0., 0.1],
[-0.1, 0., -1.],
[0., 0., -0.1],
[1., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.asset_flow(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 0.1, 0.],
[0., 0., 0.],
[0., -0.1, 0.],
[0., 1., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.1, -0.1, 0.1],
[-0.1, 0., -1.],
[0., 0.1, -0.1],
[1., -1., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.asset_flow(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_flow(),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_flow(),
result
)
def test_assets(self):
pd.testing.assert_frame_equal(
pf.assets(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.1, 0., 1.1],
[0., 0., 0.1],
[0., 0., 0.],
[1., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.assets(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 1.1, 0.],
[0., 1.1, 0.],
[0., 1., 0.],
[0., 2., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.1, -1.1, 1.1],
[0., -1.1, 0.1],
[0., -1., 0.],
[1., -2., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.assets(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.assets(),
result
)
pd.testing.assert_frame_equal(
pf_shared.assets(),
result
)
def test_position_mask(self):
pd.testing.assert_frame_equal(
pf.position_mask(direction='longonly'),
pd.DataFrame(
np.array([
[False, False, True],
[True, False, True],
[False, False, True],
[False, False, False],
[True, False, False]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.position_mask(direction='shortonly'),
pd.DataFrame(
np.array([
[False, True, False],
[False, True, False],
[False, True, False],
[False, True, False],
[False, True, False]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[False, True, True],
[True, True, True],
[False, True, True],
[False, True, False],
[True, True, False]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.position_mask(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.position_mask(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.position_mask(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[True, True],
[True, True],
[True, True],
[True, False],
[True, False]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.position_mask(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.position_mask(),
result
)
pd.testing.assert_frame_equal(
pf_shared.position_mask(),
result
)
def test_position_coverage(self):
pd.testing.assert_series_equal(
pf.position_coverage(direction='longonly'),
pd.Series(np.array([0.4, 0., 0.6]), index=price_na.columns).rename('position_coverage')
)
pd.testing.assert_series_equal(
pf.position_coverage(direction='shortonly'),
pd.Series(np.array([0., 1., 0.]), index=price_na.columns).rename('position_coverage')
)
result = pd.Series(np.array([0.4, 1., 0.6]), index=price_na.columns).rename('position_coverage')
pd.testing.assert_series_equal(
pf.position_coverage(),
result
)
pd.testing.assert_series_equal(
pf_grouped.position_coverage(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.position_coverage(group_by=False),
result
)
result = pd.Series(
np.array([0.7, 0.6]),
pd.Index(['first', 'second'], dtype='object', name='group')
).rename('position_coverage')
pd.testing.assert_series_equal(
pf.position_coverage(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.position_coverage(),
result
)
pd.testing.assert_series_equal(
pf_shared.position_coverage(),
result
)
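# cash_flow(free=True) additionally accounts for cash tied up by short positions, so
# it only differs from cash_flow() in the short-only column 'b' below.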
def test_cash_flow(self):
pd.testing.assert_frame_equal(
pf.cash_flow(free=True),
pd.DataFrame(
np.array([
[0.0, -1.0998999999999999, -1.1201],
[-0.30402, -0.2999800000000002, -0.3040200000000002],
[0.19402999999999998, 0.0, 2.8402999999999996],
[0.0, -0.2920400000000002, 0.29204000000000035],
[-5.2005, -5.0995, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., 0.8801, -1.1201],
[-0.30402, 0.09602, -0.30402],
[0.19403, 0., 2.8403],
[0., -0.50804, 0.29204],
[-5.2005, 4.8005, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.cash_flow(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.cash_flow(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.cash_flow(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[0.8801, -1.1201],
[-0.208, -0.30402],
[0.19403, 2.8403],
[-0.50804, 0.29204],
[-0.4, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.cash_flow(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.cash_flow(),
result
)
pd.testing.assert_frame_equal(
pf_shared.cash_flow(),
result
)
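# InitCashMode.Auto infers the minimum initial capital needed per column/group, while
# InitCashMode.AutoAlign aligns everything to the largest inferred amount (hence
# 14000 for every column and 26000 for both groups below).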
def test_init_cash(self):
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series(np.array([100., 100., 100.]), index=price_na.columns).rename('init_cash')
)
pd.testing.assert_series_equal(
pf_grouped.get_init_cash(group_by=False),
pd.Series(np.array([100., 100., 100.]), index=price_na.columns).rename('init_cash')
)
pd.testing.assert_series_equal(
pf_shared.get_init_cash(group_by=False),
pd.Series(np.array([200., 200., 100.]), index=price_na.columns).rename('init_cash')
)
result = pd.Series(
np.array([200., 100.]),
pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
pd.testing.assert_series_equal(
pf.get_init_cash(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.init_cash,
result
)
pd.testing.assert_series_equal(
pf_shared.init_cash,
result
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=None).init_cash,
pd.Series(
np.array([14000., 12000., 10000.]),
index=price_na.columns
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=group_by).init_cash,
pd.Series(
np.array([26000.0, 10000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=group_by, cash_sharing=True).init_cash,
pd.Series(
np.array([26000.0, 10000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=None).init_cash,
pd.Series(
np.array([14000., 14000., 14000.]),
index=price_na.columns
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=group_by).init_cash,
pd.Series(
np.array([26000.0, 26000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=group_by, cash_sharing=True).init_cash,
pd.Series(
np.array([26000.0, 26000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
def test_cash(self):
pd.testing.assert_frame_equal(
pf.cash(free=True),
pd.DataFrame(
np.array([
[100.0, 98.9001, 98.8799],
[99.69598, 98.60011999999999, 98.57588000000001],
[99.89001, 98.60011999999999, 101.41618000000001],
[99.89001, 98.30807999999999, 101.70822000000001],
[94.68951, 93.20857999999998, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[100., 100.8801, 98.8799],
[99.69598, 100.97612, 98.57588],
[99.89001, 100.97612, 101.41618],
[99.89001, 100.46808, 101.70822],
[94.68951, 105.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.cash(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.cash(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.cash(group_by=False),
pd.DataFrame(
np.array([
[200., 200.8801, 98.8799],
[199.69598, 200.97612, 98.57588],
[199.89001, 200.97612, 101.41618],
[199.89001, 200.46808, 101.70822],
[194.68951, 205.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_shared.cash(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[200.8801, 200.8801, 98.8799],
[200.6721, 200.97612, 98.57588000000001],
[200.86613, 200.6721, 101.41618000000001],
[200.35809, 200.35809, 101.70822000000001],
[199.95809, 205.15859, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[200.8801, 98.8799],
[200.6721, 98.57588],
[200.86613, 101.41618],
[200.35809, 101.70822],
[199.95809, 101.70822]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.cash(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.cash(),
result
)
pd.testing.assert_frame_equal(
pf_shared.cash(),
result
)
def test_asset_value(self):
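        # asset value split by direction (longonly/shortonly), combined per column,
        # and aggregated per group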
pd.testing.assert_frame_equal(
pf.asset_value(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.2, 0., 2.2],
[0., 0., 0.3],
[0., 0., 0.],
[5., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.asset_value(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 2.2, 0.],
[0., 2.2, 0.],
[0., 4., 0.],
[0., 10., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.2, -2.2, 2.2],
[0., -2.2, 0.3],
[0., -4., 0.],
[5., -10., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.asset_value(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_value(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_value(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[-1., 1.],
[-2., 2.2],
[-2.2, 0.3],
[-4., 0.],
[-5., 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.asset_value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_value(),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_value(),
result
)
def test_gross_exposure(self):
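        # gross exposure by direction (longonly/shortonly/both), per column,
        # under shared cash, and per group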
pd.testing.assert_frame_equal(
pf.gross_exposure(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 0.01001202],
[0.00200208, 0., 0.02183062],
[0., 0., 0.00294938],
[0., 0., 0.],
[0.05015573, 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.gross_exposure(direction='shortonly'),
pd.DataFrame(
np.array([
[0.0, 0.01000999998999, 0.0],
[0.0, 0.021825370842812494, 0.0],
[0.0, 0.021825370842812494, 0.0],
[0.0, 0.03909759620159034, 0.0],
[0.0, 0.09689116931945001, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0.0, -0.010214494162927312, 0.010012024441354066],
[0.00200208256628545, -0.022821548354919067, 0.021830620581035857],
[0.0, -0.022821548354919067, 0.002949383274126105],
[0.0, -0.04241418126633477, 0.0],
[0.050155728521486365, -0.12017991413866216, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.gross_exposure(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.gross_exposure(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.gross_exposure(group_by=False),
pd.DataFrame(
np.array([
[0.0, -0.00505305454620791, 0.010012024441354066],
[0.0010005203706447724, -0.011201622483733716, 0.021830620581035857],
[0.0, -0.011201622483733716, 0.002949383274126105],
[0.0, -0.020585865497718882, 0.0],
[0.025038871596209537, -0.0545825965137659, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-0.00505305454620791, 0.010012024441354066],
[-0.010188689433972452, 0.021830620581035857],
[-0.0112078992458765, 0.002949383274126105],
[-0.02059752492931316, 0.0],
[-0.027337628293439265, 0.0]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.gross_exposure(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.gross_exposure(),
result
)
pd.testing.assert_frame_equal(
pf_shared.gross_exposure(),
result
)
def test_net_exposure(self):
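        # net exposure per column, under shared cash, and per group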
result = pd.DataFrame(
np.array([
[0.0, -0.01000999998999, 0.010012024441354066],
[0.00200208256628545, -0.021825370842812494, 0.021830620581035857],
[0.0, -0.021825370842812494, 0.002949383274126105],
[0.0, -0.03909759620159034, 0.0],
[0.050155728521486365, -0.09689116931945001, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.net_exposure(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.net_exposure(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.net_exposure(group_by=False),
pd.DataFrame(
np.array([
[0.0, -0.005002498748124688, 0.010012024441354066],
[0.0010005203706447724, -0.010956168751293576, 0.021830620581035857],
[0.0, -0.010956168751293576, 0.002949383274126105],
[0.0, -0.019771825228137207, 0.0],
[0.025038871596209537, -0.049210520540028384, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-0.005002498748124688, 0.010012024441354066],
[-0.009965205542937988, 0.021830620581035857],
[-0.010962173376438594, 0.002949383274126105],
[-0.019782580537729116, 0.0],
[-0.0246106361476199, 0.0]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.net_exposure(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.net_exposure(),
result
)
pd.testing.assert_frame_equal(
pf_shared.net_exposure(),
result
)
def test_value(self):
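        # portfolio value per column, under shared cash (flat and in simulation order),
        # and per group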
result = pd.DataFrame(
np.array([
[100., 99.8801, 99.8799],
[99.89598, 98.77612, 100.77588],
[99.89001, 98.77612, 101.71618],
[99.89001, 96.46808, 101.70822],
[99.68951, 95.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.value(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.value(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.value(group_by=False),
pd.DataFrame(
np.array([
[200., 199.8801, 99.8799],
[199.89598, 198.77612, 100.77588],
[199.89001, 198.77612, 101.71618],
[199.89001, 196.46808, 101.70822],
[199.68951, 195.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_shared.value(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[199.8801, 199.8801, 99.8799],
[198.6721, 198.77612000000002, 100.77588000000002],
[198.66613, 198.6721, 101.71618000000001],
[196.35809, 196.35809, 101.70822000000001],
[194.95809, 195.15859, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[199.8801, 99.8799],
[198.6721, 100.77588],
[198.66613, 101.71618],
[196.35809, 101.70822],
[194.95809, 101.70822]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.value(),
result
)
pd.testing.assert_frame_equal(
pf_shared.value(),
result
)
def test_total_profit(self):
result = pd.Series(
np.array([-0.31049, -4.73142, 1.70822]),
index=price_na.columns
).rename('total_profit')
pd.testing.assert_series_equal(
pf.total_profit(),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_profit(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.total_profit(group_by=False),
result
)
result = pd.Series(
np.array([-5.04191, 1.70822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_profit')
pd.testing.assert_series_equal(
pf.total_profit(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_profit(),
result
)
pd.testing.assert_series_equal(
pf_shared.total_profit(),
result
)
def test_final_value(self):
result = pd.Series(
np.array([99.68951, 95.26858, 101.70822]),
index=price_na.columns
).rename('final_value')
pd.testing.assert_series_equal(
pf.final_value(),
result
)
pd.testing.assert_series_equal(
pf_grouped.final_value(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.final_value(group_by=False),
pd.Series(
np.array([199.68951, 195.26858, 101.70822]),
index=price_na.columns
).rename('final_value')
)
result = pd.Series(
np.array([194.95809, 101.70822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('final_value')
pd.testing.assert_series_equal(
pf.final_value(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.final_value(),
result
)
pd.testing.assert_series_equal(
pf_shared.final_value(),
result
)
def test_total_return(self):
result = pd.Series(
np.array([-0.0031049, -0.0473142, 0.0170822]),
index=price_na.columns
).rename('total_return')
pd.testing.assert_series_equal(
pf.total_return(),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_return(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.total_return(group_by=False),
pd.Series(
np.array([-0.00155245, -0.0236571, 0.0170822]),
index=price_na.columns
).rename('total_return')
)
result = pd.Series(
np.array([-0.02520955, 0.0170822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_return')
pd.testing.assert_series_equal(
pf.total_return(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_return(),
result
)
pd.testing.assert_series_equal(
pf_shared.total_return(),
result
)
def test_returns(self):
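        # period returns per column and per group; under shared cash the per-column
        # returns are computed relative to the group's capital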
result = pd.DataFrame(
np.array([
[0.00000000e+00, -1.19900000e-03, -1.20100000e-03],
[-1.04020000e-03, -1.10530526e-02, 8.97057366e-03],
[-5.97621646e-05, 0.0, 9.33060570e-03],
[0.00000000e+00, -0.023366376407576966, -7.82569695e-05],
[-2.00720773e-03, -1.24341648e-02, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.returns(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.returns(group_by=False),
pd.DataFrame(
np.array([
[0.00000000e+00, -5.99500000e-04, -1.20100000e-03],
[-5.20100000e-04, -5.52321117e-03, 8.97057366e-03],
[-2.98655331e-05, 0.0, 9.33060570e-03],
[0.00000000e+00, -0.011611253907159497, -7.82569695e-05],
[-1.00305163e-03, -6.10531746e-03, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_shared.returns(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[0.0, -0.0005995000000000062, -1.20100000e-03],
[-0.0005233022960706736, -0.005523211165093367, 8.97057366e-03],
[-3.0049513746473233e-05, 0.0, 9.33060570e-03],
[0.0, -0.011617682390048093, -7.82569695e-05],
[-0.0010273695869600474, -0.0061087373583639994, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-5.99500000e-04, -1.20100000e-03],
[-6.04362315e-03, 8.97057366e-03],
[-3.0049513746473233e-05, 9.33060570e-03],
[-0.011617682390048093, -7.82569695e-05],
[-7.12983101e-03, 0.00000000e+00]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.returns(),
result
)
pd.testing.assert_frame_equal(
pf_shared.returns(),
result
)
def test_asset_returns(self):
result = pd.DataFrame(
np.array([
[0., -np.inf, -np.inf],
[-np.inf, -1.10398, 0.89598],
[-0.02985, 0.0, 0.42740909],
[0., -1.0491090909090908, -0.02653333],
[-np.inf, -0.299875, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.asset_returns(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_returns(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[-np.inf, -np.inf],
[-1.208, 0.89598],
[-0.0029850000000000154, 0.42740909],
[-1.0491090909090908, -0.02653333],
[-0.35, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.asset_returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_returns(),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_returns(),
result
)
def test_benchmark_value(self):
result = pd.DataFrame(
np.array([
[100., 100., 100.],
[100., 200., 200.],
[150., 200., 300.],
[200., 400., 400.],
[250., 500., 400.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.benchmark_value(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.benchmark_value(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.benchmark_value(group_by=False),
pd.DataFrame(
np.array([
[200., 200., 100.],
[200., 400., 200.],
[300., 400., 300.],
[400., 800., 400.],
[500., 1000., 400.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[200., 100.],
[300., 200.],
[350., 300.],
[600., 400.],
[750., 400.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.benchmark_value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.benchmark_value(),
result
)
pd.testing.assert_frame_equal(
pf_shared.benchmark_value(),
result
)
def test_benchmark_returns(self):
result = pd.DataFrame(
np.array([
[0., 0., 0.],
[0., 1., 1.],
[0.5, 0., 0.5],
[0.33333333, 1., 0.33333333],
[0.25, 0.25, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.benchmark_returns(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.benchmark_returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.benchmark_returns(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[0., 0.],
[0.5, 1.],
[0.16666667, 0.5],
[0.71428571, 0.33333333],
[0.25, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.benchmark_returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.benchmark_returns(),
result
)
pd.testing.assert_frame_equal(
pf_shared.benchmark_returns(),
result
)
def test_total_benchmark_return(self):
result = pd.Series(
np.array([1.5, 4., 3.]),
index=price_na.columns
).rename('total_benchmark_return')
pd.testing.assert_series_equal(
pf.total_benchmark_return(),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_benchmark_return(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.total_benchmark_return(group_by=False),
result
)
result = pd.Series(
np.array([2.75, 3.]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_benchmark_return')
pd.testing.assert_series_equal(
pf.total_benchmark_return(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_benchmark_return(),
result
)
pd.testing.assert_series_equal(
pf_shared.total_benchmark_return(),
result
)
def test_return_method(self):
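        # returns-accessor methods forwarded by the portfolio: cumulative returns,
        # Sharpe ratio (risk_free, year_freq, group_by) and information ratio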
pd.testing.assert_frame_equal(
pf_shared.cumulative_returns(),
pd.DataFrame(
np.array([
[-0.000599499999999975, -0.0012009999999998966],
[-0.006639499999999909, 0.007758800000000177],
[-0.006669349999999907, 0.017161800000000005],
[-0.01820955000000002, 0.017082199999999936],
[-0.025209550000000136, 0.017082199999999936]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
)
pd.testing.assert_frame_equal(
pf_shared.cumulative_returns(group_by=False),
pd.DataFrame(
np.array([
[0.0, -0.000599499999999975, -0.0012009999999998966],
[-0.0005201000000001343, -0.006119399999999886, 0.007758800000000177],
[-0.0005499500000001323, -0.006119399999999886, 0.017161800000000005],
[-0.0005499500000001323, -0.017659599999999886, 0.017082199999999936],
[-0.0015524500000001495, -0.023657099999999875, 0.017082199999999936]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_series_equal(
pf_shared.sharpe_ratio(),
pd.Series(
np.array([-20.095906945591288, 12.345065267401496]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
pf_shared.sharpe_ratio(risk_free=0.01),
pd.Series(
np.array([-59.62258787402645, -23.91718815937344]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
pf_shared.sharpe_ratio(year_freq='365D'),
pd.Series(
np.array([-20.095906945591288, 12.345065267401496]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
pf_shared.sharpe_ratio(group_by=False),
pd.Series(
np.array([-13.30950646054953, -19.278625117344564, 12.345065267401496]),
index=price_na.columns
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
pf_shared.information_ratio(group_by=False),
pd.Series(
np.array([-0.9988561334618041, -0.8809478746008806, -0.884780642352239]),
index=price_na.columns
).rename('information_ratio')
)
with pytest.raises(Exception):
_ = pf_shared.information_ratio(pf_shared.benchmark_returns(group_by=False) * 2)
def test_stats(self):
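        # stats(): aggregated (mean) across columns, per column, and with custom
        # settings (freq, trade_type, risk_free, use_asset_returns, incl_open),
        # tag filtering, user-defined metrics, and settings-resolved arguments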
stats_index = pd.Index([
'Start', 'End', 'Period', 'Start Value', 'End Value',
'Total Return [%]', 'Benchmark Return [%]', 'Max Gross Exposure [%]',
'Total Fees Paid', 'Max Drawdown [%]', 'Max Drawdown Duration',
'Total Trades', 'Total Closed Trades', 'Total Open Trades',
'Open Trade PnL', 'Win Rate [%]', 'Best Trade [%]', 'Worst Trade [%]',
'Avg Winning Trade [%]', 'Avg Losing Trade [%]',
'Avg Winning Trade Duration', 'Avg Losing Trade Duration',
'Profit Factor', 'Expectancy', 'Sharpe Ratio', 'Calmar Ratio',
'Omega Ratio', 'Sortino Ratio'
], dtype='object')
pd.testing.assert_series_equal(
pf.stats(),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, 98.88877000000001, -1.11123, 283.3333333333333,
2.05906183131983, 0.42223000000000005, 1.6451238489727062, pd.Timedelta('3 days 08:00:00'),
2.0, 1.3333333333333333, 0.6666666666666666, -1.5042060606060605, 33.333333333333336,
-98.38058805880588, -100.8038553855386, 143.91625412541256, -221.34645964596464,
pd.Timedelta('2 days 12:00:00'), pd.Timedelta('2 days 00:00:00'), np.inf, 0.10827272727272726,
-6.751008013903537, 10378.930331014584, 4.768700318817701, 31.599760994679134
]),
index=stats_index,
name='agg_func_mean')
)
pd.testing.assert_series_equal(
pf.stats(column='a'),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, 99.68951, -0.3104899999999997, 150.0,
5.015572852148637, 0.35549, 0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
2, 1, 1, -0.20049999999999982, 0.0, -54.450495049504966, -54.450495049504966,
np.nan, -54.450495049504966, pd.NaT, pd.Timedelta('1 days 00:00:00'), 0.0,
-0.10999000000000003, -13.30804491478906, -65.40868619923044, 0.0, -11.738864633265454
]),
index=stats_index,
name='a')
)
pd.testing.assert_series_equal(
pf.stats(column='a', settings=dict(freq='10 days', year_freq='200 days')),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('50 days 00:00:00'), 100.0, 99.68951, -0.3104899999999997, 150.0,
5.015572852148637, 0.35549, 0.3104900000000015, pd.Timedelta('40 days 00:00:00'),
2, 1, 1, -0.20049999999999982, 0.0, -54.450495049504966, -54.450495049504966,
np.nan, -54.450495049504966, pd.NaT, pd.Timedelta('10 days 00:00:00'), 0.0, -0.10999000000000003,
-3.1151776875290866, -3.981409131683691, 0.0, -2.7478603669149457
]),
index=stats_index,
name='a')
)
pd.testing.assert_series_equal(
pf.stats(column='a', settings=dict(trade_type='positions')),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, 99.68951, -0.3104899999999997, 150.0,
5.015572852148637, 0.35549, 0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
2, 1, 1, -0.20049999999999982, 0.0, -54.450495049504966, -54.450495049504966,
np.nan, -54.450495049504966, pd.NaT, pd.Timedelta('1 days 00:00:00'), 0.0,
-0.10999000000000003, -13.30804491478906, -65.40868619923044, 0.0, -11.738864633265454
]),
index=pd.Index([
'Start', 'End', 'Period', 'Start Value', 'End Value',
'Total Return [%]', 'Benchmark Return [%]', 'Max Gross Exposure [%]',
'Total Fees Paid', 'Max Drawdown [%]', 'Max Drawdown Duration',
'Total Trades', 'Total Closed Trades', 'Total Open Trades',
'Open Trade PnL', 'Win Rate [%]', 'Best Trade [%]',
'Worst Trade [%]', 'Avg Winning Trade [%]',
'Avg Losing Trade [%]', 'Avg Winning Trade Duration',
'Avg Losing Trade Duration', 'Profit Factor', 'Expectancy',
'Sharpe Ratio', 'Calmar Ratio', 'Omega Ratio', 'Sortino Ratio'
], dtype='object'),
name='a')
)
pd.testing.assert_series_equal(
pf.stats(column='a', settings=dict(required_return=0.1, risk_free=0.01)),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, 99.68951, -0.3104899999999997, 150.0,
5.015572852148637, 0.35549, 0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
2, 1, 1, -0.20049999999999982, 0.0, -54.450495049504966, -54.450495049504966,
np.nan, -54.450495049504966, pd.NaT, pd.Timedelta('1 days 00:00:00'), 0.0,
-0.10999000000000003, -227.45862849586334, -65.40868619923044, 0.0, -19.104372472268942
]),
index=stats_index,
name='a')
)
pd.testing.assert_series_equal(
pf.stats(column='a', settings=dict(use_asset_returns=True)),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, 99.68951, -0.3104899999999997,
150.0, 5.015572852148637, 0.35549, 0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
2, 1, 1, -0.20049999999999982, 0.0, -54.450495049504966, -54.450495049504966, np.nan,
-54.450495049504966, pd.NaT, pd.Timedelta('1 days 00:00:00'), 0.0, -0.10999000000000003,
np.nan, np.nan, 0.0, np.nan
]),
index=stats_index,
name='a')
)
pd.testing.assert_series_equal(
pf.stats(column='a', settings=dict(incl_open=True)),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, 99.68951, -0.3104899999999997, 150.0,
5.015572852148637, 0.35549, 0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
2, 1, 1, -0.20049999999999982, 0.0, -3.9702970297029667, -54.450495049504966,
np.nan, -29.210396039603964, pd.NaT, pd.Timedelta('1 days 00:00:00'), 0.0,
-0.1552449999999999, -13.30804491478906, -65.40868619923044, 0.0, -11.738864633265454
]),
index=stats_index,
name='a')
)
pd.testing.assert_series_equal(
pf_grouped.stats(column='first'),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 200.0, 194.95809, -2.520955, 275.0, -0.505305454620791,
0.82091, 2.46248125751388, pd.Timedelta('4 days 00:00:00'), 4, 2, 2, -4.512618181818182,
0.0, -54.450495049504966, -388.2424242424243, np.nan, -221.34645964596461, pd.NaT,
pd.Timedelta('2 days 00:00:00'), 0.0, -0.2646459090909091, -20.095906945591288,
-34.312217430388344, 0.0, -14.554511690523578
]),
index=stats_index,
name='first')
)
pd.testing.assert_series_equal(
pf.stats(column='a', tags='trades and open and not closed', settings=dict(incl_open=True)),
pd.Series(
np.array([
1, -0.20049999999999982
]),
index=pd.Index([
'Total Open Trades', 'Open Trade PnL'
], dtype='object'),
name='a')
)
max_winning_streak = (
'max_winning_streak',
dict(
title='Max Winning Streak',
calc_func=lambda trades: trades.winning_streak.max(),
resolve_trades=True
)
)
pd.testing.assert_series_equal(
pf.stats(column='a', metrics=max_winning_streak),
pd.Series([0.0], index=['Max Winning Streak'], name='a')
)
max_winning_streak = (
'max_winning_streak',
dict(
title='Max Winning Streak',
calc_func=lambda self, group_by: self.get_trades(group_by=group_by).winning_streak.max()
)
)
pd.testing.assert_series_equal(
pf.stats(column='a', metrics=max_winning_streak),
pd.Series([0.0], index=['Max Winning Streak'], name='a')
)
max_winning_streak = (
'max_winning_streak',
dict(
title='Max Winning Streak',
calc_func=lambda self, settings:
self.get_trades(group_by=settings['group_by']).winning_streak.max(),
resolve_calc_func=False
)
)
pd.testing.assert_series_equal(
pf.stats(column='a', metrics=max_winning_streak),
pd.Series([0.0], index=['Max Winning Streak'], name='a')
)
vbt.settings.portfolio.stats['settings']['my_arg'] = 100
my_arg_metric = ('my_arg_metric', dict(title='My Arg', calc_func=lambda my_arg: my_arg))
pd.testing.assert_series_equal(
pf.stats(my_arg_metric, column='a'),
pd.Series([100], index=['My Arg'], name='a')
)
vbt.settings.portfolio.stats.reset()
pd.testing.assert_series_equal(
pf.stats(my_arg_metric, column='a', settings=dict(my_arg=200)),
            pd.Series([200], index=['My Arg'], name='a')
        )
# pylint: disable-msg=W0612,E1101,W0141
import nose
from numpy.random import randn
import numpy as np
from pandas.core.index import Index, MultiIndex
from pandas import Panel, DataFrame, Series, notnull, isnull
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.compat import (range, lrange, StringIO, lzip, u, cPickle,
product as cart_product, zip)
import pandas as pd
import pandas.index as _index
class TestMultiLevel(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
self.single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
# create test series object
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
s[3] = np.NaN
self.series = s
tm.N = 100
self.tdf = tm.makeTimeDataFrame()
self.ymd = self.tdf.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day]).sum()
# use Int64Index, to make sure things work
self.ymd.index.set_levels([lev.astype('i8')
for lev in self.ymd.index.levels],
inplace=True)
self.ymd.index.set_names(['year', 'month', 'day'],
inplace=True)
def test_append(self):
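        # appending the two halves should reconstruct the original frame and series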
a, b = self.frame[:5], self.frame[5:]
result = a.append(b)
tm.assert_frame_equal(result, self.frame)
result = a['A'].append(b['A'])
tm.assert_series_equal(result, self.frame['A'])
def test_dataframe_constructor(self):
multi = DataFrame(np.random.randn(4, 4),
index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
self.assertNotIsInstance(multi.columns, MultiIndex)
multi = DataFrame(np.random.randn(4, 4),
columns=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.columns, MultiIndex)
def test_series_constructor(self):
multi = Series(1., index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(1., index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(lrange(4), index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
def test_reindex_level(self):
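        # reindexing by a level should broadcast the level aggregates back onto
        # the full MultiIndex, for frames, series and along axis=1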
# axis=0
month_sums = self.ymd.sum(level='month')
result = month_sums.reindex(self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum)
assert_frame_equal(result, expected)
# Series
result = month_sums['A'].reindex(self.ymd.index, level=1)
expected = self.ymd['A'].groupby(level='month').transform(np.sum)
assert_series_equal(result, expected)
# axis=1
month_sums = self.ymd.T.sum(axis=1, level='month')
result = month_sums.reindex(columns=self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum).T
assert_frame_equal(result, expected)
def test_binops_level(self):
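        # binary ops with level= should broadcast the monthly sums across the
        # 'month' level, matching an explicit groupby/transform broadcast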
def _check_op(opname):
op = getattr(DataFrame, opname)
month_sums = self.ymd.sum(level='month')
result = op(self.ymd, month_sums, level='month')
broadcasted = self.ymd.groupby(level='month').transform(np.sum)
expected = op(self.ymd, broadcasted)
assert_frame_equal(result, expected)
# Series
op = getattr(Series, opname)
result = op(self.ymd['A'], month_sums['A'], level='month')
broadcasted = self.ymd['A'].groupby(
level='month').transform(np.sum)
expected = op(self.ymd['A'], broadcasted)
assert_series_equal(result, expected)
_check_op('sub')
_check_op('add')
_check_op('mul')
_check_op('div')
def test_pickle(self):
def _test_roundtrip(frame):
pickled = cPickle.dumps(frame)
unpickled = cPickle.loads(pickled)
assert_frame_equal(frame, unpickled)
_test_roundtrip(self.frame)
_test_roundtrip(self.frame.T)
_test_roundtrip(self.ymd)
_test_roundtrip(self.ymd.T)
def test_reindex(self):
reindexed = self.frame.ix[[('foo', 'one'), ('bar', 'one')]]
expected = self.frame.ix[[0, 3]]
assert_frame_equal(reindexed, expected)
def test_reindex_preserve_levels(self):
new_index = self.ymd.index[::10]
chunk = self.ymd.reindex(new_index)
self.assertIs(chunk.index, new_index)
chunk = self.ymd.ix[new_index]
self.assertIs(chunk.index, new_index)
ymdT = self.ymd.T
chunk = ymdT.reindex(columns=new_index)
self.assertIs(chunk.columns, new_index)
chunk = ymdT.ix[:, new_index]
self.assertIs(chunk.columns, new_index)
def test_sort_index_preserve_levels(self):
result = self.frame.sort_index()
self.assertEquals(result.index.names, self.frame.index.names)
def test_repr_to_string(self):
repr(self.frame)
repr(self.ymd)
repr(self.frame.T)
repr(self.ymd.T)
buf = StringIO()
self.frame.to_string(buf=buf)
self.ymd.to_string(buf=buf)
self.frame.T.to_string(buf=buf)
self.ymd.T.to_string(buf=buf)
def test_repr_name_coincide(self):
index = MultiIndex.from_tuples([('a', 0, 'foo'), ('b', 1, 'bar')],
names=['a', 'b', 'c'])
df = DataFrame({'value': [0, 1]}, index=index)
lines = repr(df).split('\n')
self.assert_(lines[2].startswith('a 0 foo'))
def test_getitem_simple(self):
df = self.frame.T
col = df['foo', 'one']
assert_almost_equal(col.values, df.values[:, 0])
self.assertRaises(KeyError, df.__getitem__, ('foo', 'four'))
self.assertRaises(KeyError, df.__getitem__, 'foobar')
def test_series_getitem(self):
s = self.ymd['A']
result = s[2000, 3]
result2 = s.ix[2000, 3]
expected = s.reindex(s.index[42:65])
expected.index = expected.index.droplevel(0).droplevel(0)
assert_series_equal(result, expected)
result = s[2000, 3, 10]
expected = s[49]
self.assertEquals(result, expected)
# fancy
result = s.ix[[(2000, 3, 10), (2000, 3, 13)]]
expected = s.reindex(s.index[49:51])
assert_series_equal(result, expected)
# key error
self.assertRaises(KeyError, s.__getitem__, (2000, 3, 4))
def test_series_getitem_corner(self):
s = self.ymd['A']
# don't segfault, GH #495
# out of bounds access
self.assertRaises(IndexError, s.__getitem__, len(self.ymd))
# generator
result = s[(x > 0 for x in s)]
expected = s[s > 0]
assert_series_equal(result, expected)
def test_series_setitem(self):
s = self.ymd['A']
s[2000, 3] = np.nan
self.assert_(isnull(s.values[42:65]).all())
self.assert_(notnull(s.values[:42]).all())
self.assert_(notnull(s.values[65:]).all())
s[2000, 3, 10] = np.nan
self.assert_(isnull(s[49]))
def test_series_slice_partial(self):
pass
def test_frame_getitem_setitem_boolean(self):
df = self.frame.T.copy()
values = df.values
result = df[df > 0]
expected = df.where(df > 0)
assert_frame_equal(result, expected)
df[df > 0] = 5
values[values > 0] = 5
assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
assert_almost_equal(df.values, values)
with assertRaisesRegexp(TypeError, 'boolean values only'):
df[df * 0] = 2
def test_frame_getitem_setitem_slice(self):
# getitem
result = self.frame.ix[:4]
expected = self.frame[:4]
assert_frame_equal(result, expected)
# setitem
cp = self.frame.copy()
cp.ix[:4] = 0
self.assert_((cp.values[:4] == 0).all())
self.assert_((cp.values[4:] != 0).all())
def test_frame_getitem_setitem_multislice(self):
levels = [['t1', 't2'], ['a', 'b', 'c']]
labels = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 1]]
midx = MultiIndex(labels=labels, levels=levels, names=[None, 'id'])
df = DataFrame({'value': [1, 2, 3, 7, 8]}, index=midx)
result = df.ix[:, 'value']
assert_series_equal(df['value'], result)
result = df.ix[1:3, 'value']
assert_series_equal(df['value'][1:3], result)
result = df.ix[:, :]
assert_frame_equal(df, result)
result = df
df.ix[:, 'value'] = 10
result['value'] = 10
assert_frame_equal(df, result)
df.ix[:, :] = 10
assert_frame_equal(df, result)
def test_frame_getitem_multicolumn_empty_level(self):
f = DataFrame({'a': ['1', '2', '3'],
'b': ['2', '3', '4']})
f.columns = [['level1 item1', 'level1 item2'],
['', 'level2 item2'],
['level3 item1', 'level3 item2']]
result = f['level1 item1']
expected = DataFrame([['1'], ['2'], ['3']], index=f.index,
columns=['level3 item1'])
assert_frame_equal(result, expected)
def test_frame_setitem_multi_column(self):
df = DataFrame(randn(10, 4), columns=[['a', 'a', 'b', 'b'],
[0, 1, 0, 1]])
cp = df.copy()
cp['a'] = cp['b']
assert_frame_equal(cp['a'], cp['b'])
# set with ndarray
cp = df.copy()
cp['a'] = cp['b'].values
assert_frame_equal(cp['a'], cp['b'])
#----------------------------------------
# #1803
columns = MultiIndex.from_tuples([('A', '1'), ('A', '2'), ('B', '1')])
df = DataFrame(index=[1, 3, 5], columns=columns)
# Works, but adds a column instead of updating the two existing ones
df['A'] = 0.0 # Doesn't work
self.assertTrue((df['A'].values == 0).all())
# it broadcasts
df['B', '1'] = [1, 2, 3]
df['A'] = df['B', '1']
assert_series_equal(df['A', '1'], df['B', '1'])
assert_series_equal(df['A', '2'], df['B', '1'])
def test_getitem_tuple_plus_slice(self):
# GH #671
df = DataFrame({'a': lrange(10),
'b': lrange(10),
'c': np.random.randn(10),
'd': np.random.randn(10)})
idf = df.set_index(['a', 'b'])
result = idf.ix[(0, 0), :]
expected = idf.ix[0, 0]
expected2 = idf.xs((0, 0))
assert_series_equal(result, expected)
assert_series_equal(result, expected2)
def test_getitem_setitem_tuple_plus_columns(self):
# GH #1013
df = self.ymd[:5]
result = df.ix[(2000, 1, 6), ['A', 'B', 'C']]
expected = df.ix[2000, 1, 6][['A', 'B', 'C']]
assert_series_equal(result, expected)
def test_getitem_multilevel_index_tuple_unsorted(self):
index_columns = list("abc")
df = DataFrame([[0, 1, 0, "x"], [0, 0, 1, "y"]],
columns=index_columns + ["data"])
df = df.set_index(index_columns)
query_index = df.index[:1]
rs = df.ix[query_index, "data"]
xp = Series(['x'], index=MultiIndex.from_tuples([(0, 1, 0)]))
assert_series_equal(rs, xp)
def test_xs(self):
xs = self.frame.xs(('bar', 'two'))
xs2 = self.frame.ix[('bar', 'two')]
assert_series_equal(xs, xs2)
assert_almost_equal(xs.values, self.frame.values[4])
# GH 6574
        # missing values in returned index should be preserved
acc = [
('a','abcde',1),
('b','bbcde',2),
('y','yzcde',25),
('z','xbcde',24),
('z',None,26),
('z','zbcde',25),
('z','ybcde',26),
]
df = DataFrame(acc, columns=['a1','a2','cnt']).set_index(['a1','a2'])
expected = DataFrame({ 'cnt' : [24,26,25,26] }, index=Index(['xbcde',np.nan,'zbcde','ybcde'],name='a2'))
result = df.xs('z',level='a1')
assert_frame_equal(result, expected)
def test_xs_partial(self):
result = self.frame.xs('foo')
result2 = self.frame.ix['foo']
expected = self.frame.T['foo'].T
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
result = self.ymd.xs((2000, 4))
expected = self.ymd.ix[2000, 4]
assert_frame_equal(result, expected)
# ex from #1796
index = MultiIndex(levels=[['foo', 'bar'], ['one', 'two'], [-1, 1]],
labels=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(8, 4), index=index,
columns=list('abcd'))
result = df.xs(['foo', 'one'])
expected = df.ix['foo', 'one']
assert_frame_equal(result, expected)
def test_xs_level(self):
result = self.frame.xs('two', level='second')
expected = self.frame[self.frame.index.get_level_values(1) == 'two']
expected.index = expected.index.droplevel(1)
assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([('x', 'y', 'z'), ('a', 'b', 'c'),
('p', 'q', 'r')])
df = DataFrame(np.random.randn(3, 5), index=index)
result = df.xs('c', level=2)
expected = df[1:2]
expected.index = expected.index.droplevel(2)
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = self.frame.xs('two', level='second')
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
def test_xs_level_multiple(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs(('a', 4), level=['one', 'four'])
expected = df.xs('a').xs(4, level='four')
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = df.xs(('a', 4), level=['one', 'four'])
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
# GH2107
dates = lrange(20111201, 20111205)
ids = 'abcde'
idx = MultiIndex.from_tuples([x for x in cart_product(dates, ids)])
idx.names = ['date', 'secid']
df = DataFrame(np.random.randn(len(idx), 3), idx, ['X', 'Y', 'Z'])
rs = df.xs(20111201, level='date')
xp = df.ix[20111201, :]
assert_frame_equal(rs, xp)
def test_xs_level0(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs('a', level=0)
expected = df.xs('a')
self.assertEqual(len(result), 2)
assert_frame_equal(result, expected)
def test_xs_level_series(self):
s = self.frame['A']
result = s[:, 'two']
expected = self.frame.xs('two', level=1)['A']
assert_series_equal(result, expected)
s = self.ymd['A']
result = s[2000, 5]
expected = self.ymd.ix[2000, 5]['A']
assert_series_equal(result, expected)
# not implementing this for now
self.assertRaises(TypeError, s.__getitem__, (2000, slice(3, 4)))
# result = s[2000, 3:4]
# lv =s.index.get_level_values(1)
# expected = s[(lv == 3) | (lv == 4)]
# expected.index = expected.index.droplevel(0)
# assert_series_equal(result, expected)
# can do this though
def test_get_loc_single_level(self):
s = Series(np.random.randn(len(self.single_level)),
index=self.single_level)
for k in self.single_level.values:
s[k]
def test_getitem_toplevel(self):
df = self.frame.T
result = df['foo']
expected = df.reindex(columns=df.columns[:3])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
result = df['bar']
result2 = df.ix[:, 'bar']
expected = df.reindex(columns=df.columns[3:5])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
def test_getitem_setitem_slice_integers(self):
index = MultiIndex(levels=[[0, 1, 2], [0, 2]],
labels=[[0, 0, 1, 1, 2, 2],
[0, 1, 0, 1, 0, 1]])
frame = DataFrame(np.random.randn(len(index), 4), index=index,
columns=['a', 'b', 'c', 'd'])
res = frame.ix[1:2]
exp = frame.reindex(frame.index[2:])
assert_frame_equal(res, exp)
frame.ix[1:2] = 7
self.assert_((frame.ix[1:2] == 7).values.all())
series = Series(np.random.randn(len(index)), index=index)
res = series.ix[1:2]
exp = series.reindex(series.index[2:])
assert_series_equal(res, exp)
series.ix[1:2] = 7
self.assert_((series.ix[1:2] == 7).values.all())
def test_getitem_int(self):
levels = [[0, 1], [0, 1, 2]]
labels = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
index = MultiIndex(levels=levels, labels=labels)
frame = DataFrame(np.random.randn(6, 2), index=index)
result = frame.ix[1]
expected = frame[-3:]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
# raises exception
self.assertRaises(KeyError, frame.ix.__getitem__, 3)
# however this will work
result = self.frame.ix[2]
expected = self.frame.xs(self.frame.index[2])
assert_series_equal(result, expected)
def test_getitem_partial(self):
ymd = self.ymd.T
result = ymd[2000, 2]
expected = ymd.reindex(columns=ymd.columns[ymd.columns.labels[1] == 1])
expected.columns = expected.columns.droplevel(0).droplevel(0)
assert_frame_equal(result, expected)
def test_getitem_slice_not_sorted(self):
df = self.frame.sortlevel(1).T
# buglet with int typechecking
result = df.ix[:, :np.int32(3)]
expected = df.reindex(columns=df.columns[:3])
assert_frame_equal(result, expected)
def test_setitem_change_dtype(self):
dft = self.frame.T
s = dft['foo', 'two']
dft['foo', 'two'] = s > s.median()
assert_series_equal(dft['foo', 'two'], s > s.median())
# tm.assert_isinstance(dft._data.blocks[1].items, MultiIndex)
reindexed = dft.reindex(columns=[('foo', 'two')])
assert_series_equal(reindexed['foo', 'two'], s > s.median())
def test_frame_setitem_ix(self):
self.frame.ix[('bar', 'two'), 'B'] = 5
self.assertEquals(self.frame.ix[('bar', 'two'), 'B'], 5)
# with integer labels
df = self.frame.copy()
df.columns = lrange(3)
df.ix[('bar', 'two'), 1] = 7
self.assertEquals(df.ix[('bar', 'two'), 1], 7)
def test_fancy_slice_partial(self):
result = self.frame.ix['bar':'baz']
expected = self.frame[3:7]
assert_frame_equal(result, expected)
result = self.ymd.ix[(2000, 2):(2000, 4)]
lev = self.ymd.index.labels[1]
expected = self.ymd[(lev >= 1) & (lev <= 3)]
assert_frame_equal(result, expected)
def test_getitem_partial_column_select(self):
idx = MultiIndex(labels=[[0, 0, 0], [0, 1, 1], [1, 0, 1]],
levels=[['a', 'b'], ['x', 'y'], ['p', 'q']])
df = DataFrame(np.random.rand(3, 2), index=idx)
result = df.ix[('a', 'y'), :]
expected = df.ix[('a', 'y')]
assert_frame_equal(result, expected)
result = df.ix[('a', 'y'), [1, 0]]
expected = df.ix[('a', 'y')][[1, 0]]
assert_frame_equal(result, expected)
self.assertRaises(KeyError, df.ix.__getitem__,
(('a', 'foo'), slice(None, None)))
def test_sortlevel(self):
df = self.frame.copy()
df.index = np.arange(len(df))
assertRaisesRegexp(TypeError, 'hierarchical index', df.sortlevel, 0)
# axis=1
# series
a_sorted = self.frame['A'].sortlevel(0)
with assertRaisesRegexp(TypeError, 'hierarchical index'):
self.frame.reset_index()['A'].sortlevel()
# preserve names
self.assertEquals(a_sorted.index.names, self.frame.index.names)
# inplace
rs = self.frame.copy()
rs.sortlevel(0, inplace=True)
assert_frame_equal(rs, self.frame.sortlevel(0))
def test_sortlevel_large_cardinality(self):
# #2684 (int64)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int64)
# it works!
result = df.sortlevel(0)
self.assertTrue(result.index.lexsort_depth == 3)
# #2684 (int32)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int32)
# it works!
result = df.sortlevel(0)
self.assert_((result.dtypes.values == df.dtypes.values).all() == True)
self.assertTrue(result.index.lexsort_depth == 3)
def test_delevel_infer_dtype(self):
tuples = [tuple for tuple in cart_product(['foo', 'bar'],
[10, 20], [1.0, 1.1])]
index = MultiIndex.from_tuples(tuples,
names=['prm0', 'prm1', 'prm2'])
df = DataFrame(np.random.randn(8, 3), columns=['A', 'B', 'C'],
index=index)
deleveled = df.reset_index()
self.assert_(com.is_integer_dtype(deleveled['prm1']))
self.assert_(com.is_float_dtype(deleveled['prm2']))
def test_reset_index_with_drop(self):
deleveled = self.ymd.reset_index(drop=True)
self.assertEquals(len(deleveled.columns), len(self.ymd.columns))
deleveled = self.series.reset_index()
tm.assert_isinstance(deleveled, DataFrame)
self.assertEqual(len(deleveled.columns),
len(self.series.index.levels) + 1)
deleveled = self.series.reset_index(drop=True)
tm.assert_isinstance(deleveled, Series)
def test_sortlevel_by_name(self):
self.frame.index.names = ['first', 'second']
result = self.frame.sortlevel(level='second')
expected = self.frame.sortlevel(level=1)
assert_frame_equal(result, expected)
def test_sortlevel_mixed(self):
sorted_before = self.frame.sortlevel(1)
df = self.frame.copy()
df['foo'] = 'bar'
sorted_after = df.sortlevel(1)
assert_frame_equal(sorted_before, sorted_after.drop(['foo'], axis=1))
dft = self.frame.T
sorted_before = dft.sortlevel(1, axis=1)
dft['foo', 'three'] = 'bar'
sorted_after = dft.sortlevel(1, axis=1)
assert_frame_equal(sorted_before.drop([('foo', 'three')], axis=1),
sorted_after.drop([('foo', 'three')], axis=1))
def test_count_level(self):
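        # count(level=...) should match groupby(level=...).count() on both axes,
        # and raise on a non-hierarchical index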
def _check_counts(frame, axis=0):
index = frame._get_axis(axis)
for i in range(index.nlevels):
result = frame.count(axis=axis, level=i)
expected = frame.groupby(axis=axis, level=i).count(axis=axis)
expected = expected.reindex_like(result).astype('i8')
assert_frame_equal(result, expected)
self.frame.ix[1, [1, 2]] = np.nan
self.frame.ix[7, [0, 1]] = np.nan
self.ymd.ix[1, [1, 2]] = np.nan
self.ymd.ix[7, [0, 1]] = np.nan
_check_counts(self.frame)
_check_counts(self.ymd)
_check_counts(self.frame.T, axis=1)
_check_counts(self.ymd.T, axis=1)
# can't call with level on regular DataFrame
df = tm.makeTimeDataFrame()
assertRaisesRegexp(TypeError, 'hierarchical', df.count, level=0)
self.frame['D'] = 'foo'
result = self.frame.count(level=0, numeric_only=True)
assert_almost_equal(result.columns, ['A', 'B', 'C'])
def test_count_level_series(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz'],
['one', 'two', 'three', 'four']],
labels=[[0, 0, 0, 2, 2],
[2, 0, 1, 1, 2]])
s = Series(np.random.randn(len(index)), index=index)
result = s.count(level=0)
expected = s.groupby(level=0).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
result = s.count(level=1)
expected = s.groupby(level=1).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
def test_count_level_corner(self):
s = self.frame['A'][:0]
result = s.count(level=0)
expected = Series(0, index=s.index.levels[0])
assert_series_equal(result, expected)
df = self.frame[:0]
result = df.count(level=0)
expected = DataFrame({}, index=s.index.levels[0],
columns=df.columns).fillna(0).astype(np.int64)
assert_frame_equal(result, expected)
def test_unstack(self):
# just check that it works for now
unstacked = self.ymd.unstack()
unstacked2 = unstacked.unstack()
# test that ints work
unstacked = self.ymd.astype(int).unstack()
# test that int32 work
unstacked = self.ymd.astype(np.int32).unstack()
def test_unstack_multiple_no_empty_columns(self):
index = MultiIndex.from_tuples([(0, 'foo', 0), (0, 'bar', 0),
(1, 'baz', 1), (1, 'qux', 1)])
s = Series(np.random.randn(4), index=index)
unstacked = s.unstack([1, 2])
expected = unstacked.dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
def test_stack(self):
# regular roundtrip
unstacked = self.ymd.unstack()
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
unlexsorted = self.ymd.sortlevel(2)
unstacked = unlexsorted.unstack(2)
restacked = unstacked.stack()
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted[::-1]
unstacked = unlexsorted.unstack(1)
restacked = unstacked.stack().swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted.swaplevel(0, 1)
unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1)
restacked = unstacked.stack(0).swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
# columns unsorted
unstacked = self.ymd.unstack()
unstacked = unstacked.sort(axis=1, ascending=False)
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
# more than 2 levels in the columns
unstacked = self.ymd.unstack(1).unstack(1)
result = unstacked.stack(1)
expected = self.ymd.unstack()
assert_frame_equal(result, expected)
result = unstacked.stack(2)
expected = self.ymd.unstack(1)
assert_frame_equal(result, expected)
result = unstacked.stack(0)
expected = self.ymd.stack().unstack(1).unstack(1)
assert_frame_equal(result, expected)
# not all levels present in each echelon
unstacked = self.ymd.unstack(2).ix[:, ::3]
stacked = unstacked.stack().stack()
ymd_stacked = self.ymd.stack()
assert_series_equal(stacked, ymd_stacked.reindex(stacked.index))
        # stack with negative number
        result = self.ymd.unstack(0).stack(-2)
        expected = self.ymd.unstack(0).stack(0)
        assert_frame_equal(result, expected)
def test_unstack_odd_failure(self):
data = """day,time,smoker,sum,len
Fri,Dinner,No,8.25,3.
Fri,Dinner,Yes,27.03,9
Fri,Lunch,No,3.0,1
Fri,Lunch,Yes,13.68,6
Sat,Dinner,No,139.63,45
Sat,Dinner,Yes,120.77,42
Sun,Dinner,No,180.57,57
Sun,Dinner,Yes,66.82,19
Thur,Dinner,No,3.0,1
Thur,Lunch,No,117.32,44
Thur,Lunch,Yes,51.51,17"""
df = pd.read_csv(StringIO(data)).set_index(['day', 'time', 'smoker'])
# it works, #2100
result = df.unstack(2)
recons = result.stack()
assert_frame_equal(recons, df)
def test_stack_mixed_dtype(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
df = df.sortlevel(1, axis=1)
stacked = df.stack()
assert_series_equal(stacked['foo'], df['foo'].stack())
self.assertEqual(stacked['bar'].dtype, np.float_)
def test_unstack_bug(self):
df = DataFrame({'state': ['naive', 'naive', 'naive',
'activ', 'activ', 'activ'],
'exp': ['a', 'b', 'b', 'b', 'a', 'a'],
'barcode': [1, 2, 3, 4, 1, 3],
'v': ['hi', 'hi', 'bye', 'bye', 'bye', 'peace'],
'extra': np.arange(6.)})
result = df.groupby(['state', 'exp', 'barcode', 'v']).apply(len)
unstacked = result.unstack()
restacked = unstacked.stack()
assert_series_equal(restacked,
result.reindex(restacked.index).astype(float))
def test_stack_unstack_preserve_names(self):
unstacked = self.frame.unstack()
self.assertEquals(unstacked.index.name, 'first')
self.assertEquals(unstacked.columns.names, ['exp', 'second'])
restacked = unstacked.stack()
self.assertEquals(restacked.index.names, self.frame.index.names)
def test_unstack_level_name(self):
result = self.frame.unstack('second')
expected = self.frame.unstack(level=1)
assert_frame_equal(result, expected)
def test_stack_level_name(self):
unstacked = self.frame.unstack('second')
result = unstacked.stack('exp')
expected = self.frame.unstack().stack(0)
assert_frame_equal(result, expected)
result = self.frame.stack('exp')
expected = self.frame.stack()
assert_series_equal(result, expected)
def test_stack_unstack_multiple(self):
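        # unstacking/stacking several levels at once should round-trip and agree
        # with chained single-level unstack calls (GH #451)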
unstacked = self.ymd.unstack(['year', 'month'])
expected = self.ymd.unstack('year').unstack('month')
assert_frame_equal(unstacked, expected)
self.assertEquals(unstacked.columns.names,
expected.columns.names)
# series
s = self.ymd['A']
s_unstacked = s.unstack(['year', 'month'])
assert_frame_equal(s_unstacked, expected['A'])
restacked = unstacked.stack(['year', 'month'])
restacked = restacked.swaplevel(0, 1).swaplevel(1, 2)
restacked = restacked.sortlevel(0)
assert_frame_equal(restacked, self.ymd)
self.assertEquals(restacked.index.names, self.ymd.index.names)
# GH #451
unstacked = self.ymd.unstack([1, 2])
expected = self.ymd.unstack(1).unstack(1).dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
unstacked = self.ymd.unstack([2, 1])
expected = self.ymd.unstack(2).unstack(1).dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected.ix[:, unstacked.columns])
def test_unstack_period_series(self):
# GH 4342
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period')
idx2 = Index(['A', 'B'] * 3, name='str')
value = [1, 2, 3, 4, 5, 6]
idx = MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(['2013-01', '2013-02', '2013-03'], freq='M', name='period')
expected = DataFrame({'A': [1, 3, 5], 'B': [2, 4, 6]}, index=e_idx,
columns=['A', 'B'])
expected.columns.name = 'str'
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected.T)
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07'], freq='M', name='period2')
idx = pd.MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(['2013-01', '2013-02', '2013-03'], freq='M', name='period1')
e_cols = pd.PeriodIndex(['2013-07', '2013-08', '2013-09', '2013-10',
'2013-11', '2013-12'], freq='M', name='period2')
expected = DataFrame([[np.nan, np.nan, np.nan, np.nan, 2, 1],
[np.nan, np.nan, 4, 3, np.nan, np.nan],
[6, 5, np.nan, np.nan, np.nan, np.nan]],
index=e_idx, columns=e_cols)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected.T)
def test_unstack_period_frame(self):
# GH 4342
idx1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-02', '2014-02', '2014-01', '2014-01'],
freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-12', '2014-02', '2013-10', '2013-10', '2014-02'],
freq='M', name='period2')
value = {'A': [1, 2, 3, 4, 5, 6], 'B': [6, 5, 4, 3, 2, 1]}
idx = pd.MultiIndex.from_arrays([idx1, idx2])
df = pd.DataFrame(value, index=idx)
result1 = df.unstack()
result2 = df.unstack(level=1)
result3 = df.unstack(level=0)
e_1 = pd.PeriodIndex(['2014-01', '2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02', '2013-10',
'2013-12', '2014-02'], freq='M', name='period2')
e_cols = pd.MultiIndex.from_arrays(['A A A B B B'.split(), e_2])
expected = DataFrame([[5, 1, 6, 2, 6, 1], [4, 2, 3, 3, 5, 4]],
index=e_1, columns=e_cols)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
e_1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-01',
'2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02'], freq='M', name='period2')
e_cols = pd.MultiIndex.from_arrays(['A A B B'.split(), e_1])
expected = DataFrame([[5, 4, 2, 3], [1, 2, 6, 5], [6, 3, 1, 4]],
index=e_2, columns=e_cols)
assert_frame_equal(result3, expected)
def test_stack_multiple_bug(self):
""" bug when some uniques are not present in the data #3170"""
id_col = ([1] * 3) + ([2] * 3)
name = (['a'] * 3) + (['b'] * 3)
date = pd.to_datetime(['2013-01-03', '2013-01-04', '2013-01-05'] * 2)
var1 = np.random.randint(0, 100, 6)
df = DataFrame(dict(ID=id_col, NAME=name, DATE=date, VAR1=var1))
multi = df.set_index(['DATE', 'ID'])
multi.columns.name = 'Params'
unst = multi.unstack('ID')
down = unst.resample('W-THU')
rs = down.stack('ID')
xp = unst.ix[:, ['VAR1']].resample('W-THU').stack('ID')
xp.columns.name = 'Params'
assert_frame_equal(rs, xp)
def test_stack_dropna(self):
# GH #3997
df = pd.DataFrame({'A': ['a1', 'a2'],
'B': ['b1', 'b2'],
'C': [1, 1]})
df = df.set_index(['A', 'B'])
stacked = df.unstack().stack(dropna=False)
self.assertTrue(len(stacked) > len(stacked.dropna()))
stacked = df.unstack().stack(dropna=True)
assert_frame_equal(stacked, stacked.dropna())
def test_unstack_multiple_hierarchical(self):
df = DataFrame(index=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]],
columns=[[0, 0, 1, 1], [0, 1, 0, 1]])
df.index.names = ['a', 'b', 'c']
df.columns.names = ['d', 'e']
# it works!
df.unstack(['b', 'c'])
def test_groupby_transform(self):
s = self.frame['A']
grouper = s.index.get_level_values(0)
grouped = s.groupby(grouper)
applied = grouped.apply(lambda x: x * 2)
expected = grouped.transform(lambda x: x * 2)
assert_series_equal(applied.reindex(expected.index), expected)
def test_unstack_sparse_keyspace(self):
# memory problems with naive impl #2278
# Generate Long File & Test Pivot
NUM_ROWS = 1000
df = DataFrame({'A': np.random.randint(100, size=NUM_ROWS),
'B': np.random.randint(300, size=NUM_ROWS),
'C': np.random.randint(-7, 7, size=NUM_ROWS),
'D': np.random.randint(-19, 19, size=NUM_ROWS),
'E': np.random.randint(3000, size=NUM_ROWS),
'F': np.random.randn(NUM_ROWS)})
idf = df.set_index(['A', 'B', 'C', 'D', 'E'])
# it works! is sufficient
idf.unstack('E')
def test_unstack_unobserved_keys(self):
# related to #2278 refactoring
levels = [[0, 1], [0, 1, 2, 3]]
labels = [[0, 0, 1, 1], [0, 2, 0, 2]]
index = MultiIndex(levels, labels)
df = DataFrame(np.random.randn(4, 2), index=index)
result = df.unstack()
self.assertEquals(len(result.columns), 4)
recons = result.stack()
assert_frame_equal(recons, df)
def test_groupby_corner(self):
midx = MultiIndex(levels=[['foo'], ['bar'], ['baz']],
labels=[[0], [0], [0]], names=['one', 'two', 'three'])
df = DataFrame([np.random.rand(4)], columns=['a', 'b', 'c', 'd'],
index=midx)
# should work
df.groupby(level='three')
def test_groupby_level_no_obs(self):
# #1697
midx = MultiIndex.from_tuples([('f1', 's1'), ('f1', 's2'),
('f2', 's1'), ('f2', 's2'),
('f3', 's1'), ('f3', 's2')])
df = DataFrame(
[[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], columns=midx)
df1 = df.select(lambda u: u[0] in ['f2', 'f3'], axis=1)
grouped = df1.groupby(axis=1, level=0)
result = grouped.sum()
self.assert_((result.columns == ['f2', 'f3']).all())
def test_join(self):
a = self.frame.ix[:5, ['A']]
b = self.frame.ix[2:, ['B', 'C']]
joined = a.join(b, how='outer').reindex(self.frame.index)
expected = self.frame.copy()
expected.values[np.isnan(joined.values)] = np.nan
self.assert_(not np.isnan(joined.values).all())
assert_frame_equal(joined, expected, check_names=False) # TODO what should join do with names ?
def test_swaplevel(self):
swapped = self.frame['A'].swaplevel(0, 1)
swapped2 = self.frame['A'].swaplevel('first', 'second')
self.assert_(not swapped.index.equals(self.frame.index))
assert_series_equal(swapped, swapped2)
back = swapped.swaplevel(0, 1)
back2 = swapped.swaplevel('second', 'first')
self.assert_(back.index.equals(self.frame.index))
assert_series_equal(back, back2)
ft = self.frame.T
swapped = ft.swaplevel('first', 'second', axis=1)
exp = self.frame.swaplevel('first', 'second').T
assert_frame_equal(swapped, exp)
def test_swaplevel_panel(self):
panel = Panel({'ItemA': self.frame,
'ItemB': self.frame * 2})
result = panel.swaplevel(0, 1, axis='major')
expected = panel.copy()
expected.major_axis = expected.major_axis.swaplevel(0, 1)
tm.assert_panel_equal(result, expected)
def test_reorder_levels(self):
result = self.ymd.reorder_levels(['month', 'day', 'year'])
expected = self.ymd.swaplevel(0, 1).swaplevel(1, 2)
assert_frame_equal(result, expected)
result = self.ymd['A'].reorder_levels(['month', 'day', 'year'])
expected = self.ymd['A'].swaplevel(0, 1).swaplevel(1, 2)
assert_series_equal(result, expected)
result = self.ymd.T.reorder_levels(['month', 'day', 'year'], axis=1)
expected = self.ymd.T.swaplevel(0, 1, axis=1).swaplevel(1, 2, axis=1)
assert_frame_equal(result, expected)
with assertRaisesRegexp(TypeError, 'hierarchical axis'):
self.ymd.reorder_levels([1, 2], axis=1)
with assertRaisesRegexp(IndexError, 'Too many levels'):
self.ymd.index.reorder_levels([1, 2, 3])
def test_insert_index(self):
df = self.ymd[:5].T
df[2000, 1, 10] = df[2000, 1, 7]
tm.assert_isinstance(df.columns, MultiIndex)
self.assert_((df[2000, 1, 10] == df[2000, 1, 7]).all())
def test_alignment(self):
x = Series(data=[1, 2, 3],
index=MultiIndex.from_tuples([("A", 1), ("A", 2), ("B", 3)]))
y = Series(data=[4, 5, 6],
index=MultiIndex.from_tuples([("Z", 1), ("Z", 2), ("B", 3)]))
res = x - y
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
assert_series_equal(res, exp)
# hit non-monotonic code path
res = x[::-1] - y[::-1]
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
assert_series_equal(res, exp)
def test_is_lexsorted(self):
levels = [[0, 1], [0, 1, 2]]
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 1, 2]])
self.assert_(index.is_lexsorted())
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 2, 1]])
self.assert_(not index.is_lexsorted())
index = MultiIndex(levels=levels,
labels=[[0, 0, 1, 0, 1, 1],
[0, 1, 0, 2, 2, 1]])
self.assert_(not index.is_lexsorted())
self.assertEqual(index.lexsort_depth, 0)
def test_frame_getitem_view(self):
df = self.frame.T.copy()
# this works because we are modifying the underlying array
# really a no-no
df['foo'].values[:] = 0
self.assert_((df['foo'].values == 0).all())
# but not if it's mixed-type
df['foo', 'four'] = 'foo'
df = df.sortlevel(0, axis=1)
# this will work, but will raise/warn as its chained assignment
def f():
df['foo']['one'] = 2
return df
self.assertRaises(com.SettingWithCopyError, f)
try:
df = f()
except:
pass
self.assert_((df['foo', 'one'] == 0).all())
def test_frame_getitem_not_sorted(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
arrays = [np.array(x) for x in zip(*df.columns._tuple_index)]
result = df['foo']
result2 = df.ix[:, 'foo']
expected = df.reindex(columns=df.columns[arrays[0] == 'foo'])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
df = df.T
result = df.xs('foo')
result2 = df.ix['foo']
expected = df.reindex(df.index[arrays[0] == 'foo'])
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_series_getitem_not_sorted(self):
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
arrays = [np.array(x) for x in zip(*index._tuple_index)]
result = s['qux']
result2 = s.ix['qux']
expected = s[arrays[0] == 'qux']
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_count(self):
frame = self.frame.copy()
frame.index.names = ['a', 'b']
result = frame.count(level='b')
expect = self.frame.count(level=1)
assert_frame_equal(result, expect, check_names=False)
result = frame.count(level='a')
expect = self.frame.count(level=0)
assert_frame_equal(result, expect, check_names=False)
series = self.series.copy()
series.index.names = ['a', 'b']
result = series.count(level='b')
expect = self.series.count(level=1)
assert_series_equal(result, expect)
result = series.count(level='a')
expect = self.series.count(level=0)
assert_series_equal(result, expect)
self.assertRaises(KeyError, series.count, 'x')
self.assertRaises(KeyError, frame.count, level='x')
AGG_FUNCTIONS = ['sum', 'prod', 'min', 'max', 'median', 'mean', 'skew',
'mad', 'std', 'var']
def test_series_group_min_max(self):
for op, level, skipna in cart_product(self.AGG_FUNCTIONS,
lrange(2),
[False, True]):
grouped = self.series.groupby(level=level)
aggf = lambda x: getattr(x, op)(skipna=skipna)
            # compare groupby aggregation against the level-aware Series reduction
leftside = grouped.agg(aggf)
rightside = getattr(self.series, op)(level=level, skipna=skipna)
assert_series_equal(leftside, rightside)
def test_frame_group_ops(self):
self.frame.ix[1, [1, 2]] = np.nan
self.frame.ix[7, [0, 1]] = np.nan
for op, level, axis, skipna in cart_product(self.AGG_FUNCTIONS,
lrange(2), lrange(2),
[False, True]):
if axis == 0:
frame = self.frame
else:
frame = self.frame.T
grouped = frame.groupby(level=level, axis=axis)
pieces = []
def aggf(x):
pieces.append(x)
return getattr(x, op)(skipna=skipna, axis=axis)
leftside = grouped.agg(aggf)
rightside = getattr(frame, op)(level=level, axis=axis,
skipna=skipna)
# for good measure, groupby detail
level_index = frame._get_axis(axis).levels[level]
self.assert_(leftside._get_axis(axis).equals(level_index))
self.assert_(rightside._get_axis(axis).equals(level_index))
assert_frame_equal(leftside, rightside)
def test_stat_op_corner(self):
obj = Series([10.0], index=MultiIndex.from_tuples([(2, 3)]))
result = obj.sum(level=0)
expected = Series([10.0], index=[2])
assert_series_equal(result, expected)
def test_frame_any_all_group(self):
df = DataFrame(
{'data': [False, False, True, False, True, False, True]},
index=[
['one', 'one', 'two', 'one', 'two', 'two', 'two'],
[0, 1, 0, 2, 1, 2, 3]])
result = df.any(level=0)
ex = DataFrame({'data': [False, True]}, index=['one', 'two'])
assert_frame_equal(result, ex)
result = df.all(level=0)
ex = DataFrame({'data': [False, False]}, index=['one', 'two'])
assert_frame_equal(result, ex)
def test_std_var_pass_ddof(self):
index = MultiIndex.from_arrays([np.arange(5).repeat(10),
np.tile(np.arange(10), 5)])
df = DataFrame(np.random.randn(len(index), 5), index=index)
for meth in ['var', 'std']:
ddof = 4
alt = lambda x: getattr(x, meth)(ddof=ddof)
result = getattr(df[0], meth)(level=0, ddof=ddof)
expected = df[0].groupby(level=0).agg(alt)
assert_series_equal(result, expected)
result = getattr(df, meth)(level=0, ddof=ddof)
expected = df.groupby(level=0).agg(alt)
assert_frame_equal(result, expected)
def test_frame_series_agg_multiple_levels(self):
result = self.ymd.sum(level=['year', 'month'])
expected = self.ymd.groupby(level=['year', 'month']).sum()
assert_frame_equal(result, expected)
result = self.ymd['A'].sum(level=['year', 'month'])
expected = self.ymd['A'].groupby(level=['year', 'month']).sum()
assert_series_equal(result, expected)
def test_groupby_multilevel(self):
result = self.ymd.groupby(level=[0, 1]).mean()
k1 = self.ymd.index.get_level_values(0)
k2 = self.ymd.index.get_level_values(1)
expected = self.ymd.groupby([k1, k2]).mean()
assert_frame_equal(result, expected, check_names=False) # TODO groupby with level_values drops names
self.assertEquals(result.index.names, self.ymd.index.names[:2])
result2 = self.ymd.groupby(level=self.ymd.index.names[:2]).mean()
assert_frame_equal(result, result2)
def test_groupby_multilevel_with_transform(self):
pass
def test_multilevel_consolidate(self):
index = MultiIndex.from_tuples([('foo', 'one'), ('foo', 'two'),
('bar', 'one'), ('bar', 'two')])
df = DataFrame(np.random.randn(4, 4), index=index, columns=index)
df['Totals', ''] = df.sum(1)
df = df.consolidate()
def test_ix_preserve_names(self):
result = self.ymd.ix[2000]
result2 = self.ymd['A'].ix[2000]
self.assertEquals(result.index.names, self.ymd.index.names[1:])
self.assertEquals(result2.index.names, self.ymd.index.names[1:])
result = self.ymd.ix[2000, 2]
result2 = self.ymd['A'].ix[2000, 2]
self.assertEquals(result.index.name, self.ymd.index.names[2])
self.assertEquals(result2.index.name, self.ymd.index.names[2])
def test_partial_set(self):
# GH #397
df = self.ymd.copy()
exp = self.ymd.copy()
df.ix[2000, 4] = 0
exp.ix[2000, 4].values[:] = 0
assert_frame_equal(df, exp)
df['A'].ix[2000, 4] = 1
exp['A'].ix[2000, 4].values[:] = 1
assert_frame_equal(df, exp)
df.ix[2000] = 5
exp.ix[2000].values[:] = 5
assert_frame_equal(df, exp)
# this works...for now
df['A'].ix[14] = 5
self.assertEquals(df['A'][14], 5)
def test_unstack_preserve_types(self):
# GH #403
self.ymd['E'] = 'foo'
self.ymd['F'] = 2
unstacked = self.ymd.unstack('month')
self.assertEqual(unstacked['A', 1].dtype, np.float64)
self.assertEqual(unstacked['E', 1].dtype, np.object_)
self.assertEqual(unstacked['F', 1].dtype, np.float64)
def test_unstack_group_index_overflow(self):
labels = np.tile(np.arange(500), 2)
level = np.arange(500)
index = MultiIndex(levels=[level] * 8 + [[0, 1]],
labels=[labels] * 8 + [np.arange(2).repeat(500)])
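        # 2 * 500**8 possible label combinations do not fit in a 64-bit group index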
s = Series(np.arange(1000), index=index)
result = s.unstack()
self.assertEqual(result.shape, (500, 2))
# test roundtrip
stacked = result.stack()
assert_series_equal(s,
stacked.reindex(s.index))
# put it at beginning
index = MultiIndex(levels=[[0, 1]] + [level] * 8,
labels=[np.arange(2).repeat(500)] + [labels] * 8)
s = Series(np.arange(1000), index=index)
result = s.unstack(0)
self.assertEqual(result.shape, (500, 2))
# put it in middle
index = MultiIndex(levels=[level] * 4 + [[0, 1]] + [level] * 4,
labels=([labels] * 4 + [np.arange(2).repeat(500)]
+ [labels] * 4))
s = Series(np.arange(1000), index=index)
result = s.unstack(4)
self.assertEqual(result.shape, (500, 2))
def test_getitem_lowerdim_corner(self):
self.assertRaises(KeyError, self.frame.ix.__getitem__,
(('bar', 'three'), 'B'))
# in theory should be inserting in a sorted space????
        self.frame.ix[('bar', 'three'), 'B'] = 0
        self.assertEqual(self.frame.sortlevel().ix[('bar', 'three'), 'B'], 0)
#----------------------------------------------------------------------
# AMBIGUOUS CASES!
def test_partial_ix_missing(self):
raise nose.SkipTest("skipping for now")
result = self.ymd.ix[2000, 0]
expected = self.ymd.ix[2000]['A']
assert_series_equal(result, expected)
# need to put in some work here
# self.ymd.ix[2000, 0] = 0
# self.assert_((self.ymd.ix[2000]['A'] == 0).all())
# Pretty sure the second (and maybe even the first) is already wrong.
self.assertRaises(Exception, self.ymd.ix.__getitem__, (2000, 6))
self.assertRaises(Exception, self.ymd.ix.__getitem__, (2000, 6), 0)
#----------------------------------------------------------------------
def test_to_html(self):
self.ymd.columns.name = 'foo'
self.ymd.to_html()
self.ymd.T.to_html()
def test_level_with_tuples(self):
index = MultiIndex(levels=[[('foo', 'bar', 0), ('foo', 'baz', 0),
('foo', 'qux', 0)],
[0, 1]],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[('foo', 'bar', 0)]
result2 = series.ix[('foo', 'bar', 0)]
expected = series[:2]
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertRaises(KeyError, series.__getitem__, (('foo', 'bar', 0), 2))
result = frame.ix[('foo', 'bar', 0)]
result2 = frame.xs(('foo', 'bar', 0))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
index = MultiIndex(levels=[[('foo', 'bar'), ('foo', 'baz'),
('foo', 'qux')],
[0, 1]],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[('foo', 'bar')]
result2 = series.ix[('foo', 'bar')]
expected = series[:2]
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = frame.ix[('foo', 'bar')]
result2 = frame.xs(('foo', 'bar'))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_int_series_slicing(self):
s = self.ymd['A']
result = s[5:]
expected = s.reindex(s.index[5:])
assert_series_equal(result, expected)
exp = self.ymd['A'].copy()
s[5:] = 0
exp.values[5:] = 0
self.assert_numpy_array_equal(s.values, exp.values)
result = self.ymd[5:]
expected = self.ymd.reindex(s.index[5:])
assert_frame_equal(result, expected)
def test_mixed_depth_get(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df['a']
expected = df['a', '', '']
assert_series_equal(result, expected)
self.assertEquals(result.name, 'a')
result = df['routine1', 'result1']
expected = df['routine1', 'result1', '']
assert_series_equal(result, expected)
self.assertEquals(result.name, ('routine1', 'result1'))
def test_mixed_depth_insert(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df.copy()
expected = df.copy()
result['b'] = [1, 2, 3, 4]
expected['b', '', ''] = [1, 2, 3, 4]
assert_frame_equal(result, expected)
def test_mixed_depth_drop(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df.drop('a', axis=1)
expected = df.drop([('a', '', '')], axis=1)
assert_frame_equal(expected, result)
result = df.drop(['top'], axis=1)
expected = df.drop([('top', 'OD', 'wx')], axis=1)
expected = expected.drop([('top', 'OD', 'wy')], axis=1)
assert_frame_equal(expected, result)
result = df.drop(('top', 'OD', 'wx'), axis=1)
expected = df.drop([('top', 'OD', 'wx')], axis=1)
assert_frame_equal(expected, result)
expected = df.drop([('top', 'OD', 'wy')], axis=1)
expected = df.drop('top', axis=1)
result = df.drop('result1', level=1, axis=1)
expected = df.drop([('routine1', 'result1', ''),
('routine2', 'result1', '')], axis=1)
assert_frame_equal(expected, result)
def test_drop_nonunique(self):
df = DataFrame([["x-a", "x", "a", 1.5], ["x-a", "x", "a", 1.2],
["z-c", "z", "c", 3.1], ["x-a", "x", "a", 4.1],
["x-b", "x", "b", 5.1], ["x-b", "x", "b", 4.1],
["x-b", "x", "b", 2.2],
["y-a", "y", "a", 1.2], ["z-b", "z", "b", 2.1]],
columns=["var1", "var2", "var3", "var4"])
grp_size = df.groupby("var1").size()
drop_idx = grp_size.ix[grp_size == 1]
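        # var1 groups that occur only once; these index values are dropped below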
idf = df.set_index(["var1", "var2", "var3"])
# it works! #2101
result = idf.drop(drop_idx.index, level=0).reset_index()
expected = df[-df.var1.isin(drop_idx.index)]
result.index = expected.index
assert_frame_equal(result, expected)
def test_mixed_depth_pop(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
df1 = df.copy()
df2 = df.copy()
result = df1.pop('a')
expected = df2.pop(('a', '', ''))
assert_series_equal(expected, result)
assert_frame_equal(df1, df2)
self.assertEquals(result.name, 'a')
expected = df1['top']
df1 = df1.drop(['top'], axis=1)
result = df2.pop('top')
assert_frame_equal(expected, result)
assert_frame_equal(df1, df2)
def test_reindex_level_partial_selection(self):
result = self.frame.reindex(['foo', 'qux'], level=0)
expected = self.frame.ix[[0, 1, 2, 7, 8, 9]]
assert_frame_equal(result, expected)
result = self.frame.T.reindex_axis(['foo', 'qux'], axis=1, level=0)
assert_frame_equal(result, expected.T)
result = self.frame.ix[['foo', 'qux']]
assert_frame_equal(result, expected)
result = self.frame['A'].ix[['foo', 'qux']]
assert_series_equal(result, expected['A'])
result = self.frame.T.ix[:, ['foo', 'qux']]
assert_frame_equal(result, expected.T)
def test_setitem_multiple_partial(self):
expected = self.frame.copy()
result = self.frame.copy()
result.ix[['foo', 'bar']] = 0
expected.ix['foo'] = 0
expected.ix['bar'] = 0
assert_frame_equal(result, expected)
expected = self.frame.copy()
result = self.frame.copy()
result.ix['foo':'bar'] = 0
expected.ix['foo'] = 0
expected.ix['bar'] = 0
assert_frame_equal(result, expected)
expected = self.frame['A'].copy()
result = self.frame['A'].copy()
result.ix[['foo', 'bar']] = 0
expected.ix['foo'] = 0
expected.ix['bar'] = 0
assert_series_equal(result, expected)
expected = self.frame['A'].copy()
result = self.frame['A'].copy()
result.ix['foo':'bar'] = 0
expected.ix['foo'] = 0
expected.ix['bar'] = 0
assert_series_equal(result, expected)
def test_drop_level(self):
result = self.frame.drop(['bar', 'qux'], level='first')
expected = self.frame.ix[[0, 1, 2, 5, 6]]
assert_frame_equal(result, expected)
result = self.frame.drop(['two'], level='second')
expected = self.frame.ix[[0, 2, 3, 6, 7, 9]]
assert_frame_equal(result, expected)
result = self.frame.T.drop(['bar', 'qux'], axis=1, level='first')
expected = self.frame.ix[[0, 1, 2, 5, 6]].T
assert_frame_equal(result, expected)
result = self.frame.T.drop(['two'], axis=1, level='second')
expected = self.frame.ix[[0, 2, 3, 6, 7, 9]].T
assert_frame_equal(result, expected)
def test_drop_preserve_names(self):
index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1],
[1, 2, 3, 1, 2, 3]],
names=['one', 'two'])
df = DataFrame(np.random.randn(6, 3), index=index)
result = df.drop([(0, 2)])
self.assertEqual(result.index.names, ('one', 'two'))
def test_unicode_repr_issues(self):
levels = [Index([u('a/\u03c3'), u('b/\u03c3'), u('c/\u03c3')]),
Index([0, 1])]
labels = [np.arange(3).repeat(2), np.tile(np.arange(2), 3)]
index = MultiIndex(levels=levels, labels=labels)
repr(index.levels)
# NumPy bug
# repr(index.get_level_values(1))
def test_unicode_repr_level_names(self):
index = MultiIndex.from_tuples([(0, 0), (1, 1)],
names=[u('\u0394'), 'i1'])
s = Series(lrange(2), index=index)
df = DataFrame(np.random.randn(2, 4), index=index)
repr(s)
repr(df)
def test_dataframe_insert_column_all_na(self):
# GH #1534
mix = MultiIndex.from_tuples(
[('1a', '2a'), ('1a', '2b'), ('1a', '2c')])
df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mix)
s = Series({(1, 1): 1, (1, 2): 2})
df['new'] = s
self.assert_(df['new'].isnull().all())
def test_join_segfault(self):
# 1532
df1 = DataFrame({'a': [1, 1], 'b': [1, 2], 'x': [1, 2]})
df2 = DataFrame({'a': [2, 2], 'b': [1, 2], 'y': [1, 2]})
df1 = df1.set_index(['a', 'b'])
df2 = df2.set_index(['a', 'b'])
# it works!
for how in ['left', 'right', 'outer']:
df1.join(df2, how=how)
def test_set_column_scalar_with_ix(self):
subset = self.frame.index[[1, 4, 5]]
self.frame.ix[subset] = 99
self.assert_((self.frame.ix[subset].values == 99).all())
col = self.frame['B']
col[subset] = 97
self.assert_((self.frame.ix[subset, 'B'] == 97).all())
def test_frame_dict_constructor_empty_series(self):
s1 = Series([1, 2, 3, 4], index=MultiIndex.from_tuples([(1, 2), (1, 3),
(2, 2), (2, 4)]))
s2 = Series([1, 2, 3, 4],
index=MultiIndex.from_tuples([(1, 2), (1, 3), (3, 2), (3, 4)]))
s3 = Series()
# it works!
df = DataFrame({'foo': s1, 'bar': s2, 'baz': s3})
df = | DataFrame.from_dict({'foo': s1, 'baz': s3, 'bar': s2}) | pandas.DataFrame.from_dict |
import os
import pandas as pd
import pickle
import flask
from flask import Flask, request, jsonify
from ensemble import *
import boto3
app = Flask(__name__)
BUCKET_NAME = 'ff-inbound-videos' # replace with your bucket name
s3 = boto3.resource('s3')
chpt_dir = './weights'
load_slowfast_path = '{}/sf_bc_jc_44000.pth.tar'.format(chpt_dir)
load_slowfast_path2 = '{}/sf_32000.pth.tar'.format(chpt_dir)
load_slowfast_path3 = '{}/sf_16x8_bc_jc_44000.pth.tar'.format(chpt_dir)
load_slowfast_path4 = '{}/sf_trainval_52000.pth.tar'.format(chpt_dir)
load_xcp_path = '{}/xcep_bgr_58000.pth.tar'.format(chpt_dir)
load_b3_path = '{}/b3_rgb_50000.pth.tar'.format(chpt_dir)
load_res34_path = '{}/res34_rgb_23000.pth.tar'.format(chpt_dir)
load_b1_path = '{}/b1_rgb_58000.pth.tar'.format(chpt_dir)
load_b1long_path = '{}/b1_rgb_long_alldata_66000.pth.tar'.format(chpt_dir)
load_b1short_path = '{}/b1_rgb_alldata_58000.pth.tar'.format(chpt_dir)
load_b0_path = '{}/b0_rgb_58000.pth.tar'.format(chpt_dir)
frame_nums = 160
model = Ensemble(load_slowfast_path, load_xcp_path, load_slowfast_path2, load_slowfast_path3, load_b3_path,
load_res34_path, load_b1_path,
load_b1long_path, load_b1short_path, load_b0_path, load_slowfast_path4, frame_nums,
cuda=pipeline_cfg.cuda)
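# The ensemble and all checkpoints are loaded once at import time, so each /predict request reuses the same weights.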
@app.route('/healthcheck')
def starting_url():
status_code = flask.Response(status=201)
return status_code
@app.route('/predict', methods=['POST'])
def predict():
video_list = request.get_json(force=True)['video_list']
predictions = []
for filename in video_list:
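        # default to a neutral score of 0.5; it is only replaced when both the download and the inference succeed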
score = 0.5
        video = filename.rsplit('/', 1)[-1]
try:
s3.Bucket(BUCKET_NAME).download_file(video, video)
score = model.inference(video)
os.remove(video)
        except Exception:
            pass
predictions.append({'filename': video, 'eighteen': score})
result = | pd.DataFrame(predictions) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Authors: <NAME>
UNESCO-IHE 2016
Contact: <EMAIL>
Repository: https://github.com/wateraccounting/wa
Module: Sheets/sheet1
"""
import os
import pandas as pd
import time
import xml.etree.ElementTree as ET
import subprocess
def create_sheet3(basin, period, units, data, output, template=False):
"""
Keyword arguments:
basin -- The name of the basin
period -- The period of analysis
units -- A list with the units of the data:
[<water consumption>, <land productivity>, <water productivity>]
    data -- A list (length 2) with the csv files that contain the water data.
            The csv files have to follow a specific format. A sample csv is
            available in the link:
https://github.com/wateraccounting/wa/tree/master/Sheets/csv
output -- A list (length 2) with the output paths of the jpg files
for the two parts of the sheet
template -- A list (length 2) of the svg files of the sheet.
Use False (default) to use the standard svg files.
Example:
from wa.Sheets import *
create_sheet3(basin='Helmand', period='2007-2011',
units=['km3/yr', 'kg/ha/yr', 'kg/m3'],
data=[r'C:\Sheets\csv\Sample_sheet3_part1.csv',
r'C:\Sheets\csv\Sample_sheet3_part2.csv'],
output=[r'C:\Sheets\sheet_3_part1.jpg',
r'C:\Sheets\sheet_3_part2.jpg'])
"""
# Read table
df1 = pd.read_csv(data[0], sep=';')
df2 = pd.read_csv(data[1], sep=';')
# Data frames
df1c = df1.loc[df1.USE == "CROP"]
df1n = df1.loc[df1.USE == "NON-CROP"]
df2c = df2.loc[df2.USE == "CROP"]
df2n = df2.loc[df2.USE == "NON-CROP"]
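    # Each sheet cell below is read by filtering the crop / non-crop frame on TYPE (or SUBTYPE) and SUBCLASS.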
# Read csv file part 1
crop_r01c01 = float(df1c.loc[(df1c.TYPE == "Cereals") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c01 = float(df1c.loc[(df1c.TYPE == "Cereals") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c01 = float(df1c.loc[(df1c.TYPE == "Cereals") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c01 = crop_r02c01 + crop_r03c01
crop_r01c02 = float(df1c.loc[(df1c.SUBTYPE == "Root/tuber crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c02 = float(df1c.loc[(df1c.SUBTYPE == "Root/tuber crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c02 = float(df1c.loc[(df1c.SUBTYPE == "Root/tuber crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c02 = crop_r02c02 + crop_r03c02
crop_r01c03 = float(df1c.loc[(df1c.SUBTYPE == "Leguminous crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c03 = float(df1c.loc[(df1c.SUBTYPE == "Leguminous crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c03 = float(df1c.loc[(df1c.SUBTYPE == "Leguminous crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c03 = crop_r02c03 + crop_r03c03
crop_r01c04 = float(df1c.loc[(df1c.SUBTYPE == "Sugar crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c04 = float(df1c.loc[(df1c.SUBTYPE == "Sugar crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c04 = float(df1c.loc[(df1c.SUBTYPE == "Sugar crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c04 = crop_r02c04 + crop_r03c04
crop_r01c05 = float(df1c.loc[(df1c.TYPE == "Non-cereals") &
(df1c.SUBCLASS == "ET") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r02c05 = float(df1c.loc[(df1c.TYPE == "Non-cereals") &
(df1c.SUBCLASS == "ET rainfall") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r03c05 = float(df1c.loc[(df1c.TYPE == "Non-cereals") &
(df1c.SUBCLASS == "Incremental ET") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r04c05 = crop_r02c05 + crop_r03c05
crop_r01c06 = float(df1c.loc[(df1c.SUBTYPE == "Vegetables & melons") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c06 = float(df1c.loc[(df1c.SUBTYPE == "Vegetables & melons") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c06 = float(df1c.loc[(df1c.SUBTYPE == "Vegetables & melons") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c06 = crop_r02c06 + crop_r03c06
crop_r01c07 = float(df1c.loc[(df1c.SUBTYPE == "Fruits & nuts") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c07 = float(df1c.loc[(df1c.SUBTYPE == "Fruits & nuts") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c07 = float(df1c.loc[(df1c.SUBTYPE == "Fruits & nuts") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c07 = crop_r02c07 + crop_r03c07
crop_r01c08 = float(df1c.loc[(df1c.TYPE == "Fruit & vegetables") &
(df1c.SUBCLASS == "ET") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r02c08 = float(df1c.loc[(df1c.TYPE == "Fruit & vegetables") &
(df1c.SUBCLASS == "ET rainfall") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r03c08 = float(df1c.loc[(df1c.TYPE == "Fruit & vegetables") &
(df1c.SUBCLASS == "Incremental ET") &
(df1c.SUBTYPE == "Merged")].WATER_CONSUMPTION)
crop_r04c08 = crop_r02c08 + crop_r03c08
crop_r01c09 = float(df1c.loc[(df1c.TYPE == "Oilseeds") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c09 = float(df1c.loc[(df1c.TYPE == "Oilseeds") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c09 = float(df1c.loc[(df1c.TYPE == "Oilseeds") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c09 = crop_r02c09 + crop_r03c09
crop_r01c10 = float(df1c.loc[(df1c.TYPE == "Feed crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c10 = float(df1c.loc[(df1c.TYPE == "Feed crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c10 = float(df1c.loc[(df1c.TYPE == "Feed crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c10 = crop_r02c10 + crop_r03c10
crop_r01c11 = float(df1c.loc[(df1c.TYPE == "Beverage crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c11 = float(df1c.loc[(df1c.TYPE == "Beverage crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c11 = float(df1c.loc[(df1c.TYPE == "Beverage crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c11 = crop_r02c11 + crop_r03c11
crop_r01c12 = float(df1c.loc[(df1c.TYPE == "Other crops") &
(df1c.SUBCLASS == "ET")].WATER_CONSUMPTION)
crop_r02c12 = float(df1c.loc[(df1c.TYPE == "Other crops") &
(df1c.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
crop_r03c12 = float(df1c.loc[(df1c.TYPE == "Other crops") &
(df1c.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
crop_r04c12 = crop_r02c12 + crop_r03c12
noncrop_r01c01 = float(df1n.loc[(df1n.TYPE == "Fish (Aquaculture)") &
(df1n.SUBCLASS == "ET")].WATER_CONSUMPTION)
noncrop_r02c01 = float(df1n.loc[(df1n.TYPE == "Fish (Aquaculture)") &
(df1n.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
noncrop_r03c01 = float(df1n.loc[(df1n.TYPE == "Fish (Aquaculture)") &
(df1n.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
noncrop_r04c01 = noncrop_r02c01 + noncrop_r03c01
noncrop_r01c02 = float(df1n.loc[(df1n.TYPE == "Timber") &
(df1n.SUBCLASS == "ET")].WATER_CONSUMPTION)
noncrop_r02c02 = float(df1n.loc[(df1n.TYPE == "Timber") &
(df1n.SUBCLASS == "ET rainfall")].WATER_CONSUMPTION)
noncrop_r03c02 = float(df1n.loc[(df1n.TYPE == "Timber") &
(df1n.SUBCLASS == "Incremental ET")].WATER_CONSUMPTION)
noncrop_r04c02 = noncrop_r02c02 + noncrop_r03c02
crop_r01 = pd.np.nansum([crop_r01c01, crop_r01c02, crop_r01c03,
crop_r01c04, crop_r01c05, crop_r01c06,
crop_r01c07, crop_r01c08, crop_r01c09,
crop_r01c10, crop_r01c11, crop_r01c12])
crop_r02 = pd.np.nansum([crop_r02c01, crop_r02c02, crop_r02c03,
crop_r02c04, crop_r02c05, crop_r02c06,
crop_r02c07, crop_r02c08, crop_r02c09,
crop_r02c10, crop_r02c11, crop_r02c12])
crop_r03 = pd.np.nansum([crop_r03c01, crop_r03c02, crop_r03c03,
crop_r03c04, crop_r03c05, crop_r03c06,
crop_r03c07, crop_r03c08, crop_r03c09,
crop_r03c10, crop_r03c11, crop_r03c12])
crop_r04 = crop_r02 + crop_r03
noncrop_r01 = pd.np.nansum([noncrop_r01c01, noncrop_r01c02])
noncrop_r02 = pd.np.nansum([noncrop_r02c01, noncrop_r02c02])
noncrop_r03 = pd.np.nansum([noncrop_r03c01, noncrop_r03c02])
noncrop_r04 = noncrop_r02 + noncrop_r03
ag_water_cons = crop_r01 + crop_r04 + noncrop_r01 + noncrop_r04
# Read csv file part 2
# Land productivity
lp_r01c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r02c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Yield rainfall") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r03c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Incremental yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r04c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Total yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r01c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r02c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Yield rainfall") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r03c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Incremental yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r04c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Total yield") &
(df2c.SUBTYPE == "Merged")].LAND_PRODUCTIVITY)
lp_r01c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r01c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r02c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r03c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r04c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r05c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r06c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r07c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r08c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r05c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r06c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r07c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r08c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r05c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r06c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r07c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r08c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
lp_r05c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Yield")].LAND_PRODUCTIVITY)
lp_r06c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Yield rainfall")].LAND_PRODUCTIVITY)
lp_r07c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Incremental yield")].LAND_PRODUCTIVITY)
lp_r08c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Total yield")].LAND_PRODUCTIVITY)
# Water productivity
wp_r01c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c01 = float(df2c.loc[(df2c.TYPE == "Cereals") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c02 = float(df2c.loc[(df2c.SUBTYPE == "Root/tuber crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c03 = float(df2c.loc[(df2c.SUBTYPE == "Leguminous crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c04 = float(df2c.loc[(df2c.SUBTYPE == "Sugar crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r02c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Yield rainfall") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r03c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Incremental yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r04c05 = float(df2c.loc[(df2c.TYPE == "Non-cereals") &
(df2c.SUBCLASS == "Total yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r01c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c06 = float(df2c.loc[(df2c.SUBTYPE == "Vegetables & melons") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c07 = float(df2c.loc[(df2c.SUBTYPE == "Fruits & nuts") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r02c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Yield rainfall") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r03c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Incremental yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r04c08 = float(df2c.loc[(df2c.TYPE == "Fruit & vegetables") &
(df2c.SUBCLASS == "Total yield") &
(df2c.SUBTYPE == "Merged")].WATER_PRODUCTIVITY)
wp_r01c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c09 = float(df2c.loc[(df2c.TYPE == "Oilseeds") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c10 = float(df2c.loc[(df2c.TYPE == "Feed crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c11 = float(df2c.loc[(df2c.TYPE == "Beverage crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r01c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r02c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r03c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r04c12 = float(df2c.loc[(df2c.TYPE == "Other crops") &
(df2c.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r05c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r06c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r07c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r08c01 = float(df2n.loc[(df2n.SUBTYPE == "Meat") &
(df2n.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r05c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r06c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r07c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r08c02 = float(df2n.loc[(df2n.SUBTYPE == "Milk") &
(df2n.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r05c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r06c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r07c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r08c03 = float(df2n.loc[(df2n.TYPE == "Fish (Aquaculture)") &
(df2n.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
wp_r05c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Yield")].WATER_PRODUCTIVITY)
wp_r06c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Yield rainfall")].WATER_PRODUCTIVITY)
wp_r07c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Incremental yield")].WATER_PRODUCTIVITY)
wp_r08c04 = float(df2n.loc[(df2n.TYPE == "Timber") &
(df2n.SUBCLASS == "Total yield")].WATER_PRODUCTIVITY)
# Calculations & modify svgs
if not template:
path = os.path.dirname(os.path.abspath(__file__))
svg_template_path_1 = os.path.join(path, 'svg', 'sheet_3_part1.svg')
svg_template_path_2 = os.path.join(path, 'svg', 'sheet_3_part2.svg')
else:
svg_template_path_1 = os.path.abspath(template[0])
svg_template_path_2 = os.path.abspath(template[1])
tree1 = ET.parse(svg_template_path_1)
tree2 = ET.parse(svg_template_path_2)
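    # Values are written into the SVG by locating each text element by its id and replacing the text of its first child.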
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Titles
xml_txt_box = tree1.findall('''.//*[@id='basin']''')[0]
xml_txt_box.getchildren()[0].text = 'Basin: ' + basin
xml_txt_box = tree1.findall('''.//*[@id='period']''')[0]
xml_txt_box.getchildren()[0].text = 'Period: ' + period
xml_txt_box = tree1.findall('''.//*[@id='units']''')[0]
xml_txt_box.getchildren()[0].text = 'Part 1: Agricultural water consumption (' + units[0] + ')'
xml_txt_box = tree2.findall('''.//*[@id='basin2']''')[0]
xml_txt_box.getchildren()[0].text = 'Basin: ' + basin
xml_txt_box = tree2.findall('''.//*[@id='period2']''')[0]
xml_txt_box.getchildren()[0].text = 'Period: ' + period
xml_txt_box = tree2.findall('''.//*[@id='units2']''')[0]
xml_txt_box.getchildren()[0].text = 'Part 2: Land productivity (' + units[1] + ') and water productivity (' + units[2] + ')'
# Part 1
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c01']''')[0]
if not pd.isnull(crop_r01c01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c02']''')[0]
if not pd.isnull(crop_r01c02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c03']''')[0]
if not pd.isnull(crop_r01c03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c04']''')[0]
if not pd.isnull(crop_r01c04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c05']''')[0]
if not pd.isnull(crop_r01c05):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c06']''')[0]
if not pd.isnull(crop_r01c06):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c07']''')[0]
if not pd.isnull(crop_r01c07):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c08']''')[0]
if not pd.isnull(crop_r01c08):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c09']''')[0]
if not pd.isnull(crop_r01c09):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c10']''')[0]
if not pd.isnull(crop_r01c10):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c11']''')[0]
if not pd.isnull(crop_r01c11):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01c12']''')[0]
if not pd.isnull(crop_r01c12):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r01']''')[0]
if not pd.isnull(crop_r01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c01']''')[0]
if not pd.isnull(crop_r02c01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c02']''')[0]
if not pd.isnull(crop_r02c02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c03']''')[0]
if not pd.isnull(crop_r02c03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c04']''')[0]
if not pd.isnull(crop_r02c04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c05']''')[0]
if not pd.isnull(crop_r02c05):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c06']''')[0]
if not pd.isnull(crop_r02c06):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c07']''')[0]
if not pd.isnull(crop_r02c07):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c08']''')[0]
if not pd.isnull(crop_r02c08):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c09']''')[0]
if not pd.isnull(crop_r02c09):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c10']''')[0]
if not pd.isnull(crop_r02c10):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c11']''')[0]
if not pd.isnull(crop_r02c11):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02c12']''')[0]
if not pd.isnull(crop_r02c12):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r02']''')[0]
if not pd.isnull(crop_r02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c01']''')[0]
if not pd.isnull(crop_r03c01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c02']''')[0]
if not pd.isnull(crop_r03c02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c03']''')[0]
if not pd.isnull(crop_r03c03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c04']''')[0]
if not pd.isnull(crop_r03c04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c05']''')[0]
if not pd.isnull(crop_r03c05):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c06']''')[0]
if not pd.isnull(crop_r03c06):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c07']''')[0]
if not pd.isnull(crop_r03c07):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c08']''')[0]
if not pd.isnull(crop_r03c08):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c09']''')[0]
if not pd.isnull(crop_r03c09):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c10']''')[0]
if not pd.isnull(crop_r03c10):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c11']''')[0]
if not pd.isnull(crop_r03c11):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03c12']''')[0]
if not pd.isnull(crop_r03c12):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r03']''')[0]
if not pd.isnull(crop_r03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c01']''')[0]
if not pd.isnull(crop_r04c01):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c02']''')[0]
if not pd.isnull(crop_r04c02):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c03']''')[0]
if not pd.isnull(crop_r04c03):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c04']''')[0]
if not pd.isnull(crop_r04c04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c05']''')[0]
if not pd.isnull(crop_r04c05):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c06']''')[0]
if not pd.isnull(crop_r04c06):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c07']''')[0]
if not pd.isnull(crop_r04c07):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c08']''')[0]
if not pd.isnull(crop_r04c08):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c09']''')[0]
if not pd.isnull(crop_r04c09):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c10']''')[0]
if not pd.isnull(crop_r04c10):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c11']''')[0]
if not pd.isnull(crop_r04c11):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04c12']''')[0]
if not pd.isnull(crop_r04c12):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='crop_r04']''')[0]
if not pd.isnull(crop_r04):
xml_txt_box.getchildren()[0].text = '%.2f' % crop_r04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r01c01']''')[0]
if not pd.isnull(noncrop_r01c01):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r01c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r01c02']''')[0]
if not pd.isnull(noncrop_r01c02):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r01c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r01']''')[0]
if not pd.isnull(noncrop_r01) and noncrop_r01 > 0.001:
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r02c01']''')[0]
if not pd.isnull(noncrop_r02c01):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r02c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r02c02']''')[0]
if not pd.isnull(noncrop_r02c02):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r02c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r02']''')[0]
if not pd.isnull(noncrop_r02) and noncrop_r02 > 0.001:
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r03c01']''')[0]
if not pd.isnull(noncrop_r03c01):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r03c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r03c02']''')[0]
if not pd.isnull(noncrop_r03c02):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r03c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r03']''')[0]
if not pd.isnull(noncrop_r03) and noncrop_r03 > 0.001:
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r04c01']''')[0]
if not pd.isnull(noncrop_r04c01):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r04c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r04c02']''')[0]
if not pd.isnull(noncrop_r04c02):
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r04c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree1.findall('''.//*[@id='noncrop_r04']''')[0]
if not pd.isnull(noncrop_r04) and noncrop_r04 > 0.001:
xml_txt_box.getchildren()[0].text = '%.2f' % noncrop_r04
else:
xml_txt_box.getchildren()[0].text = '-'
# Part 2
xml_txt_box = tree1.findall('''.//*[@id='ag_water_cons']''')[0]
if not pd.isnull(ag_water_cons):
xml_txt_box.getchildren()[0].text = '%.2f' % ag_water_cons
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c01']''')[0]
if not pd.isnull(lp_r01c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c02']''')[0]
if not pd.isnull(lp_r01c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c03']''')[0]
if not pd.isnull(lp_r01c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c04']''')[0]
if not pd.isnull(lp_r01c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c05']''')[0]
if not pd.isnull(lp_r01c05):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c06']''')[0]
if not pd.isnull(lp_r01c06):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c07']''')[0]
if not pd.isnull(lp_r01c07):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c08']''')[0]
if not pd.isnull(lp_r01c08):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c09']''')[0]
if not pd.isnull(lp_r01c09):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c10']''')[0]
if not pd.isnull(lp_r01c10):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c11']''')[0]
if not pd.isnull(lp_r01c11):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r01c12']''')[0]
if not pd.isnull(lp_r01c12):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r01c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c01']''')[0]
if not pd.isnull(lp_r02c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c02']''')[0]
if not pd.isnull(lp_r02c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c03']''')[0]
if not pd.isnull(lp_r02c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c04']''')[0]
if not pd.isnull(lp_r02c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c05']''')[0]
if not pd.isnull(lp_r02c05):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c06']''')[0]
if not pd.isnull(lp_r02c06):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c07']''')[0]
if not pd.isnull(lp_r02c07):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c08']''')[0]
if not pd.isnull(lp_r02c08):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c09']''')[0]
if not pd.isnull(lp_r02c09):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c10']''')[0]
if not pd.isnull(lp_r02c10):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c11']''')[0]
if not pd.isnull(lp_r02c11):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r02c12']''')[0]
if not pd.isnull(lp_r02c12):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r02c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c01']''')[0]
if not pd.isnull(lp_r03c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c02']''')[0]
if not pd.isnull(lp_r03c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c03']''')[0]
if not pd.isnull(lp_r03c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c04']''')[0]
if not pd.isnull(lp_r03c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c05']''')[0]
if not pd.isnull(lp_r03c05):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c06']''')[0]
if not pd.isnull(lp_r03c06):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c07']''')[0]
if not pd.isnull(lp_r03c07):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c08']''')[0]
if not pd.isnull(lp_r03c08):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c09']''')[0]
if not pd.isnull(lp_r03c09):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c10']''')[0]
if not pd.isnull(lp_r03c10):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c11']''')[0]
if not pd.isnull(lp_r03c11):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r03c12']''')[0]
if not pd.isnull(lp_r03c12):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r03c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c01']''')[0]
if not pd.isnull(lp_r04c01):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c02']''')[0]
if not pd.isnull(lp_r04c02):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c03']''')[0]
if not pd.isnull(lp_r04c03):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c04']''')[0]
if not pd.isnull(lp_r04c04):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c05']''')[0]
if not pd.isnull(lp_r04c05):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c06']''')[0]
if not pd.isnull(lp_r04c06):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c07']''')[0]
if not pd.isnull(lp_r04c07):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c08']''')[0]
if not pd.isnull(lp_r04c08):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c09']''')[0]
if not pd.isnull(lp_r04c09):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c10']''')[0]
if not pd.isnull(lp_r04c10):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c11']''')[0]
if not pd.isnull(lp_r04c11):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='lp_r04c12']''')[0]
if not pd.isnull(lp_r04c12):
xml_txt_box.getchildren()[0].text = '%.0f' % lp_r04c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c01']''')[0]
if not pd.isnull(wp_r01c01):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c02']''')[0]
if not pd.isnull(wp_r01c02):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c03']''')[0]
if not pd.isnull(wp_r01c03):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c04']''')[0]
if not pd.isnull(wp_r01c04):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c05']''')[0]
if not pd.isnull(wp_r01c05):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c06']''')[0]
if not pd.isnull(wp_r01c06):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c07']''')[0]
if not pd.isnull(wp_r01c07):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c08']''')[0]
if not pd.isnull(wp_r01c08):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c09']''')[0]
if not pd.isnull(wp_r01c09):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c10']''')[0]
if not pd.isnull(wp_r01c10):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c11']''')[0]
if not pd.isnull(wp_r01c11):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r01c12']''')[0]
if not pd.isnull(wp_r01c12):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r01c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c01']''')[0]
if not pd.isnull(wp_r02c01):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c02']''')[0]
if not pd.isnull(wp_r02c02):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c03']''')[0]
if not pd.isnull(wp_r02c03):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c04']''')[0]
if not pd.isnull(wp_r02c04):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c05']''')[0]
if not pd.isnull(wp_r02c05):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c06']''')[0]
if not pd.isnull(wp_r02c06):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c07']''')[0]
if not pd.isnull(wp_r02c07):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c08']''')[0]
if not pd.isnull(wp_r02c08):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c09']''')[0]
if not pd.isnull(wp_r02c09):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c10']''')[0]
if not pd.isnull(wp_r02c10):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c11']''')[0]
if not pd.isnull(wp_r02c11):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r02c12']''')[0]
if not pd.isnull(wp_r02c12):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r02c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c01']''')[0]
if not pd.isnull(wp_r03c01):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c02']''')[0]
if not pd.isnull(wp_r03c02):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c03']''')[0]
if not pd.isnull(wp_r03c03):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c04']''')[0]
if not pd.isnull(wp_r03c04):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c05']''')[0]
if not pd.isnull(wp_r03c05):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c06']''')[0]
if not pd.isnull(wp_r03c06):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c07']''')[0]
if not pd.isnull(wp_r03c07):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c08']''')[0]
if not pd.isnull(wp_r03c08):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c09']''')[0]
if not pd.isnull(wp_r03c09):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c10']''')[0]
if not pd.isnull(wp_r03c10):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c11']''')[0]
if not pd.isnull(wp_r03c11):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c11
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r03c12']''')[0]
if not pd.isnull(wp_r03c12):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r03c12
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c01']''')[0]
if not pd.isnull(wp_r04c01):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c01
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c02']''')[0]
if not pd.isnull(wp_r04c02):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c02
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c03']''')[0]
if not pd.isnull(wp_r04c03):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c03
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c04']''')[0]
if not pd.isnull(wp_r04c04):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c04
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c05']''')[0]
if not pd.isnull(wp_r04c05):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c05
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c06']''')[0]
if not pd.isnull(wp_r04c06):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c06
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c07']''')[0]
if not pd.isnull(wp_r04c07):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c07
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c08']''')[0]
if not pd.isnull(wp_r04c08):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c08
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c09']''')[0]
if not pd.isnull(wp_r04c09):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c09
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c10']''')[0]
if not pd.isnull(wp_r04c10):
xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c10
else:
xml_txt_box.getchildren()[0].text = '-'
xml_txt_box = tree2.findall('''.//*[@id='wp_r04c11']''')[0]
if not pd.isnull(wp_r04c11):
    xml_txt_box.getchildren()[0].text = '%.2f' % wp_r04c11
else:
    xml_txt_box.getchildren()[0].text = '-'
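# Note: the hundreds of assignments above all follow one pattern: look up an SVG text element by id
# and write either a formatted value or '-'. A small helper along these lines (hypothetical, not part
# of the original script) could express the same behaviour in one place; fmt would be '%.0f' for the
# lp_* boxes and '%.2f' elsewhere, and the noncrop_r0X row totals additionally check a 0.001 threshold.
def _fill_text_box(tree, element_id, value, fmt='%.2f'):
    box = tree.findall(".//*[@id='%s']" % element_id)[0]
    if not pd.isnull(value):
        box.getchildren()[0].text = fmt % value
    else:
        box.getchildren()[0].text = '-'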
# -*- coding: utf-8 -*-
"""
Set up SimFin IDs to interface with the SimFin API and reduce duplicate API hits
04/14/2019
<NAME>
"""
import pandas as pd
import requests
import driver
def set_key(key):
"""
Instantiates an API key object to pull from the SimFin API.
"""
api_key = key
return api_key
def get_tickers():
"""
Gets S&P 500 tickers from Wikipedia.
"""
spylist = pd.read_html('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
tickers = spylist[0]["Symbol"].tolist()
return tickers
def get_sim_ids(tickers, api_key):
"""
Pulls SimFin IDs to pass into programmatic API pulls. Takes as input a list
of standardized tickers, and a SimFin API key.
"""
sim_ids = []
for ticker in tickers:
url = f'https://simfin.com/api/v1/info/find-id/ticker/{ticker}?api-key={api_key}'
content = requests.get(url)
data = content.json()
print(data)
if "error" in data or not data:
sim_ids.append(None)
else:
for i, _ in enumerate(data):
sim_ids.append((data[i]['ticker'], data[i]['simId']))
return sim_ids
def load_sim_ids():
"""
Loads SimFin IDs and tickers generated in the simfin_setup.py execution
TO-DO: Pull from instance Postgres DB once loaded, instead of local csv
"""
ticker_id_map = pd.read_csv('ticker_id_map.csv')
return ticker_id_map
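# Hedged usage sketch (not part of the original module): how the helpers above are typically chained.
# The API key string below is a placeholder, not a real key.
if __name__ == "__main__":
    demo_key = set_key("YOUR-SIMFIN-API-KEY")
    demo_tickers = get_tickers()[:5]  # a handful of tickers keeps the demo quick
    print(get_sim_ids(demo_tickers, demo_key))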
import os
import matplotlib.pyplot as plt
import pandas as pd
from tqdm import tqdm
import seaborn as sns
def get_immediate_subdirectories(a_dir):
return [name for name in os.listdir(a_dir)
if os.path.isdir(os.path.join(a_dir, name))]
def draw_boxplots(df, feature_cutoff, name, output_dir):
df_f = df[df['variable'] <= feature_cutoff]
f, ax = plt.subplots(figsize=(20, 10))
plt.ylim(0, 1)
sns.boxplot(x='variable',
y='value',
data=df_f,
ax=ax,
palette=['#7fbf7b', '#af8dc3'],
hue='run',
whis=0.95,
notch=True,
fliersize=0)
save_path = output_dir + 'b_' + str(feature_cutoff) + '_' + name
f.savefig(save_path + '.png')
f.savefig(save_path + '.pdf', format='pdf', bbox_inches="tight")
plt.close('all')
def draw_band(df, feature_cutoff, name, output_dir):
df_f = df[df['variable'] <= feature_cutoff]
f, ax = plt.subplots(figsize=(20, 10))
plt.ylim(0,1)
ax = sns.tsplot(time="variable",
value="value",
unit="rep",
condition="run",
data=df_f,
ci=[95, 99],
color=sns.color_palette(['#7fbf7b', '#af8dc3']))
save_path = output_dir + 'band_' + str(feature_cutoff) + '_' + name
f.savefig(save_path + '.png')
f.savefig(save_path + '.pdf', format='pdf', bbox_inches="tight")
plt.close('all')
def draw_points(df, feature_cutoff, name, output_dir):
df_f = df[df['variable'] <= feature_cutoff]
f, ax = plt.subplots(figsize=(0.2 * 100, 5))
plt.ylim(-0.1, 1.1)
plt.xticks(rotation=90)
ax = sns.stripplot(x="variable", y="value", data=df_f,
jitter=True, hue='run',
palette=['#7fbf7b', '#af8dc3'], alpha=0.05)
save_path = output_dir + 'points_' + str(feature_cutoff) + '_' + name
f.savefig(save_path + '.png')
f.savefig(save_path + '.pdf', format='pdf', bbox_inches="tight")
plt.close('all')
databuilds = ['01']
for databuild in databuilds:
databuild_dir = 'feature_selection/' + databuild + '/'
dataset_types = get_immediate_subdirectories(databuild_dir)
for dataset_type in dataset_types:
base_folder = databuild_dir + dataset_type + '/'
runs = get_immediate_subdirectories(base_folder)
runs = sorted([x for x in runs if '_n' not in x])
run_names = list(set([x.split('_')[0] for x in runs]))
for n in tqdm(run_names):
rn = [x for x in runs if n in x]
run_subnames = [x.split('_')[-1] for x in rn]
auc_dfs = []
for s in run_subnames:
if s != n:
full_name = n + '_' + s
else:
full_name = n
path = base_folder + full_name + '/auc_score.csv'
df = pd.read_csv(path)
auc_dfs.append(df)
"""
Multi criteria decision analysis
"""
from __future__ import division
from __future__ import print_function
import json
import os
import pandas as pd
import numpy as np
import cea.config
import cea.inputlocator
from cea.optimization.lca_calculations import lca_calculations
from cea.analysis.multicriteria.optimization_post_processing.electricity_imports_exports_script import electricity_import_and_exports
from cea.technologies.solar.photovoltaic import calc_Cinv_pv
from cea.optimization.constants import PUMP_ETA
from cea.constants import DENSITY_OF_WATER_AT_60_DEGREES_KGPERM3
from cea.optimization.constants import SIZING_MARGIN
from cea.analysis.multicriteria.optimization_post_processing.individual_configuration import calc_opex_PV
from cea.technologies.chiller_vapor_compression import calc_Cinv_VCC
from cea.technologies.chiller_absorption import calc_Cinv
from cea.technologies.cooling_tower import calc_Cinv_CT
import cea.optimization.distribution.network_opt_main as network_opt
from cea.analysis.multicriteria.optimization_post_processing.locating_individuals_in_generation_script import locating_individuals_in_generation_script
from cea.technologies.heat_exchangers import calc_Cinv_HEX
from math import ceil, log
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, Architecture and Building Systems - ETH Zurich"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
def multi_criteria_main(locator, config):
# local variables
generation = config.multi_criteria.generations
category = "optimization-detailed"
if not os.path.exists(locator.get_address_of_individuals_of_a_generation(generation)):
data_address = locating_individuals_in_generation_script(generation, locator)
else:
data_address = pd.read_csv(locator.get_address_of_individuals_of_a_generation(generation))
# initialize class
data_generation = preprocessing_generations_data(locator, generation)
objectives = data_generation['final_generation']['population']
individual_list = objectives.axes[0].values
data_processed = preprocessing_cost_data(locator, data_generation['final_generation'], individual_list[0], generation, data_address, config)
column_names = data_processed.columns.values
compiled_data = pd.DataFrame(np.zeros([len(individual_list), len(column_names)]), columns=column_names)
for i, individual in enumerate(individual_list):
data_processed = preprocessing_cost_data(locator, data_generation['final_generation'], individual, generation, data_address, config)
for name in column_names:
compiled_data.loc[i][name] = data_processed[name][0]
compiled_data = compiled_data.assign(individual=individual_list)
normalized_TAC = (compiled_data['TAC_Mio'] - min(compiled_data['TAC_Mio'])) / (
max(compiled_data['TAC_Mio']) - min(compiled_data['TAC_Mio']))
normalized_emissions = (compiled_data['total_emissions_kiloton'] - min(compiled_data['total_emissions_kiloton'])) / (
max(compiled_data['total_emissions_kiloton']) - min(compiled_data['total_emissions_kiloton']))
normalized_prim = (compiled_data['total_prim_energy_TJ'] - min(compiled_data['total_prim_energy_TJ'])) / (
max(compiled_data['total_prim_energy_TJ']) - min(compiled_data['total_prim_energy_TJ']))
normalized_Capex_total = (compiled_data['Capex_total_Mio'] - min(compiled_data['Capex_total_Mio'])) / (
max(compiled_data['Capex_total_Mio']) - min(compiled_data['Capex_total_Mio']))
normalized_Opex = (compiled_data['Opex_total_Mio'] - min(compiled_data['Opex_total_Mio'])) / (
max(compiled_data['Opex_total_Mio']) - min(compiled_data['Opex_total_Mio']))
normalized_renewable_share = (compiled_data['renewable_share_electricity'] - min(compiled_data['renewable_share_electricity'])) / (
max(compiled_data['renewable_share_electricity']) - min(compiled_data['renewable_share_electricity']))
compiled_data = compiled_data.assign(normalized_TAC=normalized_TAC)
compiled_data = compiled_data.assign(normalized_emissions=normalized_emissions)
compiled_data = compiled_data.assign(normalized_prim=normalized_prim)
compiled_data = compiled_data.assign(normalized_Capex_total=normalized_Capex_total)
compiled_data = compiled_data.assign(normalized_Opex=normalized_Opex)
compiled_data = compiled_data.assign(normalized_renewable_share=normalized_renewable_share)
compiled_data['TAC_rank'] = compiled_data['normalized_TAC'].rank(ascending=True)
compiled_data['emissions_rank'] = compiled_data['normalized_emissions'].rank(ascending=True)
compiled_data['prim_rank'] = compiled_data['normalized_prim'].rank(ascending=True)
# user defined mcda
compiled_data['user_MCDA'] = compiled_data['normalized_Capex_total'] * config.multi_criteria.capextotal * config.multi_criteria.economicsustainability + \
compiled_data['normalized_Opex'] * config.multi_criteria.opex * config.multi_criteria.economicsustainability + \
compiled_data['normalized_TAC'] * config.multi_criteria.annualizedcosts * config.multi_criteria.economicsustainability + \
compiled_data['normalized_emissions'] *config.multi_criteria.emissions * config.multi_criteria.environmentalsustainability + \
compiled_data['normalized_prim'] *config.multi_criteria.primaryenergy * config.multi_criteria.environmentalsustainability + \
compiled_data['normalized_renewable_share'] * config.multi_criteria.renewableshare * config.multi_criteria.socialsustainability
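# i.e. the user-defined MCDA score is a weighted sum of the min-max normalised criteria, where each
# weight is the product of a criterion weight (capextotal, opex, annualizedcosts, emissions,
# primaryenergy, renewableshare) and the weight of its sustainability pillar (economic,
# environmental or social); the ranking below then orders the individuals by this score ascending.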
compiled_data['user_MCDA_rank'] = compiled_data['user_MCDA'].rank(ascending=True)
compiled_data.to_csv(locator.get_multi_criteria_analysis(generation))
return compiled_data
def preprocessing_generations_data(locator, generations):
data_processed = []
with open(locator.get_optimization_checkpoint(generations), "rb") as fp:
data = json.load(fp)
# get lists of data for performance values of the population
costs_Mio = [round(objectives[0] / 1000000, 2) for objectives in
data['population_fitness']] # convert to millions
emissions_kiloton = [round(objectives[1] / 1000000, 2) for objectives in
data['population_fitness']] # convert to tons x 10^3 (kiloton)
prim_energy_TJ = [round(objectives[2] / 1000000, 2) for objectives in
data['population_fitness']] # convert to gigajoules x 10^3 (Terajoules)
individual_names = ['ind' + str(i) for i in range(len(costs_Mio))]
df_population = pd.DataFrame({'Name': individual_names, 'costs_Mio': costs_Mio,
'emissions_kiloton': emissions_kiloton, 'prim_energy_TJ': prim_energy_TJ
}).set_index("Name")
individual_barcode = [[str(ind) if type(ind) == float else str(ind) for ind in
individual] for individual in data['population']]
def_individual_barcode = pd.DataFrame({'Name': individual_names,
'individual_barcode': individual_barcode}).set_index("Name")
# get lists of data for performance values of the population (hall_of_fame
costs_Mio_HOF = [round(objectives[0] / 1000000, 2) for objectives in
data['halloffame_fitness']] # convert to millions
emissions_kiloton_HOF = [round(objectives[1] / 1000000, 2) for objectives in
data['halloffame_fitness']] # convert to tons x 10^3
prim_energy_TJ_HOF = [round(objectives[2] / 1000000, 2) for objectives in
data['halloffame_fitness']] # convert to gigajoules x 10^3
individual_names_HOF = ['ind' + str(i) for i in range(len(costs_Mio_HOF))]
df_halloffame = pd.DataFrame({'Name': individual_names_HOF, 'costs_Mio': costs_Mio_HOF,
'emissions_kiloton': emissions_kiloton_HOF,
'prim_energy_TJ': prim_energy_TJ_HOF}).set_index("Name")
# get dataframe with capacity installed per individual
for i, individual in enumerate(individual_names):
dict_capacities = data['capacities'][i]
dict_network = data['disconnected_capacities'][i]["network"]
list_dict_disc_capacities = data['disconnected_capacities'][i]["disconnected_capacity"]
for building, dict_disconnected in enumerate(list_dict_disc_capacities):
if building == 0:
df_disc_capacities = pd.DataFrame(dict_disconnected, index=[dict_disconnected['building_name']])
else:
df_disc_capacities = df_disc_capacities.append(
pd.DataFrame(dict_disconnected, index=[dict_disconnected['building_name']]))
df_disc_capacities = df_disc_capacities.set_index('building_name')
dict_disc_capacities = df_disc_capacities.sum(axis=0).to_dict() # series with sum of capacities
if i == 0:
df_disc_capacities_final = pd.DataFrame(dict_disc_capacities, index=[individual])
df_capacities = pd.DataFrame(dict_capacities, index=[individual])
df_network = pd.DataFrame({"network": dict_network}, index=[individual])
else:
df_capacities = df_capacities.append(pd.DataFrame(dict_capacities, index=[individual]))
df_network = df_network.append(pd.DataFrame({"network": dict_network}, index=[individual]))
import pandas as pd
import numpy as np
import ast
import geopy.distance
import csv
import requests
start_date = '2017-01-02'
end_date = '2017-01-02'
set_hour = 7
file_name = '1'
# Timestep to calculate if vehicle is accelerating/braking in seconds
delta_accel = 4
delta_timestep = 1
# Disable copy warning
pd.options.mode.chained_assignment = None
def fix_timesteps(df):
""" Changes the timestemps from miliseconds to a datetime property."""
orig_len = len(df)
df['timestamp_entry'] = pd.to_datetime(df['timestamp_entry'],
format='Timestamp(\'%Y-%m-%d %H:%M:%S.%f\')', errors='coerce')
idx_in = df[df['timestamp_entry'].isnull()].index
df.drop(idx_in, inplace=True)
df['timestamp_exit'] = pd.to_datetime(df['timestamp_exit'],
format='Timestamp(\'%Y-%m-%d %H:%M:%S.%f\')', errors='coerce')
idx_out = df[df['timestamp_exit'].isnull()].index
df.drop(idx_out, inplace=True)
idx = idx_in.append(idx_out)
print("Dropping", len(idx), "out of", orig_len, "datapoints because of wrong format.")
return df, idx
def preproc_gps_data(df):
""" Reformats data so it's easier to work with."""
df[['1_', '2_', '3_']] = df[0].str.split(',', expand=True)
df[['4_', '5_', '6_']] = df[1].str.split(',', expand=True)
df['gps_point_entry'] = (df['1_'] + ',' + df['2_']).str.replace(r'^\[', '')
df['timestamp_entry'] = (df['3_']).str.replace(r'\]$', '').str.strip()
df['gps_point_exit'] = (df['4_'] + ',' + df['5_']).str.replace(r'^\[', '')
df['timestamp_exit'] = (df['6_']).str.replace(r'\]$', '').str.strip()
df.drop([0, 1, '1_', '2_', '3_', '4_', '5_', '6_'], axis=1, inplace=True)
df, idx = fix_timesteps(df)
return df, idx
def calculate_distance(point1, point2):
""" Calculates distance from point A to point B. """
point1 = ast.literal_eval(point1)
point2 = ast.literal_eval(point2)
return geopy.distance.vincenty(point1, point2).m
def calculate_cmf(speed, distance):
""" Calculates the positive and negative constant motion factor.
Args:
speed: speed of Bus
distance: distance a bus has traveled
Returns:
cmf_pos: positive cmf
cmf_neg: negative cmf
"""
speed_squared = np.square(speed)
diff = np.diff(speed_squared)
cmf_pos = np.sum(np.maximum(diff, 0))/distance
cmf_neg = np.sum(np.maximum(-diff, 0))/distance
return cmf_pos, cmf_neg
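# Worked example for calculate_cmf (made-up numbers): for speeds of [5, 7, 6, 8] m/s over a 120 m
# pass, the squared speeds are [25, 49, 36, 64]; the increases (24 + 28 = 52) give
# cmf_pos = 52 / 120 and the single decrease (49 - 36 = 13) gives cmf_neg = 13 / 120.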
def add_speeds(df, df_fenced):
""" Adds speed data to fenced DataFrame"""
counter = 0
all_speeds = {}
# df_fenced = df_fenced[:11]
total_rows = df_fenced.shape[0]
for index, row in df_fenced.iterrows():
timestamp_in = row['timestamp_entry']
timestamp_out = row['timestamp_exit']
timestamp_df = df[(df.index >= timestamp_in) & (df.index <= timestamp_out)]
speeds_in_fence = timestamp_df['speed'].values
len_list = len(speeds_in_fence)
# A certain number of points is needed to calculate the cmf, so passes with too few points are ignored.
if len_list >= 5:
distance = calculate_distance(row['gps_point_entry'], row['gps_point_exit'])
cmf_pos, cmf_neg = calculate_cmf(speeds_in_fence, distance)
all_speeds[timestamp_in] = [np.average(speeds_in_fence), cmf_pos, cmf_neg]
else:
print("this pass only has", len_list, "instances in the fence and we deem that too few for actual measures...")
all_speeds[timestamp_in] = [np.nan, np.nan, np.nan]
counter += 1
print('Processed row', counter, '/', total_rows)
speed_df = pd.DataFrame.from_dict(all_speeds, orient='index').values
df_fenced['avg_speed'] = pd.DataFrame(speed_df[:, 0])
df_fenced['cmf_pos'] = pd.DataFrame(speed_df[:, 1])
df_fenced['cmf_neg'] = pd.DataFrame(speed_df[:, 2])
# df_fenced.to_csv('process_speed_proov_00' +file_name+ '.csv', sep=';', index=False)
return df_fenced
def csv_to_pd(filename):
""" Read in csv file with geofenced data and return dataframe. """
df = pd.read_csv(filename, names=['in', 'out'])
# Make file pretty
df.replace('\[', '', regex=True, inplace=True)
df.replace('\]', '', regex=True, inplace=True)
df.replace('\(', '', regex=True, inplace=True)
df.replace('\)', '', regex=True, inplace=True)
df.replace('Timestamp', '', regex=True, inplace=True)
df.replace("'", '', regex=True, inplace=True)
df.replace(',', '', regex=True, inplace=True)
# Split into neat columns for entering geofence
new = df['in'].str.split(' ', n=2, expand=True)
df['in_lat'] = new[0]
df['in_long'] = new[1]
df['in_time'] = pd.to_datetime(new[2], format='%Y-%m-%d %H:%M:%S')
# Split into neat columns for exiting geofence
new = df['out'].str.split(' ', n=2, expand=True)
df['out_lat'] = new[0]
df['out_long'] = new[1]
df['out_time'] = pd.to_datetime(new[2], format='%Y-%m-%d %H:%M:%S')
return df
"""
Routines for casting.
"""
from contextlib import suppress
from datetime import date, datetime, timedelta
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Sequence,
Set,
Sized,
Tuple,
Type,
Union,
)
import numpy as np
from pandas._libs import lib, tslib, tslibs
from pandas._libs.tslibs import (
NaT,
OutOfBoundsDatetime,
Period,
Timedelta,
Timestamp,
conversion,
iNaT,
ints_to_pydatetime,
ints_to_pytimedelta,
)
from pandas._libs.tslibs.timezones import tz_compare
from pandas._typing import AnyArrayLike, ArrayLike, Dtype, DtypeObj, Scalar, Shape
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
INT64_DTYPE,
POSSIBLY_CAST_DTYPES,
TD64NS_DTYPE,
ensure_int8,
ensure_int16,
ensure_int32,
ensure_int64,
ensure_object,
ensure_str,
is_bool,
is_bool_dtype,
is_categorical_dtype,
is_complex,
is_complex_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_integer,
is_integer_dtype,
is_numeric_dtype,
is_object_dtype,
is_scalar,
is_sparse,
is_string_dtype,
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_unsigned_integer_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import (
DatetimeTZDtype,
ExtensionDtype,
IntervalDtype,
PeriodDtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeArray,
ABCDatetimeIndex,
ABCExtensionArray,
ABCPeriodArray,
ABCPeriodIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import is_list_like
from pandas.core.dtypes.missing import (
is_valid_nat_for_dtype,
isna,
na_value_for_dtype,
notna,
)
if TYPE_CHECKING:
from pandas import Series
from pandas.core.arrays import ExtensionArray
from pandas.core.indexes.base import Index
_int8_max = np.iinfo(np.int8).max
_int16_max = np.iinfo(np.int16).max
_int32_max = np.iinfo(np.int32).max
_int64_max = np.iinfo(np.int64).max
def maybe_convert_platform(values):
""" try to do platform conversion, allow ndarray or list here """
if isinstance(values, (list, tuple, range)):
values = construct_1d_object_array_from_listlike(values)
if getattr(values, "dtype", None) == np.object_:
if hasattr(values, "_values"):
values = values._values
values = lib.maybe_convert_objects(values)
return values
def is_nested_object(obj) -> bool:
"""
return a boolean if we have a nested object, e.g. a Series with 1 or
more Series elements
This may not be necessarily be performant.
"""
if isinstance(obj, ABCSeries) and is_object_dtype(obj.dtype):
if any(isinstance(v, ABCSeries) for v in obj._values):
return True
return False
def maybe_box_datetimelike(value: Scalar, dtype: Optional[Dtype] = None) -> Scalar:
"""
Cast scalar to Timestamp or Timedelta if scalar is datetime-like
and dtype is not object.
Parameters
----------
value : scalar
dtype : Dtype, optional
Returns
-------
scalar
"""
if dtype == object:
pass
elif isinstance(value, (np.datetime64, datetime)):
value = tslibs.Timestamp(value)
elif isinstance(value, (np.timedelta64, timedelta)):
value = tslibs.Timedelta(value)
return value
def maybe_downcast_to_dtype(result, dtype: Union[str, np.dtype]):
"""
try to cast to the specified dtype (e.g. convert back to bool/int
or could be an astype of float64->float32
"""
do_round = False
if is_scalar(result):
return result
elif isinstance(result, ABCDataFrame):
# occurs in pivot_table doctest
return result
if isinstance(dtype, str):
if dtype == "infer":
inferred_type = lib.infer_dtype(ensure_object(result), skipna=False)
if inferred_type == "boolean":
dtype = "bool"
elif inferred_type == "integer":
dtype = "int64"
elif inferred_type == "datetime64":
dtype = "datetime64[ns]"
elif inferred_type == "timedelta64":
dtype = "timedelta64[ns]"
# try to upcast here
elif inferred_type == "floating":
dtype = "int64"
if issubclass(result.dtype.type, np.number):
do_round = True
else:
dtype = "object"
dtype = np.dtype(dtype)
elif dtype.type is Period:
from pandas.core.arrays import PeriodArray
with suppress(TypeError):
# e.g. TypeError: int() argument must be a string, a
# bytes-like object or a number, not 'Period
return PeriodArray(result, freq=dtype.freq)
converted = maybe_downcast_numeric(result, dtype, do_round)
if converted is not result:
return converted
# a datetimelike
# GH12821, iNaT is cast to float
if dtype.kind in ["M", "m"] and result.dtype.kind in ["i", "f"]:
if hasattr(dtype, "tz"):
# not a numpy dtype
if dtype.tz:
# convert to datetime and change timezone
from pandas import to_datetime
result = to_datetime(result).tz_localize("utc")
result = result.tz_convert(dtype.tz)
else:
result = result.astype(dtype)
return result
def maybe_downcast_numeric(result, dtype: DtypeObj, do_round: bool = False):
"""
Subset of maybe_downcast_to_dtype restricted to numeric dtypes.
Parameters
----------
result : ndarray or ExtensionArray
dtype : np.dtype or ExtensionDtype
do_round : bool
Returns
-------
ndarray or ExtensionArray
"""
if not isinstance(dtype, np.dtype):
# e.g. SparseDtype has no itemsize attr
return result
if isinstance(result, list):
# reached via groupby.agg._ohlc; really this should be handled earlier
result = np.array(result)
def trans(x):
if do_round:
return x.round()
return x
if dtype.kind == result.dtype.kind:
# don't allow upcasts here (except if empty)
if result.dtype.itemsize <= dtype.itemsize and result.size:
return result
if is_bool_dtype(dtype) or is_integer_dtype(dtype):
if not result.size:
# if we don't have any elements, just astype it
return trans(result).astype(dtype)
# do a test on the first element, if it fails then we are done
r = result.ravel()
arr = np.array([r[0]])
if isna(arr).any():
# if we have any nulls, then we are done
return result
elif not isinstance(r[0], (np.integer, np.floating, int, float, bool)):
# a comparable, e.g. a Decimal may slip in here
return result
if (
issubclass(result.dtype.type, (np.object_, np.number))
and notna(result).all()
):
new_result = trans(result).astype(dtype)
if new_result.dtype.kind == "O" or result.dtype.kind == "O":
# np.allclose may raise TypeError on object-dtype
if (new_result == result).all():
return new_result
else:
if np.allclose(new_result, result, rtol=0):
return new_result
elif (
issubclass(dtype.type, np.floating)
and not is_bool_dtype(result.dtype)
and not is_string_dtype(result.dtype)
):
    return result.astype(dtype)

return result
from typing import List, Tuple, Dict
import numpy as np
from numpy.lib import recfunctions as rfn
import pandas as pd
from frds.data import Dataset
from frds.measures import Measure
from frds.data.utils import filter_funda
NAME = "MarketToBookRatio"
DATASETS_REQUIRED: List[Dataset] = [
Dataset(
source="wrds",
library="comp",
table="funda",
vars=[
"datadate",
"gvkey",
"csho",
"prcc_f",
"ceq",
"indfmt",
"datafmt",
"popsrc",
"consol",
],
date_vars=["datadate"],
)
]
VARIABLE_LABELS = {NAME: "(PRCC_F*CSHO)/CEQ"}
class MarketToBookRatio(Measure):
"""Market to book ratio
common shares outstanding * share price at fiscal year end
= ----------------------------------------------------------
book value common equity
"""
def __init__(self):
super().__init__("Market to Book Ratio", DATASETS_REQUIRED)
def estimate(self, nparrays: List[np.recarray]):
nparray = filter_funda(nparrays[0])
# market value at fiscal year
mv = nparray.prcc_f * nparray.csho
# market-to-book = market value of equity / common equity
mtb = np.true_divide(mv, nparray.ceq, where=(nparray.ceq != 0))
# set mtb to missing if common equity is somehow missing
mtb[np.isnan(nparray.ceq)] = np.nan
# add book leverage to the result
nparray = rfn.rec_append_fields(nparray, NAME, mtb)
# keep only useful columns
cols = set(rfn.get_names_flat(nparray.dtype))
nparray.sort(order=(keys := ["gvkey", "datadate"]))
exclude_cols = cols - set([*keys, "prcc_f", "csho", "ceq", NAME])
return (
    pd.DataFrame.from_records(nparray, exclude=exclude_cols),
    VARIABLE_LABELS,
)
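# Worked example of the ratio (made-up numbers): with a fiscal-year-end price prcc_f = 20,
# csho = 50 (million shares) and ceq = 400, the market value is 20 * 50 = 1000 and the
# market-to-book ratio is 1000 / 400 = 2.5.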
import argparse
import os
import sys
import time
import apex
from apex import amp
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Subset
from tqdm import tqdm
from sklearn.metrics import roc_auc_score, log_loss
from src.config import get_cfg
from src.data import RSNAHemorrhageDS3d, Qure500DS
from src.solver import make_lr_scheduler, make_optimizer
from src.modeling import ResNet3D, WeightedBCEWithLogitsLoss
from src.utils import *
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, default="",
help="config yaml path")
parser.add_argument("--load", type=str, default="",
help="path to model weight")
parser.add_argument("-ft", "--finetune", action="store_true",
help="path to model weight")
parser.add_argument("-m", "--mode", type=str, default="train",
help="model running mode (train/valid/test)")
parser.add_argument("--valid", action="store_true",
help="enable evaluation mode for validation")
parser.add_argument("--test", action="store_true",
help="enable evaluation mode for testset")
parser.add_argument("--test-qure", action="store_true",
help="run test on QURE500 dataset")
parser.add_argument("--tta", action="store_true",
help="enable tta infer")
parser.add_argument("-d", "--debug", action="store_true",
help="enable debug mode for test")
args = parser.parse_args()
if args.valid:
args.mode = "valid"
elif args.test:
args.mode = "test"
return args
def build_model(cfg):
model = ResNet3D
return model(cfg)
def create_submission(pred_df, sub_fpath):
imgid = pred_df["image"].values
output = pred_df.loc[:, pred_df.columns[1:]].values
data = [[iid]+[sub_o for sub_o in o] for iid, o in zip(imgid, output)]
table_data = []
for subdata in data:
table_data.append([subdata[0]+'_any', subdata[1]])
table_data.append([subdata[0]+'_intraparenchymal', subdata[2]])
table_data.append([subdata[0]+'_intraventricular', subdata[3]])
table_data.append([subdata[0]+'_subarachnoid', subdata[4]])
table_data.append([subdata[0]+'_subdural', subdata[5]])
table_data.append([subdata[0]+'_epidural', subdata[6]])
df = pd.DataFrame(data=table_data, columns=['ID','Label'])
df.to_csv(sub_fpath, index=False)
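# Hedged illustration (not from the original code): create_submission expects pred_df to hold an
# 'image' column followed by six probability columns in the order [any, intraparenchymal,
# intraventricular, subarachnoid, subdural, epidural]; each image id is expanded into six
# '<image>_<subtype>' rows (e.g. 'ID_123_any', ..., 'ID_123_epidural') as required by the
# RSNA submission format, and the result is written to sub_fpath.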
"""Radial profiles.
Heavily inspired by pynbody (https://pynbody.github.io/).
"""
from bisect import bisect
from typing import Any, Callable, Collection, Dict, List, Optional, Tuple, Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from numpy import ndarray
from pandas import DataFrame
from ..snap import Snap
class Profile:
"""Radial profiles.
Parameters
----------
snap
The Snap object.
ndim
The dimension of the profile. For ndim == 2, the radial binning
is cylindrical in the xy-plane. For ndim == 3, the radial
binning is spherical. Default is 2.
radius_min
The minimum radius for binning. Defaults to minimum on the
particles.
radius_max
The maximum radius for binning. Defaults to the 99 percentile
distance.
n_bins
The number of radial bins. Default is 100.
ignore_accreted
Ignore particles accreted onto sinks. Default is True.
mask
Select a subset of all particles via a NumPy mask array.
"""
_profile_functions: Dict[str, Callable] = {}
def __init__(
self,
snap: Snap,
ndim: Optional[int] = 2,
radius_min: Optional[float] = None,
radius_max: Optional[float] = None,
n_bins: int = 100,
ignore_accreted: bool = True,
mask: Optional[ndarray] = None,
):
self.snap = snap
self.ndim = ndim
self._mask = self._setup_particle_mask(ignore_accreted, mask)
self._x = self._calculate_x()
self.range = self._set_range(radius_min, radius_max)
self.n_bins = n_bins
self.bin_edges, self.bin_sizes = self._setup_bins()
self.bin_centers = 0.5 * (self.bin_edges[:-1] + self.bin_edges[1:])
self._particle_bin = np.digitize(self._x, self.bin_edges)
self.bin_indicies = self._set_particle_bin_indicies()
self._profiles: Dict[str, ndarray] = {}
self._profiles['radius'] = self.bin_centers
self._profiles['number'] = np.histogram(self._x, self.bin_edges)[0]
def _setup_particle_mask(
self, ignore_accreted: bool, mask: Optional[ndarray]
) -> ndarray:
if ignore_accreted is False:
if mask is None:
return np.ones(len(self.snap), dtype=bool)
return mask
h: ndarray = self.snap['h']
if mask is None:
return h > 0
return mask & h > 0
def _calculate_x(self) -> ndarray:
pos = self.snap['xyz']
pos = pos[self._mask]
if self.ndim == 2:
return np.hypot(pos[:, 0], pos[:, 1])
elif self.ndim == 3:
return np.hypot(np.hypot(pos[:, 0], pos[:, 1]), pos[:, 2])
def _set_range(
self, radius_min: Optional[float], radius_max: Optional[float]
) -> Tuple[float, float]:
if radius_min is None:
rmin = self._x.min()
else:
rmin = radius_min
if radius_max is None:
rmax = np.percentile(self._x, 99, axis=0)
else:
rmax = radius_max
return rmin, rmax
def _setup_bins(self) -> ndarray:
bin_edges = np.linspace(self.range[0], self.range[1], self.n_bins + 1)
if self.ndim == 2:
bin_sizes = np.pi * (bin_edges[1:] ** 2 - bin_edges[:-1] ** 2)
elif self.ndim == 3:
bin_sizes = 4 / 3 * np.pi * (bin_edges[1:] ** 3 - bin_edges[:-1] ** 3)
return bin_edges, bin_sizes
def _set_particle_bin_indicies(self) -> List[ndarray]:
sortind = self._particle_bin.argsort()
sort_pind = self._particle_bin[sortind]
binind = list()
prev_index = bisect(sort_pind, 0)
for i in range(self.n_bins):
new_index = bisect(sort_pind, i + 1)
binind.append(np.sort(sortind[prev_index:new_index]))
prev_index = new_index
return binind
def _get_profile(self, name: str, args: Optional[Tuple[Any, ...]] = None):
"""Get a profile by name."""
if name in self._profiles:
return self._profiles[name]
elif name in Profile._profile_functions:
if args is not None:
    self._profiles[name] = Profile._profile_functions[name](self, *args)
else:
    self._profiles[name] = Profile._profile_functions[name](self)
return self._profiles[name]
else:
raise ValueError('Profile not available')
def __getitem__(self, name: str) -> ndarray:
"""Return the profile of a given kind."""
if isinstance(name, tuple):
name, *args = name
return self._get_profile(name, args)
return self._get_profile(name)
def __setitem__(self, name: str, values: ndarray):
"""Set the profile directly."""
if name in self._profiles:
self._profiles[name] = values
else:
raise KeyError(f'{name} is not a valid profile')
def __delitem__(self, name):
"""Delete a profile from memory."""
del self._profiles[name]
def __repr__(self):
"""Object repr method."""
return f'<plonk.Profile: {self.n_bins} bins>'
def loaded_keys(self):
"""Return a listing of loaded profiles."""
return tuple(sorted(self._profiles.keys()))
def available_keys(self):
"""Return a listing of available profiles."""
loaded = list(self.loaded_keys())
available = list(self._profile_functions.keys())
return tuple(sorted(set(loaded + available)))
@staticmethod
def profile_property(fn):
"""Decorate profile functions."""
Profile._profile_functions[fn.__name__] = fn
return fn
def plot(self, x: str, y: Union[str, Collection[str]]):
"""Plot profile.
Parameters
----------
x
The x axis to plot as a string.
y
The y axis to plot. Can be multiple as a list or tuple.
"""
if x.lower() not in self.available_keys():
raise ValueError('Cannot determine x axis to plot')
_x = self._get_profile(x)
if isinstance(y, (list, tuple)):
for yi in y:
if yi.lower() not in self.available_keys():
raise ValueError('Cannot determine y axis to plot')
_y = self._get_profile(yi)
plt.plot(_x, _y)
elif isinstance(y, str):
if y.lower() not in self.available_keys():
raise ValueError('Cannot determine y axis to plot')
_y = self._get_profile(y)
plt.plot(_x, _y)
else:
raise ValueError('Cannot determine y axis to plot')
return plt.gcf(), plt.gca()
def to_dataframe(self, all_available: bool = False) -> DataFrame:
"""Convert Profile to DataFrame.
Parameters
----------
all_available
If True, this will calculate all available profiles before
converting to a DataFrame.
"""
if all_available:
columns = self.available_keys()
else:
columns = self.loaded_keys()
data = dict()
for column in columns:
data[column] = self[column]
return pd.DataFrame(data)
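# Hedged usage sketch (not part of the module): assuming `snap` is a loaded plonk Snap object,
# a cylindrical profile can be built, plotted and exported along these lines:
#
#     prof = Profile(snap, ndim=2, n_bins=100)
#     fig, ax = prof.plot('radius', 'number')
#     df = prof.to_dataframe()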
#!/usr/bin/env python
# coding: utf-8
# # <<<<<<<<<<<<<<<<<<<< Assignment 4 >>>>>>>>>>>>>>>>>>>>>>>>
# ## Student: <NAME>
# ## Exercise 1
# In[1]:
import os
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
import numpy as np
from math import pi
# #### a) Load the SpotifyTop2018 40 V2 data table
# In[2]:
# Loading the data
data = pd.read_csv("SpotifyTop2018_40_V2.csv", delimiter = ',', decimal = '.', index_col=0)
print(data)
print(data.head())
# In[3]:
# Normalizing and centering the table
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaled_values = scaler.fit_transform(data)
data.loc[:,:] = scaled_values
print(data)
datos = data
# #### b) Run the k-means method for k = 3. We will modify the attributes of the KMeans(...) class as follows: max_iter : int, default: 300: maximum number of iterations of the k-means algorithm for a single run; for this exercise use max_iter = 1000. n_init : int, default: 10 (strong forms): number of times the k-means algorithm will be run with different centroid seeds; the final results are the best output of n_init consecutive runs in terms of within-cluster inertia. For this exercise use n_init = 100.
# In[4]:
# Function to plot the bar charts used to interpret the clusters
def bar_plot(centros, labels, cluster = None, var = None):
from math import ceil, floor
from seaborn import color_palette
colores = color_palette()
minimo = floor(centros.min()) if floor(centros.min()) < 0 else 0
def inside_plot(valores, labels, titulo):
plt.barh(range(len(valores)), valores, 1/1.5, color = colores)
plt.xlim(minimo, ceil(centros.max()))
plt.title(titulo)
if var is not None:
centros = np.array([n[[x in var for x in labels]] for n in centros])
colores = [colores[x % len(colores)] for x, i in enumerate(labels) if i in var]
labels = labels[[x in var for x in labels]]
if cluster is None:
for i in range(centros.shape[0]):
plt.subplot(1, centros.shape[0], i + 1)
inside_plot(centros[i].tolist(), labels, ('Cluster ' + str(i)))
plt.yticks(range(len(labels)), labels) if i == 0 else plt.yticks([])
else:
pos = 1
for i in cluster:
plt.subplot(1, len(cluster), pos)
inside_plot(centros[i].tolist(), labels, ('Cluster ' + str(i)))
plt.yticks(range(len(labels)), labels) if pos == 1 else plt.yticks([])
pos += 1
# In[5]:
# Function to draw the radar charts used to interpret the clusters
def radar_plot(centros, labels):
from math import pi
centros = np.array([((n - min(n)) / (max(n) - min(n)) * 100) if
max(n) != min(n) else (n/n * 50) for n in centros.T])
angulos = [n / float(len(labels)) * 2 * pi for n in range(len(labels))]
angulos += angulos[:1]
ax = plt.subplot(111, polar = True)
ax.set_theta_offset(pi / 2)
ax.set_theta_direction(-1)
plt.xticks(angulos[:-1], labels)
ax.set_rlabel_position(0)
plt.yticks([10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
["10%", "20%", "30%", "40%", "50%", "60%", "70%", "80%", "90%", "100%"],
color = "grey", size = 8)
plt.ylim(-10, 100)
for i in range(centros.shape[1]):
valores = centros[:, i].tolist()
valores += valores[:1]
ax.plot(angulos, valores, linewidth = 1, linestyle = 'solid',
label = 'Cluster ' + str(i))
ax.fill(angulos, valores, alpha = 0.3)
plt.legend(loc='upper right', bbox_to_anchor = (0.1, 0.1))
# #### Strong forms (n_init) and number of iterations (max_iter) [Default]
# In[11]:
# K-means with 3 clusters using the default settings (max_iter = 300, n_init = 10).
kmedias = KMeans(n_clusters=3, max_iter=300, n_init=10) # Instantiate the model
kmedias.fit(datos)
centros = np.array(kmedias.cluster_centers_)
print(centros)
plt.figure(1, figsize = (10, 10))
radar_plot(centros, datos.columns)
# #### Strong forms (n_init) and number of iterations (max_iter) [Modified]
# In[12]:
# Re-fitting k-means with the modified settings to inspect the final cluster assignment
kmedias = KMeans(n_clusters=3, max_iter=1000, n_init=100)
kmedias.fit(datos)
centros = np.array(kmedias.cluster_centers_)
print(centros)
plt.figure(1, figsize = (10, 10))
radar_plot(centros, datos.columns)
# #### c) Interpret the results of the previous exercise using bar charts and radar charts. Compare with the results obtained in the previous assignment, which used hierarchical clustering.
# In[14]:
# Run k-means with 3 clusters
kmedias = KMeans(n_clusters=3)
kmedias.fit(datos)
print(kmedias.predict(datos))
centros = np.array(kmedias.cluster_centers_)
print(centros)
# In[15]:
# Plotting the bar chart
plt.figure(1, figsize = (12, 8))
bar_plot(centros, datos.columns)
# ### Interpretation
# In[15]:
# Interpretation:
# After running k-means with the first set of parameters (n_init = 10, max_iter = 300) we obtain:
# A first cluster (blue) in which speechiness, loudness, tempo and acousticness are the highest variables,
# i.e. these songs contain spoken words, are acoustic and have the highest tempo in beats per minute;
# time_signature and danceability are also high, so the songs are danceable with many beats per bar.
# The remaining variables are low.
# A second cluster (orange) scores high on danceability, time_signature, energy, loudness, valence and
# instrumentalness: these songs are good for dancing, have many beats per bar per minute, good intensity
# and high loudness per track, and they are fairly positive as well as instrumental. They show some
# speechiness, but below 50%, meaning a moderate amount of words, and their duration in milliseconds is
# very low: energetic, danceable songs that are short.
# Cluster 3 (green) contains the songs with the longest duration in milliseconds; they show some
# acousticness, loudness and a little intensity, but these are low, and the remaining variables are also low.
# Second interpretation, with n_init = 100 and max_iter = 1000:
# Here the extra runs stabilize the clusters, and some variable profiles change:
# Cluster 1 (blue): speechiness, time_signature, danceability and acousticness remain high, and liveness
# and valence are added. The songs in this cluster have high beats per bar, many words in the lyrics, are
# danceable and acoustic, show audience presence and high musical positivity, i.e. cheerful songs people
# can dance and sing to. On the other hand they are short (low duration_ms) and low on intensity and
# instrumentalness.
# Cluster 2 (orange): has the highest values of time_signature, danceability, energy, loudness, valence
# and liveness. Compared with the default run the change is small; only instrumentalness moved. These
# songs have very high beats per bar, are suitable for dancing, have high loudness in decibels, very
# positive lyrics and strong audience presence. This cluster is very similar to cluster 1, except that it
# also shows energy and loudness, which cluster 1 does not. It contains the songs with low word presence,
# low acousticness and low instrumentalness, and with longer durations in milliseconds than cluster 1:
# danceable, positive songs, though perhaps not songs to sing along to, since speechiness is low.
# Cluster 3 (green): compared with the default run, instrumentalness now appears, tempo and duration_ms
# remain, and there is now moderate energy and loudness. This cluster holds the songs with the highest
# duration in milliseconds, the highest instrumentalness and tempo, a relatively high degree of positivity
# and audience presence, but low intensity and loudness. They have few words and are not danceable at all.
# Comparison with hierarchical clustering:
# Cluster 1 (blue) is quite similar; there is only a slight change in duration_ms, since in the
# hierarchical solution more than 25% of the data showed some duration_ms, but the difference is barely
# noticeable.
# Cluster 2 (orange) changes a lot: in the hierarchical solution only duration_ms, tempo and a bit of
# acousticness were high, whereas in k-means those same variables are quite low and the variables that
# were low in the hierarchical solution (danceability, energy, etc.) are high here.
# Cluster 3 (green): instrumentalness, tempo and a bit of loudness are kept, although instrumentalness was
# high in the hierarchical solution and below 50% in k-means. The cluster is still characterized by fairly
# instrumental songs with a high number of beats per minute.
# #### d) Using colors, plot the clusters obtained with k-means (k = 3) on the first two components of the principal plane from Principal Component Analysis.
# In[22]:
pca = PCA(n_components=2)
componentes = pca.fit_transform(datos)
componentes
print(datos.shape)
print(componentes.shape)
plt.scatter(componentes[:, 0], componentes[:, 1],c=kmedias.predict(datos))
plt.xlabel('componente 1')
plt.ylabel('componente 2')
plt.title('3 Cluster K-Medias')
# #### e) Using 50 runs of the k-means method, draw the "Jambu elbow" (elbow plot) for this example. Does the between-class inertia stabilize at some point?
# In[7]:
# K-means with 3 clusters and 50 runs (n_init = 50), keeping the default max_iter = 300
kmedias = KMeans(n_clusters=3, max_iter=300, n_init=50) # Instantiate the model
kmedias.fit(datos)
centros = np.array(kmedias.cluster_centers_)
print(centros)
# In[10]:
Nc = range(1, 20)
kmediasList = [KMeans(n_clusters=i) for i in Nc]
varianza = [kmediasList[i].fit(datos).inertia_ for i in range(len(kmediasList))]
plt.plot(Nc,varianza,'o-')
plt.xlabel('Número de clústeres')
plt.ylabel('Varianza explicada por cada cluster (Inercia Intraclases)')
plt.title('Codo de Jambu')
# #### Interpretation
# In[11]:
# In this case there is no clear answer: the curve never really flattens out into a straight line,
# although K = 5, K = 7 or K = 13 could arguably be reasonable choices.
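# Editor's illustrative addition (not part of the original assignment): when the elbow
# plot is ambiguous, the average silhouette score is a common complementary diagnostic
# for choosing k. Sketch assumes `datos` is the standardized Spotify table built above.
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
for k in range(2, 11):
    etiquetas = KMeans(n_clusters=k, n_init=10).fit_predict(datos)
    print(k, round(silhouette_score(datos, etiquetas), 3))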
# ## Exercise #2
# #### a) Repeat exercise 1 with k = 3 on this data table, using only the numeric variables. We modify the attributes of the KMeans(...) class as follows: max_iter : int, default 300: maximum number of iterations of the k-means algorithm for a single run; use max_iter = 2000 for this exercise. n_init : int, default 10 (strong forms): number of times the k-means algorithm is run with different centroid seeds; the final result is the best of the n_init consecutive runs in terms of within-cluster inertia. Use n_init = 150 for this exercise.
# #### Loading the SAheart data table
# In[43]:
corazon = pd.read_csv('SAheart.csv', delimiter = ';', decimal = '.')
print(corazon)
# In[44]:
# Keeping only the numeric variables
corazon2 = pd.DataFrame(data = corazon, columns = (['sbp', 'tobacco', 'ldl', 'adiposity', 'typea', 'obesity',
                                                    'alcohol', 'age']))
print(corazon2)
corazon2.head()
# In[45]:
# Normalizing and centering the table
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaled_values = scaler.fit_transform(corazon2)
corazon2.loc[:,:] = scaled_values
print(corazon2)
# In[25]:
# K-means with 3 clusters and the modified settings max_iter = 2000, n_init = 150
kmedias = KMeans(n_clusters=3, max_iter=2000, n_init=150) # Instantiate the model
kmedias.fit(corazon2)
centros = np.array(kmedias.cluster_centers_)
print(centros)
plt.figure(1, figsize = (10, 10))
radar_plot(centros, corazon2.columns)
# In[46]:
# Run k-means with 3 clusters
kmedias = KMeans(n_clusters=3)
kmedias.fit(corazon2)
print(kmedias.predict(corazon2))
centros = np.array(kmedias.cluster_centers_)
print(centros)
# In[27]:
# Plotting the bar chart
plt.figure(1, figsize = (12, 8))
bar_plot(centros, corazon2.columns)
# #### Interpret the results of the previous exercise using bar charts and radar charts. Compare with the results obtained in the previous assignment, which used hierarchical clustering.
# In[41]:
# Comparing with the previous assignment (hierarchical clustering), the radar plot obtained with k-means
# is practically identical: the clusters keep almost all of their variables. The biggest change is the
# cluster numbering. In the hierarchical solution, cluster 1 contained the individuals with a high type A
# score and low values everywhere else; with k-means this became cluster 2.
# Cluster 2 in the hierarchical solution, i.e. the individuals with high sbp, the oldest ages, high
# cholesterol, adiposity and strong overweight, became cluster 3 in k-means, and these individuals now
# show even higher SBP and adiposity (reaching the maximum) than before.
# Finally, cluster 3 in the hierarchical solution becomes cluster 1 in k-means and keeps the same
# variables: high cholesterol, adiposity, obesity, relatively high SBP and age. K-means, however, now
# also includes a high type A score (rather than the medium level seen in the hierarchical solution), so
# this cluster contains the older individuals with obesity, high cholesterol and adiposity, combined with
# a stronger type A profile, i.e. more competitive, results-oriented people who tend to be more stressed
# and anxious.
# #### Using colors, plot the clusters obtained with k-means (k = 3) on the first two components of the principal plane from Principal Component Analysis.
# In[47]:
pca = PCA(n_components=2)
componentes = pca.fit_transform(corazon2)
componentes
print(corazon2.shape)
print(componentes.shape)
plt.scatter(componentes[:, 0], componentes[:, 1],c=kmedias.predict(corazon2))
plt.xlabel('componente 1')
plt.ylabel('componente 2')
plt.title('3 Cluster K-Medias')
# #### Using 50 runs of the k-means method, draw the "Jambu elbow" (elbow plot) for this example. Does the between-class inertia stabilize at some point?
# In[48]:
# K-means with 3 clusters and 50 runs (n_init = 50), keeping the default max_iter = 300
kmedias = KMeans(n_clusters=3, max_iter=300, n_init=50) # Instantiate the model
kmedias.fit(corazon2)
centros = np.array(kmedias.cluster_centers_)
print(centros)
# In[49]:
Nc = range(1, 20)
kmediasList = [KMeans(n_clusters=i) for i in Nc]
varianza = [kmediasList[i].fit(corazon2).inertia_ for i in range(len(kmediasList))]
plt.plot(Nc,varianza,'o-')
plt.xlabel('Número de clústeres')
plt.ylabel('Varianza explicada por cada cluster (Inercia Intraclases)')
plt.title('Codo de Jambu')
# ### Interpretation
# In[ ]:
# In this case there is no clear answer, but K = 2 or K = 6 could be reasonable choices.
# #### b) Repeat the previous exercises, this time including the categorical variables via complete disjunctive (dummy) coding. Are the results better?
# In[28]:
# Recoding helper
def recodificar(col, nuevo_codigo):
col_cod = pd.Series(col, copy=True)
for llave, valor in nuevo_codigo.items():
col_cod.replace(llave, valor, inplace=True)
return col_cod
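# Editor's illustrative usage of recodificar (hypothetical values, shown on a toy Series):
ejemplo = pd.Series(["Present", "Absent", "Present"])
print(recodificar(ejemplo, {"Present": 1, "Absent": 0}))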
# #### Loading the numeric and categorical variables and converting them to complete disjunctive coding
# In[54]:
# Converting the variables to dummy coding
datos_dummies = pd.get_dummies(corazon)
print(datos_dummies.head())
print(datos_dummies.dtypes)
# In[57]:
# Centering and normalizing the dummy-coded data
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaled_values = scaler.fit_transform(datos_dummies)
datos_dummies.loc[:,:] = scaled_values
print(datos_dummies)
dummy = datos_dummies
# In[63]:
# K-means with 3 clusters and the modified settings max_iter = 2000, n_init = 150
kmedias = KMeans(n_clusters=3, max_iter=2000, n_init=150) # Instantiate the model
kmedias.fit(dummy)
centros = np.array(kmedias.cluster_centers_)
print(centros)
plt.figure(1, figsize = (10, 10))
radar_plot(centros, dummy.columns)
# In[33]:
# Run k-means with 3 clusters
kmedias = KMeans(n_clusters=3)
kmedias.fit(dummy)
print(kmedias.predict(dummy))
centros = np.array(kmedias.cluster_centers_)
print(centros)
# In[34]:
# Plotting the bar chart
plt.figure(1, figsize = (12, 8))
bar_plot(centros, dummy.columns)
# #### Interpret the results of the previous exercise using bar charts and radar charts. Compare with the results obtained in the previous assignment, which used hierarchical clustering.
# In[51]:
# Once again the clusters and their variable profiles are similar to the hierarchical solution, but the
# same renumbering problem appears. Cluster 1 in k-means corresponds to cluster 3 in the hierarchical
# solution: chd_no stays high, there is no family history, and the type A score is high in k-means
# whereas it was medium in the hierarchical solution. Another difference is that in the hierarchical
# solution these individuals had high alcohol intake, while in k-means they show low alcohol intake.
# Everything else is the same and the remaining variables stay low.
# Cluster 2 in k-means is cluster 1 in the hierarchical solution: it still contains the individuals
# diagnosed with heart disease, but family history is now a bit higher. They are still older people, a
# high alcohol intake is added (it was lower in the hierarchical solution), and obesity remains high. In
# addition, with k-means these individuals now show high adiposity and cholesterol, consume more tobacco
# and have higher blood pressure readings; in the hierarchical solution these last four variables were
# essentially the same (except for adiposity, which is a bit lower in k-means). With the inclusion of the
# categorical variables several variables tended to rise, and a strong correlation between the
# categorical and numeric variables becomes visible.
# Finally, cluster 3 in k-means is cluster 2 in the hierarchical solution, and many variables stay the
# same: age remains high, as do adiposity, cholesterol, tobacco intake and the blood pressure (sbp)
# measurement, which is now higher with k-means than with the hierarchical solution. In both solutions
# these individuals have not been diagnosed with heart disease. In the hierarchical solution family
# history was strongly present, whereas in k-means it drops slightly and obesity drops as well, but
# alcohol intake is now higher (it was low or almost zero in the hierarchical solution), and in k-means a
# fair share of the individuals show no family history, which was almost zero or very low in the
# hierarchical solution.
# This new k-means cluster therefore contains the people who have not been diagnosed with heart disease
# but of whom a large share have a family history of the condition, are older, have other conditions and
# also consume high amounts of alcohol.
# #### Using colors, plot the clusters obtained with k-means (k = 3) on the first two components of the principal plane from Principal Component Analysis.
# In[59]:
pca = PCA(n_components=2)
componentes = pca.fit_transform(dummy)
componentes
print(dummy.shape)
print(componentes.shape)
plt.scatter(componentes[:, 0], componentes[:, 1],c=kmedias.predict(dummy))
plt.xlabel('componente 1')
plt.ylabel('componente 2')
plt.title('3 Cluster K-Medias')
# #### Using 50 runs of the k-means method, draw the "Jambu elbow" (elbow plot) for this example. Does the between-class inertia stabilize at some point?
# In[60]:
# K-means with 3 clusters and 50 runs (n_init = 50), keeping the default max_iter = 300
kmedias = KMeans(n_clusters=3, max_iter=300, n_init=50) # Instantiate the model
kmedias.fit(dummy)
centros = np.array(kmedias.cluster_centers_)
print(centros)
# In[61]:
Nc = range(1, 20)
kmediasList = [KMeans(n_clusters=i) for i in Nc]
varianza = [kmediasList[i].fit(dummy).inertia_ for i in range(len(kmediasList))]
plt.plot(Nc,varianza,'o-')
plt.xlabel('Número de clústeres')
plt.ylabel('Varianza explicada por cada cluster (Inercia Intraclases)')
plt.title('Codo de Jambu')
# ### Interpretation
# In[62]:
# In this case there is no clear answer, but K = 5 or K = 8 could be reasonable choices, since that is
# where the elbow levels off.
# ### Interpretation: hierarchical clustering with categorical variables vs. k-means with categorical variables
# In[36]:
# With the categorical variables added, the results do look much better and closely resemble what the
# hierarchical solution produced. A first cluster (blue) represents the "healthy" people: those without
# heart disease and without family history of the condition, but with a high type A score, meaning more
# results-oriented, competitive people who are consequently more stressed and tense.
# Cluster 2 (orange) represents the people who have not been diagnosed with heart disease but who are
# obese, are the oldest, show adiposity, tend towards high cholesterol and high blood pressure readings,
# consume large amounts of alcohol and also smoke; some have a family history of heart conditions and
# others do not. This is the group with some hereditary component whose health is further aggravated by
# their diet and lifestyle.
# Finally, cluster 3 (green) contains the people who have already been diagnosed with heart disease. They
# are also among the oldest, have high blood pressure readings, show cholesterol, adiposity and
# overweight, have a type A profile, smoke heavily, drink large amounts of alcohol, and the hereditary
# component of heart disease is very strongly present. What distinguishes this cluster from cluster 2 is
# that these people have been diagnosed with heart disease, whereas those in cluster 2 have not, though
# they are subjects at risk.
# The first radar and bar charts built from the numeric variables alone did provide information, but it
# was very limited: they did not reveal the strong correlation between the variables, nor how heredity or
# being diagnosed or not played an important role in the analysis.
# ## Exercise 3
# ### Program the class hierarchy according to the following diagram
# In[39]:
# Image display configuration
import pandas as pd
pd.options.display.max_rows = 10
from IPython.display import Image
Image(filename='/Users/heinerleivagmail.com/Jerarquia.png')
# In[1]:
import pandas as pd
import numpy as np
import scipy.linalg as la
from sklearn import preprocessing
import matplotlib.pyplot as plt
from math import sqrt
import os
import scipy.stats
import os
from math import pi
from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import dendrogram, ward, single, complete,average,linkage, fcluster
import scipy.cluster.hierarchy as sch
from scipy.spatial.distance import pdist
from sklearn.preprocessing import StandardScaler
from math import ceil, floor
from seaborn import color_palette
from sklearn.decomposition import PCA
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
# In[3]:
class exploratorio:
def __init__(self, datos = | pd.DataFrame() | pandas.DataFrame |
# IMPORTATION STANDARD
import os
# IMPORTATION THIRDPARTY
import pandas as pd
import pytest
# IMPORTATION INTERNAL
from gamestonk_terminal.stocks.due_diligence import dd_controller
# pylint: disable=E1101
# pylint: disable=W0603
first_call = True
@pytest.mark.block_network
@pytest.mark.record_stdout
def test_menu_quick_exit(mocker):
mocker.patch("builtins.input", return_value="quit")
mocker.patch("gamestonk_terminal.stocks.due_diligence.dd_controller.session")
mocker.patch(
"gamestonk_terminal.stocks.due_diligence.dd_controller.session.prompt",
return_value="quit",
)
stock = pd.DataFrame()
dd_controller.menu(
ticker="TSLA", start="10/25/2021", interval="1440min", stock=stock
)
@pytest.mark.block_network
@pytest.mark.record_stdout
def test_menu_system_exit(mocker):
global first_call
first_call = True
def side_effect(arg):
global first_call
if first_call:
first_call = False
raise SystemExit()
return arg
m = mocker.Mock(return_value="quit", side_effect=side_effect)
mocker.patch("builtins.input", return_value="quit")
mocker.patch("gamestonk_terminal.stocks.due_diligence.dd_controller.session")
mocker.patch(
"gamestonk_terminal.stocks.due_diligence.dd_controller.session.prompt",
return_value="quit",
)
mocker.patch(
"gamestonk_terminal.stocks.due_diligence.dd_controller.DueDiligenceController.switch",
new=m,
)
stock = pd.DataFrame()
dd_controller.menu(
ticker="TSLA", start="10/25/2021", interval="1440min", stock=stock
)
@pytest.mark.block_network
@pytest.mark.record_stdout
def test_print_help():
dd = dd_controller.DueDiligenceController(
ticker="", start="", interval="", stock=pd.DataFrame()
)
dd.print_help()
@pytest.mark.block_network
def test_switch_empty():
dd = dd_controller.DueDiligenceController(
ticker="", start="", interval="", stock=pd.DataFrame()
)
result = dd.switch(an_input="")
assert result is None
@pytest.mark.block_network
@pytest.mark.record_stdout
def test_switch_help():
dd = dd_controller.DueDiligenceController(
ticker="", start="", interval="", stock=pd.DataFrame()
)
result = dd.switch(an_input="?")
assert result is None
@pytest.mark.block_network
def test_switch_cls(mocker):
mocker.patch("os.system")
dd = dd_controller.DueDiligenceController(
ticker="", start="", interval="", stock=pd.DataFrame()
)
result = dd.switch(an_input="cls")
assert result is None
os.system.assert_called_once_with("cls||clear")
@pytest.mark.block_network
def test_call_q():
dd = dd_controller.DueDiligenceController(
ticker="", start="", interval="", stock= | pd.DataFrame() | pandas.DataFrame |
import click
import os
import glob
import pandas as pd
from vlbi_tools import difmap
from vlbi_tools.utils import time_diff
@click.command()
@click.argument('data_path', type=click.Path(exists=True, dir_okay=True))
@click.argument('model_name')
@click.argument('out_path', type=click.Path(exists=False, dir_okay=True))
def main(data_path, model_name, out_path):
'''
Converts difmap models in fits format into csv catalog. Searches for model
files from a top level directory and summarizes all information in one
file.
DATA_PATH: path to top level directory of difmap models
MODEL_NAME: Name of the difmap model, all model files need to have the same
name
OUT_PATH: path to directory to save catalog
'''
if not os.path.exists(out_path):
os.makedirs(out_path)
data_paths = sorted(glob.glob(data_path+'/*/'+model_name+'.fits'))
catalog = | pd.DataFrame() | pandas.DataFrame |
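# --- Editor's illustrative sketch (not part of the original script) ---
# The click command above can be exercised programmatically with click's test runner;
# the paths and model name below are hypothetical placeholders.
from click.testing import CliRunner
runner = CliRunner()
result = runner.invoke(main, ["/data/difmap_models", "MODEL_FIT", "./catalog_out"])
print(result.exit_code, result.output)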
import pandas as pd
import numpy as np
import requests
from fake_useragent import UserAgent
import io
import os
import time
import json
import demjson
from datetime import datetime
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
# Main Economic Indicators: https://alfred.stlouisfed.org/release?rid=205
url = {
"fred_econ": "https://fred.stlouisfed.org/graph/fredgraph.csv?",
"philfed": "https://www.philadelphiafed.org/surveys-and-data/real-time-data-research/",
"chicagofed": "https://www.chicagofed.org/~/media/publications/",
"OECD": "https://stats.oecd.org/sdmx-json/data/DP_LIVE/"
}
def date_transform(df, format_origin, format_after):
return_list = []
for i in range(0, len(df)):
return_list.append(datetime.strptime(df[i], format_origin).strftime(format_after))
return return_list
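# Editor's illustrative usage of date_transform (expected output: ['01/31/2020', '02/29/2020']):
print(date_transform(["2020-01-31", "2020-02-29"], "%Y-%m-%d", "%m/%d/%Y"))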
def gdp_quarterly(startdate="1947-01-01", enddate="2021-01-01"):
"""
    Full Name: Gross Domestic Product
Description: Billions of Dollars, Quarterly, Seasonally Adjusted Annual Rate
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "GDP",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df.columns = ["Date", "GDP"]
df["Date"] = pd.to_datetime(df["Date"], format = "%Y-%m-%d")
df["GDP"] = df["GDP"].astype(float)
return df
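# Editor's illustrative usage (requires network access to FRED; the date range is arbitrary):
# fetch quarterly GDP and plot it with pandas' built-in plotting.
gdp_df = gdp_quarterly(startdate="2000-01-01", enddate="2020-12-31")
gdp_df.plot(x="Date", y="GDP")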
def gdpc1_quarterly(startdate="1947-01-01", enddate="2021-01-01"):
"""
Full Name: Real Gross Domestic Product
Description: Billions of Chained 2012 Dollars, Quarterly, Seasonally Adjusted Annual Rate
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "GDPC1",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
return df
def oecd_gdp_monthly(startdate="1947-01-01", enddate="2021-01-01"):
"""
    Full Name: OECD-based monthly GDP indicator for the United States (FRED series USALORSGPNOSTSAM)
    Description: Monthly
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "USALORSGPNOSTSAM",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
return df
def payems_monthly(startdate="1939-01-01", enddate="2021-01-01"):
"""
Full Name: All Employees, Total Nonfarm
Description: Thousands of Persons,Seasonally Adjusted, Monthly
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "PAYEMS",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df.columns = ["Date", "Payems"]
df["Date"] = pd.to_datetime(df["Date"], format = "%Y-%m-%d")
df["Payems"] = df["Payems"].astype(float)
return df
def ppi():
tmp_url = url["fred_econ"] + "bgcolor=%23e1e9f0&chart_type=line&drp=0&fo=open%20sans&graph_bgcolor=%23ffffff&height=450&mode=fred&recession_bars=on&txtcolor=%23444444&ts=12&tts=12&width=968&nt=0&thu=0&trc=0&show_legend=yes&show_axis_titles=yes&show_tooltip=yes&id=PPIACO,PCUOMFGOMFG&scale=left,left&cosd=1913-01-01,1984-12-01&coed=2021-04-01,2021-04-01&line_color=%234572a7,%23aa4643&link_values=false,false&line_style=solid,solid&mark_type=none,none&mw=3,3&lw=2,2&ost=-99999,-99999&oet=99999,99999&mma=0,0&fml=a,a&fq=Monthly,Monthly&fam=avg,avg&fgst=lin,lin&fgsnd=2020-02-01,2020-02-01&line_index=1,2&transformation=lin,lin&vintage_date=2021-06-10,2021-06-10&revision_date=2021-06-10,2021-06-10&nd=1913-01-01,1984-12-01"
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
r = requests.get(tmp_url, headers=request_header)
data_text = r.content
df = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df["DATE"] = pd.to_datetime(df["DATE"], format="%Y-%m-%d")
#df = df[list(df.columns[1:])].replace(".", np.nan).astype(float)
name_list = {
"PPIACO": "Producer Price Index by Commodity: All Commodities",
"PCUOMFGOMFG": "Producer Price Index by Industry: Total Manufacturing Industries"
}
df.replace(".", np.nan, inplace = True)
df.columns = ["Date", "PPI_C", "PPI_I"]
df["Date"] = pd.to_datetime(df["Date"], format = "%Y-%m-%d")
df[["PPI_C", "PPI_I"]] = df[["PPI_C", "PPI_I"]].astype(float)
return df
def pmi():
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_ism_pmi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国ISM制造业PMI报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "28",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_ism_pmi"
temp_df = temp_df.astype("float")
PMI_I = pd.DataFrame()
PMI_I["Date"] = pd.to_datetime(temp_df.index, format = "%Y-%m-%d")
PMI_I["ISM_PMI_I"] = np.array(temp_df).astype(float)
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_ism_non_pmi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国ISM非制造业PMI报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "29",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_ism_non_pmi"
temp_df = temp_df.astype("float")
PMI_NI = pd.DataFrame()
PMI_NI["Date"] = pd.to_datetime(temp_df.index, format = "%Y-%m-%d")
PMI_NI["ISM_PMI_NI"] = np.array(temp_df).astype(float)
PMI = pd.merge_asof(PMI_I, PMI_NI, on = "Date")
return PMI
def unrate(startdate="1948-01-01", enddate="2021-01-01"):
"""
Full Name: Unemployment Rate: Aged 15-64: All Persons for the United States
Description: Percent, Seasonally Adjusted, Monthly, Quarterly and Annually
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "LRUN64TTUSM156S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_monthly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_monthly["DATE"] = pd.to_datetime(df_monthly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "LRUN64TTUSQ156S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_quarterly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_quarterly["DATE"] = pd.to_datetime(
df_quarterly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "LRUN64TTUSA156S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_annually = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_annually["DATE"] = pd.to_datetime(
df_annually["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(
df_monthly,
df_quarterly,
on="DATE",
direction="backward")
df = pd.merge_asof(df, df_annually, on="DATE", direction="backward")
df.columns = ["Date", "UR_Monthly", "UR_Quarterly", "UR_Annually"]
return df
def erate(startdate="1955-01-01", enddate="2021-01-01"):
"""
Full Name: Employment Rate: Aged 25-54: All Persons for the United States
Description: Percent,Seasonally Adjusted, Monthly, Quarterly and Annually
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "LREM25TTUSM156S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_monthly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_monthly["DATE"] = pd.to_datetime(df_monthly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "LREM25TTUSQ156S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_quarterly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_quarterly["DATE"] = pd.to_datetime(
df_quarterly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "LREM25TTUSA156S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_annually = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_annually["DATE"] = pd.to_datetime(
df_annually["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(
df_monthly,
df_quarterly,
on="DATE",
direction="backward")
df = pd.merge_asof(df, df_annually, on="DATE", direction="backward")
df.columns = ["Date", "ER_Monthly", "ER_Quarterly", "ER_Annually"]
def pce_monthly(startdate="1959-01-01", enddate="2021-01-01"):
"""
    Full Name: Personal Consumption Expenditures (PCE)
    Description: Billions of Dollars, Monthly, Seasonally Adjusted Annual Rate
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "PCE",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
return df
def cpi(startdate="1960-01-01", enddate="2021-01-01"):
"""
Full Name: Consumer Price Index: Total All Items for the United States
Description: Percent, Monthly, Quarterly and Annually, Seasonally Adjusted
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "CPALTT01USM661S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_monthly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_monthly["DATE"] = pd.to_datetime(df_monthly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "CPALTT01USQ661S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_quarterly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_quarterly["DATE"] = pd.to_datetime(
df_quarterly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "CPALTT01USA661S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_annually = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_annually["DATE"] = pd.to_datetime(
df_annually["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(
df_monthly,
df_quarterly,
on="DATE",
direction="backward")
df = pd.merge_asof(df, df_annually, on="DATE", direction="backward")
df.columns = ["Date", "CPI_Monthly", "CPI_Quarterly", "CPI_Annually"]
df["Date"] = pd.to_datetime(df["Date"], format = "%Y-%m-%d")
df[["CPI_Monthly", "CPI_Quarterly", "CPI_Annually"]] = df[["CPI_Monthly", "CPI_Quarterly", "CPI_Annually"]].astype(float)
return df
def m1(startdate="1960-01-01", enddate="2021-01-01"):
"""
    Full Name: M1 for the United States
Description: Growth Rate Previous Period, Monthly, Quarterly and Annually, Seasonally Adjusted
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "WM1NS",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_weekly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_weekly["DATE"] = pd.to_datetime(df_weekly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "MANMM101USM657S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_monthly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_monthly["DATE"] = pd.to_datetime(df_monthly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "MANMM101USQ657S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_quarterly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_quarterly["DATE"] = pd.to_datetime(
df_quarterly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "MANMM101USA657S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_annually = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_annually["DATE"] = pd.to_datetime(
df_annually["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(df_weekly, df_monthly, on="DATE", direction="backward")
df = pd.merge_asof(df, df_quarterly, on="DATE", direction="backward")
df = pd.merge_asof(df, df_annually, on="DATE", direction="backward")
df.columns = [
"Date",
"M1_Weekly",
"M1_Monthly",
"M1_Quarterly",
"M1_Annually"]
return df
def m2(startdate="1960-01-01", enddate="2021-01-01"):
"""
    Full Name: M2 Money Stock
Description: Seasonally Adjusted, Weekly, Monthly, Quarterly and Annually, Seasonally Adjusted
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "WM2NS",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_weekly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_weekly["DATE"] = pd.to_datetime(df_weekly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "M2SL",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_monthly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_monthly["DATE"] = pd.to_datetime(df_monthly["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(df_weekly, df_monthly, on="DATE", direction="backward")
df.columns = ["Date", "M2_Weekly", "M2_Monthly"]
return df
def m3(startdate="1960-01-01", enddate="2021-01-01"):
"""
    Full Name: M3 for the United States
Description: Growth Rate Previous Period, Monthly, Quarterly and Annually, Seasonally Adjusted
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "MABMM301USM657S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_monthly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_monthly["DATE"] = pd.to_datetime(df_monthly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "MABMM301USQ657S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_quarterly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_quarterly["DATE"] = pd.to_datetime(
df_quarterly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "MABMM301USA657S",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_annually = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_annually["DATE"] = pd.to_datetime(
df_annually["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(
df_monthly,
df_quarterly,
on="DATE",
direction="backward")
df = pd.merge_asof(df, df_annually, on="DATE", direction="backward")
df.columns = ["Date", "M3_Monthly", "M3_Quarterly", "M3_Annually"]
return df
def ltgby_10(startdate="1955-01-01", enddate="2021-01-01"):
"""
Full Name: Long-Term Government Bond Yields: 10-year: Main (Including Benchmark) for the United States
Description: Percent,Not Seasonally Adjusted, Monthly, Quarterly and Annually
Return: pd.DataFrame
"""
tmp_url = url["fred_econ"]
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "IRLTLT01USM156N",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_monthly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_monthly["DATE"] = pd.to_datetime(df_monthly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "IRLTLT01USQ156N",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_quarterly = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_quarterly["DATE"] = pd.to_datetime(
df_quarterly["DATE"], format="%Y-%m-%d")
ua = UserAgent(verify_ssl=False)
request_header = {"User-Agent": ua.random}
request_params = {
"id": "IRLTLT01USA156N",
"cosd": "{}".format(startdate),
"coed": "{}".format(enddate)
}
r = requests.get(tmp_url, params=request_params, headers=request_header)
data_text = r.content
df_annually = pd.read_csv(io.StringIO(data_text.decode('utf-8')))
df_annually["DATE"] = pd.to_datetime(
df_annually["DATE"], format="%Y-%m-%d")
df = pd.merge_asof(
df_monthly,
df_quarterly,
on="DATE",
direction="backward")
df = | pd.merge_asof(df, df_annually, on="DATE", direction="backward") | pandas.merge_asof |
import os
import json
import requests
import pandas as pd
from .analysis import *
from .utils import *
def nbayes_report(path, target, covariables, lim_inf_training, lim_sup_training,
lim_inf_validation, lim_sup_validation, modifier=None,
type_analysis=None, lim_inf_first=None, lim_sup_first=None):
'''
Description: This function makes a request to Epi-Puma API 1.0
Args:
- path: relative path for the reports to be created
- target: A list with some of the following variables
"COVID-19 CONFIRMADO", "COVID-19 NEGATIVO", "COVID-19 FALLECIDO".
- covariables: A list with some of the following variables "Demograficos",
"Pobreza", "Movilidad", "Infraestructura-salud", "Vulnerabilidad",
"Worldclim",
- lim_inf_training and lim_sup_training: define the training interval.
- lim_inf_validation and lim_inf_validation: define the validation
interval
- modifier: "cases", "incidence", "prevalence" or "lethality"
- type_analysis: "green", "red" or None
    - lim_inf_first and lim_sup_first: define the first interval (before the
        training interval)
Returns:
- absolute path covariables report
- absolute path cells report
- message
'''
covariable_filename = 'COVARIABLES::'
cells_filename = 'CELLS::'
try:
absolute_path = os.path.join(os.path.dirname(__file__), '../')
absolute_path = os.path.join(absolute_path, path)
print(absolute_path)
except Exception as e:
message = "There was a problem related path of the report"
return (None, None, message)
covariables_list, summary_list, message = nbayes_analysis(target, covariables,
lim_inf_training, lim_sup_training, lim_inf_validation, lim_sup_validation,
modifier, type_analysis, lim_inf_first, lim_sup_first)
covariable_filename += 'target:'
cells_filename += 'target:'
for t in target:
covariable_filename += t + ';'
cells_filename += t + ';'
covariable_filename += ';'
cells_filename += ';'
covariable_filename += 'covariables:'
cells_filename += 'covariables:'
for c in covariables:
covariable_filename += c + ';'
cells_filename += c + ';'
covariable_filename += ';'
cells_filename += ';'
covariable_filename += 'training:' + lim_inf_training + '_to_' + lim_sup_training + ';'
cells_filename += 'training:' + lim_inf_training + '_to_' + lim_sup_training + ';'
covariable_filename += 'validation:' + lim_inf_validation + '_to_' + lim_sup_validation + ';'
cells_filename += 'validation:' + lim_inf_validation + '_to_' + lim_sup_validation + ';'
if type_analysis == None:
covariable_filename += 'type:profiling;'
cells_filename += 'type:profiling;'
else:
covariable_filename += 'type:' + type_analysis + ';'
cells_filename += 'type:' + type_analysis + ';'
if modifier != None:
covariable_filename += 'modifier:' + modifier + ';'
cells_filename += 'modifier:' + modifier + ';'
covariable_filename += '.csv'
cells_filename += '.csv'
df_covariables = pd.DataFrame(covariables_list)
df_cells = | pd.DataFrame(summary_list) | pandas.DataFrame |
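# Editor's illustrative call of nbayes_report (all argument values are hypothetical and the
# call needs the Epi-Puma API plus the remainder of this module to be available):
cov_path, cells_path, msg = nbayes_report(
    path="reports",
    target=["COVID-19 CONFIRMADO"],
    covariables=["Demograficos", "Movilidad"],
    lim_inf_training="2020-03-01", lim_sup_training="2020-06-30",
    lim_inf_validation="2020-07-01", lim_sup_validation="2020-09-30",
    modifier="incidence",
)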
import os
import pandas as pd
from boxing_puzzle import boxing2
os.system("mkdir tetromino/")
for i, matrix in enumerate(boxing2.find_some(
x_length = 8,
y_length = 5,
piece_size = 4,
same_piece_limit = 2,
max_trial = 530000
)):
| pd.DataFrame(matrix) | pandas.DataFrame |
import pandas as pd
import numpy as np
import torch
import os.path
from glob import glob
from datetime import datetime
from base.torchvision_dataset import TorchvisionDataset
from torch.utils.data import TensorDataset
class HR_Dataset(TorchvisionDataset):
def __init__(self, root:str, normal_class):
super().__init__(root)
self.normal_class = normal_class
# x_array = [[[0 for k in range(3)] for j in range(11932)]]
# load lists of participant ids
# id_fb, id_nfb = load_id('/workspace/HR_WearablesData/')
# id_fb = np.load("/workspace/fitbit_id.npy")
# id_nfb = np.load("/workspace/nonfitbit_id.npy")
# id_anomalies = load_labels('/workspace/datasets/Health New Labeling.xlsx')
# df = load_fitbit_data(id_fb[0])
# x_array = cut_to_same_length(df, x_array)
# y_array = np.zeros(x_array.shape[0])
# index_array = np.arange(x_array.shape[0])
print("start")
dim1_train = pd.read_csv("/workspace/dim1_train.txt").to_numpy()
dim2_train = pd.read_csv("/workspace/dim2_train.txt").to_numpy()
dim3_train = pd.read_csv("/workspace/dim3_train.txt").to_numpy()
dim1_test = pd.read_csv("/workspace/dim1_test.txt").to_numpy()
dim2_test = | pd.read_csv("/workspace/dim2_test.txt") | pandas.read_csv |
import optuna
import numpy as np
import pandas as pd
from functools import partial
from . import model_bank
import mlflow
from .AAMPreprocessor import AAMPreprocessor
import joblib
from .FastAIutils import *
from .metrics import model_metrics, pretty_scores, get_scores
from loguru import logger
from pathlib import Path
from tabulate import tabulate
import pprint
import random
class ProjectConfigurator:
def __init__(self, config) -> None:
if config:
self.key_attrs = [i for i in dir(config) if not i.startswith('__')]
for key in self.key_attrs:
setattr(self, key, getattr(config, key))
self.create_project_folder()
self.add_logger_path()
self.add_project_config_to_logs(config)
def create_project_folder(self):
self.output_path = Path(self.BASE_OUTPUT_PATH) / Path(self.PROJECT_NAME) / Path(self.SUB_PROJECT_NAME)
self.output_path.mkdir(parents=True, exist_ok=True)
self.models_path = self.output_path / 'models'
self.models_path.mkdir(parents=True, exist_ok=True)
# def copy_config_file(self):
# import shutil
# shutil.copy('config.py', str(self.output_path))
def add_logger_path(self):
logger_name = str(random.randint(0,10000))
self.logger = logger.bind(name = logger_name)
self.logger.add(str(self.output_path/'logfile.log'), filter=lambda record: record["extra"].get("name") == logger_name)
def add_project_config_to_logs(self, config):
bc_attrs = {i : getattr(config, i) for i in self.key_attrs }
self.logger.info('\n'+pprint.pformat(bc_attrs))
class ARKAutoML(AAMPreprocessor):
def __init__(self, data = None, config=None,
n_folds= 5, eval_metric='recall',
n_trials=10, model_algos=['xgb','rf'], loading=False):
self.config = ProjectConfigurator(config)
if not loading:
super().__init__(data, cat_cols=config.cat_cols, cont_cols=config.cont_cols, y_names=config.TARGET_COL,
n_folds=n_folds, fold_method=config.FOLD_METHOD)
self.eval_metric = eval_metric
self.n_trials = n_trials
self.model_algos = model_algos
self.logger = self.config.logger
self.total_features = len(self.cat_cols + self.cont_cols)
self.mpb = model_bank.ModelParamBank(total_features = self.total_features)
def create_optuna_optimization(self):
self.study = optuna.create_study(direction='maximize', study_name=self.config.PROJECT_NAME, load_if_exists=True)
mlflow.set_experiment(self.config.PROJECT_NAME)
optimization_function = partial(self.objective)
self.study.optimize(optimization_function, n_trials=self.n_trials)
def objective(self, trial):
valid_metrics = {}
for fold in range(self.n_folds):
self.mpb = model_bank.ModelParamBank(total_features = self.total_features, trial = trial)
# self.trial_number_model[trial.number] = model_algo
model_algo = trial.suggest_categorical("model_algo", self.model_algos)
model = self.mpb.get_model_with_optuna_params(model_algo)
model.fit(self.X_train[fold], self.y_train[fold])
# train_metrics = self.model_metrics(model, self.X_train[fold], self.y_train[fold])
valid_metrics[fold] = model_metrics(model, self.X_test[fold], self.y_test[fold], self.logger)
cross_validated_metrics = pd.DataFrame(valid_metrics).mean(axis=1).to_dict()
self.logger.info(f'''Trial No : {trial.number}, {self.eval_metric} : {np.round(cross_validated_metrics[self.eval_metric], 4)}, Params : {trial.params}
{pretty_scores(cross_validated_metrics)}''')
with mlflow.start_run():
mlflow.log_params(trial.params)
for fold in range(self.n_folds): mlflow.log_metrics(valid_metrics[fold]) # metrics for each fold
mlflow.log_metrics(cross_validated_metrics) # Adding the cross validated metrics
tags = {
'eval_metric' : self.eval_metric,
'model_type' : 'classification',
'model_algo' : model_algo,
'train_shape' : self.X_train[fold].shape,
'test_shape' : self.X_test[fold].shape,
'sub_project' : self.config.SUB_PROJECT_NAME
}
mlflow.set_tags(tags)
return cross_validated_metrics[self.eval_metric]
@staticmethod
def calculate_metrics_based_on_different_cut_offs(model, X, y, cut_offs):
full_metrics = []
for co in cut_offs:
loop_dict = get_scores(y, np.where(model.predict_proba(X)[:,1]>co, 1, 0))
loop_dict['prob_cut_off'] = co
full_metrics.append(loop_dict)
cols = ['prob_cut_off'] + [i for i in loop_dict.keys() if i!='prob_cut_off'] #Reordering the columns
return pd.DataFrame(full_metrics)[cols]
def get_feature_importance(self, model, model_algo):
if model_algo == 'xgb':
fi = model.get_booster().get_fscore()
fi_df = pd.DataFrame(fi, index=['importance']).T
return (
(fi_df/fi_df['importance'].sum()).reset_index()
.sort_values('importance', ascending=False)
.rename(columns={'index':'features'})
)
elif model_algo in ('lgb', 'rf'):
fi = (
pd.DataFrame(model.feature_importances_, self.X.columns, columns=['importance'])
.sort_values('importance', ascending=False)
.reset_index()
.rename(columns={'index':'features'})
)
fi['importance'] = fi['importance'] / fi['importance'].sum()
return fi
def get_crossvalidated_results_for_best_model(self, folds, params_dict = None, model_algo = None,
cut_offs=[0.5,.55,.6,.65,.7]):
if pd.isnull(params_dict):
params_dict = dict(self.study.best_trial.params.items())
model_algo = params_dict.pop('model_algo')
model = self.mpb.get_fresh_model(model_name = model_algo, params=params_dict)
self.logger.info(f'Best Model : {model_algo}, Model Params : {params_dict}')
valid_metrics = {}
metrics_by_cut_off = []
feature_importances = []
for fold in range(folds):
model.fit(self.X_train[fold], self.y_train[fold])
self.logger.info(f'Fold : {fold+1}, Train Shape : {self.X_train[fold].shape}, Test Shape : {self.X_test[fold].shape}')
valid_metrics[fold] = model_metrics(model, self.X_test[fold], self.y_test[fold], self.logger, print_metrics=True)
fold_metrics_by_cut_off = self.calculate_metrics_based_on_different_cut_offs(model, self.X_test[fold], self.y_test[fold], cut_offs)
fold_metrics_by_cut_off['fold'] = fold
metrics_by_cut_off.append(fold_metrics_by_cut_off)
feature_importances.append(self.get_feature_importance(model, model_algo))
full_fi = pd.concat(feature_importances)
full_fi = (full_fi.groupby('features')['importance'].sum() / full_fi['importance'].sum()).reset_index().sort_values('importance', ascending=False).reset_index(drop=True)
self.logger.info(f"Feature Importance : \n{tabulate(full_fi.head(30), headers='keys', tablefmt='psql')}")
metrics_by_cut_off = pd.concat(metrics_by_cut_off)
final_metrics_by_cut_off = metrics_by_cut_off.drop('fold', axis=1).groupby('prob_cut_off').mean().reset_index()
self.logger.info(f"Metrics by different Cut Offs : \n{tabulate(final_metrics_by_cut_off, headers='keys', tablefmt='psql', showindex=False)}")
cross_validated_metrics = pd.DataFrame(valid_metrics).mean(axis=1).to_dict()
self.logger.info(f'*** Cross Validated Final Results : *** \n{pretty_scores(cross_validated_metrics)}')
with mlflow.start_run():
mlflow.log_params(params_dict)
for fold in range(folds): mlflow.log_metrics(valid_metrics[fold]) # metrics for each fold
mlflow.log_metrics(cross_validated_metrics) # Adding the cross validated metrics
tags = {
'eval_metric' : self.eval_metric,
'model_type' : 'classification',
'model_algo' : model_algo,
'train_shape' : self.X_train[fold].shape,
'test_shape' : self.X_test[fold].shape,
'sub_project' : self.config.SUB_PROJECT_NAME
}
mlflow.set_tags(tags)
return cross_validated_metrics, final_metrics_by_cut_off
def create_only_one_train_and_one_valid(self, valid):
'''
This method is NOT RECOMMENDED!!
        But it can be used in low computational resource scenarios.
'''
self.X_train[0], self.y_train[0] = self.X, self.y
self.create_new_oos_test(valid)
self.X_test[0], self.y_test[0] = self.X_valid, self.y_valid
self.n_folds = 1
def train_best_model(self, params_dict = None, model_algo = None):
# super().create_no_split_train()
        if pd.isnull(params_dict):
import unittest
import qteasy as qt
import pandas as pd
from pandas import Timestamp
import numpy as np
from numpy import int64
import itertools
import datetime
from qteasy.utilfuncs import list_to_str_format, regulate_date_format, time_str_format, str_to_list
from qteasy.utilfuncs import maybe_trade_day, is_market_trade_day, prev_trade_day, next_trade_day, prev_market_trade_day
from qteasy.utilfuncs import next_market_trade_day
from qteasy.space import Space, Axis, space_around_centre, ResultPool
from qteasy.core import apply_loop
from qteasy.built_in import SelectingFinanceIndicator
from qteasy.history import stack_dataframes
from qteasy.tsfuncs import income, indicators, name_change, get_bar
from qteasy.tsfuncs import stock_basic, trade_calendar, new_share, get_index
from qteasy.tsfuncs import balance, cashflow, top_list, index_indicators, composite
from qteasy.tsfuncs import future_basic, future_daily, options_basic, options_daily
from qteasy.tsfuncs import fund_basic, fund_net_value, index_basic
from qteasy.evaluate import eval_alpha, eval_benchmark, eval_beta, eval_fv
from qteasy.evaluate import eval_info_ratio, eval_max_drawdown, eval_sharp
from qteasy.evaluate import eval_volatility
from qteasy.tafuncs import bbands, dema, ema, ht, kama, ma, mama, mavp, mid_point
from qteasy.tafuncs import mid_price, sar, sarext, sma, t3, tema, trima, wma, adx, adxr
from qteasy.tafuncs import apo, bop, cci, cmo, dx, macd, macdext, aroon, aroonosc
from qteasy.tafuncs import macdfix, mfi, minus_di, minus_dm, mom, plus_di, plus_dm
from qteasy.tafuncs import ppo, roc, rocp, rocr, rocr100, rsi, stoch, stochf, stochrsi
from qteasy.tafuncs import trix, ultosc, willr, ad, adosc, obv, atr, natr, trange
from qteasy.tafuncs import avgprice, medprice, typprice, wclprice, ht_dcperiod
from qteasy.tafuncs import ht_dcphase, ht_phasor, ht_sine, ht_trendmode, cdl2crows
from qteasy.tafuncs import cdl3blackcrows, cdl3inside, cdl3linestrike, cdl3outside
from qteasy.tafuncs import cdl3starsinsouth, cdl3whitesoldiers, cdlabandonedbaby
from qteasy.tafuncs import cdladvanceblock, cdlbelthold, cdlbreakaway, cdlclosingmarubozu
from qteasy.tafuncs import cdlconcealbabyswall, cdlcounterattack, cdldarkcloudcover
from qteasy.tafuncs import cdldoji, cdldojistar, cdldragonflydoji, cdlengulfing
from qteasy.tafuncs import cdleveningdojistar, cdleveningstar, cdlgapsidesidewhite
from qteasy.tafuncs import cdlgravestonedoji, cdlhammer, cdlhangingman, cdlharami
from qteasy.tafuncs import cdlharamicross, cdlhighwave, cdlhikkake, cdlhikkakemod
from qteasy.tafuncs import cdlhomingpigeon, cdlidentical3crows, cdlinneck
from qteasy.tafuncs import cdlinvertedhammer, cdlkicking, cdlkickingbylength
from qteasy.tafuncs import cdlladderbottom, cdllongleggeddoji, cdllongline, cdlmarubozu
from qteasy.tafuncs import cdlmatchinglow, cdlmathold, cdlmorningdojistar, cdlmorningstar
from qteasy.tafuncs import cdlonneck, cdlpiercing, cdlrickshawman, cdlrisefall3methods
from qteasy.tafuncs import cdlseparatinglines, cdlshootingstar, cdlshortline, cdlspinningtop
from qteasy.tafuncs import cdlstalledpattern, cdlsticksandwich, cdltakuri, cdltasukigap
from qteasy.tafuncs import cdlthrusting, cdltristar, cdlunique3river, cdlupsidegap2crows
from qteasy.tafuncs import cdlxsidegap3methods, beta, correl, linearreg, linearreg_angle
from qteasy.tafuncs import linearreg_intercept, linearreg_slope, stddev, tsf, var, acos
from qteasy.tafuncs import asin, atan, ceil, cos, cosh, exp, floor, ln, log10, sin, sinh
from qteasy.tafuncs import sqrt, tan, tanh, add, div, max, maxindex, min, minindex, minmax
from qteasy.tafuncs import minmaxindex, mult, sub, sum
from qteasy.history import get_financial_report_type_raw_data, get_price_type_raw_data
from qteasy.database import DataSource
from qteasy._arg_validators import _parse_string_kwargs, _valid_qt_kwargs
class TestCost(unittest.TestCase):
def setUp(self):
self.amounts = np.array([10000, 20000, 10000])
self.op = np.array([0, 1, -0.33333333])
self.prices = np.array([10, 20, 10])
self.r = qt.Cost()
def test_rate_creation(self):
print('testing rates objects\n')
self.assertIsInstance(self.r, qt.Cost, 'Type should be Rate')
def test_rate_operations(self):
self.assertEqual(self.r['buy_fix'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['sell_fix'], 0.0, 'Item got is wrong')
self.assertEqual(self.r['buy_rate'], 0.003, 'Item got is incorrect')
self.assertEqual(self.r['sell_rate'], 0.001, 'Item got is incorrect')
self.assertEqual(self.r['buy_min'], 5., 'Item got is incorrect')
self.assertEqual(self.r['sell_min'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['slipage'], 0.0, 'Item got is incorrect')
self.assertEqual(np.allclose(self.r(self.amounts), [0.003, 0.003, 0.003]), True, 'fee calculation wrong')
def test_rate_fee(self):
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nSell result with fixed rate = 0.001 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33299.999667, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.333332999999996, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33296.67, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.33, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 32967.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997.00897308, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82053838484547, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 1:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 1))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -19999.82, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -18054., msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 54.0, msg='result incorrect')
def test_min_fee(self):
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 300
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 985, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 10))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 10)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33033.333)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33030)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 32700)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
def test_rate_with_min(self):
"""Test transaction cost calculated by rate with min_fee"""
self.r.buy_rate = 0.0153
self.r.sell_rate = 0.01
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 333
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 984.9305624, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 301.3887520929774, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 10))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 10)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32999.99967)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.33333)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32996.7)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.3)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32667.0)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.0)
def test_fixed_fee(self):
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 200
self.r.sell_fix = 150
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nselling result of fixed cost with fixed fee = 150 and moq=0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 0))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], 33183.333, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150.0, msg='result incorrect')
print('\nselling result of fixed cost with fixed fee = 150 and moq=100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3300.]), True,
                      f'result incorrect, {test_fixed_fee_result[0]} does not equal to [0, 0, -3300]')
self.assertAlmostEqual(test_fixed_fee_result[1], 32850., msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150., msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 990., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18200.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
def test_slipage(self):
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.slipage = 1E-9
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-10 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-10 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
print('\nselling result with fixed rate = 0.001 and slipage = 1E-10:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
        self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True,
                      f'{test_fixed_fee_result[0]} does not equal to [0, 0, -3333.3333]')
        self.assertAlmostEqual(test_fixed_fee_result[1], 33298.88855591,
                               msg=f'{test_fixed_fee_result[1]} does not equal to 33298.88855591.')
        self.assertAlmostEqual(test_fixed_fee_result[2], 34.44444409,
                               msg=f'{test_fixed_fee_result[2]} does not equal to 34.44444409.')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 996.98909294, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 60.21814121353513, msg='result incorrect')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18054.36, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 54.36, msg='result incorrect')
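# Added illustration (not part of the original test suite): a minimal sketch of the
# rate-fee arithmetic exercised in TestCost.test_rate_fee, using the same inputs
# (sell a 0.33333333 fraction of 10000 shares at price 10 with sell_rate=0.001, moq=0).
def _sketch_rate_fee_arithmetic():
    sold_shares = 10000 * 0.33333333  # 3333.3333 shares sold (see self.op above)
    gross = sold_shares * 10          # 33333.333 cash before fees
    fee = gross * 0.001               # 33.333333 transaction fee
    net = gross - fee                 # 33299.999667, the cash amount asserted above
    return sold_shares, fee, net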
class TestSpace(unittest.TestCase):
def test_creation(self):
"""
test if creation of space object is fine
"""
# first group of inputs, output Space with two discr axis from [0,10]
print('testing space objects\n')
# pars_list = [[(0, 10), (0, 10)],
# [[0, 10], [0, 10]]]
#
# types_list = ['discr',
# ['discr', 'discr']]
#
# input_pars = itertools.product(pars_list, types_list)
# for p in input_pars:
# # print(p)
# s = qt.Space(*p)
# b = s.boes
# t = s.types
# # print(s, t)
# self.assertIsInstance(s, qt.Space)
# self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
# self.assertEqual(t, ['discr', 'discr'], 'types incorrect')
#
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = ['foo, bar',
['foo', 'bar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['enum', 'enum'], 'types incorrect')
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = [['discr', 'foobar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['discr', 'enum'], 'types incorrect')
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types='conti, enum')
self.assertEqual(s.types, ['conti', 'enum'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 2))
self.assertEqual(s.shape, (np.inf, 2))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(1, 2), (2, 3), (3, 4)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['discr', 'discr', 'discr'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (2, 2, 2))
self.assertEqual(s.shape, (2, 2, 2))
self.assertEqual(s.count, 8)
self.assertEqual(s.boes, [(1, 2), (2, 3), (3, 4)])
pars_list = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
pars_list = [((1, 2, 3), (2, 3, 4), (3, 4, 5))]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum'])
self.assertEqual(s.dim, 1)
self.assertEqual(s.size, (3,))
self.assertEqual(s.shape, (3,))
self.assertEqual(s.count, 3)
pars_list = ((1, 2, 3), (2, 3, 4), (3, 4, 5))
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
def test_extract(self):
"""
:return:
"""
pars_list = [(0, 10), (0, 10)]
types_list = ['discr', 'discr']
s = Space(pars=pars_list, par_types=types_list)
extracted_int, count = s.extract(3, 'interval')
extracted_int_list = list(extracted_int)
print('extracted int\n', extracted_int_list)
self.assertEqual(count, 16, 'extraction count wrong!')
self.assertEqual(extracted_int_list, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
extracted_rand, count = s.extract(10, 'rand')
extracted_rand_list = list(extracted_rand)
self.assertEqual(count, 10, 'extraction count wrong!')
print('extracted rand\n', extracted_rand_list)
for point in list(extracted_rand_list):
self.assertEqual(len(point), 2)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
extracted_int2, count = s.extract(3, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list2 = list(extracted_int2)
self.assertEqual(extracted_int_list2, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
print('extracted int list 2\n', extracted_int_list2)
self.assertIsInstance(extracted_int_list2[0][0], float)
self.assertIsInstance(extracted_int_list2[0][1], (int, int64))
extracted_rand2, count = s.extract(10, 'rand')
self.assertEqual(count, 10, 'extraction count wrong!')
extracted_rand_list2 = list(extracted_rand2)
print('extracted rand list 2:\n', extracted_rand_list2)
for point in extracted_rand_list2:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], float)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], (int, int64))
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), ('a', 'b')]
s = Space(pars=pars_list, par_types='enum, enum')
extracted_int3, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list3 = list(extracted_int3)
self.assertEqual(extracted_int_list3, [(0., 'a'), (0., 'b'), (10, 'a'), (10, 'b')],
'space extraction wrong!')
print('extracted int list 3\n', extracted_int_list3)
self.assertIsInstance(extracted_int_list3[0][0], float)
self.assertIsInstance(extracted_int_list3[0][1], str)
extracted_rand3, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list3 = list(extracted_rand3)
print('extracted rand list 3:\n', extracted_rand_list3)
for point in extracted_rand_list3:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (float, int))
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], str)
self.assertIn(point[1], ['a', 'b'])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14))]
s = Space(pars=pars_list, par_types='enum')
extracted_int4, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list4 = list(extracted_int4)
it = zip(extracted_int_list4, [(0, 10), (1, 'c'), (0, 'b'), (1, 14)])
for item, item2 in it:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 4\n', extracted_int_list4)
self.assertIsInstance(extracted_int_list4[0], tuple)
extracted_rand4, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list4 = list(extracted_rand4)
print('extracted rand list 4:\n', extracted_rand_list4)
for point in extracted_rand_list4:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (int, str))
self.assertIn(point[0], [0, 1, 'a'])
self.assertIsInstance(point[1], (int, str))
self.assertIn(point[1], [10, 14, 'b', 'c'])
self.assertIn(point, [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14)), (1, 4)]
s = Space(pars=pars_list, par_types='enum, discr')
extracted_int5, count = s.extract(1, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list5 = list(extracted_int5)
for item, item2 in extracted_int_list5:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 5\n', extracted_int_list5)
self.assertIsInstance(extracted_int_list5[0], tuple)
extracted_rand5, count = s.extract(5, 'rand')
self.assertEqual(count, 5, 'extraction count wrong!')
extracted_rand_list5 = list(extracted_rand5)
print('extracted rand list 5:\n', extracted_rand_list5)
for point in extracted_rand_list5:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], tuple)
print(f'type of point[1] is {type(point[1])}')
self.assertIsInstance(point[1], (int, np.int64))
self.assertIn(point[0], [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
print(f'test incremental extraction')
pars_list = [(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)]
s = Space(pars_list)
ext, count = s.extract(64, 'interval')
self.assertEqual(count, 4096)
points = list(ext)
        # all points have been extracted; build ten subspaces around 10 of them
        # check each subspace is a Space and lies within s, extract with 32 points per axis, and verify the generated count
for point in points[1000:1010]:
subspace = s.from_point(point, 64)
self.assertIsInstance(subspace, Space)
self.assertTrue(subspace in s)
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
ext, count = subspace.extract(32)
points = list(ext)
self.assertGreaterEqual(count, 512)
self.assertLessEqual(count, 4096)
print(f'\n---------------------------------'
f'\nthe space created around point <{point}> is'
f'\n{subspace.boes}'
f'\nand extracted {count} points, the first 5 are:'
f'\n{points[:5]}')
def test_axis_extract(self):
# test axis object with conti type
axis = Axis((0., 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'conti')
self.assertEqual(axis.axis_boe, (0., 5.))
self.assertEqual(axis.count, np.inf)
self.assertEqual(axis.size, 5.0)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [0., 1., 2., 3., 4.]))
self.assertTrue(np.allclose(axis.extract(0.5, 'int'), [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5]))
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(0 <= item <= 5) for item in extracted]))
# test axis object with discrete type
axis = Axis((1, 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'discr')
self.assertEqual(axis.axis_boe, (1, 5))
self.assertEqual(axis.count, 5)
self.assertEqual(axis.size, 5)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [1, 2, 3, 4, 5]))
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 2, 3, 4, 5]) for item in extracted]))
# test axis object with enumerate type
axis = Axis((1, 5, 7, 10, 'A', 'F'))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'enum')
self.assertEqual(axis.axis_boe, (1, 5, 7, 10, 'A', 'F'))
self.assertEqual(axis.count, 6)
self.assertEqual(axis.size, 6)
self.assertEqual(axis.extract(1, 'int'), [1, 5, 7, 10, 'A', 'F'])
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 5, 7, 10, 'A', 'F']) for item in extracted]))
def test_from_point(self):
"""测试从一个点生成一个space"""
# 生成一个space,指定space中的一个点以及distance,生成一个sub-space
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10., 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
print('create subspace from a point in space')
p = (3, 3)
distance = 2
subspace = s.from_point(p, distance)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'discr'])
self.assertEqual(subspace.dim, 2)
self.assertEqual(subspace.size, (4.0, 5))
self.assertEqual(subspace.shape, (np.inf, 5))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(1, 5), (1, 5)])
print('create subspace from a 6 dimensional discrete space')
s = Space(pars=[(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 65345616)
self.assertEqual(subspace.size, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.shape, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.count, 65345616)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace from a 6 dimensional continuous space')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 48000000)
self.assertEqual(subspace.size, (15.0, 20.0, 20.0, 20.0, 20.0, 20.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace with different distances on each dimension')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = [10, 5, 5, 10, 10, 5]
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 6000000)
self.assertEqual(subspace.size, (15.0, 10.0, 10.0, 20.0, 20.0, 10.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (195, 205), (145, 155), (140, 160), (140, 160), (145, 155)])
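# Added illustration (not part of qteasy): interval extraction of a 2-D discrete space
# (0, 10) x (0, 10) with interval 3 is just the Cartesian product of the per-axis grids,
# which is exactly the 16-point list spelled out in TestSpace.test_extract.
def _sketch_interval_extraction(step=3, upper=10):
    grid = list(range(0, upper, step))          # [0, 3, 6, 9]
    return list(itertools.product(grid, grid))  # 16 points, (0, 0) ... (9, 9)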
class TestCashPlan(unittest.TestCase):
def setUp(self):
self.cp1 = qt.CashPlan(['2012-01-01', '2010-01-01'], [10000, 20000], 0.1)
self.cp1.info()
self.cp2 = qt.CashPlan(['20100501'], 10000)
self.cp2.info()
self.cp3 = qt.CashPlan(pd.date_range(start='2019-01-01',
freq='Y',
periods=12),
[i * 1000 + 10000 for i in range(12)],
0.035)
self.cp3.info()
def test_creation(self):
self.assertIsInstance(self.cp1, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp2, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp3, qt.CashPlan, 'CashPlan object creation wrong')
# test __repr__()
print(self.cp1)
print(self.cp2)
print(self.cp3)
# test __str__()
self.cp1.info()
self.cp2.info()
self.cp3.info()
        # test assertion errors
self.assertRaises(AssertionError, qt.CashPlan, '2016-01-01', [10000, 10000])
self.assertRaises(KeyError, qt.CashPlan, '2020-20-20', 10000)
def test_properties(self):
self.assertEqual(self.cp1.amounts, [20000, 10000], 'property wrong')
self.assertEqual(self.cp1.first_day, Timestamp('2010-01-01'))
self.assertEqual(self.cp1.last_day, Timestamp('2012-01-01'))
self.assertEqual(self.cp1.investment_count, 2)
self.assertEqual(self.cp1.period, 730)
self.assertEqual(self.cp1.dates, [Timestamp('2010-01-01'), Timestamp('2012-01-01')])
self.assertEqual(self.cp1.ir, 0.1)
self.assertAlmostEqual(self.cp1.closing_value, 34200)
self.assertAlmostEqual(self.cp2.closing_value, 10000)
self.assertAlmostEqual(self.cp3.closing_value, 220385.3483685)
self.assertIsInstance(self.cp1.plan, pd.DataFrame)
self.assertIsInstance(self.cp2.plan, pd.DataFrame)
self.assertIsInstance(self.cp3.plan, pd.DataFrame)
def test_operation(self):
cp_self_add = self.cp1 + self.cp1
cp_add = self.cp1 + self.cp2
cp_add_int = self.cp1 + 10000
cp_mul_int = self.cp1 * 2
cp_mul_float = self.cp2 * 1.5
cp_mul_time = 3 * self.cp2
cp_mul_time2 = 2 * self.cp1
cp_mul_time3 = 2 * self.cp3
cp_mul_float2 = 2. * self.cp3
self.assertIsInstance(cp_self_add, qt.CashPlan)
self.assertEqual(cp_self_add.amounts, [40000, 20000])
self.assertEqual(cp_add.amounts, [20000, 10000, 10000])
self.assertEqual(cp_add_int.amounts, [30000, 20000])
self.assertEqual(cp_mul_int.amounts, [40000, 20000])
self.assertEqual(cp_mul_float.amounts, [15000])
self.assertEqual(cp_mul_float.dates, [Timestamp('2010-05-01')])
self.assertEqual(cp_mul_time.amounts, [10000, 10000, 10000])
self.assertEqual(cp_mul_time.dates, [Timestamp('2010-05-01'),
Timestamp('2011-05-01'),
Timestamp('2012-04-30')])
self.assertEqual(cp_mul_time2.amounts, [20000, 10000, 20000, 10000])
self.assertEqual(cp_mul_time2.dates, [Timestamp('2010-01-01'),
Timestamp('2012-01-01'),
Timestamp('2014-01-01'),
Timestamp('2016-01-01')])
self.assertEqual(cp_mul_time3.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31'),
Timestamp('2031-12-29'),
Timestamp('2032-12-29'),
Timestamp('2033-12-29'),
Timestamp('2034-12-29'),
Timestamp('2035-12-29'),
Timestamp('2036-12-29'),
Timestamp('2037-12-29'),
Timestamp('2038-12-29'),
Timestamp('2039-12-29'),
Timestamp('2040-12-29'),
Timestamp('2041-12-29'),
Timestamp('2042-12-29')])
self.assertEqual(cp_mul_float2.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
                                               Timestamp('2027-12-31'),
import pandas as pd
import numpy as np
import random
import math
import argparse
import time
#self-defined
from handler.student_handler import Student
from handler.room_handler import Room
from .static.config import PREFERENCE_DICT, NATIONALITIES, LOCAL_NATIONALITY, logging
from .init_helper import df2object_student
from .match_helper import get_room_type_quota, get_freq
seed = 30
random.seed(seed)
#testing
def random_gen_preferences(num):
all_room_types_symbol = sorted(list(PREFERENCE_DICT.values()))
data = []
for s in range(num):
s_prefs = []
for pref in range(3):
s_prefs.extend(random.choices(all_room_types_symbol))
data.append(s_prefs)
return data
def random_gen_studentData(STUDENTNUM):
dataFrame_col = ['ID','pref_1', 'pref_2', 'pref_3','nationality']
data = random_gen_preferences(STUDENTNUM)
students = pd.DataFrame(data=data,columns=dataFrame_col[1:4])
students[dataFrame_col[4]] = [random.choice(NATIONALITIES) for i in range(STUDENTNUM)]
students.insert(loc=0, column = dataFrame_col[0], value = [random.randrange(100, 200) for i in range(STUDENTNUM)])
return students
def get_country_by_pop(students_data):
count = get_freq(students_data, col = 'nationality')
return sorted([(key, count[key]) for key in count], key=lambda tupl: tupl[1], reverse=True)
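# Added sketch (illustration only; assumes get_freq returns a plain {nationality: count}
# dict, as it is used above): the same descending ordering, up to ties, can be obtained
# with collections.Counter.
def _sketch_country_by_pop(nationalities):
    from collections import Counter
    return Counter(nationalities).most_common()  # list of (nationality, count), most frequent first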
def student_by_nation_df(df, gender, sortedNations):
df_2d = pd.DataFrame()
for nation, freq in sortedNations:
nationgroup = df[df['nationality']==nation]
students_lis = df2object_student(nationgroup, 1)
df_2d[nation] = pd.Series(students_lis)
# groupbyPrefs2 = list(nationgroup.groupby('pref_2'))
# groupbyPrefs3 = list(nationgroup.groupby('pref_3'))
# nationgroup['']
return df_2d
def int_match(sortedNations, all_rooms_objs, student_by_nation_df):
res=""
local_stud_index = 0
local_group = student_by_nation_df[LOCAL_NATIONALITY]
arranged_studs_lis = []
for room in all_rooms_objs:
logging.debug("matching Room:{}, Type:{}".format(room.getNum(), room.getType()))
logging.debug("available beds: {}".format(room.available_beds))
room_type = room.getType()
priority = 0
nation_index = 0
picked_nation = set()
#place a Taiwanese student first
if (len(local_group) > local_stud_index):
student = local_group[local_stud_index]
room.addDweller(student)
student.setArranged(True)
local_stud_index+=1
picked_nation.add(LOCAL_NATIONALITY)
arranged_studs_lis.append(student)
logging.debug("success arrange one student!")
        # place students of different nationalities sharing the same preference
while (priority<3):
nation_index = 0
while (nation_index < len(sortedNations)):
end = False
#select diff nationalities
while(sortedNations[nation_index][0] in picked_nation):
nation_index+=1
if (nation_index >= len(sortedNations)):
end = True
break
if (end):
break
nation = sortedNations[nation_index][0]
nationgroup = student_by_nation_df[nation]
for student in nationgroup:
                if (pd.notnull(student)):
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.3'
# jupytext_version: 1.0.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
# %autosave 0
# %load_ext autoreload
# %autoreload 2
import logging
import numpy as np
import pandas as pd
import pkg_resources
import seaborn as sns
import matplotlib.pyplot as plt
from natsort import natsorted
from crispy.QCPlot import QCplot
from crispy.CRISPRData import Library
LOG = logging.getLogger("Crispy")
DPATH = pkg_resources.resource_filename("crispy", "data/")
RPATH = pkg_resources.resource_filename("notebooks", "minlib/reports/")
# Libraries
#
master_lib = (
Library.load_library("MasterLib_v1.csv.gz", set_index=False)
.dropna(subset=["WGE_Sequence"])
.set_index(["sgRNA_ID", "Library"])
)
#
#
polyt = dict(polyt4="TTTT", polyt5="TTTTT")
# Count
polyt_df = pd.DataFrame(
{
i: {p: int(polyt[p] in s[:-3]) for p in polyt}
for i, s in master_lib["WGE_Sequence"].iteritems()
}
).T
polyt_count_lib = polyt_df.reset_index().groupby("level_1").sum()
# Start position
polyt_pos_df = pd.DataFrame(
{
i: {p: s.index(polyt[p]) if polyt[p] in s else np.nan for p in polyt}
for i, s in master_lib.loc[
polyt_df[polyt_df.sum(1) != 0].index, "WGE_Sequence"
].iteritems()
}
).T
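# Added toy check (illustration only, not part of the analysis): the membership test used
# above looks for the poly-T stretch anywhere except the last 3 bases of the sequence.
toy_seq = "ACGTTTTTACGTTT"
assert "TTTT" in toy_seq[:-3] and "TTTTT" in toy_seq[:-3]  # the trailing "TTT" alone would not count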
# Plot counts per library
#
plot_df = polyt_count_lib.unstack().reset_index()
plot_df.columns = ["polyt", "library", "count"]
g = sns.catplot(
"polyt",
"count",
data=plot_df,
kind="bar",
col="library",
color=QCplot.PAL_DBGD[0],
sharey=True,
height=2.5,
aspect=0.5,
linewidth=0,
)
for ax in g.axes[0]:
ax.grid(True, ls=":", lw=0.1, alpha=1.0, zorder=0, axis="y")
sns.despine(top=False, right=False)
plt.savefig(f"{RPATH}/ployt_count_library.pdf", bbox_inches="tight", transparent=True)
plt.close("all")
#
#
row_order = ["KS", "JACKS", "RuleSet2"]
fig, axs = plt.subplots(
len(row_order),
2,
figsize=(5, 2 * len(row_order)),
sharey="none",
sharex="none",
dpi=600,
gridspec_kw={"width_ratios": [10, 1]},
)
for i, mtype in enumerate(row_order):
    plot_df = pd.concat([polyt_pos_df, master_lib.loc[polyt_pos_df.index, mtype]], axis=1)
# coding=utf-8
# Author: <NAME>
# Date: Sept 02, 2019
#
# Description: Reads the similarity-celltype and plots the results.
#
#
import math
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
import matplotlib as mpl
mpl.rcParams['font.family'] = 'Helvetica'
mpl.rcParams['mathtext.fontset'] = 'cm'
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from utils import ensurePathExists
from data import *
# Confidence Interval
def calc_ci(x):
mean, count, std = x.mean(), x.count(), x.std()
return pd.Series({
'mean': mean,
'std': std,
'ci95-max': (mean + 1.96 * std / math.sqrt(count)),
'ci95-min': (mean - 1.96 * std / math.sqrt(count))})
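# Added usage sketch (hypothetical numbers, not results from this project): calc_ci is
# meant to be applied to a numeric pandas Series, e.g. per group via groupby().apply(calc_ci).
def _sketch_calc_ci():
    s = pd.Series([0.62, 0.58, 0.71, 0.66, 0.60])
    return calc_ci(s)  # Series with 'mean', 'std', 'ci95-max', 'ci95-min'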
def plot_module_null_model(celltype='spermatocyte', network='thr', threshold=0.5, layer='DM'):
threshold_str = str(threshold).replace('.', 'p')
print('Plotting {celltype:s}-{network:s}-{threshold:s}-{layer:s}'.format(celltype=celltype, network=network, threshold=threshold_str, layer=layer))
rCSVFile = 'results/module_null/{celltype:s}/module-null-{celltype:s}-{network:s}-{threshold:s}-{layer:s}.csv.gz'.format(celltype=celltype, network=network, threshold=threshold_str, layer=layer)
df = pd.read_csv(rCSVFile, index_col=0, encoding='utf-8')
df['mod-id'] = df['mod-id'].astype(str)
df['values'] = df['values'].apply(eval)
#colors = {'HS': '#2ca02c', 'MM': '#7f7f7f', 'DM': '#ff7f0e'}
#color = colors[layer]
data_cell = data_cells[celltype]
modules = data_cell['modules-svd']['modules'][layer]
fig = plt.figure(figsize=(6, 4))
gs = gridspec.GridSpec(ncols=1, nrows=1, figure=fig)
ax = fig.add_subplot(gs[0, 0])
ax.set_title('Gene (page)rank and SVD modules\n{celltype:s}-{network:s}-{threshold:.1f}-{layer:s}'.format(celltype=celltype, network=network, threshold=threshold, layer=layer))
bins = np.linspace(0, 0.0005, 8)
for mid, dfg in df.groupby('mod-id'):
mname = dfg['mod-name'].iloc[0]
print("M{mid:s} - {mname:s}".format(mid=str(mid), mname=mname))
        try:
            midcolor = int(mid)
        except ValueError:
            midcolor = mid
for module in modules:
if module['id'] == midcolor:
color = module['facecolor']
dfT = df.loc[((df['run'] == 'real') & (df['mod-id'] == mid)), :]
dfN = df.loc[(~(df['run'] == 'real') & (df['mod-id'] == mid)), :]
rN = []
for idx, row in dfN.iterrows():
            dfNtmp = pd.DataFrame({'values': pd.Series(row['values'])})
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
decorator to ignore FutureWarning if we have a SparsePanel
can be removed when SparsePanel is fully removed
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
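# Added usage note (hypothetical example, for clarity only): the decorator is applied to
# individual test methods below, e.g.
#
#     @ignore_sparse_panel_future_warning
#     def test_something(self):
#         ...  # runs inside assert_produces_warning(FutureWarning) when self.panel
#              # is a SparsePanel, and runs unchanged otherwise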
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
                nona = remove_na(x)
"""syphon.tests.archive.test_archive.py
Copyright (c) 2017-2018 Keithley Instruments, LLC.
Licensed under MIT (https://github.com/ehall/syphon/blob/master/LICENSE)
"""
import os
import pytest
from pandas import concat, DataFrame, read_csv
from pandas.testing import assert_frame_equal
from sortedcontainers import SortedDict, SortedList
from syphon import Context
from syphon.archive import archive
from syphon.init import init
from .. import get_data_path
@pytest.fixture(params=[
(
'iris.csv',
SortedDict({'0': 'Name'})
),
(
'iris_plus.csv',
SortedDict({'0': 'Species', '1': 'PetalColor'})
),
(
'auto-mpg.csv',
SortedDict({'0': 'model year', '1': 'cylinders', '2': 'origin'})
)
])
def archive_params(request):
return request.param
def _get_expected_paths(
path: str, schema: SortedDict, subset: DataFrame, filename: str,
path_list=None) -> SortedList:
# prevent mutable default parameter
if path_list is None:
path_list = SortedList()
this_schema = schema.copy()
header = None
try:
_, header = this_schema.popitem(last=False)
except KeyError:
path_list.add(os.path.join(path, filename))
return path_list
if header not in subset.columns:
return path_list
for value in subset.get(header).drop_duplicates().values:
new_subset = subset.loc[subset.get(header) == value]
value = value.lower().replace(' ', '_')
if value[-1] == '.':
value = value[:-1]
path_list = _get_expected_paths(
os.path.join(path, value),
this_schema,
new_subset,
filename,
path_list=path_list)
return path_list
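# Added worked example (tiny hypothetical frame, not one of the test fixtures): with a
# one-level schema the recursion lowercases each distinct value of the header column and
# nests one directory per value.
def _sketch_expected_paths():
    subset = DataFrame({'Name': ['Iris-setosa', 'Iris-virginica']})
    schema = SortedDict({'0': 'Name'})
    return _get_expected_paths('archive', schema, subset, 'iris.csv')
    # -> SortedList(['archive/iris-setosa/iris.csv', 'archive/iris-virginica/iris.csv'])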
def test_archive(archive_params, archive_dir, overwrite):
filename, schema = archive_params
context = Context()
context.archive = str(archive_dir)
context.data = os.path.join(get_data_path(), filename)
context.overwrite = overwrite
context.schema = schema
init(context)
expected_df = DataFrame(read_csv(context.data, dtype=str))
expected_df.sort_values(list(expected_df.columns), inplace=True)
expected_df.reset_index(drop=True, inplace=True)
expected_paths = _get_expected_paths(
context.archive,
schema,
expected_df,
filename
)
if context.overwrite:
for e in expected_paths:
os.makedirs(os.path.dirname(e), exist_ok=True)
with open(e, mode='w') as f:
f.write('content')
archive(context)
actual_frame = DataFrame()
actual_paths = SortedList()
for root, _, files in os.walk(context.archive):
for f in files:
if '.csv' in f:
filepath = os.path.join(root, f)
actual_paths.add(filepath)
actual_frame = concat([
actual_frame,
DataFrame(read_csv(filepath, dtype=str))
])
actual_frame.sort_values(list(actual_frame.columns), inplace=True)
actual_frame.reset_index(drop=True, inplace=True)
assert expected_paths == actual_paths
    assert_frame_equal(expected_df, actual_frame)
# -*- coding: utf-8 -*-
"""
Time class.
"""
__all__ = ['Time']
class Time(object):
"""Class that defines the time of a unit or model
Examples::
>>> th = Time('2017-01-01', '2017-01-02', freq = '2H')
>>> th.dt
7200.0
.. _tab_tag_freq:
.. table:: Freq available tags in Pandas
====== ==============================================
Tag Description
====== ==============================================
B business day frequency
C custom business day frequency (experimental)
D calendar day frequency
W weekly frequency
M month end frequency
BM business month end frequency
CBM custom business month end frequency
MS month start frequency
BMS business month start frequency
CBMS custom business month start frequency
Q quarter end frequency
BQ business quarter endfrequency
QS quarter start frequency
BQS business quarter start frequency
A year end frequency
BA business year end frequency
AS year start frequency
BAS business year start frequency
BH business hour frequency
H hourly frequency
T, min minutely frequency
S secondly frequency
L, ms milliseonds
U, us microseconds
N nanoseconds
====== ==============================================
"""
def __init__(self, start=None, end=None, freq=None, *args, **kwds):
"""
:param args:
:param kwds:
:param start: starting value, datetime-like
:param end: end time, datetime-like
:param freq: string or pandas offset object
"""
super().__init__(*args, **kwds)
import pandas as pd
from numpy import linspace
from pandas.tseries.frequencies import to_offset
if freq is None:
freq = 'H'
self.freq = freq
if isinstance(start, str) and isinstance(end, str):
try:
self.start = pd.Timestamp(start)
self.end = pd.Timestamp(end)
except ValueError as e:
raise e
try:
self.freq = to_offset(freq)
except Exception as e:
raise e
        self.datetime = pd.date_range(freq=freq, start=start, end=end)
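        # Added note: with the docstring example Time('2017-01-01', '2017-01-02', freq='2H')
        # this yields a 13-element DatetimeIndex (00:00 on Jan 1 through 00:00 on Jan 2 in
        # 2-hour steps), and the step in seconds exposed as .dt is 2 * 3600 = 7200.0.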
# Copyright 2021 The WAX-ML Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import haiku as hk
import jax
import jax.numpy as jnp
import pandas as pd
import pytest
from jax.config import config
from wax.compile import jit_init_apply
from wax.modules.ewma import EWMA
from wax.unroll import dynamic_unroll_fori_loop, unroll
@pytest.mark.parametrize("dtype", ["float32", "float64"])
def test_init_and_first_step_ema(dtype):
if dtype == "float64":
config.update("jax_enable_x64", True)
else:
config.update("jax_enable_x64", False)
seq = hk.PRNGSequence(42)
x = jax.random.normal(shape=(3,), key=next(seq), dtype=jnp.float64)
@jit_init_apply
@hk.transform_with_state
def model(x):
return EWMA(0.1, adjust=True)(x)
params, state = model.init(next(seq), x)
ema, state = model.apply(params, state, next(seq), x)
assert ema.dtype == jnp.dtype(dtype)
def test_run_ema_vs_pandas_not_adjust():
config.update("jax_enable_x64", True)
seq = hk.PRNGSequence(42)
x = jax.random.normal(shape=(10, 3), key=next(seq), dtype=jnp.float64)
@jit_init_apply
@hk.transform_with_state
def model(x):
return EWMA(0.1, adjust=False)(x)
ema, state = unroll(model, dynamic=False, return_final_state=True)(x)
    pandas_ema = pd.DataFrame(x)
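    # Added note: with adjust=False the reference recursion is
    #   y_0 = x_0,  y_t = (1 - alpha) * y_{t-1} + alpha * x_t,
    # which pandas computes via DataFrame.ewm(alpha=0.1, adjust=False).mean(); the
    # truncated remainder of this test presumably compares `ema` against that result.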
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import os
import sys
import seaborn as sns
import scipy.stats as stats
import pickle
from pathlib import Path
import statistics
from matplotlib import pyplot as plt
from matplotlib.pyplot import figure
import numpy as np
def ReadFileToList(dirFile):
print("\nreading ", dirFile)
lst = []
with open(dirFile, 'r') as filehandle:
for line in filehandle:
# remove linebreak which is the last character of the string
currentPlace = line[:-1]
# add item to the list
lst.append(currentPlace)
return lst
def hist_dist(df, filename, filenamesuffix):
print("\nplotting for ", filename, " and ", filenamesuffix)
figure(figsize=(20, 10))
# plot 1:
ax = plt.subplot(3, 1, 1)
plt.scatter(df.index, df['OCHIAI'], label='OCHIAI')
plt.scatter(df.index, df['BLEU'], label='BLEU')
# plt.xlabel('Mutants')
# plt.ylabel('Scale')
# plt.title('Semantic-Syntactic co-relation')
plt.legend()
ax.axes.xaxis.set_visible(False)
# plot 2:
ax = plt.subplot(3, 1, 2)
plt.scatter(df.index, df['OCHIAI'], label='OCHIAI')
plt.scatter(df.index, df['JACCARD'], label='JACCARD')
# plt.xlabel('Mutants')
# plt.ylabel('Scale')
plt.legend()
ax.axes.xaxis.set_visible(False)
    # plot 3:
ax = plt.subplot(3, 1, 3)
plt.scatter(df.index, df['OCHIAI'], label='OCHIAI')
plt.scatter(df.index, df['COSINE'], label='COSINE')
# plt.xlabel('Mutants')
# plt.ylabel('Scale')
plt.legend()
ax.axes.xaxis.set_visible(False)
    if filenamesuffix == "all":
        stitle = "All"
    elif filenamesuffix == "sem-gt80p":
        stitle = "Semantic similarity >= 0.8"
    elif filenamesuffix == "syn-gt80p":
        stitle = "Syntactic similarity >= 0.8"
    elif filenamesuffix == "patch-based":
        stitle = "Mutants on patch changed fns"
    else:
        stitle = filenamesuffix
plt.suptitle('Semantic vs Syntactic - ' + stitle)
filename = filename + "-" + stitle
plt.savefig(dirSimilarity + "/" + filename + '.pdf')
plt.savefig(dirSimilarity + "/" + filename + '.png')
#plt.show()
def scatter_plot(data, parax, paray, filename, filenamesuffix):
print("\nplotting for ", filename, " and ", filenamesuffix)
label_parax = parax
label_paray = paray
if "RQ3" in filenamesuffix:
label_parax = "\u0394" + label_parax + " |M2 - M1|"
label_paray = "\u0394" + label_paray + " |M2 - M1|"
if "RQ4" in filenamesuffix:
label_parax = "Syntactic Distance (1 - " + label_parax + ")"
label_paray = "Failing tests (#)"
#axeLeft = sns.jointplot(data=data, x=parax, y=paray, kind="reg")
axeLeft = sns.JointGrid(data=data, x=parax, y=paray)
axeLeft.plot(sns.regplot, sns.boxplot)
axeLeft.set_axis_labels(xlabel=label_parax, ylabel=label_paray, fontsize=12)
pr_axeLeft, pp_axeLeft = stats.pearsonr(data[parax], data[paray])
kr_axeLeft, kp_axeLeft = stats.kendalltau(data[parax], data[paray])
# # if you choose to write your own legend, then you should adjust the properties then
phantom_axeLeft, = axeLeft.ax_joint.plot([], [], linestyle="", alpha=0)
# # here graph is not a ax but a joint grid, so we access the axis through ax_joint method
#label_axeLeft = 'pearson: r={:f}, p={:f}\nkendall: r={:f}, p={:f}'.format(pr_axeLeft, pp_axeLeft, kr_axeLeft, kp_axeLeft)
label_axeLeft = 'pearson: r={:.3f}, p={:.3f}\nkendall: r={:.3f}, p={:.3f}'.format(
round(pr_axeLeft, 3),
round(pp_axeLeft, 3),
round(kr_axeLeft, 3),
round(kp_axeLeft, 3))
# # label_pearson = 'r={:f}, p={:f}'.format(pr, pp)
axeLeft.ax_joint.legend([phantom_axeLeft], [label_axeLeft], fontsize="15")
#plt.tight_layout()
# if filenamesuffix == "RQ1_all___Box_plot":
# stitle = "All"
# elif filenamesuffix == "RQ1_sem_gt80p___Box_plot":
# stitle = "Semantic similarity >= 0.8"
# elif filenamesuffix == "RQ1_syn_gt80p___Box_plot":
# stitle = "Syntactic similarity >= 0.8"
# elif filenamesuffix == "RQ2_patch_based___Box_plot":
# stitle = "Mutants on patch changed fns"
# elif filenamesuffix == "RQ3_Random_Lines___Box_plot":
# stitle = "Difference in scores based on random sentences"
# elif filenamesuffix == "RQ3_Changed_Lines___Box_plot":
# stitle = "Difference in scores based on patch affected sentences"
# else:
# stitle = filenamesuffix
# plt.suptitle('Semantic vs Syntactic - ' + stitle)
filename = filename + "_" + filenamesuffix
plt.savefig(dirSimilarity + "/" + filename + ".pdf", format='pdf')
plt.savefig(dirSimilarity + "/" + filename + ".png", format='png')
#plt.show()
#print()
def plot_this(df_arg, filenamesuffix):
if len(df_arg) <= 0:
print("\ncannot plot ", filenamesuffix, " due to empty set")
return
print("\nplotting ", filenamesuffix)
#df = df.sort_values(['OCHIAI', 'BLEU'], ascending=True)
#df = df.reset_index(drop=True)
#hist_dist(df, "plot-" + technique, filenamesuffix)
# scatter_plot(df, "BLEU", "OCHIAI", "scatter_plot_bleu_ochiai-" + technique, filenamesuffix)
# scatter_plot(df, "JACCARD", "OCHIAI", "scatter_plot_jaccard_ochiai-" + technique, filenamesuffix)
# scatter_plot(df, "COSINE", "OCHIAI", "scatter_plot_cosine_ochiai-" + technique, filenamesuffix)
scatter_plot(df_arg, "BLEU", "OCHIAI", "Scatter-Plot-" + technique + "_bleu_ochiai", filenamesuffix)
scatter_plot(df_arg, "JACCARD", "OCHIAI", "Scatter-Plot-" + technique + "_jaccard_ochiai", filenamesuffix)
scatter_plot(df_arg, "COSINE", "OCHIAI", "Scatter-Plot-" + technique + "_cosine_ochiai", filenamesuffix)
def try_parse_int(string):
    '''helper to parse an int from a string without raising on an empty or malformed string'''
try:
return int(string)
except Exception:
return 0
def get_locations(dirMain_arg, technique_arg, df_Compilable):
    strSimilarityFolderName = "similarity" + "-" + technique_arg
    dirSimilarity = dirMain_arg + "/" + strSimilarityFolderName
strLocationsPickleName = "locations.pkl"
dirLocationsPickle = dirSimilarity + "/" + strLocationsPickleName
fileLocationsPickle = Path(dirLocationsPickle)
if not fileLocationsPickle.is_file():
print("\nfile not found ", dirLocationsPickle)
        dirMutants = dirMain_arg + "/" + "experiment_mutants-" + technique_arg
strLocationMapFileName = "locationmap.txt"
dirLocationMap = dirMutants + "/" + strLocationMapFileName
fileLocationMap = open(dirLocationMap,"r")
lstLocations = fileLocationMap.readlines()
df_Locations_Returned = | pd.DataFrame(columns=['BUG','MUTANT','ORIGINAL', 'LOCATION']) | pandas.DataFrame |
import coinmarketcap
import json
import pandas as pd
import time
import datetime
market = coinmarketcap.Market()
coins = market.ticker()
now = datetime.datetime.now()
for i in range(96):
#this creates a dataframe with the top 100 coins
    coinArray = pd.DataFrame([pd.Series(coins[j]) for j in range(len(coins))])  # assumption: one row per coin returned by the ticker
import shutil
import logging
import unittest
import tempfile
import pandas as pd
from pathlib import Path
from dicom_tools._dicom_io import (copy_from_list,
copy_from_file,
copy_headers,
print_info,
create_dataset_summary,
_LOGGER_ID)
from dicom_tools._utils import setup_logging
class TestCopyFromListBase(unittest.TestCase):
def setUp(self):
self.in_dir = Path(tempfile.mkdtemp())
self.out_dir = Path(tempfile.mkdtemp())
self.n_files = 10
for i in range(self.n_files):
path = self.in_dir / ("file%02d" % i)
path.touch()
self.test_files = [f.name for f in self.in_dir.glob("file*")]
def tearDown(self):
shutil.rmtree(self.in_dir)
shutil.rmtree(self.out_dir)
def check_results(self, list_in, list_ret, out_dir=None):
if out_dir is None:
out_dir = self.out_dir
self.assertIsNotNone(list_ret)
self.assertIsInstance(list_ret, list)
for filepath in list_ret:
self.assertIsInstance(filepath, Path)
self.assertTrue(filepath.is_file())
list_copied = [str(f.relative_to(out_dir)) for f in list_ret]
self.assertListEqual(list(list_in), list(list_copied))
class TestCopyFromList(TestCopyFromListBase):
def test_copy_invalid_input(self):
to_copy = self.test_files[::2]
ret = copy_from_list(in_dir="/some/invalid/input/directory",
out_dir=self.out_dir,
to_copy=to_copy)
self.assertIsNone(ret)
ret = copy_from_list(in_dir=self.in_dir,
out_dir="/some/invalid/input/directory",
to_copy=to_copy)
self.assertIsNone(ret)
def test_partial_copy(self):
to_copy = self.test_files[::2].copy()
ret = copy_from_list(in_dir=self.in_dir,
out_dir=self.out_dir,
to_copy=to_copy,
show_progress=False)
self.check_results(list_in=to_copy, list_ret=ret)
def test_extended_copy_list(self):
to_copy = self.test_files[::2].copy()
to_copy_extended = to_copy + ["this-is-an-imaginary-file",
"this-is-another-imaginary-file"]
with self.assertLogs("dicom", level="WARNING") as cm:
ret = copy_from_list(in_dir=self.in_dir,
out_dir=self.out_dir,
to_copy=to_copy_extended,
raise_if_missing=False,
show_progress=False)
self.check_results(list_in=to_copy, list_ret=ret)
self.assertEqual(len(cm.output), 2) # Show exactly two warnings.
class TestCopyFromFile(TestCopyFromListBase):
def test_copy(self):
to_copy = self.test_files[::2].copy()
to_copy = | pd.Series(to_copy) | pandas.Series |
import os
import requests
import time
from datetime import datetime
import pandas as pd
import re
from threading import Thread
def get_home_dir():
cwd = os.getcwd()
cwd_list = cwd.split('/')
repo_position = [i for i, s in enumerate(cwd_list) if s == 'crypto_predict']
if len(repo_position) > 1:
print("error! more than one intance of repo name in path")
return None
home_dir = '/'.join(cwd_list[:repo_position[0] + 1]) + '/'
return home_dir
def get_all_currency_pairs(show_mkts=False):
res = requests.get('https://bittrex.com/api/v1.1/public/getmarkets')
if res.json()['success']:
markets = res.json()['result']
market_names = []
for m in markets:
if show_mkts:
print(m['MarketName'])
market_names.append(m['MarketName'])
return market_names
else:
print('error! ', res.json()['message'])
return None
HOME_DIR = get_home_dir()
MARKETS = get_all_currency_pairs()
# just for private stuff, don't run for now
if False:
apikey = os.environ.get('btx_key')
apisecret = os.environ.get('btx_sec')
nonce = time.time()
uri = 'https://bittrex.com/api/v1.1/market/getopenorders?apikey=' + apikey + '&nonce=' + nonce
h = hmac()
sign = h.digest('sha512', uri, apisecret)
r = requests.get(uri, header={'apisign':sign})
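# A hedged sketch (not from the original) of how the disabled block above could be signed:
# the legacy Bittrex v1.1 API expected an HMAC-SHA512 of the full URI, keyed with the API
# secret, sent in an 'apisign' header. Treat the endpoint/parameter details as assumptions.
def signed_get(uri, apisecret):
    import hashlib
    import hmac
    sign = hmac.new(apisecret.encode(), uri.encode(), hashlib.sha512).hexdigest()
    return requests.get(uri, headers={'apisign': sign})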
def get_all_summaries():
res = requests.get('https://bittrex.com/api/v1.1/public/getmarketsummaries')
if res.json()['success']:
summary = res.json()['result']
return summary
else:
print('error! ', res.json()['message'])
return None
def get_ticker(m):
res = requests.get('https://bittrex.com/api/v1.1/public/getticker?market=' + m)
if res.json()['success']:
t = res.json()['result']
if t is None:
print('error for', m + '!', 'result was None. Message:', res.json()['message'])
return None
return t
else:
print('error for', m + '!', res.json()['message'])
return None
def get_all_tickers():
tickers = []
for m in MARKETS:
res = requests.get('https://bittrex.com/api/v1.1/public/getticker?market=' + m)
if res.json()['success']:
t = res.json()['result']
if t is None:
print('error for', m + '!', 'result was None. Message:', res.json()['message'])
continue
t['MarketName'] = m
tickers.append(t)
else:
print('error for', m + '!', res.json()['message'])
df = pd.io.json.json_normalize(tickers)
df.set_index('MarketName', inplace=True)
return df
def get_trade_history(market):
res = requests.get('https://bittrex.com/api/v1.1/public/getmarkethistory?market=' + market)
if res.json()['success']:
history = res.json()['result']
return history
else:
print('error! ', res.json()['message'])
return None
def save_all_trade_history():
for m in MARKETS:
print('saving', m, 'trade history')
history = get_trade_history(m)
if history is None or len(history) == 0:
print('no history!')
continue
df = make_history_df(history)
filename = HOME_DIR + 'data/trade_history/' + re.sub('-', '_', m) + '.csv.gz'
if os.path.exists(filename):
old_df = pd.read_csv(filename, index_col='TimeStamp')
full_df = old_df.append(df)
full_df.drop_duplicates(inplace=True)
else:
full_df = df
full_df.to_csv(filename, compression='gzip')
def read_history(market):
filename = HOME_DIR + 'data/trade_history/' + re.sub('-', '_', market) + '.csv.gz'
df = | pd.read_csv(filename, index_col='TimeStamp') | pandas.read_csv |
from anndata import AnnData
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from joblib import delayed
from tqdm import tqdm
import sys
import igraph
from .utils import ProgressParallel
from .. import logging as logg
from .. import settings
def pseudotime(adata: AnnData, n_jobs: int = 1, n_map: int = 1, copy: bool = False):
"""\
Compute pseudotime.
Projects cells onto the tree, and uses distance from the root as a pseudotime value.
Parameters
----------
adata
Annotated data matrix.
n_jobs
Number of cpu processes to use in case of performing multiple mapping.
n_map
number of probabilistic mapping of cells onto the tree to use. If n_map=1 then likelihood cell mapping is used.
copy
Return a copy instead of writing to adata.
Returns
-------
adata : anndata.AnnData
if `copy=True` it returns or else add fields to `adata`:
`.obs['edge']`
assigned edge.
`.obs['t']`
assigned pseudotime value.
`.obs['seg']`
assigned segment of the tree.
`.obs['milestone']`
assigned region surrounding forks and tips.
`.uns['pseudotime_list']`
list of cell projection from all mappings.
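    Example
    -------
    Minimal usage sketch (the root milestone choice is a placeholder, not taken from
    this module)::

        tl.root(adata, root)
        tl.pseudotime(adata, n_jobs=4, n_map=10)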
"""
if "root" not in adata.uns["graph"]:
raise ValueError(
"You need to run `tl.root` or `tl.roots` before projecting cells."
)
adata = adata.copy() if copy else adata
graph = adata.uns["graph"]
reassign, recolor = False, False
if "milestones" in adata.obs:
if adata.obs.milestones.dtype.name == "category":
tmp_mil = adata.obs.milestones.cat.categories.copy()
reassign = True
if "milestones_colors" in adata.uns:
tmp_mil_col = adata.uns["milestones_colors"].copy()
recolor = True
logg.info("projecting cells onto the principal graph", reset=True)
if n_map == 1:
df_l = [map_cells(graph, multi=False)]
else:
df_l = ProgressParallel(
n_jobs=n_jobs, total=n_map, file=sys.stdout, desc=" mappings"
)(delayed(map_cells)(graph=graph, multi=True) for m in range(n_map))
# formatting cell projection data
df_summary = df_l[0]
df_summary["seg"] = df_summary["seg"].astype("category")
df_summary["edge"] = df_summary["edge"].astype("category")
# remove pre-existing palette to avoid errors with plotting
if "seg_colors" in adata.uns:
del adata.uns["seg_colors"]
if set(df_summary.columns.tolist()).issubset(adata.obs.columns):
adata.obs[df_summary.columns] = df_summary
else:
adata.obs = pd.concat([adata.obs, df_summary], axis=1)
# list(map(lambda x: x.column))
# todict=list(map(lambda x: dict(zip(["cells"]+["_"+s for s in x.columns.tolist()],
# [x.index.tolist()]+x.to_numpy().T.tolist())),df_l))
names = np.arange(len(df_l)).astype(str).tolist()
# vals = todict
dictionary = dict(zip(names, df_l))
adata.uns["pseudotime_list"] = dictionary
if n_map > 1:
adata.obs["t_sd"] = (
pd.concat(
list(
map(
lambda x: pd.Series(x["t"]),
list(adata.uns["pseudotime_list"].values()),
)
),
axis=1,
)
.apply(np.std, axis=1)
.values
)
milestones = pd.Series(index=adata.obs_names)
for seg in graph["pp_seg"].n:
cell_seg = adata.obs.loc[adata.obs["seg"] == seg, "t"]
if len(cell_seg) > 0:
milestones[
cell_seg.index[
(cell_seg - min(cell_seg) - (max(cell_seg - min(cell_seg)) / 2) < 0)
]
] = graph["pp_seg"].loc[int(seg), "from"]
milestones[
cell_seg.index[
(cell_seg - min(cell_seg) - (max(cell_seg - min(cell_seg)) / 2) > 0)
]
] = graph["pp_seg"].loc[int(seg), "to"]
adata.obs["milestones"] = milestones
adata.obs.milestones = (
adata.obs.milestones.astype(int).astype("str").astype("category")
)
adata.uns["graph"]["milestones"] = dict(
zip(
adata.obs.milestones.cat.categories,
adata.obs.milestones.cat.categories.astype(int),
)
)
while reassign:
if "tmp_mil_col" not in locals():
break
if len(tmp_mil_col) != len(adata.obs.milestones.cat.categories):
break
rename_milestones(adata, tmp_mil)
if recolor:
adata.uns["milestones_colors"] = tmp_mil_col
reassign = False
logg.info(" finished", time=True, end=" " if settings.verbosity > 2 else "\n")
logg.hint(
"added\n"
" .obs['edge'] assigned edge.\n"
" .obs['t'] pseudotime value.\n"
" .obs['seg'] segment of the tree assigned.\n"
" .obs['milestones'] milestone assigned.\n"
" .uns['pseudotime_list'] list of cell projection from all mappings."
)
return adata if copy else None
def map_cells(graph, multi=False):
import igraph
g = igraph.Graph.Adjacency((graph["B"] > 0).tolist(), mode="undirected")
# Add edge weights and node labels.
g.es["weight"] = graph["B"][graph["B"].nonzero()]
if multi:
rrm = (
np.apply_along_axis(
lambda x: np.random.choice(np.arange(len(x)), size=1, p=x),
axis=1,
arr=graph["R"],
)
).T.flatten()
else:
rrm = np.apply_along_axis(np.argmax, axis=1, arr=graph["R"])
def map_on_edges(v):
vcells = np.argwhere(rrm == v)
if vcells.shape[0] > 0:
nv = np.array(g.neighborhood(v, order=1))
nvd = np.array(g.shortest_paths(v, nv)[0])
spi = np.apply_along_axis(np.argmax, axis=1, arr=graph["R"][vcells, nv[1:]])
ndf = pd.DataFrame(
{
"cell": vcells.flatten(),
"v0": v,
"v1": nv[1:][spi],
"d": nvd[1:][spi],
}
)
p0 = graph["R"][vcells, v].flatten()
p1 = np.array(
list(
map(lambda x: graph["R"][vcells[x], ndf.v1[x]], range(len(vcells)))
)
).flatten()
alpha = np.random.uniform(size=len(vcells))
f = np.abs(
(np.sqrt(alpha * p1 ** 2 + (1 - alpha) * p0 ** 2) - p0) / (p1 - p0)
)
ndf["t"] = (
graph["pp_info"].loc[ndf.v0, "time"].values
+ (
graph["pp_info"].loc[ndf.v1, "time"].values
- graph["pp_info"].loc[ndf.v0, "time"].values
)
* alpha
)
ndf["seg"] = 0
isinfork = (graph["pp_info"].loc[ndf.v0, "PP"].isin(graph["forks"])).values
ndf.loc[isinfork, "seg"] = (
graph["pp_info"].loc[ndf.loc[isinfork, "v1"], "seg"].values
)
ndf.loc[~isinfork, "seg"] = (
graph["pp_info"].loc[ndf.loc[~isinfork, "v0"], "seg"].values
)
return ndf
else:
return None
df = list(map(map_on_edges, range(graph["B"].shape[1])))
df = | pd.concat(df) | pandas.concat |
import os
from datetime import datetime
import time
import tqdm
import pickle
import pandas as pd
import random
from sklearn.preprocessing import LabelEncoder
import numpy as np
import torch
import math
import copy
import random
from multiprocessing import Pool
import multiprocessing
from collections import Counter
class Preprocess:
def __init__(self,args):
self.args = args
self.train_data = None
self.test_data = None
def get_train_data(self):
return self.train_data
def get_test_data(self):
return self.test_data
def split_data(self, data, ratio=0.7, shuffle=True, seed=0):
"""
split data into two parts with a given ratio.
"""
if self.args.cv_strategy :
if self.args.sep_grade :
# valid_user_path = os.path.join(self.args.data_dir,'cv_strategy',"cv_train_2.pkl")
valid_user_path = os.path.join(self.args.data_dir,"cv_valid_index.pickle")
if os.path.exists(valid_user_path):
with open(valid_user_path,"rb") as file :
valid_idx = pickle.load(file)
data_1 = data[~data['userID'].isin(valid_idx)]
data_2 = data[data['userID'].isin(valid_idx)]
else :
valid_user_path = os.path.join(self.args.data_dir,"cv_valid_index.pickle")
if os.path.exists(valid_user_path):
with open(valid_user_path,"rb") as file :
valid_idx = pickle.load(file)
data_1 = data[~data['userID'].isin(valid_idx)]
data_2 = data[data['userID'].isin(valid_idx)]
# else :
# train_user_path = os.path.join(self.args.data_dir,"cv_train_index.pickle")
# valid_user_path = os.path.join(self.args.data_dir,"cv_valid_index.pickle")
# with open(train_user_path,"rb") as file :
# train_idx = pickle.load(file)
# with open(valid_user_path,"rb") as file :
# valid_idx = pickle.load(file)
# data_1 = data[data['userID'].isin(train_idx)]
# data_2 = data[data['userID'].isin(valid_idx)]
else :
idx_list = list(set(data['userID']))
size = int(len(idx_list) * ratio)
train_idx = random.sample(idx_list,size)
data_1 = data[data['userID'].isin(train_idx)]
data_2 = data[~data['userID'].isin(train_idx)]
return data_1, data_2
def __save_labels(self, encoder, name):
le_path = os.path.join(self.args.asset_dir, name + '_classes.npy')
np.save(le_path, encoder.classes_)
def __preprocessing(self, df, is_train = True):
# Encoding the Categorical Embedding
cols = list(set(self.args.cate_col + self.args.temp_col)) # not to encode twice
if not os.path.exists(self.args.asset_dir):
os.makedirs(self.args.asset_dir)
other = []
for col in cols:
le = LabelEncoder()
if col in ['assessmentItemID', 'testId', 'KnowledgeTag', 'grade', 'last_problem', 'problem_number'] :
if is_train:
#For UNKNOWN class
a = df[col].unique().tolist()
a = sorted(a) if str(type(a[0])) == "<class 'int'>" else a
a = a + ['unknown']
le.fit(a)
self.__save_labels(le, col)
else:
label_path = os.path.join(self.args.asset_dir,col+'_classes.npy')
le.classes_ = np.load(label_path)
df[col]= df[col].astype(str)
df[col] = df[col].apply(lambda x: x if x in le.classes_ else 'unknown')
                # assume all columns are categorical
df[col]= df[col].astype(str)
test = le.transform(df[col])
df[col] = test
else :
if is_train :
unq = df[col].unique().tolist()
unq = sorted(unq) if str(type(unq[0])) == "<class 'int'>" else unq
other += list(map(lambda x : col+'_'+str(x),unq))
df[col] = df[col].apply(lambda x : col+'_'+str(x))
else :
label_path = os.path.join(self.args.asset_dir,'other_classes.npy')
le.classes_ = np.load(label_path)
df[col]= df[col].astype(str)
df[col] = df[col].apply(lambda x : col+'_'+x)
df[col] = df[col].apply(lambda x: x if x in le.classes_ else 'unknown')
if other :
other += ['unknown']
le = LabelEncoder()
le.fit(other)
self.__save_labels(le, 'other')
label_path = os.path.join(self.args.asset_dir,'other_classes.npy')
if os.path.exists(label_path):
le.classes_ = np.load(label_path)
for col in cols:
if col in ['assessmentItemID', 'testId', 'KnowledgeTag', 'grade', 'last_problem', 'problem_number'] :
continue
else :
df[col]= df[col].astype(str)
test = le.transform(df[col])
df[col] = test
if not is_train and self.args.sep_grade:
ddf = df[df['answerCode']==-1]
df = df[df.set_index(['userID','grade']).index.isin(ddf.set_index(['userID','grade']).index)]
return df
def __feature_engineering(self, df,file_name, is_train):
        data_path = os.path.join(self.args.asset_dir,f"{file_name[:-4]}_FE.pkl") # drop the .csv extension
if os.path.exists(data_path):
df = pd.read_pickle(data_path)
else :
df.sort_values(by=['userID','Timestamp'], inplace=True)
df['hour'] = df['Timestamp'].dt.hour
df['dow'] = df['Timestamp'].dt.dayofweek
diff = df.loc[:, ['userID','Timestamp']].groupby('userID').diff().fillna(pd.Timedelta(seconds=0))
diff = diff.fillna(pd.Timedelta(seconds=0))
diff = diff['Timestamp'].apply(lambda x: x.total_seconds())
# 푸는 시간
df['elapsed'] = diff
df['elapsed'] = df['elapsed'].apply(lambda x : x if x <650 and x >=0 else 0)
df['grade']=df['testId'].apply(lambda x : int(x[1:4])//10)
df['mid'] = df['testId'].apply(lambda x : int(x[-3:]))
df['problem_number'] = df['assessmentItemID'].apply(lambda x : int(x[-3:]))
if is_train :
sub_data_path = os.path.join("/opt/ml/input/data/train_dataset/test_data.csv") # .csv빼고
sub_df = pd.read_csv(sub_data_path)
full_df = pd.concat([df,sub_df[sub_df['answerCode']!= -1]])
else :
                sub_data_path = os.path.join(self.args.asset_dir,"train_data_FE.csv") # without the .csv extension
sub_df = pd.read_csv(sub_data_path)
full_df = pd.concat([df[df['answerCode']!= -1],sub_df])
correct_t = full_df.groupby(['testId'])['answerCode'].agg(['mean', 'sum'])
correct_t.columns = ["test_mean", 'test_sum']
correct_k = full_df.groupby(['KnowledgeTag'])['answerCode'].agg(['mean', 'sum'])
correct_k.columns = ["tag_mean", 'tag_sum']
correct_a = full_df.groupby(['assessmentItemID'])['answerCode'].agg(['mean', 'sum'])
correct_a.columns = ["ass_mean", 'ass_sum']
correct_p = full_df.groupby(['problem_number'])['answerCode'].agg(['mean', 'sum'])
correct_p.columns = ["prb_mean", 'prb_sum']
correct_h = full_df.groupby(['hour'])['answerCode'].agg(['mean', 'sum'])
correct_h.columns = ["hour_mean", 'hour_sum']
correct_d = full_df.groupby(['dow'])['answerCode'].agg(['mean', 'sum'])
correct_d.columns = ["dow_mean", 'dow_sum']
df = pd.merge(df, correct_t, on=['testId'], how="left")
df = | pd.merge(df, correct_k, on=['KnowledgeTag'], how="left") | pandas.merge |
from __future__ import division
from datetime import datetime
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
import pandas as pd
import numpy as np
import random
from nose.tools import assert_almost_equal as aae
import bt
import bt.algos as algos
def test_algo_name():
class TestAlgo(algos.Algo):
pass
actual = TestAlgo()
assert actual.name == 'TestAlgo'
class DummyAlgo(algos.Algo):
def __init__(self, return_value=True):
self.return_value = return_value
self.called = False
def __call__(self, target):
self.called = True
return self.return_value
def test_algo_stack():
algo1 = DummyAlgo(return_value=True)
algo2 = DummyAlgo(return_value=False)
algo3 = DummyAlgo(return_value=True)
target = mock.MagicMock()
stack = bt.AlgoStack(algo1, algo2, algo3)
actual = stack(target)
assert not actual
assert algo1.called
assert algo2.called
assert not algo3.called
def test_print_temp_data():
target = mock.MagicMock()
target.temp={}
target.temp['selected'] = ['c1','c2']
target.temp['weights'] = [0.5,0.5]
algo = algos.PrintTempData()
assert algo( target )
algo = algos.PrintTempData( 'Selected: {selected}')
assert algo( target )
def test_print_info():
target = bt.Strategy('s', [])
target.temp={}
algo = algos.PrintInfo()
assert algo( target )
algo = algos.PrintInfo( '{now}: {name}')
assert algo( target )
def test_run_once():
algo = algos.RunOnce()
assert algo(None)
assert not algo(None)
assert not algo(None)
def test_run_period():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunPeriod()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
target.now = None
assert not algo(target)
# run on first date
target.now = dts[0]
assert not algo(target)
# run on first supplied date
target.now = dts[1]
assert algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert not algo(target)
algo = algos.RunPeriod(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
# run on first date
target.now = dts[0]
assert not algo(target)
# first supplied date
target.now = dts[1]
assert not algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert algo(target)
# date not in index
target.now = datetime(2009, 2, 15)
assert not algo(target)
def test_run_daily():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunDaily()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('',[algo]),
data
)
target.data = backtest.data
target.now = dts[1]
assert algo(target)
def test_run_weekly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunWeekly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert not algo(target)
# new week
target.now = dts[3]
assert algo(target)
algo = algos.RunWeekly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert algo(target)
# new week
target.now = dts[3]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8),datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_monthly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunMonthly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert not algo(target)
# new month
target.now = dts[31]
assert algo(target)
algo = algos.RunMonthly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert algo(target)
# new month
target.now = dts[31]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_quarterly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunQuarterly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert not algo(target)
# new quarter
target.now = dts[90]
assert algo(target)
algo = algos.RunQuarterly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert algo(target)
# new quarter
target.now = dts[90]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_yearly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunYearly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert not algo(target)
# new year
target.now = dts[365]
assert algo(target)
algo = algos.RunYearly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert algo(target)
# new year
target.now = dts[365]
assert not algo(target)
def test_run_on_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunOnDate('2010-01-01', '2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
def test_run_if_out_of_bounds():
algo = algos.RunIfOutOfBounds(0.5)
dts = pd.date_range('2010-01-01', periods=3)
s = bt.Strategy('s')
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.temp['selected'] = ['c1', 'c2']
s.temp['weights'] = {'c1': .5, 'c2':.5}
s.update(dts[0])
s.children['c1'] = bt.core.SecurityBase('c1')
s.children['c2'] = bt.core.SecurityBase('c2')
s.children['c1']._weight = 0.5
s.children['c2']._weight = 0.5
assert not algo(s)
s.children['c1']._weight = 0.25
s.children['c2']._weight = 0.75
assert not algo(s)
s.children['c1']._weight = 0.24
s.children['c2']._weight = 0.76
assert algo(s)
s.children['c1']._weight = 0.75
s.children['c2']._weight = 0.25
assert not algo(s)
s.children['c1']._weight = 0.76
s.children['c2']._weight = 0.24
assert algo(s)
def test_run_after_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunAfterDate('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-03')
assert algo(target)
def test_run_after_days():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunAfterDays(3)
assert not algo(target)
assert not algo(target)
assert not algo(target)
assert algo(target)
def test_set_notional():
algo = algos.SetNotional('notional')
s = bt.FixedIncomeStrategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
notional = pd.Series(index=dts[:2], data=[1e6, 5e6])
s.setup( data, notional = notional )
s.update(dts[0])
assert algo(s)
assert s.temp['notional_value'] == 1e6
s.update(dts[1])
assert algo(s)
assert s.temp['notional_value'] == 5e6
s.update(dts[2])
assert not algo(s)
def test_rebalance():
algo = algos.Rebalance()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c1 = s['c1']
assert c1.value == 1000
assert c1.position == 10
assert c1.weight == 1.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 1000
assert c2.position == 10
assert c2.weight == 1.
def test_rebalance_with_commissions():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 999
assert s.capital == 99
c1 = s['c1']
assert c1.value == 900
assert c1.position == 9
assert c1.weight == 900 / 999.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 997
assert s.capital == 97
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 900
assert c2.position == 9
assert c2.weight == 900. / 997
def test_rebalance_with_cash():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
# set cash amount
s.temp['cash'] = 0.5
assert algo(s)
assert s.value == 999
assert s.capital == 599
c1 = s['c1']
assert c1.value == 400
assert c1.position == 4
assert c1.weight == 400.0 / 999
s.temp['weights'] = {'c2': 1}
# change cash amount
s.temp['cash'] = 0.25
assert algo(s)
assert s.value == 997
assert s.capital == 297
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 700
assert c2.position == 7
assert c2.weight == 700.0 / 997
def test_rebalance_updatecount():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.use_integer_positions(False)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3', 'c4','c5'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 0.25, 'c2':0.25, 'c3':0.25, 'c4':0.25}
update = bt.core.SecurityBase.update
bt.core.SecurityBase._update_call_count = 0
def side_effect(self, *args, **kwargs):
bt.core.SecurityBase._update_call_count += 1
return update(self, *args, **kwargs)
with mock.patch.object(bt.core.SecurityBase, 'update', side_effect) as mock_update:
assert algo(s)
assert s.value == 1000
assert s.capital == 0
# Update is called once when each weighted security is created (4)
# and once for each security after all allocations are made (4)
assert bt.core.SecurityBase._update_call_count == 8
s.update(dts[1])
s.temp['weights'] = {'c1': 0.5, 'c2':0.5}
update = bt.core.SecurityBase.update
bt.core.SecurityBase._update_call_count = 0
def side_effect(self, *args, **kwargs):
bt.core.SecurityBase._update_call_count += 1
return update(self, *args, **kwargs)
with mock.patch.object(bt.core.SecurityBase, 'update', side_effect) as mock_update:
assert algo(s)
# Update is called once for each weighted security before allocation (4)
# and once for each security after all allocations are made (4)
assert bt.core.SecurityBase._update_call_count == 8
s.update(dts[2])
s.temp['weights'] = {'c1': 0.25, 'c2':0.25, 'c3':0.25, 'c4':0.25}
update = bt.core.SecurityBase.update
bt.core.SecurityBase._update_call_count = 0
def side_effect(self, *args, **kwargs):
bt.core.SecurityBase._update_call_count += 1
return update(self, *args, **kwargs)
with mock.patch.object(bt.core.SecurityBase, 'update', side_effect) as mock_update:
assert algo(s)
# Update is called once for each weighted security before allocation (2)
# and once for each security after all allocations are made (4)
assert bt.core.SecurityBase._update_call_count == 6
def test_rebalance_fixedincome():
algo = algos.Rebalance()
c1 = bt.Security('c1')
c2 = bt.CouponPayingSecurity('c2')
s = bt.FixedIncomeStrategy('s', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
coupons = pd.DataFrame(index=dts, columns=['c2'], data=0)
s.setup(data, coupons=coupons)
s.update(dts[0])
s.temp['notional_value'] = 1000
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 0.
assert s.notional_value == 1000
assert s.capital == -1000
c1 = s['c1']
assert c1.value == 1000
assert c1.notional_value == 1000
assert c1.position == 10
assert c1.weight == 1.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 0.
assert s.notional_value == 1000
assert s.capital == -1000*100
c2 = s['c2']
assert c1.value == 0
assert c1.notional_value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 1000*100
assert c2.notional_value == 1000
assert c2.position == 1000
assert c2.weight == 1.
def test_select_all():
algo = algos.SelectAll()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# make sure don't keep nan
s.update(dts[1])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo2 = algos.SelectAll(include_no_data=True)
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectAll(include_negative=True)
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_randomly_n_none():
algo = algos.SelectRandomly(n=None) # Behaves like SelectAll
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp.pop('selected')
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# make sure don't keep nan
s.update(dts[1])
assert algo(s)
selected = s.temp.pop('selected')
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo2 = algos.SelectRandomly(n=None, include_no_data=True)
assert algo2(s)
selected = s.temp.pop('selected')
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp.pop('selected')
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectRandomly(n=None, include_negative=True)
assert algo3(s)
selected = s.temp.pop('selected')
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_randomly():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100.)
data['c1'][dts[0]] = np.nan
data['c2'][dts[0]] = 95
data['c3'][dts[0]] = -5
s.setup(data)
s.update(dts[0])
algo = algos.SelectRandomly(n=1)
assert algo(s)
assert s.temp.pop('selected') == ['c2']
random.seed(1000)
algo = algos.SelectRandomly(n=1, include_negative=True)
assert algo(s)
assert s.temp.pop('selected') == ['c3']
random.seed(1009)
algo = algos.SelectRandomly(n=1, include_no_data=True)
assert algo(s)
assert s.temp.pop('selected') == ['c1']
random.seed(1009)
# If selected already set, it will further filter it
s.temp['selected'] = ['c2']
algo = algos.SelectRandomly(n=1, include_no_data=True)
assert algo(s)
assert s.temp.pop('selected') == ['c2']
def test_select_these():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
s.setup(data)
s.update(dts[0])
algo = algos.SelectThese( ['c1', 'c2'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
algo = algos.SelectThese( ['c1'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c1' in selected
# make sure don't keep nan
s.update(dts[1])
algo = algos.SelectThese( ['c1', 'c2'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo2 = algos.SelectThese( ['c1', 'c2'], include_no_data=True)
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectThese(['c1', 'c2'], include_negative=True)
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_where_all():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
where = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=True)
s.setup(data, where = where)
s.update(dts[0])
algo = algos.SelectWhere('where')
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# make sure don't keep nan
s.update(dts[1])
algo = algos.SelectThese( ['c1', 'c2'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo2 = algos.SelectWhere('where', include_no_data=True)
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectWhere('where', include_negative=True)
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_where():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
where = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=True)
where.loc[ dts[1] ] = False
where['c1'].loc[ dts[2] ] = False
algo = algos.SelectWhere('where')
s.setup(data, where=where)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
s.update(dts[1])
assert algo(s)
assert s.temp['selected'] == []
s.update(dts[2])
assert algo(s)
assert s.temp['selected'] == ['c2']
def test_select_where_legacy():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
where = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=True)
where.loc[ dts[1] ] = False
where['c1'].loc[ dts[2] ] = False
algo = algos.SelectWhere(where)
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
s.update(dts[1])
assert algo(s)
assert s.temp['selected'] == []
s.update(dts[2])
assert algo(s)
assert s.temp['selected'] == ['c2']
def test_select_regex():
s = bt.Strategy('s')
algo = algos.SelectRegex( 'c1' )
s.temp['selected'] = ['a1', 'c1', 'c2', 'c11', 'cc1']
assert algo( s )
assert s.temp['selected'] == ['c1', 'c11', 'cc1']
algo = algos.SelectRegex( '^c1$' )
assert algo( s )
assert s.temp['selected'] == ['c1']
def test_resolve_on_the_run():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'b1'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c2'][dts[2]] = -5
on_the_run = pd.DataFrame(index=dts, columns=['c'], data='c1')
on_the_run.loc[dts[2], 'c'] = 'c2'
s.setup(data, on_the_run = on_the_run)
s.update(dts[0])
s.temp['selected'] = ['c', 'b1']
algo = algos.ResolveOnTheRun( 'on_the_run' )
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'b1' in selected
# make sure don't keep nan
s.update(dts[1])
s.temp['selected'] = ['c', 'b1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'b1' in selected
# if specify include_no_data then 2
algo2 = algos.ResolveOnTheRun('on_the_run', include_no_data=True)
s.temp['selected'] = ['c', 'b1']
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'b1' in selected
# behavior on negative prices
s.update(dts[2])
s.temp['selected'] = ['c', 'b1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'b1' in selected
algo3 = algos.ResolveOnTheRun('on_the_run', include_negative=True)
s.temp['selected'] = ['c', 'b1']
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c2' in selected
assert 'b1' in selected
def test_select_types():
c1 = bt.Security('c1')
c2 = bt.CouponPayingSecurity('c2')
c3 = bt.HedgeSecurity('c3')
c4 = bt.CouponPayingHedgeSecurity('c4')
c5 = bt.FixedIncomeSecurity('c5')
s = bt.Strategy('p', children = [c1, c2, c3, c4, c5])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3', 'c4', 'c5'], data=100.)
coupons = pd.DataFrame(index=dts, columns=['c2', 'c4'], data=0.)
s.setup(data, coupons = coupons)
i = 0
s.update(dts[i])
algo = algos.SelectTypes(include_types=(bt.Security, bt.HedgeSecurity), exclude_types=())
assert algo(s)
assert set(s.temp.pop('selected')) == set(['c1', 'c3'])
algo = algos.SelectTypes(include_types=(bt.core.SecurityBase,), exclude_types=(bt.CouponPayingSecurity,))
assert algo(s)
assert set(s.temp.pop('selected')) == set(['c1', 'c3', 'c5'])
s.temp['selected'] = ['c1', 'c2', 'c3']
algo = algos.SelectTypes(include_types=(bt.core.SecurityBase,))
assert algo(s)
assert set(s.temp.pop('selected')) == set(['c1', 'c2', 'c3'])
def test_weight_equally():
algo = algos.WeighEqually()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.update(dts[0])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.5
assert 'c2' in weights
assert weights['c2'] == 0.5
def test_weight_specified():
algo = algos.WeighSpecified(c1=0.6, c2=0.4)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.update(dts[0])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.6
assert 'c2' in weights
assert weights['c2'] == 0.4
def test_scale_weights():
s = bt.Strategy('s')
algo = algos.ScaleWeights( -0.5 )
s.temp['weights'] = {'c1': 0.5, 'c2': -0.4, 'c3':0 }
assert algo( s )
assert s.temp['weights'] == {'c1':-0.25, 'c2':0.2, 'c3':0}
def test_select_has_data():
algo = algos.SelectHasData(min_count=3, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=10)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[0]] = np.nan
data['c1'].loc[dts[1]] = np.nan
s.setup(data)
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
def test_select_has_data_preselected():
algo = algos.SelectHasData(min_count=3, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[0]] = np.nan
data['c1'].loc[dts[1]] = np.nan
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 0
@mock.patch('ffn.calc_erc_weights')
def test_weigh_erc(mock_erc):
algo = algos.WeighERC(lookback=pd.DateOffset(days=5))
mock_erc.return_value = pd.Series({'c1': 0.3, 'c2': 0.7})
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
assert mock_erc.called
rets = mock_erc.call_args[0][0]
assert len(rets) == 4
assert 'c1' in rets
assert 'c2' in rets
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.3
assert weights['c2'] == 0.7
def test_weigh_target():
algo = algos.WeighTarget('target')
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
target = pd.DataFrame(index=dts[:2], columns=['c1', 'c2'], data=0.5)
target['c1'].loc[dts[1]] = 1.0
target['c2'].loc[dts[1]] = 0.0
s.setup( data, target = target )
s.update(dts[0])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.5
assert weights['c2'] == 0.5
s.update(dts[1])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 1.0
assert weights['c2'] == 0.0
s.update(dts[2])
assert not algo(s)
def test_weigh_inv_vol():
algo = algos.WeighInvVol(lookback=pd.DateOffset(days=5))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
# high vol c1
data['c1'].loc[dts[1]] = 105
data['c1'].loc[dts[2]] = 95
data['c1'].loc[dts[3]] = 105
data['c1'].loc[dts[4]] = 95
# low vol c2
data['c2'].loc[dts[1]] = 100.1
data['c2'].loc[dts[2]] = 99.9
data['c2'].loc[dts[3]] = 100.1
data['c2'].loc[dts[4]] = 99.9
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c2'] > weights['c1']
aae(weights['c1'], 0.020, 3)
aae(weights['c2'], 0.980, 3)
@mock.patch('ffn.calc_mean_var_weights')
def test_weigh_mean_var(mock_mv):
algo = algos.WeighMeanVar(lookback=pd.DateOffset(days=5))
mock_mv.return_value = pd.Series({'c1': 0.3, 'c2': 0.7})
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
assert mock_mv.called
rets = mock_mv.call_args[0][0]
assert len(rets) == 4
assert 'c1' in rets
assert 'c2' in rets
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.3
assert weights['c2'] == 0.7
def test_weigh_randomly():
s = bt.Strategy('s')
s.temp['selected'] = ['c1', 'c2', 'c3']
algo = algos.WeighRandomly()
assert algo(s)
weights = s.temp['weights']
assert len( weights ) == 3
assert sum( weights.values() ) == 1.
algo = algos.WeighRandomly( (0.3,0.5), 0.95)
assert algo(s)
weights = s.temp['weights']
assert len( weights ) == 3
aae( sum( weights.values() ), 0.95 )
for c in s.temp['selected']:
assert weights[c] <= 0.5
assert weights[c] >= 0.3
def test_set_stat():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[1]] = 105
data['c2'].loc[dts[1]] = 95
stat = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=4.)
stat['c1'].loc[dts[1]] = 5.
stat['c2'].loc[dts[1]] = 6.
algo = algos.SetStat( 'test_stat' )
s.setup(data, test_stat = stat)
s.update(dts[0])
print()
print(s.get_data('test_stat'))
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 4.
assert stat['c2'] == 4.
s.update(dts[1])
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 5.
assert stat['c2'] == 6.
def test_set_stat_legacy():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[1]] = 105
data['c2'].loc[dts[1]] = 95
stat = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=4.)
stat['c1'].loc[dts[1]] = 5.
stat['c2'].loc[dts[1]] = 6.
algo = algos.SetStat( stat )
s.setup(data)
s.update(dts[0])
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 4.
assert stat['c2'] == 4.
s.update(dts[1])
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 5.
assert stat['c2'] == 6.
def test_stat_total_return():
algo = algos.StatTotalReturn(lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
stat = s.temp['stat']
assert len(stat) == 2
assert stat['c1'] == 105.0 / 100 - 1
assert stat['c2'] == 95.0 / 100 - 1
def test_select_n():
algo = algos.SelectN(n=1, sort_descending=True)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['stat'] = data.calc_total_return()
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c1' in selected
algo = algos.SelectN(n=1, sort_descending=False)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
    # returns the 2 available if all_or_none is false
algo = algos.SelectN(n=3, sort_descending=False)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
    # returns 0 if all_or_none is true
algo = algos.SelectN(n=3, sort_descending=False, all_or_none=True)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 0
def test_select_n_perc():
algo = algos.SelectN(n=0.5, sort_descending=True)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['stat'] = data.calc_total_return()
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c1' in selected
def test_select_momentum():
algo = algos.SelectMomentum(n=1, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
actual = s.temp['selected']
assert len(actual) == 1
assert 'c1' in actual
def test_limit_weights():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.temp['weights'] = {'c1': 0.6, 'c2':0.2, 'c3':0.2}
algo = algos.LimitWeights(0.5)
assert algo(s)
w = s.temp['weights']
assert w['c1'] == 0.5
assert w['c2'] == 0.25
assert w['c3'] == 0.25
algo = algos.LimitWeights(0.3)
assert algo(s)
w = s.temp['weights']
assert w == {}
s.temp['weights'] = {'c1': 0.4, 'c2':0.3, 'c3':0.3}
algo = algos.LimitWeights(0.5)
assert algo(s)
w = s.temp['weights']
assert w['c1'] == 0.4
assert w['c2'] == 0.3
assert w['c3'] == 0.3
def test_limit_deltas():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.temp['weights'] = {'c1': 1}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert w['c1'] == 0.1
s.temp['weights'] = {'c1': 0.05}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert w['c1'] == 0.05
s.temp['weights'] = {'c1': 0.5, 'c2': 0.5}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.1
assert w['c2'] == 0.1
s.temp['weights'] = {'c1': 0.5, 'c2': -0.5}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.1
assert w['c2'] == -0.1
s.temp['weights'] = {'c1': 0.5, 'c2': -0.5}
algo = algos.LimitDeltas({'c1': 0.1})
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.1
assert w['c2'] == -0.5
s.temp['weights'] = {'c1': 0.5, 'c2': -0.5}
algo = algos.LimitDeltas({'c1': 0.1, 'c2': 0.3})
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.1
assert w['c2'] == -0.3
    # set existing weights
s.children['c1'] = bt.core.SecurityBase('c1')
s.children['c1']._weight = 0.3
s.children['c2'] = bt.core.SecurityBase('c2')
s.children['c2']._weight = -0.7
s.temp['weights'] = {'c1': 0.5, 'c2': -0.5}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.4
assert w['c2'] == -0.6
def test_rebalance_over_time():
target = mock.MagicMock()
rb = mock.MagicMock()
algo = algos.RebalanceOverTime(n=2)
# patch in rb function
algo._rb = rb
target.temp = {}
target.temp['weights'] = {'a': 1, 'b': 0}
a = mock.MagicMock()
a.weight = 0.
b = mock.MagicMock()
b.weight = 1.
target.children = {'a': a, 'b': b}
assert algo(target)
w = target.temp['weights']
assert len(w) == 2
assert w['a'] == 0.5
assert w['b'] == 0.5
assert rb.called
called_tgt = rb.call_args[0][0]
called_tgt_w = called_tgt.temp['weights']
assert len(called_tgt_w) == 2
assert called_tgt_w['a'] == 0.5
assert called_tgt_w['b'] == 0.5
# update weights for next call
a.weight = 0.5
b.weight = 0.5
# clear out temp - same as the Strategy would do
target.temp = {}
assert algo(target)
w = target.temp['weights']
assert len(w) == 2
assert w['a'] == 1.
assert w['b'] == 0.
assert rb.call_count == 2
# update weights for next call
# should do nothing now
a.weight = 1
b.weight = 0
# clear out temp - same as the Strategy would do
target.temp = {}
assert algo(target)
# no diff in call_count since last time
assert rb.call_count == 2
def test_require():
target = mock.MagicMock()
target.temp = {}
algo = algos.Require(lambda x: len(x) > 0, 'selected')
assert not algo(target)
target.temp['selected'] = []
assert not algo(target)
target.temp['selected'] = ['a', 'b']
assert algo(target)
def test_run_every_n_periods():
target = mock.MagicMock()
target.temp = {}
algo = algos.RunEveryNPeriods(n=3, offset=0)
target.now = pd.to_datetime('2010-01-01')
assert algo(target)
# run again w/ no date change should not trigger
assert not algo(target)
target.now = pd.to_datetime('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
target.now = pd.to_datetime('2010-01-04')
assert algo(target)
target.now = pd.to_datetime('2010-01-05')
assert not algo(target)
def test_run_every_n_periods_offset():
target = mock.MagicMock()
target.temp = {}
algo = algos.RunEveryNPeriods(n=3, offset=1)
target.now = pd.to_datetime('2010-01-01')
assert not algo(target)
# run again w/ no date change should not trigger
assert not algo(target)
target.now = pd.to_datetime('2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
target.now = pd.to_datetime('2010-01-04')
assert not algo(target)
target.now = pd.to_datetime('2010-01-05')
assert algo(target)
def test_not():
target = mock.MagicMock()
target.temp = {}
# runs on every date except 2018-01-02
runOnDateAlgo = algos.RunOnDate(pd.to_datetime('2018-01-02'))
notAlgo = algos.Not(runOnDateAlgo)
target.now = pd.to_datetime('2018-01-01')
assert notAlgo(target)
target.now = pd.to_datetime('2018-01-02')
assert not notAlgo(target)
def test_or():
target = mock.MagicMock()
target.temp = {}
# run on 2018-01-02
runOnDateAlgo = algos.RunOnDate(pd.to_datetime('2018-01-02'))
runOnDateAlgo2 = algos.RunOnDate(pd.to_datetime('2018-01-03'))
runOnDateAlgo3 = algos.RunOnDate(pd.to_datetime('2018-01-04'))
runOnDateAlgo4 = algos.RunOnDate(pd.to_datetime('2018-01-04'))
orAlgo = algos.Or([runOnDateAlgo, runOnDateAlgo2, runOnDateAlgo3, runOnDateAlgo4])
# verify it returns false when none of the algos is true
target.now = pd.to_datetime('2018-01-01')
assert not orAlgo(target)
# verify it returns true when the first is true
target.now = pd.to_datetime('2018-01-02')
assert orAlgo(target)
# verify it returns true when the second is true
target.now = pd.to_datetime('2018-01-03')
assert orAlgo(target)
# verify it returns true when both algos return true
target.now = pd.to_datetime('2018-01-04')
assert orAlgo(target)
def test_TargetVol():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=7)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
# high vol c1
data.loc[dts[0],'c1'] = 95
data.loc[dts[1],'c1'] = 105
data.loc[dts[2],'c1'] = 95
data.loc[dts[3],'c1'] = 105
data.loc[dts[4],'c1'] = 95
data.loc[dts[5],'c1'] = 105
data.loc[dts[6],'c1'] = 95
# low vol c2
data.loc[dts[0], 'c2'] = 99
data.loc[dts[1], 'c2'] = 101
data.loc[dts[2], 'c2'] = 99
data.loc[dts[3], 'c2'] = 101
data.loc[dts[4], 'c2'] = 99
data.loc[dts[5], 'c2'] = 101
data.loc[dts[6], 'c2'] = 99
targetVolAlgo = algos.TargetVol(
0.1,
lookback=pd.DateOffset(days=5),
lag=pd.DateOffset(days=1),
covar_method='standard',
annualization_factor=1
)
s.setup(data)
s.update(dts[6])
s.temp['weights'] = {'c1':0.5, 'c2':0.5}
assert targetVolAlgo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert np.isclose(weights['c2'],weights['c1'])
unannualized_c2_weight = weights['c1']
targetVolAlgo = algos.TargetVol(
0.1*np.sqrt(252),
lookback=pd.DateOffset(days=5),
lag=pd.DateOffset(days=1),
covar_method='standard',
annualization_factor=252
)
s.setup(data)
s.update(dts[6])
s.temp['weights'] = {'c1': 0.5, 'c2': 0.5}
assert targetVolAlgo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert np.isclose(weights['c2'], weights['c1'])
assert np.isclose(unannualized_c2_weight, weights['c2'])
def test_PTE_Rebalance():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=30*4)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
# high vol c1
# low vol c2
for i,dt in enumerate(dts[:-2]):
if i % 2 == 0:
data.loc[dt,'c1'] = 95
data.loc[dt,'c2'] = 101
else:
data.loc[dt, 'c1'] = 105
data.loc[dt, 'c2'] = 99
dt = dts[-2]
data.loc[dt,'c1'] = 115
data.loc[dt,'c2'] = 97
s.setup(data)
s.update(dts[-2])
s.adjust(1000000)
s.rebalance(0.4,'c1')
s.rebalance(0.6,'c2')
wdf = pd.DataFrame(
np.zeros(data.shape),
columns=data.columns,
index=data.index
)
wdf['c1'] = 0.5
wdf['c2'] = 0.5
PTE_rebalance_Algo = bt.algos.PTE_Rebalance(
0.01,
wdf,
lookback=pd.DateOffset(months=3),
lag=pd.DateOffset(days=1),
covar_method='standard',
annualization_factor=252
)
assert PTE_rebalance_Algo(s)
s.rebalance(0.5, 'c1')
s.rebalance(0.5, 'c2')
assert not PTE_rebalance_Algo(s)
def test_close_positions_after_date():
c1 = bt.Security('c1')
c2 = bt.Security('c2')
c3 = bt.Security('c3')
s = bt.Strategy('s', children = [c1, c2, c3])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100)
c1 = s['c1']
c2 = s['c2']
c3 = s['c3']
cutoffs= pd.DataFrame( { 'date' : [ dts[1], dts[2] ] }, index = ['c1','c2'] )
algo = algos.ClosePositionsAfterDates( 'cutoffs' )
s.setup(data, cutoffs=cutoffs)
s.update(dts[0])
s.transact( 100, 'c1')
s.transact( 100, 'c2')
s.transact( 100, 'c3')
algo(s)
assert c1.position == 100
assert c2.position == 100
assert c3.position == 100
# Don't run anything on dts[1], even though that's when c1 closes
s.update( dts[2])
algo(s)
assert c1.position == 0
assert c2.position == 0
assert c3.position == 100
assert s.perm['closed'] == set(['c1', 'c2'])
def test_roll_positions_after_date():
c1 = bt.Security('c1')
c2 = bt.Security('c2')
c3 = bt.Security('c3')
s = bt.Strategy('s', children = [c1, c2, c3])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100)
c1 = s['c1']
c2 = s['c2']
c3 = s['c3']
roll = pd.DataFrame( { 'date' : [ dts[1], dts[2] ], 'target' : [ 'c3', 'c1' ], 'factor' : [ 0.5, 2.0 ] }, index = ['c1','c2'] )
algo = algos.RollPositionsAfterDates( 'roll' )
s.setup(data, roll=roll)
s.update(dts[0])
s.transact( 100, 'c1')
s.transact( 100, 'c2')
s.transact( 100, 'c3')
algo(s)
assert c1.position == 100
assert c2.position == 100
assert c3.position == 100
# Don't run anything on dts[1], even though that's when c1 rolls
s.update( dts[2])
algo(s)
assert c1.position == 200 # From c2
assert c2.position == 0
assert c3.position == 100 + 50
assert s.perm['rolled'] == set(['c1', 'c2'])
def test_replay_transactions():
c1 = bt.Security('c1')
c2 = bt.Security('c2')
s = bt.Strategy('s', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100)
c1 = s['c1']
c2 = s['c2']
transactions = pd.DataFrame( [ ( pd.Timestamp( '2009-12-01 00'), 'c1', 100, 99.5),
( pd.Timestamp( '2010-01-01 10'), 'c1', -100, 101),
( pd.Timestamp( '2010-01-02 00'), 'c2', 50, 103)
],
columns = ['Date', 'Security', 'quantity', 'price'])
transactions = transactions.set_index( ['Date','Security'])
algo = algos.ReplayTransactions( 'transactions' )
s.setup(data, bidoffer={}, transactions=transactions) # Pass bidoffer so it will track bidoffer paid
s.adjust(1000)
s.update(dts[0])
algo(s)
assert c1.position == 100
assert c2.position == 0
assert c1.bidoffer_paid == -50
s.update(dts[1])
algo(s)
assert c1.position == 0
assert c2.position == 50
assert c1.bidoffer_paid == -100
assert c2.bidoffer_paid == 150
def test_replay_transactions_consistency():
c1 = bt.Security('c1')
c2 = bt.Security('c2')
s = bt.Strategy('s', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100)
transactions = pd.DataFrame( [ ( pd.Timestamp( '2010-01-01 00'), 'c1', -100., 101.),
( pd.Timestamp( '2010-01-02 00'), 'c2', 50., 103.)
],
columns = ['Date', 'Security', 'quantity', 'price'])
transactions = transactions.set_index( ['Date','Security'])
algo = algos.ReplayTransactions( 'transactions' )
strategy = bt.Strategy('strategy', algos = [ algo ], children = [c1, c2])
backtest = bt.backtest.Backtest(strategy, data, name='Test',
additional_data={'bidoffer':{}, 'transactions':transactions})
out = bt.run(backtest)
t1 = transactions.sort_index(axis=1)
t2 = out.get_transactions().sort_index(axis=1)
assert t1.equals( t2 )
def test_simulate_rfq_transactions():
c1 = bt.Security('c1')
c2 = bt.Security('c2')
s = bt.Strategy('s', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100)
c1 = s['c1']
c2 = s['c2']
rfqs = pd.DataFrame( [ ( 'A', pd.Timestamp( '2009-12-01 00'), 'c1', 100),
( 'B', pd.Timestamp( '2010-01-01 10'), 'c1', -100),
( 'C', pd.Timestamp( '2010-01-01 12'), 'c1', 75),
( 'D', pd.Timestamp( '2010-01-02 00'), 'c2', 50)
],
columns = ['id', 'Date', 'Security', 'quantity'])
rfqs = rfqs.set_index(['Date','Security'])
def model( rfqs, target ):
# Dummy model - in practice this model would rely on positions and values in target
transactions = rfqs[ ['quantity']]
prices = {'A' : 99.5, 'B' : 101, 'D':103}
transactions[ 'price' ] = rfqs.id.apply( lambda x : prices.get(x) )
return transactions.dropna()
algo = algos.SimulateRFQTransactions( 'rfqs', model )
s.setup(data, bidoffer={}, rfqs=rfqs) # Pass bidoffer so it will track bidoffer paid
s.adjust(1000)
s.update(dts[0])
algo(s)
assert c1.position == 100
assert c2.position == 0
assert c1.bidoffer_paid == -50
s.update(dts[1])
algo(s)
assert c1.position == 0
assert c2.position == 50
assert c1.bidoffer_paid == -100
assert c2.bidoffer_paid == 150
def test_update_risk():
c1 = bt.Security('c1')
c2 = bt.Security('c2')
s = bt.Strategy('s', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'].loc[dts[1]] = 105
data['c2'].loc[dts[1]] = 95
c1 = s['c1']
c2 = s['c2']
algo = algos.UpdateRisk('Test', history=False)
s.setup(data, unit_risk={'Test':data})
s.adjust(1000)
s.update(dts[0])
assert algo( s )
assert s.risk['Test'] == 0
assert c1.risk['Test'] == 0
assert c2.risk['Test'] == 0
s.transact( 1, 'c1')
s.transact( 5, 'c2')
assert algo( s )
assert s.risk['Test'] == 600
assert c1.risk['Test'] == 100
assert c2.risk['Test'] == 500
s.update(dts[1])
assert algo( s )
assert s.risk['Test'] == 105 + 5*95
assert c1.risk['Test'] == 105
assert c2.risk['Test'] == 5*95
assert not hasattr( s, 'risks' )
assert not hasattr( c1, 'risks' )
assert not hasattr( c2, 'risks' )
def test_update_risk_history_1():
c1 = bt.Security('c1')
c2 = bt.Security('c2')
s = bt.Strategy('s', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'].loc[dts[1]] = 105
data['c2'].loc[dts[1]] = 95
c1 = s['c1']
c2 = s['c2']
algo = algos.UpdateRisk('Test', history=1)
s.setup(data, unit_risk={'Test':data})
s.adjust(1000)
s.update(dts[0])
assert algo( s )
assert s.risks['Test'][0] == 0
s.transact( 1, 'c1')
s.transact( 5, 'c2')
assert algo( s )
assert s.risks['Test'][0] == 600
s.update(dts[1])
assert algo( s )
assert s.risks['Test'][0] == 600
assert s.risks['Test'][1] == 105 + 5*95
assert not hasattr( c1, 'risks' )
assert not hasattr( c2, 'risks' )
def test_update_risk_history_2():
c1 = bt.Security('c1')
c2 = bt.Security('c2')
s = bt.Strategy('s', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'].loc[dts[1]] = 105
data['c2'].loc[dts[1]] = 95
c1 = s['c1']
c2 = s['c2']
algo = algos.UpdateRisk('Test', history=2)
s.setup(data, unit_risk={'Test':data})
s.adjust(1000)
s.update(dts[0])
assert algo( s )
assert s.risks['Test'][0] == 0
assert c1.risks['Test'][0] == 0
assert c2.risks['Test'][0] == 0
s.transact( 1, 'c1')
s.transact( 5, 'c2')
assert algo( s )
assert s.risks['Test'][0] == 600
assert c1.risks['Test'][0] == 100
assert c2.risks['Test'][0] == 500
s.update(dts[1])
assert algo( s )
assert s.risks['Test'][0] == 600
assert c1.risks['Test'][0] == 100
assert c2.risks['Test'][0] == 500
assert s.risks['Test'][1] == 105 + 5*95
assert c1.risks['Test'][1] == 105
assert c2.risks['Test'][1] == 5*95
def test_hedge_risk():
c1 = bt.Security('c1')
c2 = bt.Security('c2')
c3 = bt.Security('c3')
s = bt.Strategy('s', children = [c1, c2, c3])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100)
c1 = s['c1']
c2 = s['c2']
c3 = s['c3']
risk1 = | pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=0) | pandas.DataFrame |
import unittest
import qteasy as qt
import pandas as pd
from pandas import Timestamp
import numpy as np
import math
from numpy import int64
import itertools
import datetime
from qteasy.utilfuncs import list_to_str_format, regulate_date_format, time_str_format, str_to_list
from qteasy.utilfuncs import maybe_trade_day, is_market_trade_day, prev_trade_day, next_trade_day
from qteasy.utilfuncs import next_market_trade_day, unify, mask_to_signal, list_or_slice, labels_to_dict
from qteasy.utilfuncs import weekday_name, prev_market_trade_day, is_number_like, list_truncate, input_to_list
from qteasy.space import Space, Axis, space_around_centre, ResultPool
from qteasy.core import apply_loop
from qteasy.built_in import SelectingFinanceIndicator, TimingDMA, TimingMACD, TimingCDL, TimingTRIX
from qteasy.tsfuncs import income, indicators, name_change, get_bar
from qteasy.tsfuncs import stock_basic, trade_calendar, new_share, get_index
from qteasy.tsfuncs import balance, cashflow, top_list, index_indicators, composite
from qteasy.tsfuncs import future_basic, future_daily, options_basic, options_daily
from qteasy.tsfuncs import fund_basic, fund_net_value, index_basic, stock_company
from qteasy.evaluate import eval_alpha, eval_benchmark, eval_beta, eval_fv
from qteasy.evaluate import eval_info_ratio, eval_max_drawdown, eval_sharp
from qteasy.evaluate import eval_volatility
from qteasy.tafuncs import bbands, dema, ema, ht, kama, ma, mama, mavp, mid_point
from qteasy.tafuncs import mid_price, sar, sarext, sma, t3, tema, trima, wma, adx, adxr
from qteasy.tafuncs import apo, bop, cci, cmo, dx, macd, macdext, aroon, aroonosc
from qteasy.tafuncs import macdfix, mfi, minus_di, minus_dm, mom, plus_di, plus_dm
from qteasy.tafuncs import ppo, roc, rocp, rocr, rocr100, rsi, stoch, stochf, stochrsi
from qteasy.tafuncs import trix, ultosc, willr, ad, adosc, obv, atr, natr, trange
from qteasy.tafuncs import avgprice, medprice, typprice, wclprice, ht_dcperiod
from qteasy.tafuncs import ht_dcphase, ht_phasor, ht_sine, ht_trendmode, cdl2crows
from qteasy.tafuncs import cdl3blackcrows, cdl3inside, cdl3linestrike, cdl3outside
from qteasy.tafuncs import cdl3starsinsouth, cdl3whitesoldiers, cdlabandonedbaby
from qteasy.tafuncs import cdladvanceblock, cdlbelthold, cdlbreakaway, cdlclosingmarubozu
from qteasy.tafuncs import cdlconcealbabyswall, cdlcounterattack, cdldarkcloudcover
from qteasy.tafuncs import cdldoji, cdldojistar, cdldragonflydoji, cdlengulfing
from qteasy.tafuncs import cdleveningdojistar, cdleveningstar, cdlgapsidesidewhite
from qteasy.tafuncs import cdlgravestonedoji, cdlhammer, cdlhangingman, cdlharami
from qteasy.tafuncs import cdlharamicross, cdlhighwave, cdlhikkake, cdlhikkakemod
from qteasy.tafuncs import cdlhomingpigeon, cdlidentical3crows, cdlinneck
from qteasy.tafuncs import cdlinvertedhammer, cdlkicking, cdlkickingbylength
from qteasy.tafuncs import cdlladderbottom, cdllongleggeddoji, cdllongline, cdlmarubozu
from qteasy.tafuncs import cdlmatchinglow, cdlmathold, cdlmorningdojistar, cdlmorningstar
from qteasy.tafuncs import cdlonneck, cdlpiercing, cdlrickshawman, cdlrisefall3methods
from qteasy.tafuncs import cdlseparatinglines, cdlshootingstar, cdlshortline, cdlspinningtop
from qteasy.tafuncs import cdlstalledpattern, cdlsticksandwich, cdltakuri, cdltasukigap
from qteasy.tafuncs import cdlthrusting, cdltristar, cdlunique3river, cdlupsidegap2crows
from qteasy.tafuncs import cdlxsidegap3methods, beta, correl, linearreg, linearreg_angle
from qteasy.tafuncs import linearreg_intercept, linearreg_slope, stddev, tsf, var, acos
from qteasy.tafuncs import asin, atan, ceil, cos, cosh, exp, floor, ln, log10, sin, sinh
from qteasy.tafuncs import sqrt, tan, tanh, add, div, max, maxindex, min, minindex, minmax
from qteasy.tafuncs import minmaxindex, mult, sub, sum
from qteasy.history import get_financial_report_type_raw_data, get_price_type_raw_data
from qteasy.history import stack_dataframes, dataframe_to_hp, HistoryPanel
from qteasy.database import DataSource
from qteasy.strategy import Strategy, SimpleTiming, RollingTiming, SimpleSelecting, FactoralSelecting
from qteasy._arg_validators import _parse_string_kwargs, _valid_qt_kwargs
from qteasy.blender import _exp_to_token, blender_parser, signal_blend
class TestCost(unittest.TestCase):
def setUp(self):
self.amounts = np.array([10000., 20000., 10000.])
self.op = np.array([0., 1., -0.33333333])
self.amounts_to_sell = np.array([0., 0., -3333.3333])
self.cash_to_spend = np.array([0., 20000., 0.])
self.prices = np.array([10., 20., 10.])
self.r = qt.Cost(0.0)
def test_rate_creation(self):
"""测试对象生成"""
print('testing rates objects\n')
self.assertIsInstance(self.r, qt.Cost, 'Type should be Cost')
self.assertEqual(self.r.buy_fix, 0)
self.assertEqual(self.r.sell_fix, 0)
def test_rate_operations(self):
"""测试交易费率对象"""
self.assertEqual(self.r['buy_fix'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['sell_fix'], 0.0, 'Item got is wrong')
self.assertEqual(self.r['buy_rate'], 0.003, 'Item got is incorrect')
self.assertEqual(self.r['sell_rate'], 0.001, 'Item got is incorrect')
self.assertEqual(self.r['buy_min'], 5., 'Item got is incorrect')
self.assertEqual(self.r['sell_min'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['slipage'], 0.0, 'Item got is incorrect')
self.assertEqual(np.allclose(self.r.calculate(self.amounts),
[0.003, 0.003, 0.003]),
True,
'fee calculation wrong')
def test_rate_fee(self):
"""测试买卖交易费率"""
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 0.
self.r.sell_min = 0.
self.r.slipage = 0.
print('\nSell result with fixed rate = 0.001 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33299.999667, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.333332999999996, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1.))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33296.67, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.33, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 32967.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997.00897308, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82053838484547, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 1:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 1))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -19999.82, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -18054., msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 54.0, msg='result incorrect')
def test_min_fee(self):
"""测试最低交易费用"""
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 300
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 985, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33033.333)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33030)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 32700)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
def test_rate_with_min(self):
"""测试最低交易费用对其他交易费率参数的影响"""
self.r.buy_rate = 0.0153
self.r.sell_rate = 0.01
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 333
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 984.9305624, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 301.3887520929774, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32999.99967)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.33333)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32996.7)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.3)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32667.0)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.0)
def test_fixed_fee(self):
"""测试固定交易费用"""
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 200
self.r.sell_fix = 150
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nselling result of fixed cost with fixed fee = 150 and moq=0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 0))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], 33183.333, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150.0, msg='result incorrect')
print('\nselling result of fixed cost with fixed fee = 150 and moq=100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3300.]), True,
f'result incorrect, {test_fixed_fee_result[0]} is not equal to [0, 0, -3300]')
self.assertAlmostEqual(test_fixed_fee_result[1], 32850., msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150., msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 990., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18200.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
def test_slipage(self):
"""测试交易滑点"""
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.slipage = 1E-9
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-9 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-9 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
print('\nselling result with fixed rate = 0.001 and slipage = 1E-9:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True,
f'{test_fixed_fee_result[0]} is not equal to [0, 0, -3333.3333]')
self.assertAlmostEqual(test_fixed_fee_result[1], 33298.88855591,
msg=f'{test_fixed_fee_result[1]} is not equal to 33298.88855591.')
self.assertAlmostEqual(test_fixed_fee_result[2], 34.44444409,
msg=f'{test_fixed_fee_result[2]} is not equal to 34.44444409.')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 996.98909294, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 60.21814121353513, msg='result incorrect')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18054.36, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 54.36, msg='result incorrect')
class TestSpace(unittest.TestCase):
def test_creation(self):
"""
test if creation of space object is fine
"""
# first group of inputs, output Space with two discr axis from [0,10]
print('testing space objects\n')
# pars_list = [[(0, 10), (0, 10)],
# [[0, 10], [0, 10]]]
#
# types_list = ['discr',
# ['discr', 'discr']]
#
# input_pars = itertools.product(pars_list, types_list)
# for p in input_pars:
# # print(p)
# s = qt.Space(*p)
# b = s.boes
# t = s.types
# # print(s, t)
# self.assertIsInstance(s, qt.Space)
# self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
# self.assertEqual(t, ['discr', 'discr'], 'types incorrect')
#
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = ['foo, bar',
['foo', 'bar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['enum', 'enum'], 'types incorrect')
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = [['discr', 'foobar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['discr', 'enum'], 'types incorrect')
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types='conti, enum')
self.assertEqual(s.types, ['conti', 'enum'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 2))
self.assertEqual(s.shape, (np.inf, 2))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(1, 2), (2, 3), (3, 4)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['discr', 'discr', 'discr'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (2, 2, 2))
self.assertEqual(s.shape, (2, 2, 2))
self.assertEqual(s.count, 8)
self.assertEqual(s.boes, [(1, 2), (2, 3), (3, 4)])
pars_list = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
pars_list = [((1, 2, 3), (2, 3, 4), (3, 4, 5))]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum'])
self.assertEqual(s.dim, 1)
self.assertEqual(s.size, (3,))
self.assertEqual(s.shape, (3,))
self.assertEqual(s.count, 3)
pars_list = ((1, 2, 3), (2, 3, 4), (3, 4, 5))
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
def test_extract(self):
"""
:return:
"""
pars_list = [(0, 10), (0, 10)]
types_list = ['discr', 'discr']
s = Space(pars=pars_list, par_types=types_list)
extracted_int, count = s.extract(3, 'interval')
extracted_int_list = list(extracted_int)
print('extracted int\n', extracted_int_list)
self.assertEqual(count, 16, 'extraction count wrong!')
self.assertEqual(extracted_int_list, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
extracted_rand, count = s.extract(10, 'rand')
extracted_rand_list = list(extracted_rand)
self.assertEqual(count, 10, 'extraction count wrong!')
print('extracted rand\n', extracted_rand_list)
for point in list(extracted_rand_list):
self.assertEqual(len(point), 2)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
extracted_int2, count = s.extract(3, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list2 = list(extracted_int2)
self.assertEqual(extracted_int_list2, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
print('extracted int list 2\n', extracted_int_list2)
self.assertIsInstance(extracted_int_list2[0][0], float)
self.assertIsInstance(extracted_int_list2[0][1], (int, int64))
extracted_rand2, count = s.extract(10, 'rand')
self.assertEqual(count, 10, 'extraction count wrong!')
extracted_rand_list2 = list(extracted_rand2)
print('extracted rand list 2:\n', extracted_rand_list2)
for point in extracted_rand_list2:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], float)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], (int, int64))
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), ('a', 'b')]
s = Space(pars=pars_list, par_types='enum, enum')
extracted_int3, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list3 = list(extracted_int3)
self.assertEqual(extracted_int_list3, [(0., 'a'), (0., 'b'), (10, 'a'), (10, 'b')],
'space extraction wrong!')
print('extracted int list 3\n', extracted_int_list3)
self.assertIsInstance(extracted_int_list3[0][0], float)
self.assertIsInstance(extracted_int_list3[0][1], str)
extracted_rand3, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list3 = list(extracted_rand3)
print('extracted rand list 3:\n', extracted_rand_list3)
for point in extracted_rand_list3:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (float, int))
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], str)
self.assertIn(point[1], ['a', 'b'])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14))]
s = Space(pars=pars_list, par_types='enum')
extracted_int4, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list4 = list(extracted_int4)
it = list(zip(extracted_int_list4, [(0, 10), (1, 'c'), ('a', 'b'), (1, 14)]))  # materialize so the assertion below still sees the pairs after the print loop
for item, item2 in it:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 4\n', extracted_int_list4)
self.assertIsInstance(extracted_int_list4[0], tuple)
extracted_rand4, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list4 = list(extracted_rand4)
print('extracted rand list 4:\n', extracted_rand_list4)
for point in extracted_rand_list4:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (int, str))
self.assertIn(point[0], [0, 1, 'a'])
self.assertIsInstance(point[1], (int, str))
self.assertIn(point[1], [10, 14, 'b', 'c'])
self.assertIn(point, [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14)), (1, 4)]
s = Space(pars=pars_list, par_types='enum, discr')
extracted_int5, count = s.extract(1, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list5 = list(extracted_int5)
for item, item2 in extracted_int_list5:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 5\n', extracted_int_list5)
self.assertIsInstance(extracted_int_list5[0], tuple)
extracted_rand5, count = s.extract(5, 'rand')
self.assertEqual(count, 5, 'extraction count wrong!')
extracted_rand_list5 = list(extracted_rand5)
print('extracted rand list 5:\n', extracted_rand_list5)
for point in extracted_rand_list5:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], tuple)
print(f'type of point[1] is {type(point[1])}')
self.assertIsInstance(point[1], (int, np.int64))
self.assertIn(point[0], [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
print(f'test incremental extraction')
pars_list = [(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)]
s = Space(pars_list)
ext, count = s.extract(64, 'interval')
self.assertEqual(count, 4096)
points = list(ext)
# all points extracted; build ten subspaces around 10 of them
# check that each subspace is a Space contained in s, extract with grid size 32 and verify the point count
for point in points[1000:1010]:
subspace = s.from_point(point, 64)
self.assertIsInstance(subspace, Space)
self.assertTrue(subspace in s)
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
ext, count = subspace.extract(32)
points = list(ext)
self.assertGreaterEqual(count, 512)
self.assertLessEqual(count, 4096)
print(f'\n---------------------------------'
f'\nthe space created around point <{point}> is'
f'\n{subspace.boes}'
f'\nand extracted {count} points, the first 5 are:'
f'\n{points[:5]}')
def test_axis_extract(self):
# test axis object with conti type
axis = Axis((0., 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'conti')
self.assertEqual(axis.axis_boe, (0., 5.))
self.assertEqual(axis.count, np.inf)
self.assertEqual(axis.size, 5.0)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [0., 1., 2., 3., 4.]))
self.assertTrue(np.allclose(axis.extract(0.5, 'int'), [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5]))
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(0 <= item <= 5) for item in extracted]))
# test axis object with discrete type
axis = Axis((1, 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'discr')
self.assertEqual(axis.axis_boe, (1, 5))
self.assertEqual(axis.count, 5)
self.assertEqual(axis.size, 5)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [1, 2, 3, 4, 5]))
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 2, 3, 4, 5]) for item in extracted]))
# test axis object with enumerate type
axis = Axis((1, 5, 7, 10, 'A', 'F'))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'enum')
self.assertEqual(axis.axis_boe, (1, 5, 7, 10, 'A', 'F'))
self.assertEqual(axis.count, 6)
self.assertEqual(axis.size, 6)
self.assertEqual(axis.extract(1, 'int'), [1, 5, 7, 10, 'A', 'F'])
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 5, 7, 10, 'A', 'F']) for item in extracted]))
def test_from_point(self):
"""测试从一个点生成一个space"""
# 生成一个space,指定space中的一个点以及distance,生成一个sub-space
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10., 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
print('create subspace from a point in space')
p = (3, 3)
distance = 2
subspace = s.from_point(p, distance)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'discr'])
self.assertEqual(subspace.dim, 2)
self.assertEqual(subspace.size, (4.0, 5))
self.assertEqual(subspace.shape, (np.inf, 5))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(1, 5), (1, 5)])
print('create subspace from a 6 dimensional discrete space')
s = Space(pars=[(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 65345616)
self.assertEqual(subspace.size, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.shape, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.count, 65345616)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace from a 6 dimensional continuous space')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 48000000)
self.assertEqual(subspace.size, (15.0, 20.0, 20.0, 20.0, 20.0, 20.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace with different distances on each dimension')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = [10, 5, 5, 10, 10, 5]
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 6000000)
self.assertEqual(subspace.size, (15.0, 10.0, 10.0, 20.0, 20.0, 10.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (195, 205), (145, 155), (140, 160), (140, 160), (145, 155)])
class TestCashPlan(unittest.TestCase):
def setUp(self):
self.cp1 = qt.CashPlan(['2012-01-01', '2010-01-01'], [10000, 20000], 0.1)
self.cp1.info()
self.cp2 = qt.CashPlan(['20100501'], 10000)
self.cp2.info()
self.cp3 = qt.CashPlan(pd.date_range(start='2019-01-01',
freq='Y',
periods=12),
[i * 1000 + 10000 for i in range(12)],
0.035)
self.cp3.info()
def test_creation(self):
self.assertIsInstance(self.cp1, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp2, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp3, qt.CashPlan, 'CashPlan object creation wrong')
# test __repr__()
print(self.cp1)
print(self.cp2)
print(self.cp3)
# test __str__()
self.cp1.info()
self.cp2.info()
self.cp3.info()
# test assertion errors
self.assertRaises(AssertionError, qt.CashPlan, '2016-01-01', [10000, 10000])
self.assertRaises(KeyError, qt.CashPlan, '2020-20-20', 10000)
def test_properties(self):
self.assertEqual(self.cp1.amounts, [20000, 10000], 'property wrong')
self.assertEqual(self.cp1.first_day, Timestamp('2010-01-01'))
self.assertEqual(self.cp1.last_day, Timestamp('2012-01-01'))
self.assertEqual(self.cp1.investment_count, 2)
self.assertEqual(self.cp1.period, 730)
self.assertEqual(self.cp1.dates, [Timestamp('2010-01-01'), Timestamp('2012-01-01')])
self.assertEqual(self.cp1.ir, 0.1)
self.assertAlmostEqual(self.cp1.closing_value, 34200)
self.assertAlmostEqual(self.cp2.closing_value, 10000)
self.assertAlmostEqual(self.cp3.closing_value, 220385.3483685)
self.assertIsInstance(self.cp1.plan, pd.DataFrame)
self.assertIsInstance(self.cp2.plan, pd.DataFrame)
self.assertIsInstance(self.cp3.plan, pd.DataFrame)
def test_operation(self):
cp_self_add = self.cp1 + self.cp1
cp_add = self.cp1 + self.cp2
cp_add_int = self.cp1 + 10000
cp_mul_int = self.cp1 * 2
cp_mul_float = self.cp2 * 1.5
cp_mul_time = 3 * self.cp2
cp_mul_time2 = 2 * self.cp1
cp_mul_time3 = 2 * self.cp3
cp_mul_float2 = 2. * self.cp3
self.assertIsInstance(cp_self_add, qt.CashPlan)
self.assertEqual(cp_self_add.amounts, [40000, 20000])
self.assertEqual(cp_add.amounts, [20000, 10000, 10000])
self.assertEqual(cp_add_int.amounts, [30000, 20000])
self.assertEqual(cp_mul_int.amounts, [40000, 20000])
self.assertEqual(cp_mul_float.amounts, [15000])
self.assertEqual(cp_mul_float.dates, [Timestamp('2010-05-01')])
self.assertEqual(cp_mul_time.amounts, [10000, 10000, 10000])
self.assertEqual(cp_mul_time.dates, [Timestamp('2010-05-01'),
Timestamp('2011-05-01'),
Timestamp('2012-04-30')])
self.assertEqual(cp_mul_time2.amounts, [20000, 10000, 20000, 10000])
self.assertEqual(cp_mul_time2.dates, [Timestamp('2010-01-01'),
Timestamp('2012-01-01'),
Timestamp('2014-01-01'),
Timestamp('2016-01-01')])
self.assertEqual(cp_mul_time3.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31'),
Timestamp('2031-12-29'),
| Timestamp('2032-12-29') | pandas.Timestamp |
from builtins import int
import pandas as pd
from datetime import datetime
import openpyxl
def excel_time_2_string(excel_time):
t = | pd.to_datetime('1899-12-30') | pandas.to_datetime |
"""
上市公司公告查询
来源:[巨潮资讯网](http://www.cninfo.com.cn/new/commonUrl?url=disclosure/list/notice-sse#)
备注
使用实际公告时间
如查询公告日期为2018-12-15 实际公告时间为2018-12-14 16:00:00
"""
import asyncio
from aiohttp.client_exceptions import ContentTypeError
import math
import time
import aiohttp
import logbook
import pandas as pd
import requests
from logbook.more import ColorizedStderrHandler
from sqlalchemy import func
from cnswd.sql.base import get_engine, get_session
from cnswd.sql.info import Disclosure
logger = logbook.Logger('公司公告')
URL = 'http://www.cninfo.com.cn/new/hisAnnouncement/query'
COLUMNS = ['序号', '股票代码', '股票简称', '公告标题', '公告时间', '下载网址']
HEADERS = {
'Host': 'www.cninfo.com.cn',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:63.0) Gecko/20100101 Firefox/63.0',
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
'Accept-Encoding': 'gzip, deflate',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'X-Requested-With': 'XMLHttpRequest',
'Connection': 'Keep-Alive',
'Referer': 'http://www.cninfo.com.cn/new/commonUrl?url=disclosure/list/notice',
}
CATEGORIES = {
'全部': None,
'年报': 'category_nbbg_szsh',
'半年报': 'category_bndbg_szsh',
'一季报': 'category_yjdbg_szsh',
'三季报': 'category_sjdbg_szsh',
'业绩预告': 'category_yjygjxz_szsh',
'权益分派': 'category_qyfpxzcs_szsh',
'董事会': 'category_dshgg_szsh',
'监事会': 'category_jshgg_szsh',
'股东大会': 'category_gddh_szsh',
'日常经营': 'category_rcjy_szsh',
'公司治理': 'category_gszl_szsh',
'中介报告': 'category_zj_szsh',
'首发': 'category_sf_szsh',
'增发': 'category_zf_szsh',
'股权激励': 'category_gqjl_szsh',
'配股': 'category_pg_szsh',
'解禁': 'category_jj_szsh',
'债券': 'category_zq_szsh',
'其他融资': 'category_qtrz_szsh',
'股权变动': 'category_gqbd_szsh',
'补充更正': 'category_bcgz_szsh',
'澄清致歉': 'category_cqdq_szsh',
'风险提示': 'category_fxts_szsh',
'特别处理和退市': 'category_tbclts_szsh',
}
PLATES = {
'sz': ('szse', '深市'),
'shmb': ('sse', '沪市')
}
def _get_total_record_num(data):
"""公告总数量"""
return math.ceil(int(data['totalRecordNum']) / 30)
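# Quick sanity check of the paging math above (illustrative only, not part of the
# original module; assumes the cninfo payload carries 'totalRecordNum' as shown):
# with 30 announcements per page, a payload reporting 61 records yields 3 pages.
# assert _get_total_record_num({'totalRecordNum': '61'}) == 3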
def _to_dataframe(data):
def f(page_data):
res = []
for row in page_data['announcements']:
to_add = (
row['announcementId'],
row['secCode'],
row['secName'],
row['announcementTitle'],
pd.Timestamp(row['announcementTime'], unit='ms'),
'http://www.cninfo.com.cn/' + row['adjunctUrl'],
)
res.append(to_add)
df = pd.DataFrame.from_records(res, columns=COLUMNS)
return df
dfs = []
for page_data in data:
try:
dfs.append(f(page_data))
except Exception:
pass
return pd.concat(dfs)
async def _fetch_disclosure_async(session, plate, category, date_str, page):
assert plate in PLATES.keys(), f'可接受范围{PLATES}'
assert category in CATEGORIES.keys(), f'可接受分类范围:{CATEGORIES}'
market = PLATES[plate][1]
sedate = f"{date_str}+~+{date_str}"
kwargs = dict(
tabName='fulltext',
seDate=sedate,
category=CATEGORIES[category],
plate=plate,
column=PLATES[plate][0],
pageNum=page,
pageSize=30,
)
# requesting too frequently tends to get the connection closed by the server
async with session.post(URL, data=kwargs, headers=HEADERS) as r:
msg = f"{market} {date_str} 第{page}页 响应状态:{r.status}"
logger.info(msg)
await asyncio.sleep(1)
try:
return await r.json()
except ContentTypeError:
return {}
async def _fetch_one_day(session, plate, date_str):
"""获取深交所或上交所指定日期所有公司公告"""
data = await _fetch_disclosure_async(session, plate, '全部', date_str, 1)
page_num = _get_total_record_num(data)
if page_num == 0:
return pd.DataFrame()
logger.notice(f"{PLATES[plate][1]} {date_str} 共{page_num}页", page_num)
tasks = []
for i in range(page_num):
tasks.append(_fetch_disclosure_async(
session, plate, '全部', date_str, i+1))
# Schedule calls *concurrently*:
data = await asyncio.gather(
*tasks
)
return _to_dataframe(data)
async def fetch_one_day(session, date):
"""获取指定日期全部公司公告"""
date_str = date.strftime(r'%Y-%m-%d')
tasks = [_fetch_one_day(session, plate, date_str)
for plate in PLATES.keys()]
dfs = await asyncio.gather(
*tasks
)
if any([not df.empty for df in dfs]):
# sort by announcement id (序号) in descending order
return pd.concat(dfs).sort_values('序号', ascending=False)
else:
return pd.DataFrame(columns=COLUMNS)
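# Minimal usage sketch (an assumption about how the coroutine above is driven; the
# original source does not show a direct caller): fetch one day's announcements and
# print the first rows.
#
# async def _demo():
#     async with aiohttp.ClientSession() as web_session:
#         df = await fetch_one_day(web_session, pd.Timestamp('2019-01-04'))
#         print(df.head())
#
# asyncio.run(_demo())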
async def init_disclosure():
"""初始化历史公告"""
df_session = get_session(db_dir_name='info')
sdate = pd.Timestamp('2010-01-01')
edate = pd.Timestamp('today')
date_rng = pd.date_range(sdate, edate)
async def is_completed(web_session, d, times):
# reader = _get_reader(d, web_session)
try:
df = await fetch_one_day(web_session, d)
logger.info(f"提取网络数据 {d.strftime(r'%Y-%m-%d')} 共{len(df)}行")
_refresh(df, df_session)
return True
except ValueError as e:
logger.warn(f"{d.strftime(r'%Y-%m-%d')} 无数据")
return True
except Exception as e:
logger.warn(f"第{times}次尝试失败。 {d.strftime(r'%Y-%m-%d')} {e!r}")
return False
async with aiohttp.ClientSession() as web_session:
for d in date_rng:
# retry up to 3 times
for i in range(1, 4):
status = await is_completed(web_session, d, i)
if status:
break
else:
await asyncio.sleep(4)
await asyncio.sleep(4)
df_session.close()
def has_data(session, num):
"""查询项目中是否存在指定序号的数据"""
q = session.query(Disclosure).filter(
Disclosure.序号 == num,
)
return session.query(q.exists()).scalar()
def _refresh(df, session):
if df.empty:
return
to_add = []
for _, row in df.iterrows():
num = row['序号']
if not has_data(session, num):
obj = Disclosure()
obj.序号 = num
obj.股票代码 = row['股票代码']
obj.股票简称 = row['股票简称']
obj.公告标题 = row['公告标题']
obj.公告时间 = row['公告时间']
obj.下载网址 = row['下载网址']
to_add.append(obj)
session.add_all(to_add)
session.commit()
if len(to_add) > 0:
dt = df.公告时间.dt.strftime(r'%Y-%m-%d').iloc[0]
logger.info(f"{dt} 添加{len(to_add)}行")
def last_date(session):
"""查询公司公告最后一天"""
return session.query(func.max(Disclosure.公告时间)).scalar()
async def refresh_disclosure():
"""刷新公司公告"""
session = get_session(db_dir_name='info')
today = | pd.Timestamp('today') | pandas.Timestamp |
import os
import sys
import glob
import pickle as pkl
import warnings
from pathlib import Path
import numpy as np
import pandas as pd
from scipy.stats import ttest_rel
def load_stratified_prediction_results(results_dir, experiment_descriptor):
"""Load results of stratified prediction experiments.
Arguments
---------
results_dir (str): directory to look in for results, subdirectories should
be experiments for individual genes or cancer types
experiment_descriptor (str): string describing this experiment, can be
useful to segment analyses involving multiple
experiments or results sets
Returns
-------
results_df (pd.DataFrame): results of classification experiments
"""
results_df = pd.DataFrame()
results_dir = Path(results_dir)
for identifier in results_dir.iterdir():
identifier_dir = Path(results_dir, identifier)
if identifier_dir.is_file(): continue
for results_file in identifier_dir.iterdir():
if not results_file.is_file(): continue
results_filename = str(results_file.stem)
# skip compressed files here, use load_compressed* functions
# to load that data separately
if check_compressed_file(results_filename): continue
if ('classify' not in results_filename or
'metrics' not in results_filename): continue
if results_filename[0] == '.': continue
id_results_df = pd.read_csv(results_file, sep='\t')
id_results_df['experiment'] = experiment_descriptor
results_df = | pd.concat((results_df, id_results_df)) | pandas.concat |
# Copyright (c) 2020 Huawei Technologies Co., Ltd.
# <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from typing import Dict, List
import numpy as np
import pandas as pd
from collections import Counter
from src.compress import compress
# logger
from src.logger_setting.my_logger import get_logger
from src.setting import setting
LOGGER = get_logger()
def groupby_calc(df):
df['esn'] = df['esn'].astype('str')
df = df.groupby(['esn'])
return df
def calc_total(series):
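# `series` holds cumulative counter samples. Consecutive increases are summed as
# deltas; when a sample drops below its predecessor the counter is assumed to
# have reset, so only the post-reset value is added.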
series = series.values
count = 0
for d in range(len(series)):
if d < len(series) - 1:
if pd.isna(series[d]) or pd.isna(series[d + 1]):
continue
if float(series[d]) <= float(series[d + 1]):
count += float(series[d + 1]) - float(series[d])
else:
count += float(series[d + 1])
return count
def is_active(series):
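# A terminal counts as active when its total traffic for the period exceeds
# 10 MB (assuming setting.mb is the bytes-per-megabyte conversion factor).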
series = calc_total(series)
if float(series) / setting.mb > 10:
return 1
else:
return 0
def get_max(series):
if series:
return np.max(series)
else:
return setting.INVALID_VALUE
def get_min(series):
if series:
return np.min(series)
else:
return setting.INVALID_VALUE
def get_avg(values, counts):
count = sum(counts) if isinstance(counts, list) else counts
if count == 0:
return setting.INVALID_VALUE
else:
return sum(values) / count if isinstance(values, list) else values / count
def get_avg_max_min(df, avg_name, max_name, min_name, counts):
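# Aggregate one metric across daily frames: drop the INVALID_VALUE sentinel
# before computing the overall average (sum/count), maximum and minimum.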
avg_list = list(filter(lambda x: int(x) != setting.INVALID_VALUE, df[avg_name].values))
sum_value = get_sum(avg_list)
cnt = get_sum(list(df[counts].values))
avg = sum_value / cnt if cnt != 0 else setting.INVALID_VALUE
max_list = list(filter(lambda x: int(x) != setting.INVALID_VALUE, df[max_name].values))
max_value = get_max(max_list)
min_list = list(filter(lambda x: int(x) != setting.INVALID_VALUE, df[min_name].values))
min_value = get_min(min_list)
return {avg_name: avg,
max_name: max_value,
min_name: min_value}
def get_sum(series):
if series:
return np.sum(series)
else:
return setting.INVALID_VALUE
def get_std(series):
if series:
return np.std(series)
else:
return setting.INVALID_VALUE
def get_all_day():
all_day_file = compress.get_all_csv_file(os.path.join(setting.data_path, 'extractData'))
day_list = []
for file in all_day_file:
day_list.append(os.path.split(file)[1].split("\\")[-1].split('_')[0])
return list(set(day_list))
def merge_day_data(day_dict: Dict[str, List[str]]):
for day in day_dict.keys():
file_list: List[str] = day_dict.get(day)
df = pd.concat(pd.read_csv(file, error_bad_lines=False, index_col=False) for file in file_list)
df.columns = setting.parameter_json["extract_data_columns"]
df = df.sort_values('collectTime', ascending=True)
# Replace -9999 with NaN; values that were originally empty already became NaN on read, so both empty values and -9999 end up as NaN
df = df.replace(setting.INVALID_VALUE, np.nan)
grouped = groupby_calc(df).agg(
MaxRSRP=pd.NamedAgg(column='RSRP', aggfunc=max),
MinRSRP=pd.NamedAgg(column='RSRP', aggfunc=min),
AvgRSRP=pd.NamedAgg(column='RSRP', aggfunc=sum),
CntRSRP=pd.NamedAgg(column='RSRP', aggfunc="count"),
MaxCQI=pd.NamedAgg(column='CQI', aggfunc=max),
MinCQI=pd.NamedAgg(column='CQI', aggfunc=min),
AvgCQI=pd.NamedAgg(column='CQI', aggfunc=sum),
CntCQI=pd.NamedAgg(column='CQI', aggfunc="count"),
MaxRSRQ=pd.NamedAgg(column='RSRQ', aggfunc=max),
MinRSRQ=pd.NamedAgg(column='RSRQ', aggfunc=min),
AvgRSRQ=pd.NamedAgg(column='RSRQ', aggfunc=sum),
CntRSRQ=pd.NamedAgg(column='RSRQ', aggfunc="count"),
MaxRSSI=pd.NamedAgg(column='RSSI', aggfunc=max),
MinRSSI=pd.NamedAgg(column='RSSI', aggfunc=min),
AvgRSSI=pd.NamedAgg(column='RSSI', aggfunc=sum),
CntRSSI=pd.NamedAgg(column='RSSI', aggfunc="count"),
MaxSINR=pd.NamedAgg(column='SINR', aggfunc=max),
MinSINR=pd.NamedAgg(column='SINR', aggfunc=min),
AvgSINR=pd.NamedAgg(column='SINR', aggfunc=sum),
CntSINR=pd.NamedAgg(column='SINR', aggfunc="count"),
TotalDownload=pd.NamedAgg(column='TotalDownload', aggfunc=calc_total),
TotalUpload=pd.NamedAgg(column='TotalUpload', aggfunc=calc_total),
TotalConnectTime=pd.NamedAgg(column='TotalConnectTime', aggfunc=calc_total),
ModelName=pd.NamedAgg(column='ModelName', aggfunc=lambda x: x.iloc[-1]),
IMSI=pd.NamedAgg(column='IMSI', aggfunc=lambda x: x.iloc[-1]),
IMEI=pd.NamedAgg(column='IMEI', aggfunc=lambda x: x.iloc[-1]),
MSISDN=pd.NamedAgg(column='MSISDN', aggfunc=lambda x: x.iloc[-1]),
isActive=pd.NamedAgg(column='TotalDownload', aggfunc=is_active),
AvgDlThroughput=pd.NamedAgg(column='MaxDLThroughput', aggfunc=sum),
CntDlThroughput=pd.NamedAgg(column='MaxDLThroughput', aggfunc="count"),
AvgUlThroughput=pd.NamedAgg(column='MaxULThroughput', aggfunc=sum),
CntUlThroughput=pd.NamedAgg(column='MaxULThroughput', aggfunc="count"),
WiFiUserQty=pd.NamedAgg(column='WiFiUserQty', aggfunc=sum),
CntWiFiUserQty=pd.NamedAgg(column='WiFiUserQty', aggfunc="count"),
HostNumberOfEntries=pd.NamedAgg(column='HostNumberOfEntries', aggfunc=sum),
CntHostNumberOfEntries=pd.NamedAgg(column='HostNumberOfEntries', aggfunc="count"),
ECGI= | pd.NamedAgg(column='ECGI', aggfunc=get_main_cell) | pandas.NamedAgg |
# interesting links:
# * https://news.ycombinator.com/item?id=19214650
import os
import sys
PACKAGE_PARENT = '../..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
import pandas as pd
import numpy as np
import datetime as dt
import bisect
import scipy.stats as st
import os
import matplotlib.pyplot as plt
import matplotlib.ticker as plttick
import seaborn as sns
import plotly.graph_objects as go
import plotly.express as px
from abc import ABC, abstractmethod
from typing import List, Tuple, Generator, Optional, Callable, Iterable, Generic, Dict
from trader.common.helpers import best_fit_distribution, fit_distribution, window, reformat_large_tick_values, pct_change_adjust
from trader.common.distributions import Distribution, CsvContinuousDistribution, TestDistribution
from collections import deque
from enum import Enum
from dateutil.relativedelta import relativedelta
from trader.portfolio.quantum_harmonic import QuantumHarmonic
from trader.common.logging_helper import setup_logging
from trader.common.helpers import dateify, date_range
logging = setup_logging(module_name='vector_life')
ROOT = '../../'
SANDP_DISTRIBUTION = ROOT + 'data/sandp2000-2019.csv' if 'finance' in os.getcwd() else 'finance/data/sandp2000-2019.csv'
LIFE_EXPECTANCY = ROOT + 'data/life_expectancy.csv' if 'finance' in os.getcwd() else 'finance/data/life_expectancy.csv'
AUSTRALIA_INFLATION = ROOT + 'data/rba_inflation_data.csv' if 'finance' in os.getcwd() else 'finance/data/rba_inflation_data.csv'
TEST_DISTRIBUTION = ROOT + 'data/model.csv' if 'finance' in os.getcwd() else 'finance/data/model.csv'
QUANTUM_HARMONIC = ROOT + 'data/quantumharmonic.csv' if 'finance' in os.getcwd() else 'finance/data/quantumharmonic.csv'
class AssetType(Enum):
CASH = 1
STOCK = 2
BOND = 4
PROPERTY = 3
INCOME = 5
class TransactionType(Enum):
BUY = 1
SELL = 2
YIELDED = 3
DIVIDEND = 4
TAX = 5
class TaxType(Enum):
CAPITAL_GAINS = 1
INCOME = 2
class Tax():
def __init__(self, amount: float, tax_type: TaxType):
self.amount = amount
self.tax_type = tax_type
class AssetGenerator():
def __init__(self, asset_type: AssetType):
self.asset_type: AssetType = asset_type
class SalaryGenerator(AssetGenerator):
def __init__(self, yearly_salary: float):
super().__init__(AssetType.INCOME)
self.yearly_salary: float = yearly_salary
def generate(self,
start_date: dt.datetime,
freq: str = 'M',
periods: int = 480) -> pd.DataFrame:
index = pd.date_range(start=start_date, periods=periods, freq=freq)
data = np.full(periods, self.yearly_salary)
return pd.DataFrame(index=index, data=data)
class AssetTransaction():
def __init__(self,
transaction_cost: float,
transaction_type: TransactionType,
amount: float,
price: float,
date_time: dt.datetime,
yield_percentage: Optional[float] = None):
self.transaction_cost: float = transaction_cost
self.transaction_type: TransactionType = transaction_type
self.amount: float = amount
self.price: float = price
self.date_time: dt.datetime = date_time
self.yield_percentage: Optional[float] = yield_percentage
class AssetTick():
def __init__(self):
self.value: float = 0.0
self.asset_transaction: Optional[AssetTransaction] = None
self.date_time: dt.datetime = dt.datetime(dt.MINYEAR, 1, 1)
self.tick_yield: float = 0.0
self.price: float = 0.0
def __str__(self):
return 'value: {}, tick_yield: {}, asset_transaction: {}'.format(round(self.value, 2),
self.tick_yield,
self.asset_transaction)
def __repr__(self):
return self.__str__()
def to_dict(self):
return {'value': self.value,
'date_time': self.date_time,
'tick_yield': self.tick_yield,
'price': self.price}
class AustralianInflation(Distribution):
def __init__(self):
self.distribution = CsvContinuousDistribution(name='australian_inflation',
csv_file=AUSTRALIA_INFLATION,
data_column='inflation')
def sample(self) -> float:
return self.distribution.sample()
class Asset():
def __init__(self,
name: str,
initial_value: float,
initial_price: float,
asset_type: AssetType,
asset_init_date: dt.datetime,
capital_gain_applicable: bool,
yield_generator: Callable[[AssetTick], float],
yield_interval_days: int):
self.ticks: List[AssetTick] = []
self.transactions: List[AssetTick] = [] # fast path
self.name: str = name
self.initial_value = initial_value
self.initial_price = initial_price
self.asset_type: AssetType = asset_type
self.asset_init_date: dt.datetime = dt.datetime(asset_init_date.year, asset_init_date.month, asset_init_date.day)
self.capital_gain_applicable = capital_gain_applicable
self.yield_generator: Callable[[AssetTick], float] = yield_generator
self.yield_interval_days: int = yield_interval_days
def init(self) -> None:
# we need to initialize the first tick, because a buy() will try and call transaction_cost()
# on the subclass, and it won't be cooked yet if we do it in the constructor
asset_tick = AssetTick()
asset_tick.asset_transaction = AssetTransaction(transaction_cost=0.0,
transaction_type=TransactionType.BUY,
amount=self.initial_value,
price=self.initial_price,
date_time=self.asset_init_date)
asset_tick.price = self.initial_price
asset_tick.date_time = self.asset_init_date
asset_tick.value = self.initial_value
asset_tick.tick_yield = self.yield_generator(asset_tick)
self.append_tick(asset_tick)
def append_tick(self, tick: AssetTick) -> None:
self.ticks.append(tick)
if tick.asset_transaction is not None:
self.transactions.append(tick)
def generate_tick(self, date_time: dt.datetime) -> Optional[AssetTick]:
if len(self.ticks) == 0:
self.init()
last_transaction_tick: AssetTick
last_tick: AssetTick
perc_return: float
# make sure we're generating ticks after the init date
if date_time < self.asset_init_date:
return None
# grab the last tick
last_tick = self.ticks[-1]
trans_filter: Callable[[AssetTransaction], bool] = \
lambda t: t.transaction_type == TransactionType.BUY or t.transaction_type == TransactionType.YIELDED
last_transaction_tick = self.get_last_tick_transaction(trans_filter)
asset_tick = AssetTick()
asset_tick.date_time = date_time
# check to see if we need to generate some yield
if (date_time - last_transaction_tick.date_time).days >= self.yield_interval_days:
# aggregate ticks in between and apply the rate
total_yield = self.sum_yield(last_transaction_tick, last_tick)
# apply that yield to this tick
yielded_amount = last_tick.value * total_yield
asset_tick.value = last_tick.value + yielded_amount
transaction = AssetTransaction(transaction_cost=self.transaction_cost(TransactionType.YIELDED,
yielded_amount,
date_time),
transaction_type=TransactionType.YIELDED,
amount=yielded_amount,
price=self.generate_price(last_tick, date_time),
date_time=date_time,
yield_percentage=total_yield)
asset_tick.asset_transaction = transaction
else:
asset_tick.value = last_tick.value
asset_tick.tick_yield = self.yield_generator(last_tick)
self.append_tick(asset_tick)
return asset_tick
def __start_fin_year(self, end_fin_year: dt.datetime) -> dt.datetime:
start_fin_year = end_fin_year - relativedelta(years=1)
return start_fin_year
def __filter_fin_year(self, collection: List, end_fin_year: dt.datetime) -> List:
ret = []
start_fin_year = self.__start_fin_year(end_fin_year)
for i in collection:
if i.date_time >= start_fin_year and i.date_time < end_fin_year:
ret.append(i)
return ret
@abstractmethod
def transaction_cost(self, transaction_type: TransactionType, amount: float, date_time: dt.datetime):
pass
def generate_price(self, last_tick: AssetTick, date_time: dt.datetime):
pass
@abstractmethod
def taxable_income(self, end_fin_year: dt.datetime) -> List[Tax]:
pass
def sum_yield(self, start: AssetTick, end: AssetTick) -> float:
if start == end:
return start.tick_yield
start_index = self.ticks.index(start)
end_index = self.ticks.index(end)
total_yield = 0.0
for i in range(start_index, end_index):
total_yield += self.ticks[i].tick_yield
return total_yield
def sum_yield_from_ticks(self, ticks: List[AssetTick]) -> float:
total_yield = 0.0
for t in ticks:
total_yield += t.tick_yield
return total_yield
def get_financial_year_ticks(self, end_fin_year: dt.datetime) -> List[AssetTick]:
start_fin_year = self.__start_fin_year(end_fin_year)
# let's start from the end, because we're usually calling this during a simulation
return_ticks = []
# ticks = [d for d in self.ticks if d.date_time >= start_fin_year and d.date_time < end_fin_year]
for t in reversed(self.ticks):
if t.date_time >= start_fin_year and t.date_time < end_fin_year:
return_ticks.append(t)
if t.date_time < start_fin_year:
break
return list(reversed(return_ticks))
def get_last_tick_transaction(self,
filter_: Callable[[AssetTransaction], bool] = lambda t: True) -> AssetTick:
for t in reversed(self.transactions):
if t.asset_transaction is not None:
if filter_(t.asset_transaction):
return t
raise ValueError('filter produced no transaction ticks')
def get_ticks_with_transactions(self,
filter_: Optional[Callable[[AssetTransaction], bool]],
end_fin_year: Optional[dt.datetime] = None) -> List[AssetTick]:
return_ticks = []
tick_collection = self.transactions
if end_fin_year:
tick_collection = self.__filter_fin_year(tick_collection, end_fin_year)
for t in tick_collection:
if t.asset_transaction is not None:
if filter_ is not None and filter_(t.asset_transaction):
return_ticks.append(t)
elif filter_ is None:
return_ticks.append(t)
return return_ticks
def get_value(self) -> float:
return self.ticks[-1].value
def perform_buysell(self,
amount: float,
trans_type: TransactionType,
date_time: dt.datetime) -> None:
if trans_type != TransactionType.SELL and trans_type != TransactionType.BUY:
raise ValueError('must be a buy or sell transaction')
if trans_type == TransactionType.SELL and amount > self.get_value():
raise ValueError('SELL amount is greater than asset value')
if trans_type == TransactionType.SELL and amount > 0.0:
amount = amount * -1.0
last_tick = self.ticks[-1]
asset_tick = AssetTick()
asset_tick.asset_transaction = AssetTransaction(transaction_cost=self.transaction_cost(trans_type,
amount,
date_time),
transaction_type=trans_type,
amount=amount,
price=last_tick.price,
date_time=date_time)
asset_tick.price = last_tick.price
asset_tick.date_time = date_time
asset_tick.value = last_tick.value + amount
self.append_tick(asset_tick)
def sell(self, amount: float, date_time: dt.datetime) -> None:
return self.perform_buysell(amount, TransactionType.SELL, date_time)
def buy(self, amount: float, date_time: dt.datetime, price: Optional[float]) -> None:
return self.perform_buysell(amount, TransactionType.BUY, date_time)
def __str__(self):
return '{}, len ticks: {}, asset_type: {}'.format(self.name, len(self.ticks), self.asset_type.name)
def __repr__(self):
return self.__str__() + '\n' + ',\n'.join([str(a) for a in self.ticks])
class AssetStock(Asset):
def __init__(self,
name: str,
initial_value: float,
initial_price: float,
asset_init_date: dt.datetime):
super().__init__(name=name,
initial_value=initial_value,
initial_price=initial_price,
asset_type=AssetType.STOCK,
asset_init_date=asset_init_date,
capital_gain_applicable=True,
yield_generator=self.sample_yield,
yield_interval_days=1)
# self.distribution = CsvContinuousDistribution(name=name,
# csv_file=SANDP_DISTRIBUTION,
# data_column='adj_close',
# cache_size=365 * 10,
# data_column_apply=pct_change_adjust,
# distribution=st.loglaplace)
parameters = [0.2, 0.2, 0.086, 0.182, 0.133, 0.928]
self.distribution = QuantumHarmonic(name=name, csv_file=QUANTUM_HARMONIC, parameters=parameters)
def sample_yield(self, last_tick: AssetTick) -> float:
return self.distribution.sample()
def transaction_cost(self, transaction_type: TransactionType, amount: float, date_time: dt.datetime) -> float:
return 10.0
def taxable_income(self, end_fin_year: dt.datetime) -> List[Tax]:
start_fin_year = end_fin_year - relativedelta(years=1)
sell_filter: Callable[[AssetTransaction], bool] = \
lambda t: t.transaction_type == TransactionType.SELL
buy_filter: Callable[[AssetTransaction], bool] = \
lambda t: t.transaction_type == TransactionType.BUY
fin_year_sell_ticks = [t.asset_transaction for t in self.get_ticks_with_transactions(sell_filter, end_fin_year)
if t.asset_transaction is not None]
buys: List[Tuple[dt.datetime, float]]
buys = [(t.date_time, t.asset_transaction.amount) for t in reversed(self.get_ticks_with_transactions(buy_filter))
if t.asset_transaction is not None]
# TODO:// figure out dividend income
# [capital_gain, income]
tax = [0.0, 0.0]
tax_switch = 0
# for each of the sell's, we need to figure out how long ago they were bought, to calculate
# capital gain vs. income
for sell in fin_year_sell_ticks:
sell_remaining = sell.amount
for i in range(0, len(buys)):
if buys[i][0] < (sell.date_time - relativedelta(years=1)):
tax_switch = 0
else:
tax_switch = 1
if buys[i][1] >= sell_remaining:
tax[tax_switch] += sell_remaining
buys[i] = (buys[i][0], (buys[i][1] - sell_remaining))
sell_remaining = 0
else:
tax[tax_switch] += buys[i][1]
sell_remaining -= buys[i][1]
buys[i] = (buys[i][0], 0)
# sell_remaining should be zero
if sell_remaining != 0.0:
raise ValueError('this should not happen')
return [Tax(amount=tax[0], tax_type=TaxType.CAPITAL_GAINS),
Tax(amount=tax[1], tax_type=TaxType.INCOME)]
class AssetCash(Asset):
def __init__(self,
name: str,
initial_value: float,
asset_init_date: dt.datetime):
super().__init__(name=name,
initial_value=initial_value,
initial_price=1.0,
asset_type=AssetType.CASH,
asset_init_date=asset_init_date,
capital_gain_applicable=False,
yield_generator=self.sample_yield,
yield_interval_days=365)
def sample_yield(self, last_tick: AssetTick) -> float:
return 0.028 / self.yield_interval_days
def transaction_cost(self, transaction_type: TransactionType, amount: float, date_time: dt.datetime) -> float:
# regardless of BUY, SELL, YIELD, it's all the same
return 0.0
def generate_price(self, last_tick: AssetTick, date_time: dt.datetime) -> float:
return 1.0
def taxable_income(self, end_fin_year: dt.datetime) -> List[Tax]:
income: float = 0.0
trans_filter: Callable[[AssetTransaction], bool] = \
lambda t: t.transaction_type == TransactionType.YIELDED
fin_year_ticks = self.get_ticks_with_transactions(filter_=trans_filter,
end_fin_year=end_fin_year)
income += sum([t.asset_transaction.amount for t in fin_year_ticks if t.asset_transaction])
return [Tax(amount=income, tax_type=TaxType.INCOME)]
class TaxReturn():
def __init__(self):
self.date: dt.datetime
self.tax_paid: float
self.carried_losses: float
class Book():
def __init__(self,
start_date: dt.datetime):
self.assets: List[Asset] = []
self.tax_returns: List[TaxReturn] = []
self.start_date: dt.datetime = start_date
def calculate_net_worth(self):
total = 0.0
for a in self.assets:
total += a.ticks[-1].value
return total
def to_dataframe(self) -> Dict[str, pd.DataFrame]:
assets = {}
for a in self.assets:
tick_dicts = [t.to_dict() for t in a.ticks]
assets[a.name] = | pd.DataFrame(tick_dicts) | pandas.DataFrame |
import pandas as pd
import datetime
from pandas import DataFrame
from pandasql import sqldf
loc = locals()
def calculate_average_ticker_price(prices: list, total_quantity: float) -> float:
"""
:param prices: a list of price * quantity needed to calculate the average price of each stock
:param total_quantity: the total amount of the stock held, required to calculate the average price per stock.
:return: the average price for that particular ticker stock given
1. the different purchase price
2. the different quantities purchased
"""
if total_quantity > 0:
total_price = sum(prices)
return total_price / total_quantity
def strip_action(action: str) -> str:
"""
removes whitespace and changes all characters to lower case
:param action: the name of the action taken on a position
:return: the input string minus the above mentioned
"""
action = action.replace(" ", "")
action = action.casefold()
return action
def profit_from_sale(ticker_number: str, sale_price: float, quantity: float, action: str) -> float:
"""
Calculates the amount of profit/loss realised from a sale of a stock.
:param ticker_number: ticker name of the stock
:param sale_price: sale/cover price of the stock
:param quantity: the number of stock sold/bought.
:param action: is this position a "longsell" or a "shortcover"
:return: profit/loss of the action taken
"""
if action == "longsell":
profit_or_loss = (sale_price - price_list[ticker_number]) * quantity
return profit_or_loss
elif action == "shortcover":
profit_or_loss = (price_list[ticker_number] - sale_price) * quantity
return profit_or_loss
def date_remove_time(date: datetime) -> datetime:
"""
converts a datetime format of %Y-%m-%d %H:%M:%S.%f to %d/%m/%Y
:param date: date
:return: a cleaner date without the above mentioned
"""
return datetime.datetime.strptime(date,'%Y-%m-%d %H:%M:%S.%f').strftime('%d/%m/%Y')
myportfolio = pd.read_excel('portfoliodataset.xlsx', index_col=False)
# PANDAS SETTINGS
| pd.set_option('display.max_rows', 500) | pandas.set_option |
# The analyser
import pandas as pd
import matplotlib.pyplot as plt
import dill
import os
import numpy as np
from funcs import store_namespace
from funcs import load_namespace
import datetime
from matplotlib.font_manager import FontProperties
from matplotlib import rc
community = 'ResidentialCommunity'
sim_ids = ['MinEne_0-2']
model_id = 'R2CW_HP'
bldg_list = load_namespace(os.path.join('path to models', 'teaser_bldgs_residential'))
#
bldg_list = [bldg_list[0], bldg_list[1]]
print(bldg_list)
folder = 'results'
step = 300
nodynprice=0
mon = 'jan'
constr_folder = 'decentr_enemin_constr_'+mon
#bldg_list = bldg_list[0:1]
if mon == 'jan':
start = '1/7/2017 16:30:00'
end = '1/7/2017 19:00:00'
controlseq_time = '01/07/2017 16:55:00'
elif mon == 'mar':
start = '3/1/2017 16:30:00'
end = '3/1/2017 19:00:00'
controlseq_time = '03/01/2017 16:55:00'
elif mon=='nov':
start = '11/20/2017 16:30:00'
end = '11/20/2017 19:00:00'
controlseq_time = '11/20/2017 16:55:00'
sim_range = pd.date_range(start, end, freq = str(step)+'S')
simu_path = "path to simulation folder"
other_input = {}
price = {}
flex_cost = {}
ref_profile = {}
controlseq = {}
opt_control = {}
emutemps = {}
mpctemps = {}
opt_stats = {}
flex_down = {}
flex_up = {}
power = {}
for bldg in bldg_list:
building = bldg+'_'+model_id
for sim_id in sim_ids:
opt_stats[sim_id] = {}
controlseq[sim_id] = {}
mpctemps[sim_id] = {}
emutemps[sim_id] = {}
power[sim_id] = {}
for time_idx in sim_range:
time_idx = time_idx.strftime('%m/%d/%Y %H:%M:%S')
t = time_idx.replace('/','-').replace(':','-').replace(' ','-')
opt_stats[sim_id][time_idx] = load_namespace(os.path.join(simu_path, folder, 'opt_stats_'+sim_id+'_'+t))
emutemps[sim_id][time_idx] = load_namespace(os.path.join(simu_path, folder, 'emutemps_'+sim_id+'_'+t))
mpctemps[sim_id][time_idx] = load_namespace(os.path.join(simu_path, folder, 'mpctemps_'+sim_id+'_'+t))
controlseq[sim_id][time_idx] = load_namespace(os.path.join(simu_path, folder, 'controlseq_'+sim_id)+'_'+t)
power[sim_id][time_idx] = load_namespace(os.path.join(simu_path, folder, 'power_'+sim_id)+'_'+t)
#flex_down[sim_id] = load_namespace(os.path.join(simu_path, folder, 'flex_down'+sim_id))
#flex_up[sim_id] = load_namespace(os.path.join(simu_path, folder, 'flex_up'+sim_id))
i=0
for sim_id in sim_ids:
if i == 0:
emutemps_df = pd.DataFrame.from_dict(emutemps[sim_id],orient='index')
emutemps_df.index = pd.to_datetime(emutemps_df.index)
emutemps_df.index = emutemps_df.index.shift(1, freq=str(step)+'S')
power_df = pd.DataFrame.from_dict(power[sim_id],orient='index')
power_df.index = pd.to_datetime(power_df.index)
opt_stats_df = pd.DataFrame.from_dict(opt_stats[sim_id],orient='index')
opt_stats_df.index = pd.to_datetime(opt_stats_df.index)
power_df.index = power_df.index.shift(1, freq=str(step)+'S')
else:
emutemps_df1 = pd.DataFrame.from_dict(emutemps[sim_id],orient='index')
emutemps_df1.index = pd.to_datetime(emutemps_df1.index)
emutemps_df1.index = emutemps_df1.index.shift(1, freq=str(step) + 'S')
emutemps_df = pd.concat([emutemps_df, emutemps_df1])
power_df1 = pd.DataFrame.from_dict(power[sim_id],orient='index')
power_df1.index = pd.to_datetime(power_df1.index)
power_df1.index = power_df1.index.shift(1, freq=str(step)+'S')
power_df = pd.concat([power_df, power_df1])
opt_stats_df1 = pd.DataFrame.from_dict(opt_stats[sim_id],orient='index')
opt_stats_df1.index = pd.to_datetime(opt_stats_df1.index)
opt_stats_df = pd.concat([opt_stats_df, opt_stats_df1])
i = i+1
store_namespace(os.path.join(simu_path, folder,'emutemps'),emutemps_df)
store_namespace(os.path.join(simu_path, folder,'mpctemps'),mpctemps)
store_namespace(os.path.join(simu_path, folder,'opt_stats'),opt_stats_df)
constraints = {}
for bldg in bldg_list:
setpoint_dict = load_namespace(os.path.join(simu_path, constr_folder, 'constraints_'+bldg+'_'+model_id)).data['TAir']
constraints[bldg] = {}
for param in setpoint_dict.keys():
constraints[bldg]['hi'] = setpoint_dict['Slack_LTE'].display_data().resample(str(step)+'S').ffill()
constraints[bldg]['lo'] = setpoint_dict['Slack_GTE'].display_data().resample(str(step)+'S').ffill()
constraints_df = pd.DataFrame.from_dict(constraints, orient = 'index')
#print(constraints_df['hi'].values)
weather = load_namespace(os.path.join(simu_path, folder, 'weather'))
price = load_namespace(os.path.join(simu_path, folder, 'sim_price'))
price = price.display_data()
if nodynprice==1:
price = pd.Series(50, price.index,name='pi_e')
#print(weather)
# """""""""""" Comfort violations """""""""""""""""""
#print(constraints_df)
#print(emutemps_df)
violation = {}
#print(constraints_df.loc['Detached_0']['lo'])
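# Comfort violations are accumulated in degree-hours: the temperature excess
# outside the comfort band, weighted by the time step (step/3600 hours).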
for bldg in bldg_list:
violation[bldg] = {}
for time in emutemps_df[bldg+'_'+model_id].index:
#print(emutemps_df[bldg+'_'+model_id][time])
emutemp = emutemps_df[bldg+'_'+model_id][time]
#emutemp = emutemp[time]
#emutemp = emutemp.values()
#print(emutemp)
constraint_hi = constraints_df.loc[bldg]['hi'][time]-273.15
constraint_lo = constraints_df.loc[bldg]['lo'][time]-273.15 #print(time)
#print(constraint_hi)
#print(constraint_lo)
if emutemp > constraint_hi:
violation[bldg][time] = (emutemp - constraint_hi)*step/3600
elif emutemp < constraint_lo:
violation[bldg][time] = (constraint_lo-emutemp)*step/3600
else:
violation[bldg][time] = 0
violation_df = pd.DataFrame.from_dict(violation, orient = 'columns')
print(violation_df)
store_namespace(os.path.join(simu_path, folder,'violation_df'),violation_df)
aggr = {}
dt = []
#print(controlseq.keys())
for time in controlseq[sim_ids[0]].keys():
control_start = datetime.datetime.strptime(time, '%m/%d/%Y %H:%M:%S')
control_end = datetime.datetime.strptime(time, '%m/%d/%Y %H:%M:%S') + datetime.timedelta(seconds = 10*int(step))
dt.append(control_start)
aggr[time] = pd.DataFrame.from_dict(controlseq[sim_ids[0]][time],orient='columns')
dt = pd.DataFrame(dt,columns = ['Dates'])
dt = dt.set_index(pd.DatetimeIndex(dt['Dates']))
index = dt.index
index = index.tz_localize('UTC')
index = index.sort_values()
mast_index = index
last_str = index[-1].strftime('%m/%d/%Y %H:%M:%S')
#real_cont = pd.DataFrame.from_dict(controlseq[sim_ids[0]][last_str],orient='columns')[index[0]:index[-1]]
real_cont = power_df
real_cont_aggr = real_cont.sum(axis=1)
aggrcom = {}
for time in aggr.keys():
aggrcom[time] = aggr[time].sum(axis=1)
store_namespace(os.path.join(simu_path,folder,'real_cont'), real_cont)
store_namespace(os.path.join(simu_path,folder,'aggr'), aggr)
store_namespace(os.path.join(simu_path,folder,'aggrcom'), aggrcom)
# --------------------- Flexibility factor and peak power ---------------
if mon == 'jan':
ff_date = '01/07/2017'
if mon == 'mar':
ff_date = '03/01/2017'
if mon == 'nov':
ff_date = '11/20/2017'
hc_start = datetime.datetime.strptime(ff_date + ' 18:00:00', '%m/%d/%Y %H:%M:%S')
hc_end = index[-1]
lc_start = index[0]
lc_end = datetime.datetime.strptime(ff_date + ' 17:59:00', '%m/%d/%Y %H:%M:%S')
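# Flexibility is assessed by splitting the horizon at 18:00 into a low-cost
# window (lc, before 18:00) and a high-cost window (hc, from 18:00 onwards).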
peak_comm = real_cont_aggr.max()
peak_time_comm = real_cont_aggr.idxmax()
peak_time_comm_hh = real_cont_aggr.resample(str(step)+'S').mean().idxmax()
peak_comm_hh = real_cont_aggr.resample(str(step)+'S').mean().max()
peak_comm =(peak_comm, peak_time_comm)
peak_comm_hh =(peak_comm_hh, peak_time_comm_hh)
print(peak_comm)
print(peak_comm_hh)
peak = {}
peak_hh = {}
cons_hc = real_cont[hc_start:hc_end]
cons_lc = real_cont[lc_start:lc_end]
print(cons_hc)
real_cont_hh = real_cont.resample(str(step)+'S').mean()
for bldg in bldg_list:
bldg = bldg+'_'+model_id
peak_val = real_cont.loc[:][bldg].max()
peak_idx = real_cont.loc[:][bldg].idxmax()
peak_hh_val = real_cont_hh.loc[:][bldg].max()
peak_hh_idx = real_cont_hh.loc[:][bldg].idxmax()
peak[bldg] = (peak_val, peak_idx)
peak_hh[bldg] = (peak_hh_val, peak_hh_idx)
peak = pd.DataFrame.from_dict(peak, orient='index')
peak_hh = pd.DataFrame.from_dict(peak_hh, orient='index')
print(peak_hh)
print(peak)
# -----------------------------------------------
print('%%%%%%%%%---- Plots ----%%%%%%%')
fig_folder = os.path.join(simu_path, folder, 'figs')
#print(controlseq)
#print(flex_cost.display_data())
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Prices
fig = plt.figure(figsize=(11.69,8.27))
ax = fig.gca()
ax1 = ax.twinx()
i = 0
plot_times=[0,4,8,12,18]
i=0
for bldg in [bldg_list[0]]:
ax.plot(real_cont.index, real_cont[bldg+'_'+model_id].values/1000,'-', label='ref_profile')
#resamp_index = index.asfreq('1800S')
ax.set_ylabel('Heat demand [kW]', fontsize=18)
ax1.plot(price.index, price.values, '--o', label="Price")
#ax1.plot(flex_cost[bldg_list[0]+'_'+model_id].index, flex_cost[bldg_list[0]+'_'+model_id].values, '--o', label="Flex Cost")
handles,labels = [],[]
for ax in fig.axes:
for h,l in zip(*ax.get_legend_handles_labels()):
handles.append(h)
labels.append(l)
ax1.set_ylabel(r'Price [pounds / kWh]', fontsize=18)
#ax.legend(fontsize=14, loc = 0)
#plt.legend(handles,labels, bbox_to_anchor = (1.04,0.5), loc ='center left')
plt.xticks(rotation=35)
plt.xlabel("Time",fontsize=18)
plt.title("Decentralised Algorithm:\n Heat demand under dynamic pricing and loadshaping",fontsize=22)
# We change the fontsize of minor ticks label
plt.tick_params(axis='both', which='major', labelsize=12)
plt.tick_params(axis='both', which='minor', labelsize=12)
plt.savefig(os.path.join(simu_path, folder, "mincost_price.png"))
plt.clf()
#plt.close()
#plt.close('all')
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Outside temperature and optimised control sequence
fig = plt.figure(figsize=(11.69,8.27))
ax = fig.gca()
ax1 = ax.twinx()
#aggr = {}
i = 0
#price = price.display_data()
plot_times=[0,4,8,12,18]
#print(controlseq.keys())
i=0
ax.plot(real_cont_aggr.index, real_cont_aggr.values/1000,'-x', label='realised')
ax.set_ylabel('Heat demand [kW]', fontsize=18)
ax1.plot(price.index, price.values, '--o', label="Price")
handles,labels = [],[]
for ax in fig.axes:
for h,l in zip(*ax.get_legend_handles_labels()):
handles.append(h)
labels.append(l)
ax1.set_ylabel(r'Price [pounds / kWh]', fontsize=18)
#ax.legend(fontsize=14, loc = 0)
#plt.legend(handles,labels, bbox_to_anchor = (1.04,0.5), loc ='center left')
plt.xticks(rotation=35)
plt.xlabel("Time",fontsize=18)
plt.title("Decentralised Algorithm:\n Power demand",fontsize=22)
# We change the fontsize of minor ticks label
plt.tick_params(axis='both', which='major', labelsize=12)
plt.tick_params(axis='both', which='minor', labelsize=12)
plt.savefig(os.path.join(simu_path, folder, "mincost_price_aggr.png"))
plt.clf()
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Temperatures
fig = plt.figure(figsize=(11.69,8.27))
ax = fig.gca()
#ax1 = ax.twinx()
plot_bldgs = [0]
plot_times=[0,1,2,3,4]
i= 0
#print(emutemps)
for sim_id in sim_ids:
i = 0
for time in mpctemps[sim_id].keys():
j = 0
for bldg in mpctemps[sim_id][time].keys():
if j in plot_bldgs:
ax.plot(mpctemps[sim_id][time][bldg].index, mpctemps[sim_id][time][bldg].values, '-' , label='mpc_'+bldg)
j = j+1
i = i+1
handles,labels = [],[]
for ax in fig.axes:
for h,l in zip(*ax.get_legend_handles_labels()):
handles.append(h)
labels.append(l)
#ax.legend(fontsize=14)
plt.xlabel("Time",fontsize=18)
plt.ylabel(r"Temperature [$^\circ$C]",fontsize=18)
plt.title("Predicted Temperatures with Cost Minimisation",fontsize=22)
plt.xticks(rotation=35)
# We change the fontsize of minor ticks label
plt.tick_params(axis='both', which='major', labelsize=12)
plt.tick_params(axis='both', which='minor', labelsize=12)
plt.legend(handles,labels, bbox_to_anchor = (1.04,0.5), loc ='center left')
plt.savefig(os.path.join(simu_path, folder, "temps_mpc.pdf"),bbox_inches="tight")
plt.savefig(os.path.join(simu_path, folder, "temps_mpc.png"),bbox_inches="tight")
plt.clf()
#print(ref_heatinput)
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Temperatures
fig = plt.figure(figsize=(11.69,8.27))
ax = fig.gca()
#ax1 = ax.twinx()
plot_bldgs = [0]
i= 0
#print(emutemps)
for sim_id in sim_ids:
if i == 0:
emutemps_df = pd.DataFrame.from_dict(emutemps[sim_id],orient='index')
emutemps_df.index = | pd.to_datetime(emutemps_df.index) | pandas.to_datetime |
"""
Author: <NAME>
Classes and functions for linearized DSGEs.
"""
import warnings
import pandas as pd
from tqdm import tqdm
from sympy import simplify, Matrix
from scipy.linalg import qz
from scipy.stats import beta, gamma, invgamma, norm, uniform
import matplotlib.pyplot as plt
from pykalman import KalmanFilter
from numpy.linalg import svd, inv, eig
from tables import PerformanceWarning
from scipy.optimize import minimize, basinhopping
from numpy.random import multivariate_normal, rand, seed
from numpy import diagonal, vstack, array, eye, where, diag, sqrt, hstack, zeros, \
arange, exp, log, inf, nan, isnan, isinf, set_printoptions, matrix, linspace
pd.set_option('display.max_columns', 20)
set_printoptions(precision=4, suppress=True, linewidth=150)
warnings.filterwarnings('ignore', category=RuntimeWarning)
warnings.filterwarnings('ignore', category=PerformanceWarning)
class DSGE(object):
"""
This is the main class which holds a DSGE model with all its attributes and methods.
"""
chains = None
prior_info = None
has_solution = False
posterior_table = None
def __init__(self, endog, endogl, exog, expec, state_equations, obs_equations=None, estimate_params=None,
calib_dict=None, prior_dict=None, obs_data=None, verbose=False):
"""
Model declaration requires passing SymPy symbols as variables and parameters. Some arguments can be left empty
if you are working with simulations of calibrated models.
:param endog: SymPy matrix of symbols containing the endogenous variables.
:param endogl: SymPy matrix of symbols containing the lagged endogenous variables.
:param exog: SymPy matrix of symbols containing the exogenous shocks.
:param expec: SymPy matrix of symbols containing the expectational errors.
:param state_equations: SymPy matrix of symbolic expressions representing the model's equilibrium conditions,
with zeros on the right-hand side of the equality.
:param obs_equations: SymPy matrix of symbolic expressions representing the model's observation equations, with
observable variables on the left-hand side of the equation. This is only required if the
model is going to be estimated. You do not need to provide observation equations to run
simulations on a calibrated model.
:param estimate_params: SymPy matrix of symbols containing the parameters that are free to be estimated.
:param calib_dict: dict. Keys are the symbols of parameters that are going to be calibrated, and values are
their calibrated value.
:param prior_dict: dict. Entries must have symbols of parameters that are going to be estimated. Values are
dictionaries containing the following entries:
- 'dist': prior distribution. 'normal', 'beta', 'gamma' or 'invgamma'.
- 'mean': mean of the prior distribution.
- 'std': standard deviation of the prior distribution.
- 'label': str with name/representation of the estimated parameter. This argument accepts
LaTeX representations.
:param obs_data: pandas DataFrame with the observable variables. Columns must be in the same order as the
'obs_equations' declarations.
:param verbose: <not implemented yet>
"""
self.verbose = verbose
self.endog = endog
self.endogl = endogl
self.exog = exog
self.expec = expec
self.params = estimate_params
self.state_equations = state_equations
self.obs_equations = obs_equations
self.prior_dict = prior_dict
self.data = obs_data
self.n_state = len(endog)
self.n_obs = len(endog) if obs_equations is None else len(obs_equations)
self.n_param = None if estimate_params is None else len(estimate_params)
# TODO: a consistency check between obs_data and obs_equations could go here
if (obs_equations is None) and (obs_data is None):
generate_obs = True
else:
generate_obs = False
self._get_jacobians(generate_obs=generate_obs)
if estimate_params is None:
# If no parameters are going to be estimated, calibrate the whole model
self.Gamma0, self.Gamma1, self.Psi, self.Pi, self.C_in, self.obs_matrix, self.obs_offset = \
self._eval_matrix(calib_dict, to_array=True)
self.G1, self.C_out, self.impact, self.fmat, self.fwt, self.ywt, self.gev, self.eu, self.loose = \
gensys(self.Gamma0, self.Gamma1, self.C_in, self.Psi, self.Pi)
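# `gensys` implements Sims' (2002) solution method for linear rational-expectations
# models; `eu` flags existence and uniqueness of the solution.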
# TODO assert that there are no symbols left
self.has_solution = True
else:
# Otherwise, calibrate only the required parameters
self.Gamma0, self.Gamma1, self.Psi, self.Pi, self.C_in, self.obs_matrix, self.obs_offset = \
self._eval_matrix(calib_dict, to_array=False)
self.prior_info = self._get_prior_info()
def simulate(self, n_obs=100, random_seed=None):
"""
Given a calibrated or estimated model, simulates values of the endogenous variables based on random samples of
the exogenous shocks.
:param n_obs: number of observation in the time dimension.
:param random_seed: random seed for the simulation.
:return: pandas DataFrame. 'df_obs' contains the simualtions for the observable variables. 'df_state' contains
the simulations for the state/endogenous variables.
"""
# TODO: if there are no observation equations, return None for 'df_obs'
assert self.has_solution, "No solution was generated yet"
if not (random_seed is None):
seed(random_seed)
kf = KalmanFilter(self.G1, self.obs_matrix, self.impact @ self.impact.T, None,
self.C_out.reshape(self.n_state), self.obs_offset.reshape(self.n_obs))
simul_data = kf.sample(n_obs)
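# pykalman's sample() returns a (states, observations) pair.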
state_names = [str(s) for s in list(self.endog)]
obs_names = [f'obs {i+1}' for i in range(self.obs_matrix.shape[0])]
df_obs = pd.DataFrame(data=simul_data[1], columns=obs_names)
df_states = pd.DataFrame(data=simul_data[0], columns=state_names)
return df_obs, df_states
def estimate(self, file_path, nsim=1000, ck=0.2):
"""
Run the MCMC estimation.
:param file_path: str. Save path where the MCMC chains are saved. The file format is HDF5 (.h5). This file
format gets very heavy but has very fast read/write speed. If the file already exists, the
estimation will resume from these previously simulated chains.
:param nsim: Length of the MCMC chains to be generated. If the chains are already stable, this is the number of
draws from the posterior distribution.
:param ck: float. Scaling factor of the hessian matrix of the mode of the posterior distribution, which is used
as the covariance matrix for the MCMC algorithm. Bayesian literature says this value needs to be
calibrated in order to achieve your desired acceptance rate from the posterior draws.
:return: the 'chains' attribute of this DSGE instance is generated.
"""
try:
df_chains = pd.read_hdf(file_path, key='chains')
sigmak = pd.read_hdf(file_path, key='sigmak')
start = df_chains.index[-1]
except FileNotFoundError:
def obj_func(theta_irr):
theta_irr = {k: v for k, v in zip(self.params, theta_irr)}
theta_res = self._irr2res(theta_irr)
return -1 * self._calc_posterior(theta_res)
theta_res0 = {k: v for k, v in zip(self.params, self.prior_info['mean'].values)}
theta_irr0 = self._res2irr(theta_res0)
theta_irr0 = array(list(theta_irr0.values()))
# Optimization - SciPy minimize
res = minimize(obj_func, theta_irr0, options={'disp': True}, method='BFGS')
theta_mode_irr = {k: v for k, v in zip(self.params, res.x)}
theta_mode_res = self._irr2res(theta_mode_irr)
sigmak = ck * res.hess_inv
if self.verbose:
print('===== Posterior Mode =====')
print(theta_mode_res, '\n')
print('===== MH jump covariance =====')
print(sigmak, '\n')
print('===== Eigenvalues of MH jump covariance =====')
print(eig(sigmak)[0], '\n')
# Optimization - Basinhoping
# res = basinhopping(obj_func, theta_irr0)
# theta_mode_irr = {k: v for k, v in zip(self.params, res.x)}
# theta_mode_res = self._irr2res(theta_mode_irr)
# sigmak = ck * res.hess_inv
# Overrides the result of the optimization
# theta_mode_res = self.prior_info['mean']
# sigmak = ck * eye(self.n_param)
df_chains = pd.DataFrame(columns=[str(p) for p in list(self.params)], index=range(nsim))
df_chains.loc[0] = list(theta_mode_res.values())
start = 0
# Metropolis-Hastings
muk = zeros(self.n_param)
accepted = 0
for ii in tqdm(range(start + 1, start+nsim), 'Metropolis-Hastings'):
theta1 = {k: v for k, v in zip(self.params, df_chains.loc[ii - 1].values)}
pos1 = self._calc_posterior(theta1)
omega1 = self._res2irr(theta1)
omega2 = array(list(omega1.values())) + multivariate_normal(muk, sigmak)
omega2 = {k: v for k, v in zip(self.params, omega2)}
theta2 = self._irr2res(omega2)
pos2 = self._calc_posterior(theta2)
ratio = exp(pos2 - pos1)
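# Random-walk Metropolis accept/reject step: accept the proposal with
# probability min(1, ratio).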
if ratio > rand(1)[0]:
accepted += 1
df_chains.loc[ii] = list(theta2.values())
else:
df_chains.loc[ii] = df_chains.loc[ii - 1]
if ii % 100 == 0:
store = pd.HDFStore(file_path)
store['chains'] = df_chains
store['sigmak'] = pd.DataFrame(data=sigmak)
store.close()
store = pd.HDFStore(file_path)
store['chains'] = df_chains
store['sigmak'] = | pd.DataFrame(data=sigmak) | pandas.DataFrame |